[Python-checkins] r70972 - in sandbox/trunk/refactor_pkg: 2to3 3to2 HACKING README TODO diff_from_r70785.diff example.py lib2to3 lib2to3/Grammar.txt lib2to3/PatternGrammar.txt lib2to3/__init__.py lib2to3/fixes lib2to3/fixes/__init__.py lib2to3/pgen2 lib2to3/pgen2/__init__.py lib2to3/tests lib2to3/tests/__init__.py lib2to3/tests/data lib2to3/tests/data/README lib2to3/tests/data/fixers lib2to3/tests/data/fixers/bad_order.py lib2to3/tests/data/fixers/myfixes lib2to3/tests/data/fixers/myfixes/__init__.py lib2to3/tests/data/fixers/myfixes/fix_explicit.py lib2to3/tests/data/fixers/myfixes/fix_first.py lib2to3/tests/data/fixers/myfixes/fix_last.py lib2to3/tests/data/fixers/myfixes/fix_parrot.py lib2to3/tests/data/fixers/myfixes/fix_preorder.py lib2to3/tests/data/fixers/no_fixer_cls.py lib2to3/tests/data/fixers/parrot_example.py lib2to3/tests/data/infinite_recursion.py lib2to3/tests/data/py2_test_grammar.py lib2to3/tests/data/py3_test_grammar.py lib2to3/tests/pytree_idempotency.py lib2to3/tests/support.py lib2to3/tests/test_all_fixers.py lib2to3/tests/test_fixers.py lib2to3/tests/test_parser.py lib2to3/tests/test_pytree.py lib2to3/tests/test_refactor.py lib2to3/tests/test_util.py refactor refactor/Grammar.txt refactor/PatternGrammar.txt refactor/__init__.py refactor/fixer_base.py refactor/fixer_util.py refactor/fixes refactor/fixes/__init__.py refactor/fixes/fixer_common.py refactor/fixes/from2 refactor/fixes/from2/__init__.py refactor/fixes/from2/fix_apply.py refactor/fixes/from2/fix_basestring.py refactor/fixes/from2/fix_buffer.py refactor/fixes/from2/fix_callable.py refactor/fixes/from2/fix_dict.py refactor/fixes/from2/fix_except.py refactor/fixes/from2/fix_exec.py refactor/fixes/from2/fix_execfile.py refactor/fixes/from2/fix_filter.py refactor/fixes/from2/fix_funcattrs.py refactor/fixes/from2/fix_future.py refactor/fixes/from2/fix_getcwdu.py refactor/fixes/from2/fix_has_key.py refactor/fixes/from2/fix_idioms.py refactor/fixes/from2/fix_import.py refactor/fixes/from2/fix_imports.py refactor/fixes/from2/fix_imports2.py refactor/fixes/from2/fix_input.py refactor/fixes/from2/fix_intern.py refactor/fixes/from2/fix_isinstance.py refactor/fixes/from2/fix_itertools.py refactor/fixes/from2/fix_itertools_imports.py refactor/fixes/from2/fix_long.py refactor/fixes/from2/fix_map.py refactor/fixes/from2/fix_metaclass.py refactor/fixes/from2/fix_methodattrs.py refactor/fixes/from2/fix_ne.py refactor/fixes/from2/fix_next.py refactor/fixes/from2/fix_nonzero.py refactor/fixes/from2/fix_numliterals.py refactor/fixes/from2/fix_paren.py refactor/fixes/from2/fix_print.py refactor/fixes/from2/fix_raise.py refactor/fixes/from2/fix_raw_input.py refactor/fixes/from2/fix_reduce.py refactor/fixes/from2/fix_renames.py refactor/fixes/from2/fix_repr.py refactor/fixes/from2/fix_set_literal.py refactor/fixes/from2/fix_standarderror.py refactor/fixes/from2/fix_sys_exc.py refactor/fixes/from2/fix_throw.py refactor/fixes/from2/fix_tuple_params.py refactor/fixes/from2/fix_types.py refactor/fixes/from2/fix_unicode.py refactor/fixes/from2/fix_urllib.py refactor/fixes/from2/fix_ws_comma.py re

paul.kippes python-checkins at python.org
Wed Apr 1 21:02:12 CEST 2009


Author: paul.kippes
Date: Wed Apr  1 21:02:05 2009
New Revision: 70972

Log:
PyCon 2009 sprint work containing 2to3 refactoring; based on r70785 of sandbox/2to3. Work in progress...

Added:
   sandbox/trunk/refactor_pkg/2to3   (contents, props changed)
   sandbox/trunk/refactor_pkg/3to2   (contents, props changed)
   sandbox/trunk/refactor_pkg/HACKING
   sandbox/trunk/refactor_pkg/README
   sandbox/trunk/refactor_pkg/TODO
   sandbox/trunk/refactor_pkg/diff_from_r70785.diff
   sandbox/trunk/refactor_pkg/example.py
   sandbox/trunk/refactor_pkg/lib2to3/
   sandbox/trunk/refactor_pkg/lib2to3/Grammar.txt
   sandbox/trunk/refactor_pkg/lib2to3/PatternGrammar.txt
   sandbox/trunk/refactor_pkg/lib2to3/__init__.py
   sandbox/trunk/refactor_pkg/lib2to3/fixes/
   sandbox/trunk/refactor_pkg/lib2to3/fixes/__init__.py
   sandbox/trunk/refactor_pkg/lib2to3/pgen2/
   sandbox/trunk/refactor_pkg/lib2to3/pgen2/__init__.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/
   sandbox/trunk/refactor_pkg/lib2to3/tests/__init__.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/README
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/bad_order.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/__init__.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_explicit.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_first.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_last.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_parrot.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_preorder.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/no_fixer_cls.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/parrot_example.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/infinite_recursion.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/py2_test_grammar.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/data/py3_test_grammar.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/pytree_idempotency.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/lib2to3/tests/support.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/test_all_fixers.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/test_fixers.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/lib2to3/tests/test_parser.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/test_pytree.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/lib2to3/tests/test_refactor.py
   sandbox/trunk/refactor_pkg/lib2to3/tests/test_util.py
   sandbox/trunk/refactor_pkg/refactor/
   sandbox/trunk/refactor_pkg/refactor/Grammar.txt
   sandbox/trunk/refactor_pkg/refactor/PatternGrammar.txt
   sandbox/trunk/refactor_pkg/refactor/__init__.py
   sandbox/trunk/refactor_pkg/refactor/fixer_base.py
   sandbox/trunk/refactor_pkg/refactor/fixer_util.py
   sandbox/trunk/refactor_pkg/refactor/fixes/
   sandbox/trunk/refactor_pkg/refactor/fixes/__init__.py
   sandbox/trunk/refactor_pkg/refactor/fixes/fixer_common.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/__init__.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_apply.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_basestring.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_buffer.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_callable.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_dict.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_except.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_exec.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_execfile.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_filter.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_funcattrs.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_future.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_getcwdu.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_has_key.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_idioms.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_import.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_imports.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_imports2.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_input.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_intern.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_isinstance.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_itertools.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_itertools_imports.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_long.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_map.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_metaclass.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_methodattrs.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_ne.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_next.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_nonzero.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_numliterals.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_paren.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_print.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_raise.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_raw_input.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_reduce.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_renames.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_repr.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_set_literal.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_standarderror.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_sys_exc.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_throw.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_tuple_params.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_types.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_unicode.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_urllib.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_ws_comma.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_xrange.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_xreadlines.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_zip.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from3/
   sandbox/trunk/refactor_pkg/refactor/fixes/from3/__init__.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from3/fix_range.py
   sandbox/trunk/refactor_pkg/refactor/fixes/from3/fix_renames.py
   sandbox/trunk/refactor_pkg/refactor/main.py
   sandbox/trunk/refactor_pkg/refactor/patcomp.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/
   sandbox/trunk/refactor_pkg/refactor/pgen2/__init__.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/conv.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/driver.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/grammar.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/literals.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/parse.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/pgen.py
   sandbox/trunk/refactor_pkg/refactor/pgen2/token.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/refactor/pgen2/tokenize.py
   sandbox/trunk/refactor_pkg/refactor/pygram.py
   sandbox/trunk/refactor_pkg/refactor/pytree.py
   sandbox/trunk/refactor_pkg/refactor/refactor.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/refactor/tests/
   sandbox/trunk/refactor_pkg/refactor/tests/__init__.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/
   sandbox/trunk/refactor_pkg/refactor/tests/data/README
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/bad_order.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/__init__.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_explicit.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_first.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_last.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_parrot.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_preorder.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/no_fixer_cls.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/parrot_example.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/infinite_recursion.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/py2_test_grammar.py
   sandbox/trunk/refactor_pkg/refactor/tests/data/py3_test_grammar.py
   sandbox/trunk/refactor_pkg/refactor/tests/pytree_idempotency.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/refactor/tests/support.py
   sandbox/trunk/refactor_pkg/refactor/tests/test_all_fixers.py
   sandbox/trunk/refactor_pkg/refactor/tests/test_fixers.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/refactor/tests/test_parser.py
   sandbox/trunk/refactor_pkg/refactor/tests/test_pytree.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/refactor/tests/test_refactor.py
   sandbox/trunk/refactor_pkg/refactor/tests/test_util.py
   sandbox/trunk/refactor_pkg/scripts/
   sandbox/trunk/refactor_pkg/scripts/benchmark.py
   sandbox/trunk/refactor_pkg/scripts/find_pattern.py   (contents, props changed)
   sandbox/trunk/refactor_pkg/setup.py
   sandbox/trunk/refactor_pkg/test.py   (contents, props changed)

Added: sandbox/trunk/refactor_pkg/2to3
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/2to3	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+import lib2to3
+import sys
+import os
+
+sys.exit(lib2to3.main.main("refactor.fixes.from2"))

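A note on the launcher above: ``lib2to3.main.main(...)`` only resolves if
lib2to3/__init__.py (which, in this revision, is just ``from refactor import
*``) happens to bind a ``main`` attribute on the package.  A minimal sketch
of an equivalent launcher that imports the entry point explicitly, mirroring
the 3to2 launcher below, avoids that assumption:

    #!/usr/bin/env python
    # Sketch only: same behavior as the 2to3 launcher above, but with an
    # explicit import of main from the refactor package instead of relying
    # on lib2to3's star-import to have re-exported it.
    import sys
    from refactor.main import main

    sys.exit(main("refactor.fixes.from2"))
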
Added: sandbox/trunk/refactor_pkg/3to2
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/3to2	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+from refactor.main import main
+import sys
+import os
+
+sys.exit(main("refactor.fixes.from3"))

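The two launchers differ only in the fixer package they hand to main(); the
entry point itself is generic.  A hypothetical third tool could be wired up
the same way by pointing main() at its own package of fix_*.py modules (the
package name below is made up for illustration):

    #!/usr/bin/env python
    # Hypothetical launcher: any package laid out like refactor/fixes/from2
    # (an __init__.py plus fix_*.py modules) can be handed to main().
    import sys
    from refactor.main import main

    sys.exit(main("mytool.fixes"))  # "mytool.fixes" is a made-up package
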
Added: sandbox/trunk/refactor_pkg/HACKING
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/HACKING	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,74 @@
+Running tests:
+
+    * 2to3 and 3to2 will fail with Python 2.5, but should work w/ Python trunk
+      $ python test.py --help
+
+    * test lib2to3
+      $ python test.py --base lib2to3
+
+    * test refactor
+      $ python test.py
+
+
+Tips/tricks/hints for writing new fixers:
+
+    * Don't write your own PATTERN from scratch; that's what
+      scripts/find_pattern.py is for.
+
+      e.g.
+      ./scripts/find_pattern.py 
+
+      This will present choices of tokens to parse.
+      Press Enter to skip a choice, or any other key to see the grammar.
+
+      $ ./scripts/find_pattern.py "print('hello, world')"
+      "('hello, world')"
+
+      "print('hello, world')"
+      .
+      print_stmt< 'print' atom< '(' "'hello, world'" ')' > >
+    
+    * If your fixer works by changing a node's children list or a leaf's value,
+      be sure to call the node/leaf's changed() method. This makes sure the
+      main script will recognize that the tree has changed.
+
+
+Putting 2to3 to work somewhere else:
+
+    * By default, 2to3 uses a merger of Python 2.x and Python 3's grammars.  If
+      you want to support a different grammar, just replace the Grammar.txt file
+      with Grammar/Grammar from your chosen Python version.
+
+    * The real heart of 2to3 is the concrete syntax tree parser in pgen2; this
+      chunk of the system is suitable for a wide range of applications that
+      require CST transformation. All that's required is to rip off the fixer
+      layer and replace it with something else that walks the tree. One
+      application would be a tool to check/enforce style guidelines; this could
+      leverage 90% of the existing infrastructure with primarily cosmetic
+      changes (e.g., fixes/fix_*.py -> styles/style_*.py).
+
+
+TODO
+
+    Simple:
+    #######
+    
+    * Refactor common code out of fixes/fix_*.py into fixer_util (on-going).
+
+    * Document how to write fixers.
+
+
+    Complex:
+    ########
+
+    * Come up with a scheme to hide the details of suite indentation (some kind
+      of custom pytree node for suites, probably). This will automatically
+      reindent all code with spaces, tied into a refactor.py flag that allows
+      you to specify the indent level.
+
+    * Remove the need to explicitly assign a node's parent attribute.  This
+      could be done away with by a magic children list.
+
+    * Import statements are complicated and a pain to handle, and there are many
+      fixers that manipulate them. It would be nice to have a little API for
+      manipulating imports in fixers.

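To make the two fixer-writing tips in HACKING concrete, here is a minimal
fixer sketch (the fixer and all of its names are hypothetical, not part of
this commit): the PATTERN is the kind of expression scripts/find_pattern.py
suggests, and changed() is called after the in-place edit, as the tips
require:

    # refactor/fixes/from2/fix_spam.py (hypothetical) -- renames calls of
    # spam(...) to eggs(...).  The class must be named FixSpam so the fixer
    # loader can derive it from the module name fix_spam.
    from refactor import fixer_base

    class FixSpam(fixer_base.BaseFix):
        # A pattern of the sort scripts/find_pattern.py produces for
        # "spam(1)", generalized over the arguments.
        PATTERN = "power< name='spam' trailer< '(' any* ')' > >"

        def transform(self, node, results):
            name = results["name"]
            name.value = "eggs"   # edit the leaf in place...
            name.changed()        # ...and mark the tree as modified
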
Added: sandbox/trunk/refactor_pkg/README
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/README	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,260 @@
+Abstract
+========
+
+The refactor package -- 2to3 and back again -- is a fork of lib2to3.
+
+lib2to3:
+
+A refactoring tool for converting Python 2.x code to 3.0.
+
+This is a work in progress! Bugs should be reported to http://bugs.python.org/
+under the "2to3" category.
+
+
+General usage
+=============
+
+Run ``./2to3`` to convert stdin (``-``), files or directories given as
+arguments.  By default, the tool outputs a unified diff-formatted patch on
+standard output and a "what was changed" summary on standard error, but the
+``-w`` option can be given to write back converted files, creating
+``.bak``-named backup files.
+
+2to3 must be run with at least Python 2.5. The intended path for migrating to
+Python 3.x is to first migrate to 2.6 (in order to take advantage of Python
+2.6's runtime compatibility checks).
+
+
+Files
+=====
+
+README                        - this file
+lib2to3/refactor.py           - main program; use this to convert files or directory trees
+test.py                       - runs all unittests for 2to3
+lib2to3/patcomp.py            - pattern compiler
+lib2to3/pytree.py             - parse tree nodes (not specific to Python, despite the name!)
+lib2to3/pygram.py             - code specific to the Python grammar
+example.py                    - example input for play.py and fix_*.py
+find_pattern.py               - script to help determine the PATTERN for a new fix
+lib2to3/Grammar.txt           - Python grammar input (accepts 2.x and 3.x syntax)
+lib2to3/Grammar.pickle        - pickled grammar tables (generated file, not in subversion)
+lib2to3/PatternGrammar.txt    - grammar for the pattern language used by patcomp.py
+lib2to3/PatternGrammar.pickle - pickled pattern grammar tables (generated file)
+lib2to3/pgen2/                - Parser generator and driver ([1]_, [2]_)
+lib2to3/fixes/                - Individual transformations
+lib2to3/tests/                - Test files for pytree, fixers, grammar, etc
+
+
+Capabilities
+============
+
+A quick run-through of 2to3's current fixers:
+
+* **fix_apply** - convert apply() calls to real function calls.
+
+* **fix_callable** - converts callable(obj) into hasattr(obj, '__call__').
+
+* **fix_dict** - fix up dict.keys(), .values(), .items() and their iterator
+  versions.
+  
+* **fix_except** - adjust "except" statements to Python 3 syntax (PEP 3110).
+
+* **fix_exec** - convert "exec" statements to exec() function calls.
+
+* **fix_execfile** - execfile(filename, ...) -> exec(open(filename).read())
+
+* **fix_filter** - changes filter(F, X) into list(filter(F, X)).
+
+* **fix_funcattrs** - fix function attribute names (f.func_x -> f.__x__).
+
+* **fix_has_key** - "d.has_key(x)" -> "x in d".
+
+* **fix_idioms** - convert type(x) == T to isinstance(x, T), "while 1:" to
+  "while True:", plus others. This fixer must be explicitly requested
+  with "-f idioms".
+
+* **fix_imports** - Fix (some) incompatible imports.
+
+* **fix_imports2** - Fix (some) incompatible imports that must run after
+  **fix_imports**.
+
+* **fix_input** - "input()" -> "eval(input())" (PEP 3111).
+
+* **fix_intern** - "intern(x)" -> "sys.intern(x)".
+
+* **fix_long** - remove all usage of explicit longs in favor of ints.
+
+* **fix_map** - generally changes map(F, ...) into list(map(F, ...)).
+
+* **fix_ne** - convert the "<>" operator to "!=".
+
+* **fix_next** - fixer for it.next() -> next(it) (PEP 3114).
+
+* **fix_nonzero** - convert __nonzero__() methods to __bool__() methods.
+
+* **fix_numliterals** - tweak certain numeric literals to be 3.0-compliant.
+
+* **fix_paren** - Add parentheses to places where they are needed in list
+  comprehensions and generator expressions.
+
+* **fix_print** - convert "print" statements to print() function calls.
+
+* **fix_raise** - convert "raise" statements to Python 3 syntax (PEP 3109).
+
+* **fix_raw_input** - "raw_input()" -> "input()" (PEP 3111).
+
+* **fix_repr** - swap backticks for repr() calls.
+
+* **fix_standarderror** - StandardError -> Exception.
+
+* **fix_sys_exc** - Converts "sys.exc_info", "sys.exc_type", and
+  "sys.exc_value" to sys.exc_info().
+
+* **fix_throw** - fix generator.throw() calls to be 3.0-compliant (PEP 3109).
+
+* **fix_tuple_params** - remove tuple parameters from function, method and
+  lambda declarations (PEP 3113).
+  
+* **fix_unicode** - convert, e.g., u"..." to "...", unicode(x) to str(x), etc.
+
+* **fix_urllib** - Fix imports for urllib and urllib2.
+  
+* **fix_xrange** - "xrange()" -> "range()".
+
+* **fix_xreadlines** - "for x in f.xreadlines():" -> "for x in f:". Also,
+  "g(f.xreadlines)" -> "g(f.__iter__)".
+
+* **fix_metaclass** - move __metaclass__ = M to class X(metaclass=M)
+
+
+Limitations
+===========
+
+General Limitations
+-------------------
+
+* In general, fixers that convert a function or method call will not detect
+  something like ::
+
+      a = apply
+      a(f, *args)
+    
+  or ::
+
+      m = d.has_key
+      if m(5):
+          ...
+        
+* Fixers that look for attribute references will not detect when getattr() or
+  setattr() is used to access those attributes.
+  
+* The contents of eval() calls and "exec" statements will not be checked by
+  2to3.
+
+        
+Caveats for Specific Fixers
+---------------------------
+
+fix_except
+''''''''''
+
+"except" statements like ::
+
+    except Exception, (a, b):
+        ...
+
+are not fixed up. The ability to treat exceptions as sequences is being
+removed in Python 3, so there is no straightforward, automatic way to
+adjust these statements.
+
+This is seen frequently when dealing with OSError.
+
+
+fix_filter
+''''''''''
+
+The transformation is not correct if the original code depended on
+filter(F, X) returning a string if X is a string (or a tuple if X is a
+tuple, etc).  That would require type inference, which we don't do.  Python
+2.6's Python 3 compatibility mode should be used to detect such cases.
+
+
+fix_has_key
+'''''''''''
+
+While the primary target of this fixer is dict.has_key(), the
+fixer will change any has_key() method call, regardless of what class it
+belongs to. Anyone using non-dictionary classes with has_key() methods is
+advised to pay close attention when using this fixer.
+
+
+fix_map
+'''''''
+
+The transformation is not correct if the original code depended on
+map(F, X, Y, ...) to go on until the longest argument is exhausted,
+substituting None for missing values -- like zip(), it now stops as
+soon as the shortest argument is exhausted.
+
+
+fix_raise
+'''''''''
+
+"raise E, V" will be incorrectly translated if V is an exception instance.
+The correct Python 3 idiom is ::
+   
+    raise E from V
+        
+but since we can't detect instance-hood by syntax alone and since any client
+code would have to be changed as well, we don't automate this.
+
+Another translation problem is this: ::
+
+    t = ((E, E2), E3)
+    raise t
+    
+2to3 has no way of knowing that t is a tuple, and so this code will raise an
+exception at runtime since the ability to raise tuples is going away.
+
+
+Notes
+=====
+
+.. [1] I modified tokenize.py to yield a NL pseudo-token for backslash
+       continuations, so the original source can be reproduced exactly.  The
+       modified version can be found at lib2to3/pgen2/tokenize.py.
+
+.. [2] I developed pgen2 while I was at Elemental Security.  I modified
+       it while at Google to suit the needs of this refactoring tool.
+
+
+Development
+===========
+
+The HACKING file has a list of TODOs -- some simple, some complex -- that would
+make good introductions for anyone new to 2to3.
+
+
+Licensing
+=========
+
+The original pgen2 module is copyrighted by Elemental Security.  All
+new code I wrote specifically for this tool is copyrighted by Google.
+New code by others is copyrighted by the respective authors.  All code
+(whether by me or by others) is licensed to the PSF under a contributor
+agreement.
+
+--Guido van Rossum
+
+
+All code I wrote specifically for this tool before 9 April 2007 is
+copyrighted by me. All new code I wrote specifically for this tool after
+9 April 2007 is copyrighted by Google. Regardless, my contributions are
+licensed to the PSF under a contributor agreement.
+
+--Collin Winter
+
+All of my contributions are copyrighted by me and licensed to the PSF under the
+Python contributor agreement.
+
+--Benjamin Peterson

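The capabilities listed in the README can also be driven from Python rather
than through the 2to3 script.  A hedged sketch, assuming this fork keeps
lib2to3's RefactoringTool API (refactor/refactor.py in the file list above):

    # Sketch: refactor a code string in memory with the from2 fixers.
    from refactor.refactor import RefactoringTool, get_fixers_from_package

    fixers = get_fixers_from_package("refactor.fixes.from2")
    tool = RefactoringTool(fixers)
    # The grammar accepts 2.x syntax; fix_print rewrites the statement.
    tree = tool.refactor_string("print 'hello'\n", "<example>")
    print(str(tree))  # -> print('hello')
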
Added: sandbox/trunk/refactor_pkg/TODO
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/TODO	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,9 @@
+2.6:
+    byte lit. without b
+    unicode with u
+    from __future__ import print_function
+
+2.5:
+    from __future__ import with_statement
+    exceptions
+    print

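For reference, the TODO entries above name real __future__ features; a module
emitted for the 2.5/2.6 targets would start roughly like this (a sketch, not
output of this revision):

    # Python 2.6 target: print() as a function (PEP 3105).
    from __future__ import print_function
    # Python 2.5 target: the with statement (PEP 343).
    from __future__ import with_statement

    with open("example.py") as f:
        print(f.readline())
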
Added: sandbox/trunk/refactor_pkg/diff_from_r70785.diff
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/diff_from_r70785.diff	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,52557 @@
+diff -r 531f2e948299 .hgignore
+--- a/.hgignore	Mon Mar 30 20:02:09 2009 -0500
++++ b/.hgignore	Wed Apr 01 13:59:47 2009 -0500
+@@ -4,10 +4,11 @@
+ #    * hg add
+ # note that `hg add *` will add files even if they match in this file.
+ 
+-syntax: glob
+-*.pickle
++# syntax: glob
+ 
+ syntax: regexp
++\.out$
++\.pickle$
+ \.log$
+ ~$
+ ^bin/*
+@@ -28,7 +29,7 @@
+ ^\.#
+ (^|/)RCS($|/)
+ ,v$
+-(^|/)\.svn($|/)
++# (^|/)\.svn($|/)
+ (^|/)\.bzr($|/)
+ \_darcs$
+ (^|/)SCCS($|/)
+diff -r 531f2e948299 .svn/entries
+--- a/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ b/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,7 @@
+ 9
+ 
+ dir
+-70785
++70822
+ http://svn.python.org/projects/sandbox/trunk/2to3
+ http://svn.python.org/projects
+ 
+diff -r 531f2e948299 2to3
+--- a/2to3	Mon Mar 30 20:02:09 2009 -0500
++++ b/2to3	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,6 +1,6 @@
+ #!/usr/bin/env python
+-from lib2to3.main import main
++import lib2to3
+ import sys
+ import os
+ 
+-sys.exit(main("lib2to3.fixes"))
++sys.exit(lib2to3.main.main("refactor.fixes.from2"))
+diff -r 531f2e948299 3to2
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/3to2	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++#!/usr/bin/env python
++from refactor.main import main
++import sys
++import os
++
++sys.exit(main("refactor.fixes.from3"))
+diff -r 531f2e948299 HACKING
+--- a/HACKING	Mon Mar 30 20:02:09 2009 -0500
++++ b/HACKING	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,32 @@
++Running tests:
++
++    * 2to3 and 3to2 will fail with Python 2.5, but should work w/ Python trunk
++      $ python test.py --help
++
++    * test lib2to3
++      $ python test.py --base lib2to3
++
++    * test refactor
++      $ python test.py
++
++
+ Tips/tricks/hints for writing new fixers:
+ 
+     * Don't write your own PATTERN from scratch; that's what
+       scripts/find_pattern.py is for.
++
++      e.g.
++      ./scripts/find_pattern.py 
++
++      This will present choices of tokens to parse.
++      Press Enter to skip a choice, or any other key to see the grammar.
++
++      $ ./scripts/find_pattern.py "print('hello, world')"
++      "('hello, world')"
++
++      "print('hello, world')"
++      .
++      print_stmt< 'print' atom< '(' "'hello, world'" ')' > >
+     
+     * If your fixer works by changing a node's children list or a leaf's value,
+       be sure to call the node/leaf's changed() method. This makes sure the
+diff -r 531f2e948299 README
+--- a/README	Mon Mar 30 20:02:09 2009 -0500
++++ b/README	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,6 +1,10 @@
+ Abstract
+ ========
+ 
++The refactor package -- 2to3 and back again -- is a fork of lib2to3.
++
++lib2to3:
++
+ A refactoring tool for converting Python 2.x code to 3.0.
+ 
+ This is a work in progress! Bugs should be reported to http://bugs.python.org/
+diff -r 531f2e948299 TODO
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/TODO	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++2.6:
++    byte lit. without b
++    unicode with u
++    from __future__ import print_function
++
++2.5:
++    from __future__ import with_statement
++    exceptions
++    print
+diff -r 531f2e948299 lib2to3/.svn/entries
+--- a/lib2to3/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,7 @@
+ 9
+ 
+ dir
+-70785
++70822
+ http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3
+ http://svn.python.org/projects
+ 
+diff -r 531f2e948299 lib2to3/Grammar2.7.0.alpha.0.pickle
+Binary file lib2to3/Grammar2.7.0.alpha.0.pickle has changed
+diff -r 531f2e948299 lib2to3/PatternGrammar2.7.0.alpha.0.pickle
+Binary file lib2to3/PatternGrammar2.7.0.alpha.0.pickle has changed
+diff -r 531f2e948299 lib2to3/__init__.py
+--- a/lib2to3/__init__.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,1 +1,1 @@
+-#empty
++from refactor import *
+diff -r 531f2e948299 lib2to3/fixer_base.py
+--- a/lib2to3/fixer_base.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,178 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Base class for fixers (optional, but recommended)."""
+-
+-# Python imports
+-import logging
+-import itertools
+-
+-# Local imports
+-from .patcomp import PatternCompiler
+-from . import pygram
+-from .fixer_util import does_tree_import
+-
+-class BaseFix(object):
+-
+-    """Optional base class for fixers.
+-
+-    The subclass name must be FixFooBar where FooBar is the result of
+-    removing underscores and capitalizing the words of the fix name.
+-    For example, the class name for a fixer named 'has_key' should be
+-    FixHasKey.
+-    """
+-
+-    PATTERN = None  # Most subclasses should override with a string literal
+-    pattern = None  # Compiled pattern, set by compile_pattern()
+-    options = None  # Options object passed to initializer
+-    filename = None # The filename (set by set_filename)
+-    logger = None   # A logger (set by set_filename)
+-    numbers = itertools.count(1) # For new_name()
+-    used_names = set() # A set of all used NAMEs
+-    order = "post" # Does the fixer prefer pre- or post-order traversal
+-    explicit = False # Is this ignored by refactor.py -f all?
+-    run_order = 5   # Fixers will be sorted by run order before execution
+-                    # Lower numbers will be run first.
+-
+-    # Shortcut for access to Python grammar symbols
+-    syms = pygram.python_symbols
+-
+-    def __init__(self, options, log):
+-        """Initializer.  Subclass may override.
+-
+-        Args:
+-            options: an dict containing the options passed to RefactoringTool
+-            that could be used to customize the fixer through the command line.
+-            log: a list to append warnings and other messages to.
+-        """
+-        self.options = options
+-        self.log = log
+-        self.compile_pattern()
+-
+-    def compile_pattern(self):
+-        """Compiles self.PATTERN into self.pattern.
+-
+-        Subclass may override if it doesn't want to use
+-        self.{pattern,PATTERN} in .match().
+-        """
+-        if self.PATTERN is not None:
+-            self.pattern = PatternCompiler().compile_pattern(self.PATTERN)
+-
+-    def set_filename(self, filename):
+-        """Set the filename, and a logger derived from it.
+-
+-        The main refactoring tool should call this.
+-        """
+-        self.filename = filename
+-        self.logger = logging.getLogger(filename)
+-
+-    def match(self, node):
+-        """Returns match for a given parse tree node.
+-
+-        Should return a true or false object (not necessarily a bool).
+-        It may return a non-empty dict of matching sub-nodes as
+-        returned by a matching pattern.
+-
+-        Subclass may override.
+-        """
+-        results = {"node": node}
+-        return self.pattern.match(node, results) and results
+-
+-    def transform(self, node, results):
+-        """Returns the transformation for a given parse tree node.
+-
+-        Args:
+-          node: the root of the parse tree that matched the fixer.
+-          results: a dict mapping symbolic names to part of the match.
+-
+-        Returns:
+-          None, or a node that is a modified copy of the
+-          argument node.  The node argument may also be modified in-place to
+-          effect the same change.
+-
+-        Subclass *must* override.
+-        """
+-        raise NotImplementedError()
+-
+-    def new_name(self, template="xxx_todo_changeme"):
+-        """Return a string suitable for use as an identifier
+-
+-        The new name is guaranteed not to conflict with other identifiers.
+-        """
+-        name = template
+-        while name in self.used_names:
+-            name = template + str(self.numbers.next())
+-        self.used_names.add(name)
+-        return name
+-
+-    def log_message(self, message):
+-        if self.first_log:
+-            self.first_log = False
+-            self.log.append("### In file %s ###" % self.filename)
+-        self.log.append(message)
+-
+-    def cannot_convert(self, node, reason=None):
+-        """Warn the user that a given chunk of code is not valid Python 3,
+-        but that it cannot be converted automatically.
+-
+-        First argument is the top-level node for the code in question.
+-        Optional second argument is why it can't be converted.
+-        """
+-        lineno = node.get_lineno()
+-        for_output = node.clone()
+-        for_output.set_prefix("")
+-        msg = "Line %d: could not convert: %s"
+-        self.log_message(msg % (lineno, for_output))
+-        if reason:
+-            self.log_message(reason)
+-
+-    def warning(self, node, reason):
+-        """Used for warning the user about possible uncertainty in the
+-        translation.
+-
+-        First argument is the top-level node for the code in question.
+-        Optional second argument is why it can't be converted.
+-        """
+-        lineno = node.get_lineno()
+-        self.log_message("Line %d: %s" % (lineno, reason))
+-
+-    def start_tree(self, tree, filename):
+-        """Some fixers need to maintain tree-wide state.
+-        This method is called once, at the start of tree fix-up.
+-
+-        tree - the root node of the tree to be processed.
+-        filename - the name of the file the tree came from.
+-        """
+-        self.used_names = tree.used_names
+-        self.set_filename(filename)
+-        self.numbers = itertools.count(1)
+-        self.first_log = True
+-
+-    def finish_tree(self, tree, filename):
+-        """Some fixers need to maintain tree-wide state.
+-        This method is called once, at the conclusion of tree fix-up.
+-
+-        tree - the root node of the tree to be processed.
+-        filename - the name of the file the tree came from.
+-        """
+-        pass
+-
+-
+-class ConditionalFix(BaseFix):
+-    """ Base class for fixers which not execute if an import is found. """
+-
+-    # This is the name of the import which, if found, will cause the test to be skipped
+-    skip_on = None
+-
+-    def start_tree(self, *args):
+-        super(ConditionalFix, self).start_tree(*args)
+-        self._should_skip = None
+-
+-    def should_skip(self, node):
+-        if self._should_skip is not None:
+-            return self._should_skip
+-        pkg = self.skip_on.split(".")
+-        name = pkg[-1]
+-        pkg = ".".join(pkg[:-1])
+-        self._should_skip = does_tree_import(pkg, name, node)
+-        return self._should_skip
+diff -r 531f2e948299 lib2to3/fixer_util.py
+--- a/lib2to3/fixer_util.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,425 +0,0 @@
+-"""Utility functions, node construction macros, etc."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .pgen2 import token
+-from .pytree import Leaf, Node
+-from .pygram import python_symbols as syms
+-from . import patcomp
+-
+-
+-###########################################################
+-### Common node-construction "macros"
+-###########################################################
+-
+-def KeywordArg(keyword, value):
+-    return Node(syms.argument,
+-                [keyword, Leaf(token.EQUAL, '='), value])
+-
+-def LParen():
+-    return Leaf(token.LPAR, "(")
+-
+-def RParen():
+-    return Leaf(token.RPAR, ")")
+-
+-def Assign(target, source):
+-    """Build an assignment statement"""
+-    if not isinstance(target, list):
+-        target = [target]
+-    if not isinstance(source, list):
+-        source.set_prefix(" ")
+-        source = [source]
+-
+-    return Node(syms.atom,
+-                target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
+-
+-def Name(name, prefix=None):
+-    """Return a NAME leaf"""
+-    return Leaf(token.NAME, name, prefix=prefix)
+-
+-def Attr(obj, attr):
+-    """A node tuple for obj.attr"""
+-    return [obj, Node(syms.trailer, [Dot(), attr])]
+-
+-def Comma():
+-    """A comma leaf"""
+-    return Leaf(token.COMMA, ",")
+-
+-def Dot():
+-    """A period (.) leaf"""
+-    return Leaf(token.DOT, ".")
+-
+-def ArgList(args, lparen=LParen(), rparen=RParen()):
+-    """A parenthesised argument list, used by Call()"""
+-    node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
+-    if args:
+-        node.insert_child(1, Node(syms.arglist, args))
+-    return node
+-
+-def Call(func_name, args=None, prefix=None):
+-    """A function call"""
+-    node = Node(syms.power, [func_name, ArgList(args)])
+-    if prefix is not None:
+-        node.set_prefix(prefix)
+-    return node
+-
+-def Newline():
+-    """A newline literal"""
+-    return Leaf(token.NEWLINE, "\n")
+-
+-def BlankLine():
+-    """A blank line"""
+-    return Leaf(token.NEWLINE, "")
+-
+-def Number(n, prefix=None):
+-    return Leaf(token.NUMBER, n, prefix=prefix)
+-
+-def Subscript(index_node):
+-    """A numeric or string subscript"""
+-    return Node(syms.trailer, [Leaf(token.LBRACE, '['),
+-                               index_node,
+-                               Leaf(token.RBRACE, ']')])
+-
+-def String(string, prefix=None):
+-    """A string leaf"""
+-    return Leaf(token.STRING, string, prefix=prefix)
+-
+-def ListComp(xp, fp, it, test=None):
+-    """A list comprehension of the form [xp for fp in it if test].
+-
+-    If test is None, the "if test" part is omitted.
+-    """
+-    xp.set_prefix("")
+-    fp.set_prefix(" ")
+-    it.set_prefix(" ")
+-    for_leaf = Leaf(token.NAME, "for")
+-    for_leaf.set_prefix(" ")
+-    in_leaf = Leaf(token.NAME, "in")
+-    in_leaf.set_prefix(" ")
+-    inner_args = [for_leaf, fp, in_leaf, it]
+-    if test:
+-        test.set_prefix(" ")
+-        if_leaf = Leaf(token.NAME, "if")
+-        if_leaf.set_prefix(" ")
+-        inner_args.append(Node(syms.comp_if, [if_leaf, test]))
+-    inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
+-    return Node(syms.atom,
+-                       [Leaf(token.LBRACE, "["),
+-                        inner,
+-                        Leaf(token.RBRACE, "]")])
+-
+-def FromImport(package_name, name_leafs):
+-    """ Return an import statement in the form:
+-        from package import name_leafs"""
+-    # XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
+-    #assert package_name == '.' or '.' not in package_name, "FromImport has "\
+-    #       "not been tested with dotted package names -- use at your own "\
+-    #       "peril!"
+-
+-    for leaf in name_leafs:
+-        # Pull the leaves out of their old tree
+-        leaf.remove()
+-
+-    children = [Leaf(token.NAME, 'from'),
+-                Leaf(token.NAME, package_name, prefix=" "),
+-                Leaf(token.NAME, 'import', prefix=" "),
+-                Node(syms.import_as_names, name_leafs)]
+-    imp = Node(syms.import_from, children)
+-    return imp
+-
+-
+-###########################################################
+-### Determine whether a node represents a given literal
+-###########################################################
+-
+-def is_tuple(node):
+-    """Does the node represent a tuple literal?"""
+-    if isinstance(node, Node) and node.children == [LParen(), RParen()]:
+-        return True
+-    return (isinstance(node, Node)
+-            and len(node.children) == 3
+-            and isinstance(node.children[0], Leaf)
+-            and isinstance(node.children[1], Node)
+-            and isinstance(node.children[2], Leaf)
+-            and node.children[0].value == "("
+-            and node.children[2].value == ")")
+-
+-def is_list(node):
+-    """Does the node represent a list literal?"""
+-    return (isinstance(node, Node)
+-            and len(node.children) > 1
+-            and isinstance(node.children[0], Leaf)
+-            and isinstance(node.children[-1], Leaf)
+-            and node.children[0].value == "["
+-            and node.children[-1].value == "]")
+-
+-
+-###########################################################
+-### Misc
+-###########################################################
+-
+-def parenthesize(node):
+-    return Node(syms.atom, [LParen(), node, RParen()])
+-
+-
+-consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
+-                       "min", "max"])
+-
+-def attr_chain(obj, attr):
+-    """Follow an attribute chain.
+-
+-    If you have a chain of objects where a.foo -> b, b.foo-> c, etc,
+-    use this to iterate over all objects in the chain. Iteration is
+-    terminated by getattr(x, attr) is None.
+-
+-    Args:
+-        obj: the starting object
+-        attr: the name of the chaining attribute
+-
+-    Yields:
+-        Each successive object in the chain.
+-    """
+-    next = getattr(obj, attr)
+-    while next:
+-        yield next
+-        next = getattr(next, attr)
+-
+-p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
+-        | comp_for< 'for' any 'in' node=any any* >
+-     """
+-p1 = """
+-power<
+-    ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
+-      'any' | 'all' | (any* trailer< '.' 'join' >) )
+-    trailer< '(' node=any ')' >
+-    any*
+->
+-"""
+-p2 = """
+-power<
+-    'sorted'
+-    trailer< '(' arglist<node=any any*> ')' >
+-    any*
+->
+-"""
+-pats_built = False
+-def in_special_context(node):
+-    """ Returns true if node is in an environment where all that is required
+-        of it is being itterable (ie, it doesn't matter if it returns a list
+-        or an itterator).
+-        See test_map_nochange in test_fixers.py for some examples and tests.
+-        """
+-    global p0, p1, p2, pats_built
+-    if not pats_built:
+-        p1 = patcomp.compile_pattern(p1)
+-        p0 = patcomp.compile_pattern(p0)
+-        p2 = patcomp.compile_pattern(p2)
+-        pats_built = True
+-    patterns = [p0, p1, p2]
+-    for pattern, parent in zip(patterns, attr_chain(node, "parent")):
+-        results = {}
+-        if pattern.match(parent, results) and results["node"] is node:
+-            return True
+-    return False
+-
+-def is_probably_builtin(node):
+-    """
+-    Check that something isn't an attribute or function name etc.
+-    """
+-    prev = node.prev_sibling
+-    if prev is not None and prev.type == token.DOT:
+-        # Attribute lookup.
+-        return False
+-    parent = node.parent
+-    if parent.type in (syms.funcdef, syms.classdef):
+-        return False
+-    if parent.type == syms.expr_stmt and parent.children[0] is node:
+-        # Assignment.
+-        return False
+-    if parent.type == syms.parameters or \
+-            (parent.type == syms.typedargslist and (
+-            (prev is not None and prev.type == token.COMMA) or
+-            parent.children[0] is node
+-            )):
+-        # The name of an argument.
+-        return False
+-    return True
+-
+-###########################################################
+-### The following functions are to find bindings in a suite
+-###########################################################
+-
+-def make_suite(node):
+-    if node.type == syms.suite:
+-        return node
+-    node = node.clone()
+-    parent, node.parent = node.parent, None
+-    suite = Node(syms.suite, [node])
+-    suite.parent = parent
+-    return suite
+-
+-def find_root(node):
+-    """Find the top level namespace."""
+-    # Scamper up to the top level namespace
+-    while node.type != syms.file_input:
+-        assert node.parent, "Tree is insane! root found before "\
+-                           "file_input node was found."
+-        node = node.parent
+-    return node
+-
+-def does_tree_import(package, name, node):
+-    """ Returns true if name is imported from package at the
+-        top level of the tree which node belongs to.
+-        To cover the case of an import like 'import foo', use
+-        None for the package and 'foo' for the name. """
+-    binding = find_binding(name, find_root(node), package)
+-    return bool(binding)
+-
+-def is_import(node):
+-    """Returns true if the node is an import statement."""
+-    return node.type in (syms.import_name, syms.import_from)
+-
+-def touch_import(package, name, node):
+-    """ Works like `does_tree_import` but adds an import statement
+-        if it was not imported. """
+-    def is_import_stmt(node):
+-        return node.type == syms.simple_stmt and node.children and \
+-               is_import(node.children[0])
+-
+-    root = find_root(node)
+-
+-    if does_tree_import(package, name, root):
+-        return
+-
+-    add_newline_before = False
+-
+-    # figure out where to insert the new import.  First try to find
+-    # the first import and then skip to the last one.
+-    insert_pos = offset = 0
+-    for idx, node in enumerate(root.children):
+-        if not is_import_stmt(node):
+-            continue
+-        for offset, node2 in enumerate(root.children[idx:]):
+-            if not is_import_stmt(node2):
+-                break
+-        insert_pos = idx + offset
+-        break
+-
+-    # if there are no imports where we can insert, find the docstring.
+-    # if that also fails, we stick to the beginning of the file
+-    if insert_pos == 0:
+-        for idx, node in enumerate(root.children):
+-            if node.type == syms.simple_stmt and node.children and \
+-               node.children[0].type == token.STRING:
+-                insert_pos = idx + 1
+-                add_newline_before
+-                break
+-
+-    if package is None:
+-        import_ = Node(syms.import_name, [
+-            Leaf(token.NAME, 'import'),
+-            Leaf(token.NAME, name, prefix=' ')
+-        ])
+-    else:
+-        import_ = FromImport(package, [Leaf(token.NAME, name, prefix=' ')])
+-
+-    children = [import_, Newline()]
+-    if add_newline_before:
+-        children.insert(0, Newline())
+-    root.insert_child(insert_pos, Node(syms.simple_stmt, children))
+-
+-
+-_def_syms = set([syms.classdef, syms.funcdef])
+-def find_binding(name, node, package=None):
+-    """ Returns the node which binds variable name, otherwise None.
+-        If optional argument package is supplied, only imports will
+-        be returned.
+-        See test cases for examples."""
+-    for child in node.children:
+-        ret = None
+-        if child.type == syms.for_stmt:
+-            if _find(name, child.children[1]):
+-                return child
+-            n = find_binding(name, make_suite(child.children[-1]), package)
+-            if n: ret = n
+-        elif child.type in (syms.if_stmt, syms.while_stmt):
+-            n = find_binding(name, make_suite(child.children[-1]), package)
+-            if n: ret = n
+-        elif child.type == syms.try_stmt:
+-            n = find_binding(name, make_suite(child.children[2]), package)
+-            if n:
+-                ret = n
+-            else:
+-                for i, kid in enumerate(child.children[3:]):
+-                    if kid.type == token.COLON and kid.value == ":":
+-                        # i+3 is the colon, i+4 is the suite
+-                        n = find_binding(name, make_suite(child.children[i+4]), package)
+-                        if n: ret = n
+-        elif child.type in _def_syms and child.children[1].value == name:
+-            ret = child
+-        elif _is_import_binding(child, name, package):
+-            ret = child
+-        elif child.type == syms.simple_stmt:
+-            ret = find_binding(name, child, package)
+-        elif child.type == syms.expr_stmt:
+-            if _find(name, child.children[0]):
+-                ret = child
+-
+-        if ret:
+-            if not package:
+-                return ret
+-            if is_import(ret):
+-                return ret
+-    return None
+-
+-_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
+-def _find(name, node):
+-    nodes = [node]
+-    while nodes:
+-        node = nodes.pop()
+-        if node.type > 256 and node.type not in _block_syms:
+-            nodes.extend(node.children)
+-        elif node.type == token.NAME and node.value == name:
+-            return node
+-    return None
+-
+-def _is_import_binding(node, name, package=None):
+-    """ Will reuturn node if node will import name, or node
+-        will import * from package.  None is returned otherwise.
+-        See test cases for examples. """
+-
+-    if node.type == syms.import_name and not package:
+-        imp = node.children[1]
+-        if imp.type == syms.dotted_as_names:
+-            for child in imp.children:
+-                if child.type == syms.dotted_as_name:
+-                    if child.children[2].value == name:
+-                        return node
+-                elif child.type == token.NAME and child.value == name:
+-                    return node
+-        elif imp.type == syms.dotted_as_name:
+-            last = imp.children[-1]
+-            if last.type == token.NAME and last.value == name:
+-                return node
+-        elif imp.type == token.NAME and imp.value == name:
+-            return node
+-    elif node.type == syms.import_from:
+-        # unicode(...) is used to make life easier here, because
+-        # from a.b import parses to ['import', ['a', '.', 'b'], ...]
+-        if package and unicode(node.children[1]).strip() != package:
+-            return None
+-        n = node.children[3]
+-        if package and _find('as', n):
+-            # See test_from_import_as for explanation
+-            return None
+-        elif n.type == syms.import_as_names and _find(name, n):
+-            return node
+-        elif n.type == syms.import_as_name:
+-            child = n.children[2]
+-            if child.type == token.NAME and child.value == name:
+-                return node
+-        elif n.type == token.NAME and n.value == name:
+-            return node
+-        elif package and n.type == token.STAR:
+-            return node
+-    return None
+diff -r 531f2e948299 lib2to3/fixes/.svn/all-wcprops
+--- a/lib2to3/fixes/.svn/all-wcprops	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,305 +0,0 @@
+-K 25
+-svn:wc:ra_dav:version-url
+-V 57
+-/projects/!svn/ver/69679/sandbox/trunk/2to3/lib2to3/fixes
+-END
+-fix_dict.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/67389/sandbox/trunk/2to3/lib2to3/fixes/fix_dict.py
+-END
+-fix_has_key.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 72
+-/projects/!svn/ver/67769/sandbox/trunk/2to3/lib2to3/fixes/fix_has_key.py
+-END
+-fix_exec.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_exec.py
+-END
+-fix_idioms.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/fixes/fix_idioms.py
+-END
+-__init__.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/61428/sandbox/trunk/2to3/lib2to3/fixes/__init__.py
+-END
+-fix_urllib.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/68368/sandbox/trunk/2to3/lib2to3/fixes/fix_urllib.py
+-END
+-fix_nonzero.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 72
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_nonzero.py
+-END
+-fix_print.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 70
+-/projects/!svn/ver/66418/sandbox/trunk/2to3/lib2to3/fixes/fix_print.py
+-END
+-fix_imports.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 72
+-/projects/!svn/ver/69054/sandbox/trunk/2to3/lib2to3/fixes/fix_imports.py
+-END
+-fix_numliterals.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 76
+-/projects/!svn/ver/67389/sandbox/trunk/2to3/lib2to3/fixes/fix_numliterals.py
+-END
+-fix_input.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 70
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_input.py
+-END
+-fix_itertools_imports.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 82
+-/projects/!svn/ver/69673/sandbox/trunk/2to3/lib2to3/fixes/fix_itertools_imports.py
+-END
+-fix_getcwdu.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 72
+-/projects/!svn/ver/66782/sandbox/trunk/2to3/lib2to3/fixes/fix_getcwdu.py
+-END
+-fix_zip.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 68
+-/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/fixes/fix_zip.py
+-END
+-fix_raise.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 70
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_raise.py
+-END
+-fix_throw.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 70
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_throw.py
+-END
+-fix_types.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 70
+-/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/fixes/fix_types.py
+-END
+-fix_paren.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 70
+-/projects/!svn/ver/65981/sandbox/trunk/2to3/lib2to3/fixes/fix_paren.py
+-END
+-fix_ws_comma.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 73
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_ws_comma.py
+-END
+-fix_reduce.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/67657/sandbox/trunk/2to3/lib2to3/fixes/fix_reduce.py
+-END
+-fix_raw_input.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 74
+-/projects/!svn/ver/65887/sandbox/trunk/2to3/lib2to3/fixes/fix_raw_input.py
+-END
+-fix_repr.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/67769/sandbox/trunk/2to3/lib2to3/fixes/fix_repr.py
+-END
+-fix_buffer.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_buffer.py
+-END
+-fix_funcattrs.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 74
+-/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/fixes/fix_funcattrs.py
+-END
+-fix_import.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/67928/sandbox/trunk/2to3/lib2to3/fixes/fix_import.py
+-END
+-fix_standarderror.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 78
+-/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/fixes/fix_standarderror.py
+-END
+-fix_map.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 68
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_map.py
+-END
+-fix_next.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/67389/sandbox/trunk/2to3/lib2to3/fixes/fix_next.py
+-END
+-fix_itertools.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 74
+-/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/fixes/fix_itertools.py
+-END
+-fix_execfile.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 73
+-/projects/!svn/ver/67901/sandbox/trunk/2to3/lib2to3/fixes/fix_execfile.py
+-END
+-fix_xrange.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/67705/sandbox/trunk/2to3/lib2to3/fixes/fix_xrange.py
+-END
+-fix_apply.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 70
+-/projects/!svn/ver/67769/sandbox/trunk/2to3/lib2to3/fixes/fix_apply.py
+-END
+-fix_filter.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_filter.py
+-END
+-fix_unicode.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 72
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_unicode.py
+-END
+-fix_except.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/68694/sandbox/trunk/2to3/lib2to3/fixes/fix_except.py
+-END
+-fix_renames.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 72
+-/projects/!svn/ver/67389/sandbox/trunk/2to3/lib2to3/fixes/fix_renames.py
+-END
+-fix_tuple_params.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 77
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_tuple_params.py
+-END
+-fix_methodattrs.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 76
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_methodattrs.py
+-END
+-fix_xreadlines.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 75
+-/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/fixes/fix_xreadlines.py
+-END
+-fix_long.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/68110/sandbox/trunk/2to3/lib2to3/fixes/fix_long.py
+-END
+-fix_intern.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/67657/sandbox/trunk/2to3/lib2to3/fixes/fix_intern.py
+-END
+-fix_callable.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 73
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_callable.py
+-END
+-fix_isinstance.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 75
+-/projects/!svn/ver/67767/sandbox/trunk/2to3/lib2to3/fixes/fix_isinstance.py
+-END
+-fix_basestring.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 75
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_basestring.py
+-END
+-fix_ne.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 67
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_ne.py
+-END
+-fix_set_literal.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 76
+-/projects/!svn/ver/69679/sandbox/trunk/2to3/lib2to3/fixes/fix_set_literal.py
+-END
+-fix_future.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 71
+-/projects/!svn/ver/63880/sandbox/trunk/2to3/lib2to3/fixes/fix_future.py
+-END
+-fix_metaclass.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 74
+-/projects/!svn/ver/67371/sandbox/trunk/2to3/lib2to3/fixes/fix_metaclass.py
+-END
+-fix_sys_exc.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 72
+-/projects/!svn/ver/65968/sandbox/trunk/2to3/lib2to3/fixes/fix_sys_exc.py
+-END
+-fix_imports2.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 73
+-/projects/!svn/ver/68422/sandbox/trunk/2to3/lib2to3/fixes/fix_imports2.py
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/dir-prop-base
+--- a/lib2to3/fixes/.svn/dir-prop-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,10 +0,0 @@
+-K 10
+-svn:ignore
+-V 25
+-*.pyc
+-*.pyo
+-*.pickle
+-@*
+-
+-
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/entries
+--- a/lib2to3/fixes/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,1728 +0,0 @@
+-9
+-
+-dir
+-70785
+-http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/fixes
+-http://svn.python.org/projects
+-
+-
+-
+-2009-02-16T17:36:06.789054Z
+-69679
+-benjamin.peterson
+-has-props
+-
+-svn:special svn:externals svn:needs-lock
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-6015fed2-1504-0410-9fe1-9d1591cc4771
+-
+-fix_dict.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-d12677f15a5a34c7754e90cb06bc153e
+-2008-11-25T23:13:17.968453Z
+-67389
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-3588
+-
+-fix_has_key.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-1b88e2b6b4c60df9b85a168a07e13fa7
+-2008-12-14T20:59:10.846867Z
+-67769
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-3209
+-
+-fix_exec.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-679db75847dfd56367a8cd2b4286949c
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-985
+-
+-fix_idioms.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-0281b19c721594c6eb341c83270d37bd
+-2008-11-28T23:18:48.744865Z
+-67433
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-3939
+-
+-__init__.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-97781d2954bbc2eebdc963de519fe2de
+-2006-12-12T14:56:29.604692Z
+-53006
+-guido.van.rossum
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-47
+-
+-fix_urllib.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-c883d34902a6e74c08f4370a978e5b86
+-2009-01-06T23:56:10.682943Z
+-68368
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-7484
+-
+-fix_nonzero.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-6f8983345b023d63ddce248a93c5db83
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-578
+-
+-fix_print.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-478786e57412307d598aee1a20595102
+-2008-09-12T23:49:48.354778Z
+-66418
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-2957
+-
+-fix_imports.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-fa0f30cff73ee261c93c85286007c761
+-2009-01-28T16:01:54.183761Z
+-69054
+-guilherme.polo
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-5692
+-
+-fix_numliterals.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-7e04fa79f3ff3ff475ec1716021b8489
+-2008-11-25T23:13:17.968453Z
+-67389
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-789
+-
+-fix_input.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-3a704e4f30c9f72c236274118f093034
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-692
+-
+-fix_itertools_imports.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-05420b3d189c8130eca6bf051bd31a17
+-2009-02-16T15:38:22.416590Z
+-69673
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1837
+-
+-fix_getcwdu.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-1bf89e0a81cc997173d5d63078f8ea5a
+-2008-10-03T22:51:36.115136Z
+-66782
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-432
+-
+-fix_zip.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-8e61d2105f3122181e793e7c9b4caf31
+-2008-11-28T23:18:48.744865Z
+-67433
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-889
+-
+-fix_throw.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-52c18fcf966a4c7f44940e331784f51c
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1564
+-
+-fix_raise.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-7f69130d4008f2b870fbc5d88ed726de
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-2587
+-
+-fix_types.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-08728aeba77665139ce3f967cb24c2f1
+-2008-11-28T23:18:48.744865Z
+-67433
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1779
+-
+-fix_paren.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-e2b3bd30551e285f3bc45eed6a797014
+-2008-08-22T20:41:30.636639Z
+-65981
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1213
+-
+-fix_ws_comma.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-8e92e7f56434c9b2263874296578ea53
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1108
+-
+-fix_reduce.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-2fee5cc1f796c98a749dc789199da016
+-2008-12-08T00:29:35.627027Z
+-67657
+-armin.ronacher
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-816
+-
+-fix_raw_input.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-18666e7c36b850f0b6d5666504bec0ae
+-2008-08-19T22:45:04.505207Z
+-65887
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-435
+-
+-fix_repr.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-badd6b1054395732bd64df829d16cf96
+-2008-12-14T20:59:10.846867Z
+-67769
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-594
+-
+-fix_buffer.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-d6f8cc141ad7ab3f197f1638b9e3e1aa
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-566
+-
+-fix_funcattrs.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-7a7b9d2abe6fbecfdf2e5c0095978f0b
+-2008-11-28T23:18:48.744865Z
+-67433
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-624
+-
+-fix_import.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-9973617c9e868b2b9afb0a609ef30b35
+-2008-12-27T02:49:30.983707Z
+-67928
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-2953
+-
+-fix_standarderror.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-f76efc435650b1eba8bf73dbdfdeef3e
+-2008-11-28T23:18:48.744865Z
+-67433
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-431
+-
+-fix_map.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-0cdf1b348ed0dc9348377ad6ce1aef42
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-2537
+-
+-fix_next.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-46917a2b5128a18a5224f6ae5dc021db
+-2008-11-25T23:13:17.968453Z
+-67389
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-3205
+-
+-fix_itertools.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-d2b48acbc9d415b64f3c71575fbfb9df
+-2008-11-28T23:18:48.744865Z
+-67433
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1483
+-
+-fix_execfile.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-f968686ed347fd544fc69fd9cb6073cd
+-2008-12-22T20:09:55.444195Z
+-67901
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1974
+-
+-fix_xrange.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-4f054eb9bb8f4d4916f1b33eec5175f9
+-2008-12-11T19:04:08.320821Z
+-67705
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-2291
+-
+-fix_apply.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-2a00b679f13c1dca9b45bc23a3b2a695
+-2008-12-14T20:59:10.846867Z
+-67769
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1894
+-
+-fix_filter.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-0879d4b1af4eeb93b1a8baff1fd298c1
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-2089
+-
+-fix_unicode.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-05e9e9ae6cbc1c396bc11b19b5dab25a
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-832
+-
+-fix_except.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-450c9cbb28a5be9d21719abcb33a59f5
+-2009-01-17T23:55:59.992428Z
+-68694
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-3251
+-
+-fix_renames.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-52f66737c4206d8cfa77bbb07af4a056
+-2008-11-25T23:13:17.968453Z
+-67389
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-2192
+-
+-fix_tuple_params.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-557690cc5399b0ade14c16089df2effb
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-5405
+-
+-fix_methodattrs.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-6ee0925ec01e9ae632326855ab5cb016
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-587
+-
+-fix_xreadlines.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-ade2c0b61ba9f8effa9df543a2fbdc4a
+-2008-11-28T23:18:48.744865Z
+-67433
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-670
+-
+-fix_long.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-2aaca612bae42bfe84dd0d6139260749
+-2008-12-31T20:13:26.408132Z
+-68110
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-538
+-
+-fix_intern.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-00e20c0723e807004c3fd0ae88d26b09
+-2008-12-08T00:29:35.627027Z
+-67657
+-armin.ronacher
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1368
+-
+-fix_callable.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-37990663703ff5ea2fabb3095a9ad189
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-952
+-
+-fix_isinstance.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-921a0f20d0a6e47b4b1291d37599bf09
+-2008-12-14T20:28:12.506842Z
+-67767
+-benjamin.peterson
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1594
+-
+-fix_basestring.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-0fe11afa759b94c75323aa2a3188089d
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-301
+-
+-fix_ne.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-a787f8744fda47bffd7f2b6a9ee4ff38
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-590
+-
+-fix_set_literal.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-fc4742a5a8d78f9dd84b1c5f0040003b
+-2009-02-16T17:36:06.789054Z
+-69679
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1699
+-
+-fix_future.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-0e2786c94aac6b11a47d8ec46d8b19d6
+-2008-06-01T23:09:38.597843Z
+-63880
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-527
+-
+-fix_metaclass.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-c608b0bf4a9c0c1028051ffe82d055f4
+-2008-11-24T22:02:00.590445Z
+-67371
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-8213
+-
+-fix_sys_exc.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-2b412acd29c54b0101163bb8be2ab5c7
+-2008-08-21T23:45:13.840810Z
+-65968
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1030
+-
+-fix_imports2.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:37.000000Z
+-15274809df396bec14aeafccd2ab9875
+-2009-01-09T02:01:03.956074Z
+-68422
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-289
+-
+diff -r 531f2e948299 lib2to3/fixes/.svn/format
+--- a/lib2to3/fixes/.svn/format	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,1 +0,0 @@
+-9
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/__init__.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/__init__.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_apply.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_apply.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_basestring.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_basestring.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_buffer.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_buffer.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_callable.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_callable.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_dict.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_dict.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_except.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_except.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_exec.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_exec.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_execfile.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_execfile.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_filter.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_filter.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_funcattrs.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_funcattrs.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_future.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_future.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 13
+-'Id Revision'
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_getcwdu.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_getcwdu.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_has_key.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_has_key.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_idioms.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_idioms.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_import.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_import.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_imports.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_imports.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_imports2.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_imports2.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_input.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_input.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_intern.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_intern.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_itertools.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_itertools.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_itertools_imports.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_itertools_imports.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_long.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_long.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_map.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_map.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_metaclass.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_metaclass.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_methodattrs.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_methodattrs.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 13
+-'Id Revision'
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_ne.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_ne.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_next.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_next.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_nonzero.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_nonzero.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_numliterals.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_numliterals.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_paren.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_paren.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_print.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_print.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_raise.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_raise.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_raw_input.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_raw_input.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_renames.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_renames.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 13
+-'Id Revision'
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_repr.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_repr.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_set_literal.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_set_literal.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_standarderror.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_standarderror.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_sys_exc.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_sys_exc.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_throw.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_throw.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_tuple_params.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_tuple_params.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_types.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_types.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_unicode.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_unicode.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_urllib.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_urllib.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_ws_comma.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_ws_comma.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_xrange.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_xrange.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_xreadlines.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_xreadlines.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/prop-base/fix_zip.py.svn-base
+--- a/lib2to3/fixes/.svn/prop-base/fix_zip.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,5 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-END
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/__init__.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/__init__.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,1 +0,0 @@
+-# Dummy file to make this directory a package.
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_apply.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_apply.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,58 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for apply().
+-
+-This converts apply(func, v, k) into (func)(*v, **k)."""
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Call, Comma, parenthesize
+-
+-class FixApply(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'apply'
+-        trailer<
+-            '('
+-            arglist<
+-                (not argument<NAME '=' any>) func=any ','
+-                (not argument<NAME '=' any>) args=any [','
+-                (not argument<NAME '=' any>) kwds=any] [',']
+-            >
+-            ')'
+-        >
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-        assert results
+-        func = results["func"]
+-        args = results["args"]
+-        kwds = results.get("kwds")
+-        prefix = node.get_prefix()
+-        func = func.clone()
+-        if (func.type not in (token.NAME, syms.atom) and
+-            (func.type != syms.power or
+-             func.children[-2].type == token.DOUBLESTAR)):
+-            # Need to parenthesize
+-            func = parenthesize(func)
+-        func.set_prefix("")
+-        args = args.clone()
+-        args.set_prefix("")
+-        if kwds is not None:
+-            kwds = kwds.clone()
+-            kwds.set_prefix("")
+-        l_newargs = [pytree.Leaf(token.STAR, "*"), args]
+-        if kwds is not None:
+-            l_newargs.extend([Comma(),
+-                              pytree.Leaf(token.DOUBLESTAR, "**"),
+-                              kwds])
+-            l_newargs[-2].set_prefix(" ") # that's the ** token
+-        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
+-        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
+-        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
+-        return Call(func, l_newargs, prefix=prefix)
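
For reference, a minimal runnable sketch of the fix_apply rewrite described in the docstring above; f, args and kwds are hypothetical names, not part of this diff:

    # Before (Python 2): apply(f, args, kwds)
    # After (FixApply):  f(*args, **kwds)
    def f(a, b, scale=1):
        return (a + b) * scale

    args = (2, 3)
    kwds = {"scale": 10}
    print(f(*args, **kwds))  # 50, same result the old apply() call gave
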
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_basestring.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_basestring.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,13 +0,0 @@
+-"""Fixer for basestring -> str."""
+-# Author: Christian Heimes
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixBasestring(fixer_base.BaseFix):
+-
+-    PATTERN = "'basestring'"
+-
+-    def transform(self, node, results):
+-        return Name("str", prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_buffer.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_buffer.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,21 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes buffer(...) into memoryview(...)."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixBuffer(fixer_base.BaseFix):
+-
+-    explicit = True # The user must ask for this fixer
+-
+-    PATTERN = """
+-              power< name='buffer' trailer< '(' [any] ')' > >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("memoryview", prefix=name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_callable.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_callable.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,31 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for callable().
+-
+-This converts callable(obj) into hasattr(obj, '__call__')."""
+-
+-# Local imports
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Call, Name, String
+-
+-class FixCallable(fixer_base.BaseFix):
+-
+-    # Ignore callable(*args) or use of keywords.
+-    # Either could be a hint that the builtin callable() is not being used.
+-    PATTERN = """
+-    power< 'callable'
+-           trailer< lpar='('
+-                    ( not(arglist | argument<any '=' any>) func=any
+-                      | func=arglist<(not argument<any '=' any>) any ','> )
+-                    rpar=')' >
+-           after=any*
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        func = results["func"]
+-
+-        args = [func.clone(), String(', '), String("'__call__'")]
+-        return Call(Name("hasattr"), args, prefix=node.get_prefix())
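
A minimal sketch of the fix_callable rewrite documented above; obj is a hypothetical name:

    # Before (Python 2): callable(obj)
    # After (FixCallable): hasattr(obj, '__call__')
    obj = len
    print(hasattr(obj, '__call__'))  # True, matching callable(obj) on 2.x
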
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_dict.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_dict.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,99 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for dict methods.
+-
+-d.keys() -> list(d.keys())
+-d.items() -> list(d.items())
+-d.values() -> list(d.values())
+-
+-d.iterkeys() -> iter(d.keys())
+-d.iteritems() -> iter(d.items())
+-d.itervalues() -> iter(d.values())
+-
+-Except in certain very specific contexts: the iter() can be dropped
+-when the context is list(), sorted(), iter() or for...in; the list()
+-can be dropped when the context is list() or sorted() (but not iter()
+-or for...in!). Special contexts that apply to both: list(), sorted(), tuple()
+-set(), any(), all(), sum().
+-
+-Note: iter(d.keys()) could be written as iter(d) but since the
+-original d.iterkeys() was also redundant we don't fix this.  And there
+-are (rare) contexts where it makes a difference (e.g. when passing it
+-as an argument to a function that introspects the argument).
+-"""
+-
+-# Local imports
+-from .. import pytree
+-from .. import patcomp
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
+-from .. import fixer_util
+-
+-
+-iter_exempt = fixer_util.consuming_calls | set(["iter"])
+-
+-
+-class FixDict(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< head=any+
+-         trailer< '.' method=('keys'|'items'|'values'|
+-                              'iterkeys'|'iteritems'|'itervalues') >
+-         parens=trailer< '(' ')' >
+-         tail=any*
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        head = results["head"]
+-        method = results["method"][0] # Extract node for method name
+-        tail = results["tail"]
+-        syms = self.syms
+-        method_name = method.value
+-        isiter = method_name.startswith("iter")
+-        if isiter:
+-            method_name = method_name[4:]
+-        assert method_name in ("keys", "items", "values"), repr(method)
+-        head = [n.clone() for n in head]
+-        tail = [n.clone() for n in tail]
+-        special = not tail and self.in_special_context(node, isiter)
+-        args = head + [pytree.Node(syms.trailer,
+-                                   [Dot(),
+-                                    Name(method_name,
+-                                         prefix=method.get_prefix())]),
+-                       results["parens"].clone()]
+-        new = pytree.Node(syms.power, args)
+-        if not special:
+-            new.set_prefix("")
+-            new = Call(Name(isiter and "iter" or "list"), [new])
+-        if tail:
+-            new = pytree.Node(syms.power, [new] + tail)
+-        new.set_prefix(node.get_prefix())
+-        return new
+-
+-    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+-    p1 = patcomp.compile_pattern(P1)
+-
+-    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+-            | comp_for< 'for' any 'in' node=any any* >
+-         """
+-    p2 = patcomp.compile_pattern(P2)
+-
+-    def in_special_context(self, node, isiter):
+-        if node.parent is None:
+-            return False
+-        results = {}
+-        if (node.parent.parent is not None and
+-               self.p1.match(node.parent.parent, results) and
+-               results["node"] is node):
+-            if isiter:
+-                # iter(d.iterkeys()) -> iter(d.keys()), etc.
+-                return results["func"].value in iter_exempt
+-            else:
+-                # list(d.keys()) -> list(d.keys()), etc.
+-                return results["func"].value in fixer_util.consuming_calls
+-        if not isiter:
+-            return False
+-        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+-        return self.p2.match(node.parent, results) and results["node"] is node
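
A minimal sketch of the fix_dict rewrites and the special-context exemption from the docstring above; d is a hypothetical dict:

    d = {"a": 1, "b": 2}
    keys = list(d.keys())      # was: d.keys()
    pairs = iter(d.items())    # was: d.iteritems()
    # Inside a consuming call such as sorted(), the wrapper is dropped:
    print(sorted(d.keys()))    # stays sorted(d.keys()), no extra list()
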
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_except.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_except.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,92 +0,0 @@
+-"""Fixer for except statements with named exceptions.
+-
+-The following cases will be converted:
+-
+-- "except E, T:" where T is a name:
+-
+-    except E as T:
+-
+-- "except E, T:" where T is not a name, tuple or list:
+-
+-        except E as t:
+-            T = t
+-
+-    This is done because the target of an "except" clause must be a
+-    name.
+-
+-- "except E, T:" where T is a tuple or list literal:
+-
+-        except E as t:
+-            T = t.args
+-"""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
+-
+-def find_excepts(nodes):
+-    for i, n in enumerate(nodes):
+-        if n.type == syms.except_clause:
+-            if n.children[0].value == 'except':
+-                yield (n, nodes[i+2])
+-
+-class FixExcept(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    try_stmt< 'try' ':' suite
+-                  cleanup=(except_clause ':' suite)+
+-                  tail=(['except' ':' suite]
+-                        ['else' ':' suite]
+-                        ['finally' ':' suite]) >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-
+-        tail = [n.clone() for n in results["tail"]]
+-
+-        try_cleanup = [ch.clone() for ch in results["cleanup"]]
+-        for except_clause, e_suite in find_excepts(try_cleanup):
+-            if len(except_clause.children) == 4:
+-                (E, comma, N) = except_clause.children[1:4]
+-                comma.replace(Name("as", prefix=" "))
+-
+-                if N.type != token.NAME:
+-                    # Generate a new N for the except clause
+-                    new_N = Name(self.new_name(), prefix=" ")
+-                    target = N.clone()
+-                    target.set_prefix("")
+-                    N.replace(new_N)
+-                    new_N = new_N.clone()
+-
+-                    # Insert "old_N = new_N" as the first statement in
+-                    #  the except body. This loop skips leading whitespace
+-                    #  and indents
+-                    #TODO(cwinter) suite-cleanup
+-                    suite_stmts = e_suite.children
+-                    for i, stmt in enumerate(suite_stmts):
+-                        if isinstance(stmt, pytree.Node):
+-                            break
+-
+-                    # The assignment is different if old_N is a tuple or list
+-                    # In that case, the assignment is old_N = new_N.args
+-                    if is_tuple(N) or is_list(N):
+-                        assign = Assign(target, Attr(new_N, Name('args')))
+-                    else:
+-                        assign = Assign(target, new_N)
+-
+-                    #TODO(cwinter) stopgap until children becomes a smart list
+-                    for child in reversed(suite_stmts[:i]):
+-                        e_suite.insert_child(0, child)
+-                    e_suite.insert_child(i, assign)
+-                elif N.get_prefix() == "":
+-                    # No space after a comma is legal; no space after "as",
+-                    # not so much.
+-                    N.set_prefix(" ")
+-
+-        #TODO(cwinter) fix this when children becomes a smart list
+-        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
+-        return pytree.Node(node.type, children)
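
A minimal sketch of the two main fix_except cases listed in the docstring above; the exception values are hypothetical:

    # Before (Python 2): except ValueError, e:
    try:
        int("x")
    except ValueError as e:      # T is a name: simple "as" rewrite
        print(e)

    # When T was a tuple, an assignment from t.args is inserted:
    # Before (Python 2): except ValueError, (msg, code):
    try:
        raise ValueError("bad", 42)
    except ValueError as t:
        (msg, code) = t.args
        print(msg, code)
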
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_exec.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_exec.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,39 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for exec.
+-
+-This converts usages of the exec statement into calls to a built-in
+-exec() function.
+-
+-exec code in ns1, ns2 -> exec(code, ns1, ns2)
+-"""
+-
+-# Local imports
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Comma, Name, Call
+-
+-
+-class FixExec(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
+-    |
+-    exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
+-    """
+-
+-    def transform(self, node, results):
+-        assert results
+-        syms = self.syms
+-        a = results["a"]
+-        b = results.get("b")
+-        c = results.get("c")
+-        args = [a.clone()]
+-        args[0].set_prefix("")
+-        if b is not None:
+-            args.extend([Comma(), b.clone()])
+-        if c is not None:
+-            args.extend([Comma(), c.clone()])
+-
+-        return Call(Name("exec"), args, prefix=node.get_prefix())
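
A minimal sketch of the fix_exec rewrite shown in the docstring above; code and ns are hypothetical names:

    # Before (Python 2): exec code in ns
    code = "x = 1 + 1"
    ns = {}
    exec(code, ns)               # After: exec(code, ns)
    print(ns["x"])               # 2
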
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_execfile.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_execfile.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,51 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for execfile.
+-
+-This converts usages of the execfile function into calls to the built-in
+-exec() function.
+-"""
+-
+-from .. import fixer_base
+-from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
+-                          ArgList, String, syms)
+-
+-
+-class FixExecfile(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
+-    |
+-    power< 'execfile' trailer< '(' filename=any ')' > >
+-    """
+-
+-    def transform(self, node, results):
+-        assert results
+-        filename = results["filename"]
+-        globals = results.get("globals")
+-        locals = results.get("locals")
+-
+-        # Copy over the prefix from the right parentheses end of the execfile
+-        # call.
+-        execfile_paren = node.children[-1].children[-1].clone()
+-        # Construct open().read().
+-        open_args = ArgList([filename.clone()], rparen=execfile_paren)
+-        open_call = Node(syms.power, [Name("open"), open_args])
+-        read = [Node(syms.trailer, [Dot(), Name('read')]),
+-                Node(syms.trailer, [LParen(), RParen()])]
+-        open_expr = [open_call] + read
+-        # Wrap the open call in a compile call. This is so the filename will be
+-        # preserved in the execed code.
+-        filename_arg = filename.clone()
+-        filename_arg.set_prefix(" ")
+-        exec_str = String("'exec'", " ")
+-        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
+-        compile_call = Call(Name("compile"), compile_args, "")
+-        # Finally, replace the execfile call with an exec call.
+-        args = [compile_call]
+-        if globals is not None:
+-            args.extend([Comma(), globals.clone()])
+-        if locals is not None:
+-            args.extend([Comma(), locals.clone()])
+-        return Call(Name("exec"), args, prefix=node.get_prefix())
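
A minimal sketch of the fix_execfile rewrite; script.py is a hypothetical file created here only so the example runs:

    with open("script.py", "w") as fh:
        fh.write("print('hello')\n")
    # Before (Python 2): execfile("script.py")
    # After: the compile() wrapper keeps the filename in tracebacks.
    exec(compile(open("script.py").read(), "script.py", 'exec'))
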
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_filter.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_filter.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,75 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes filter(F, X) into list(filter(F, X)).
+-
+-We avoid the transformation if the filter() call is directly contained
+-in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
+-for V in <>:.
+-
+-NOTE: This is still not correct if the original code was depending on
+-filter(F, X) to return a string if X is a string and a tuple if X is a
+-tuple.  That would require type inference, which we don't do.  Let
+-Python 2.6 figure it out.
+-"""
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, ListComp, in_special_context
+-
+-class FixFilter(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-    filter_lambda=power<
+-        'filter'
+-        trailer<
+-            '('
+-            arglist<
+-                lambdef< 'lambda'
+-                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+-                >
+-                ','
+-                it=any
+-            >
+-            ')'
+-        >
+-    >
+-    |
+-    power<
+-        'filter'
+-        trailer< '(' arglist< none='None' ',' seq=any > ')' >
+-    >
+-    |
+-    power<
+-        'filter'
+-        args=trailer< '(' [any] ')' >
+-    >
+-    """
+-
+-    skip_on = "future_builtins.filter"
+-
+-    def transform(self, node, results):
+-        if self.should_skip(node):
+-            return
+-
+-        if "filter_lambda" in results:
+-            new = ListComp(results.get("fp").clone(),
+-                           results.get("fp").clone(),
+-                           results.get("it").clone(),
+-                           results.get("xp").clone())
+-
+-        elif "none" in results:
+-            new = ListComp(Name("_f"),
+-                           Name("_f"),
+-                           results["seq"].clone(),
+-                           Name("_f"))
+-
+-        else:
+-            if in_special_context(node):
+-                return None
+-            new = node.clone()
+-            new.set_prefix("")
+-            new = Call(Name("list"), [new])
+-        new.set_prefix(node.get_prefix())
+-        return new
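
A minimal sketch of the three fix_filter cases matched by the pattern above; xs is a hypothetical list:

    xs = [0, 1, 2, 3]
    # filter(lambda x: x > 1, xs) becomes a list comprehension:
    print([x for x in xs if x > 1])   # [2, 3]
    # filter(None, xs) becomes:
    print([_f for _f in xs if _f])    # [1, 2, 3]
    # Any other filter(F, X) call is wrapped as list(filter(F, X)),
    # unless it already sits in a special context such as iter() or a for loop.
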
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_funcattrs.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_funcattrs.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,19 +0,0 @@
+-"""Fix function attribute names (f.func_x -> f.__x__)."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixFuncattrs(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
+-                                  | 'func_name' | 'func_defaults' | 'func_code'
+-                                  | 'func_dict') > any* >
+-    """
+-
+-    def transform(self, node, results):
+-        attr = results["attr"][0]
+-        attr.replace(Name(("__%s__" % attr.value[5:]),
+-                          prefix=attr.get_prefix()))
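
A minimal sketch of the fix_funcattrs rewrite (f.func_x -> f.__x__); g is a hypothetical function:

    def g():
        """example"""
    # Before (Python 2): g.func_name, g.func_doc
    print(g.__name__, g.__doc__)      # After: the __x__ attribute names
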
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_future.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_future.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,20 +0,0 @@
+-"""Remove __future__ imports
+-
+-from __future__ import foo is replaced with an empty line.
+-"""
+-# Author: Christian Heimes
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import BlankLine
+-
+-class FixFuture(fixer_base.BaseFix):
+-    PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
+-
+-    # This should be run last -- some things check for the import
+-    run_order = 10
+-
+-    def transform(self, node, results):
+-        new = BlankLine()
+-        new.prefix = node.get_prefix()
+-        return new
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_getcwdu.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_getcwdu.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,18 +0,0 @@
+-"""
+-Fixer that changes os.getcwdu() to os.getcwd().
+-"""
+-# Author: Victor Stinner
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixGetcwdu(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power< 'os' trailer< dot='.' name='getcwdu' > any* >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("getcwd", prefix=name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_has_key.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_has_key.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,109 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for has_key().
+-
+-Calls to .has_key() methods are expressed in terms of the 'in'
+-operator:
+-
+-    d.has_key(k) -> k in d
+-
+-CAVEATS:
+-1) While the primary target of this fixer is dict.has_key(), the
+-   fixer will change any has_key() method call, regardless of its
+-   class.
+-
+-2) Cases like this will not be converted:
+-
+-    m = d.has_key
+-    if m(k):
+-        ...
+-
+-   Only *calls* to has_key() are converted. While it is possible to
+-   convert the above to something like
+-
+-    m = d.__contains__
+-    if m(k):
+-        ...
+-
+-   this is currently not done.
+-"""
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, parenthesize
+-
+-
+-class FixHasKey(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    anchor=power<
+-        before=any+
+-        trailer< '.' 'has_key' >
+-        trailer<
+-            '('
+-            ( not(arglist | argument<any '=' any>) arg=any
+-            | arglist<(not argument<any '=' any>) arg=any ','>
+-            )
+-            ')'
+-        >
+-        after=any*
+-    >
+-    |
+-    negation=not_test<
+-        'not'
+-        anchor=power<
+-            before=any+
+-            trailer< '.' 'has_key' >
+-            trailer<
+-                '('
+-                ( not(arglist | argument<any '=' any>) arg=any
+-                | arglist<(not argument<any '=' any>) arg=any ','>
+-                )
+-                ')'
+-            >
+-        >
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        assert results
+-        syms = self.syms
+-        if (node.parent.type == syms.not_test and
+-            self.pattern.match(node.parent)):
+-            # Don't transform a node matching the first alternative of the
+-            # pattern when its parent matches the second alternative
+-            return None
+-        negation = results.get("negation")
+-        anchor = results["anchor"]
+-        prefix = node.get_prefix()
+-        before = [n.clone() for n in results["before"]]
+-        arg = results["arg"].clone()
+-        after = results.get("after")
+-        if after:
+-            after = [n.clone() for n in after]
+-        if arg.type in (syms.comparison, syms.not_test, syms.and_test,
+-                        syms.or_test, syms.test, syms.lambdef, syms.argument):
+-            arg = parenthesize(arg)
+-        if len(before) == 1:
+-            before = before[0]
+-        else:
+-            before = pytree.Node(syms.power, before)
+-        before.set_prefix(" ")
+-        n_op = Name("in", prefix=" ")
+-        if negation:
+-            n_not = Name("not", prefix=" ")
+-            n_op = pytree.Node(syms.comp_op, (n_not, n_op))
+-        new = pytree.Node(syms.comparison, (arg, n_op, before))
+-        if after:
+-            new = parenthesize(new)
+-            new = pytree.Node(syms.power, (new,) + tuple(after))
+-        if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
+-                                syms.and_expr, syms.shift_expr,
+-                                syms.arith_expr, syms.term,
+-                                syms.factor, syms.power):
+-            new = parenthesize(new)
+-        new.set_prefix(prefix)
+-        return new
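
A minimal sketch of the fix_has_key rewrites, including the negated form handled by the second pattern alternative; d is a hypothetical dict:

    d = {"k": 1}
    # Before (Python 2): d.has_key("k")
    print("k" in d)          # True
    # Before (Python 2): not d.has_key("k")
    print("k" not in d)      # False
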
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_idioms.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_idioms.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,134 +0,0 @@
+-"""Adjust some old Python 2 idioms to their modern counterparts.
+-
+-* Change some type comparisons to isinstance() calls:
+-    type(x) == T -> isinstance(x, T)
+-    type(x) is T -> isinstance(x, T)
+-    type(x) != T -> not isinstance(x, T)
+-    type(x) is not T -> not isinstance(x, T)
+-
+-* Change "while 1:" into "while True:".
+-
+-* Change both
+-
+-    v = list(EXPR)
+-    v.sort()
+-    foo(v)
+-
+-and the more general
+-
+-    v = EXPR
+-    v.sort()
+-    foo(v)
+-
+-into
+-
+-    v = sorted(EXPR)
+-    foo(v)
+-"""
+-# Author: Jacques Frechet, Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Call, Comma, Name, Node, syms
+-
+-CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
+-TYPE = "power< 'type' trailer< '(' x=any ')' > >"
+-
+-class FixIdioms(fixer_base.BaseFix):
+-
+-    explicit = True # The user must ask for this fixer
+-
+-    PATTERN = r"""
+-        isinstance=comparison< %s %s T=any >
+-        |
+-        isinstance=comparison< T=any %s %s >
+-        |
+-        while_stmt< 'while' while='1' ':' any+ >
+-        |
+-        sorted=any<
+-            any*
+-            simple_stmt<
+-              expr_stmt< id1=any '='
+-                         power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
+-              >
+-              '\n'
+-            >
+-            sort=
+-            simple_stmt<
+-              power< id2=any
+-                     trailer< '.' 'sort' > trailer< '(' ')' >
+-              >
+-              '\n'
+-            >
+-            next=any*
+-        >
+-        |
+-        sorted=any<
+-            any*
+-            simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
+-            sort=
+-            simple_stmt<
+-              power< id2=any
+-                     trailer< '.' 'sort' > trailer< '(' ')' >
+-              >
+-              '\n'
+-            >
+-            next=any*
+-        >
+-    """ % (TYPE, CMP, CMP, TYPE)
+-
+-    def match(self, node):
+-        r = super(FixIdioms, self).match(node)
+-        # If we've matched one of the sort/sorted subpatterns above, we
+-        # want to reject matches where the initial assignment and the
+-        # subsequent .sort() call involve different identifiers.
+-        if r and "sorted" in r:
+-            if r["id1"] == r["id2"]:
+-                return r
+-            return None
+-        return r
+-
+-    def transform(self, node, results):
+-        if "isinstance" in results:
+-            return self.transform_isinstance(node, results)
+-        elif "while" in results:
+-            return self.transform_while(node, results)
+-        elif "sorted" in results:
+-            return self.transform_sort(node, results)
+-        else:
+-            raise RuntimeError("Invalid match")
+-
+-    def transform_isinstance(self, node, results):
+-        x = results["x"].clone() # The thing inside of type()
+-        T = results["T"].clone() # The type being compared against
+-        x.set_prefix("")
+-        T.set_prefix(" ")
+-        test = Call(Name("isinstance"), [x, Comma(), T])
+-        if "n" in results:
+-            test.set_prefix(" ")
+-            test = Node(syms.not_test, [Name("not"), test])
+-        test.set_prefix(node.get_prefix())
+-        return test
+-
+-    def transform_while(self, node, results):
+-        one = results["while"]
+-        one.replace(Name("True", prefix=one.get_prefix()))
+-
+-    def transform_sort(self, node, results):
+-        sort_stmt = results["sort"]
+-        next_stmt = results["next"]
+-        list_call = results.get("list")
+-        simple_expr = results.get("expr")
+-
+-        if list_call:
+-            list_call.replace(Name("sorted", prefix=list_call.get_prefix()))
+-        elif simple_expr:
+-            new = simple_expr.clone()
+-            new.set_prefix("")
+-            simple_expr.replace(Call(Name("sorted"), [new],
+-                                     prefix=simple_expr.get_prefix()))
+-        else:
+-            raise RuntimeError("should not have reached here")
+-        sort_stmt.remove()
+-        if next_stmt:
+-            next_stmt[0].set_prefix(sort_stmt.get_prefix())
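
For a quick check of what this fixer does, it can be driven through
lib2to3's RefactoringTool (it must be requested, since explicit = True
above). A minimal sketch, assuming a stock lib2to3 on the path; the
sample source and the "<example>" name are illustrative only:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_idioms"], explicit=True)
    src = (
        "while 1:\n"
        "    if type(x) == int:\n"
        "        break\n"
    )
    # Expected (illustrative): "while True:" and "if isinstance(x, int):"
    print(rt.refactor_string(src, "<example>"))
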
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_import.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_import.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,90 +0,0 @@
+-"""Fixer for import statements.
+-If spam is being imported from the local directory, this import:
+-    from spam import eggs
+-Becomes:
+-    from .spam import eggs
+-
+-And this import:
+-    import spam
+-Becomes:
+-    from . import spam
+-"""
+-
+-# Local imports
+-from .. import fixer_base
+-from os.path import dirname, join, exists, pathsep
+-from ..fixer_util import FromImport, syms, token
+-
+-
+-def traverse_imports(names):
+-    """
+-    Walks over all the names imported in a dotted_as_names node.
+-    """
+-    pending = [names]
+-    while pending:
+-        node = pending.pop()
+-        if node.type == token.NAME:
+-            yield node.value
+-        elif node.type == syms.dotted_name:
+-            yield "".join([ch.value for ch in node.children])
+-        elif node.type == syms.dotted_as_name:
+-            pending.append(node.children[0])
+-        elif node.type == syms.dotted_as_names:
+-            pending.extend(node.children[::-2])
+-        else:
+-            raise AssertionError("unknown node type")
+-
+-
+-class FixImport(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    import_from< 'from' imp=any 'import' ['('] any [')'] >
+-    |
+-    import_name< 'import' imp=any >
+-    """
+-
+-    def transform(self, node, results):
+-        imp = results['imp']
+-
+-        if node.type == syms.import_from:
+-            # Some imps are top-level (eg: 'import ham')
+-            # some are first level (eg: 'import ham.eggs')
+-            # some are third level (eg: 'import ham.eggs as spam')
+-            # Hence, the loop
+-            while not hasattr(imp, 'value'):
+-                imp = imp.children[0]
+-            if self.probably_a_local_import(imp.value):
+-                imp.value = "." + imp.value
+-                imp.changed()
+-                return node
+-        else:
+-            have_local = False
+-            have_absolute = False
+-            for mod_name in traverse_imports(imp):
+-                if self.probably_a_local_import(mod_name):
+-                    have_local = True
+-                else:
+-                    have_absolute = True
+-            if have_absolute:
+-                if have_local:
+-                    # We won't handle both sibling and absolute imports in the
+-                    # same statement at the moment.
+-                    self.warning(node, "absolute and local imports together")
+-                return
+-
+-            new = FromImport('.', [imp])
+-            new.set_prefix(node.get_prefix())
+-            return new
+-
+-    def probably_a_local_import(self, imp_name):
+-        imp_name = imp_name.split('.', 1)[0]
+-        base_path = dirname(self.filename)
+-        base_path = join(base_path, imp_name)
+-        # If there is no __init__.py next to the file, it's not in a package
+-        # so can't be a relative import.
+-        if not exists(join(dirname(base_path), '__init__.py')):
+-            return False
+-        for ext in ['.py', pathsep, '.pyc', '.so', '.sl', '.pyd']:
+-            if exists(base_path + ext):
+-                return True
+-        return False
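
To see the tree shapes traverse_imports() walks over, one can parse an
import statement with lib2to3's driver. A small sketch, assuming stock
lib2to3; the module names are invented:

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("import ham.eggs, spam as bacon\n")
    # The statement parses to a dotted_as_names node; traverse_imports()
    # above would yield 'ham.eggs' and 'spam' for it, after which
    # probably_a_local_import() checks the filesystem for __init__.py.
    print(str(tree))  # round-trips to the original source
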
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_imports.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_imports.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,145 +0,0 @@
+-"""Fix incompatible imports and module references."""
+-# Authors: Collin Winter, Nick Edds
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, attr_chain
+-
+-MAPPING = {'StringIO':  'io',
+-           'cStringIO': 'io',
+-           'cPickle': 'pickle',
+-           '__builtin__' : 'builtins',
+-           'copy_reg': 'copyreg',
+-           'Queue': 'queue',
+-           'SocketServer': 'socketserver',
+-           'ConfigParser': 'configparser',
+-           'repr': 'reprlib',
+-           'FileDialog': 'tkinter.filedialog',
+-           'tkFileDialog': 'tkinter.filedialog',
+-           'SimpleDialog': 'tkinter.simpledialog',
+-           'tkSimpleDialog': 'tkinter.simpledialog',
+-           'tkColorChooser': 'tkinter.colorchooser',
+-           'tkCommonDialog': 'tkinter.commondialog',
+-           'Dialog': 'tkinter.dialog',
+-           'Tkdnd': 'tkinter.dnd',
+-           'tkFont': 'tkinter.font',
+-           'tkMessageBox': 'tkinter.messagebox',
+-           'ScrolledText': 'tkinter.scrolledtext',
+-           'Tkconstants': 'tkinter.constants',
+-           'Tix': 'tkinter.tix',
+-           'ttk': 'tkinter.ttk',
+-           'Tkinter': 'tkinter',
+-           'markupbase': '_markupbase',
+-           '_winreg': 'winreg',
+-           'thread': '_thread',
+-           'dummy_thread': '_dummy_thread',
+-           # anydbm and whichdb are handled by fix_imports2
+-           'dbhash': 'dbm.bsd',
+-           'dumbdbm': 'dbm.dumb',
+-           'dbm': 'dbm.ndbm',
+-           'gdbm': 'dbm.gnu',
+-           'xmlrpclib': 'xmlrpc.client',
+-           'DocXMLRPCServer': 'xmlrpc.server',
+-           'SimpleXMLRPCServer': 'xmlrpc.server',
+-           'httplib': 'http.client',
+-           'htmlentitydefs' : 'html.entities',
+-           'HTMLParser' : 'html.parser',
+-           'Cookie': 'http.cookies',
+-           'cookielib': 'http.cookiejar',
+-           'BaseHTTPServer': 'http.server',
+-           'SimpleHTTPServer': 'http.server',
+-           'CGIHTTPServer': 'http.server',
+-           #'test.test_support': 'test.support',
+-           'commands': 'subprocess',
+-           'UserString' : 'collections',
+-           'UserList' : 'collections',
+-           'urlparse' : 'urllib.parse',
+-           'robotparser' : 'urllib.robotparser',
+-}
+-
+-
+-def alternates(members):
+-    return "(" + "|".join(map(repr, members)) + ")"
+-
+-
+-def build_pattern(mapping=MAPPING):
+-    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
+-    bare_names = alternates(mapping.keys())
+-
+-    yield """name_import=import_name< 'import' ((%s) |
+-               multiple_imports=dotted_as_names< any* (%s) any* >) >
+-          """ % (mod_list, mod_list)
+-    yield """import_from< 'from' (%s) 'import' ['(']
+-              ( any | import_as_name< any 'as' any > |
+-                import_as_names< any* >)  [')'] >
+-          """ % mod_list
+-    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
+-               multiple_imports=dotted_as_names<
+-                 any* dotted_as_name< (%s) 'as' any > any* >) >
+-          """ % (mod_list, mod_list)
+-
+-    # Find usages of module members in code e.g. thread.foo(bar)
+-    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
+-
+-
+-class FixImports(fixer_base.BaseFix):
+-
+-    order = "pre" # Pre-order tree traversal
+-
+-    # This is overridden in fix_imports2.
+-    mapping = MAPPING
+-
+-    # We want to run this fixer late, so fix_import doesn't try to make stdlib
+-    # renames into relative imports.
+-    run_order = 6
+-
+-    def build_pattern(self):
+-        return "|".join(build_pattern(self.mapping))
+-
+-    def compile_pattern(self):
+-        # We override this, so MAPPING can be programmatically altered and the
+-        # changes will be reflected in PATTERN.
+-        self.PATTERN = self.build_pattern()
+-        super(FixImports, self).compile_pattern()
+-
+-    # Don't match the node if it's within another match.
+-    def match(self, node):
+-        match = super(FixImports, self).match
+-        results = match(node)
+-        if results:
+-            # Module usage could be in the trailer of an attribute lookup, so we
+-            # might have nested matches when "bare_with_attr" is present.
+-            if "bare_with_attr" not in results and \
+-                    any([match(obj) for obj in attr_chain(node, "parent")]):
+-                return False
+-            return results
+-        return False
+-
+-    def start_tree(self, tree, filename):
+-        super(FixImports, self).start_tree(tree, filename)
+-        self.replace = {}
+-
+-    def transform(self, node, results):
+-        import_mod = results.get("module_name")
+-        if import_mod:
+-            mod_name = import_mod.value
+-            new_name = self.mapping[mod_name]
+-            import_mod.replace(Name(new_name, prefix=import_mod.get_prefix()))
+-            if "name_import" in results:
+-                # If it's not a "from x import x, y" or "import x as y" import,
+-                # mark its usage to be replaced.
+-                self.replace[mod_name] = new_name
+-            if "multiple_imports" in results:
+-                # This is a nasty hack to fix multiple imports on a line (e.g.,
+-                # "import StringIO, urlparse"). The problem is that I can't
+-                # figure out an easy way to make a pattern recognize the keys of
+-                # MAPPING randomly sprinkled in an import statement.
+-                results = self.match(node)
+-                if results:
+-                    self.transform(node, results)
+-        else:
+-            # Replace usage of the module.
+-            bare_name = results["bare_with_attr"][0]
+-            new_name = self.replace.get(bare_name.value)
+-            if new_name:
+-                bare_name.replace(Name(new_name, prefix=bare_name.get_prefix()))
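
Exercising this fixer on a module in MAPPING shows both halves of the
transformation: the import itself is renamed, and bare usages of the
module are rewritten too. A sketch under the same stock-lib2to3
assumption:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_imports"])
    src = "import StringIO\ns = StringIO.StringIO()\n"
    # Expected (illustrative): "import io" and "s = io.StringIO()"
    print(rt.refactor_string(src, "<example>"))
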
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_imports2.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_imports2.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,16 +0,0 @@
+-"""Fix incompatible imports and module references that must be fixed after
+-fix_imports."""
+-from . import fix_imports
+-
+-
+-MAPPING = {
+-            'whichdb': 'dbm',
+-            'anydbm': 'dbm',
+-          }
+-
+-
+-class FixImports2(fix_imports.FixImports):
+-
+-    run_order = 7
+-
+-    mapping = MAPPING
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_input.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_input.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,26 +0,0 @@
+-"""Fixer that changes input(...) into eval(input(...))."""
+-# Author: Andre Roberge
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Call, Name
+-from .. import patcomp
+-
+-
+-context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
+-
+-
+-class FixInput(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power< 'input' args=trailer< '(' [any] ')' > >
+-              """
+-
+-    def transform(self, node, results):
+-        # If we're already wrapped in an eval() call, we're done.
+-        if context.match(node.parent.parent):
+-            return
+-
+-        new = node.clone()
+-        new.set_prefix("")
+-        return Call(Name("eval"), [new], prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_intern.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_intern.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,44 +0,0 @@
+-# Copyright 2006 Georg Brandl.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for intern().
+-
+-intern(s) -> sys.intern(s)"""
+-
+-# Local imports
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Name, Attr, touch_import
+-
+-
+-class FixIntern(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'intern'
+-           trailer< lpar='('
+-                    ( not(arglist | argument<any '=' any>) obj=any
+-                      | obj=arglist<(not argument<any '=' any>) any ','> )
+-                    rpar=')' >
+-           after=any*
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-        obj = results["obj"].clone()
+-        if obj.type == syms.arglist:
+-            newarglist = obj.clone()
+-        else:
+-            newarglist = pytree.Node(syms.arglist, [obj.clone()])
+-        after = results["after"]
+-        if after:
+-            after = [n.clone() for n in after]
+-        new = pytree.Node(syms.power,
+-                          Attr(Name("sys"), Name("intern")) +
+-                          [pytree.Node(syms.trailer,
+-                                       [results["lpar"].clone(),
+-                                        newarglist,
+-                                        results["rpar"].clone()])] + after)
+-        new.set_prefix(node.get_prefix())
+-        touch_import(None, 'sys', node)
+-        return new
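
Note that touch_import() also inserts the needed "import sys" at the
top of the module. A sketch, assuming stock lib2to3:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_intern"])
    # Expected (illustrative):
    #     import sys
    #     s = sys.intern('abc')
    print(rt.refactor_string("s = intern('abc')\n", "<example>"))
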
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_isinstance.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_isinstance.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,52 +0,0 @@
+-# Copyright 2008 Armin Ronacher.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that cleans up a tuple argument to isinstance after the tokens
+-in it were fixed.  This is mainly used to remove double occurrences of
+-tokens as a leftover of the long -> int / unicode -> str conversion.
+-
+-eg.  isinstance(x, (int, long)) -> isinstance(x, (int, int))
+-       -> isinstance(x, int)
+-"""
+-
+-from .. import fixer_base
+-from ..fixer_util import token
+-
+-
+-class FixIsinstance(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power<
+-        'isinstance'
+-        trailer< '(' arglist< any ',' atom< '('
+-            args=testlist_gexp< any+ >
+-        ')' > > ')' >
+-    >
+-    """
+-
+-    run_order = 6
+-
+-    def transform(self, node, results):
+-        names_inserted = set()
+-        testlist = results["args"]
+-        args = testlist.children
+-        new_args = []
+-        iterator = enumerate(args)
+-        for idx, arg in iterator:
+-            if arg.type == token.NAME and arg.value in names_inserted:
+-                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
+-                    iterator.next()
+-                    continue
+-            else:
+-                new_args.append(arg)
+-                if arg.type == token.NAME:
+-                    names_inserted.add(arg.value)
+-        if new_args and new_args[-1].type == token.COMMA:
+-            del new_args[-1]
+-        if len(new_args) == 1:
+-            atom = testlist.parent
+-            new_args[0].set_prefix(atom.get_prefix())
+-            atom.replace(new_args[0])
+-        else:
+-            args[:] = new_args
+-            node.changed()
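
The docstring's example depends on fix_long running first (run_order 6
above makes this fixer run after it). A combined sketch, assuming
stock lib2to3; x is a placeholder:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_long",
                          "lib2to3.fixes.fix_isinstance"])
    # Expected (illustrative): isinstance(x, int) -- the duplicate left
    # by long -> int is dropped and the 1-tuple unwrapped.
    print(rt.refactor_string("isinstance(x, (int, long))\n", "<example>"))
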
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_itertools.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_itertools.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,41 +0,0 @@
+-""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
+-    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
+-
+-    imports from itertools are fixed in fix_itertools_imports.py
+-
+-    If itertools is imported as something else (e.g. import itertools as it;
+-    it.izip(spam, eggs)), method calls will not get fixed.
+-    """
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixItertools(fixer_base.BaseFix):
+-    it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
+-    PATTERN = """
+-              power< it='itertools'
+-                  trailer<
+-                     dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
+-              |
+-              power< func=%(it_funcs)s trailer< '(' [any] ')' > >
+-              """ %(locals())
+-
+-    # Needs to be run after fix_(map|zip|filter)
+-    run_order = 6
+-
+-    def transform(self, node, results):
+-        prefix = None
+-        func = results['func'][0]
+-        if 'it' in results and func.value != 'ifilterfalse':
+-            dot, it = (results['dot'], results['it'])
+-            # Remove the 'itertools'
+-            prefix = it.get_prefix()
+-            it.remove()
+-            # Replace the node which contains ('.', 'function') with the
+-            # function (to be consistent with the second part of the pattern)
+-            dot.remove()
+-            func.parent.replace(func)
+-
+-        prefix = prefix or func.get_prefix()
+-        func.replace(Name(func.value[1:], prefix=prefix))
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_itertools_imports.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_itertools_imports.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,52 +0,0 @@
+-""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
+-
+-# Local imports
+-from lib2to3 import fixer_base
+-from lib2to3.fixer_util import BlankLine, syms, token
+-
+-
+-class FixItertoolsImports(fixer_base.BaseFix):
+-    PATTERN = """
+-              import_from< 'from' 'itertools' 'import' imports=any >
+-              """ %(locals())
+-
+-    def transform(self, node, results):
+-        imports = results['imports']
+-        if imports.type == syms.import_as_name or not imports.children:
+-            children = [imports]
+-        else:
+-            children = imports.children
+-        for child in children[::2]:
+-            if child.type == token.NAME:
+-                member = child.value
+-                name_node = child
+-            else:
+-                assert child.type == syms.import_as_name
+-                name_node = child.children[0]
+-            member_name = name_node.value
+-            if member_name in ('imap', 'izip', 'ifilter'):
+-                child.value = None
+-                child.remove()
+-            elif member_name == 'ifilterfalse':
+-                node.changed()
+-                name_node.value = 'filterfalse'
+-
+-        # Make sure the import statement is still sane
+-        children = imports.children[:] or [imports]
+-        remove_comma = True
+-        for child in children:
+-            if remove_comma and child.type == token.COMMA:
+-                child.remove()
+-            else:
+-                remove_comma ^= True
+-
+-        if children[-1].type == token.COMMA:
+-            children[-1].remove()
+-
+-        # If there are no imports left, just get rid of the entire statement
+-        if not (imports.children or getattr(imports, 'value', None)) or \
+-                imports.parent is None:
+-            p = node.get_prefix()
+-            node = BlankLine()
+-            node.prefix = p
+-        return node
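
The two itertools fixers are complementary: fix_itertools rewrites the
call sites, this one rewrites the import line. A sketch running both,
assuming stock lib2to3; f and xs are placeholders:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_itertools",
                          "lib2to3.fixes.fix_itertools_imports"])
    src = "from itertools import imap, ifilterfalse\nimap(f, xs)\n"
    # Expected (illustrative):
    #     from itertools import filterfalse
    #     map(f, xs)
    print(rt.refactor_string(src, "<example>"))
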
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_long.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_long.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,22 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that turns 'long' into 'int' everywhere.
+-"""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, Number, is_probably_builtin
+-
+-
+-class FixLong(fixer_base.BaseFix):
+-
+-    PATTERN = "'long'"
+-
+-    static_int = Name("int")
+-
+-    def transform(self, node, results):
+-        if is_probably_builtin(node):
+-            new = self.static_int.clone()
+-            new.set_prefix(node.get_prefix())
+-            return new
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_map.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_map.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,82 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
+-exists a 'from future_builtins import map' statement in the top-level
+-namespace.
+-
+-As a special case, map(None, X) is changed into list(X).  (This is
+-necessary because the semantics are changed in this case -- the new
+-map(None, X) is equivalent to [(x,) for x in X].)
+-
+-We avoid the transformation (except for the special case mentioned
+-above) if the map() call is directly contained in iter(<>), list(<>),
+-tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+-
+-NOTE: This is still not correct if the original code was depending on
+-map(F, X, Y, ...) to go on until the longest argument is exhausted,
+-substituting None for missing values -- like zip(), it now stops as
+-soon as the shortest argument is exhausted.
+-"""
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, ListComp, in_special_context
+-from ..pygram import python_symbols as syms
+-
+-class FixMap(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-    map_none=power<
+-        'map'
+-        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
+-    >
+-    |
+-    map_lambda=power<
+-        'map'
+-        trailer<
+-            '('
+-            arglist<
+-                lambdef< 'lambda'
+-                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+-                >
+-                ','
+-                it=any
+-            >
+-            ')'
+-        >
+-    >
+-    |
+-    power<
+-        'map'
+-        args=trailer< '(' [any] ')' >
+-    >
+-    """
+-
+-    skip_on = 'future_builtins.map'
+-
+-    def transform(self, node, results):
+-        if self.should_skip(node):
+-            return
+-
+-        if node.parent.type == syms.simple_stmt:
+-            self.warning(node, "You should use a for loop here")
+-            new = node.clone()
+-            new.set_prefix("")
+-            new = Call(Name("list"), [new])
+-        elif "map_lambda" in results:
+-            new = ListComp(results.get("xp").clone(),
+-                           results.get("fp").clone(),
+-                           results.get("it").clone())
+-        else:
+-            if "map_none" in results:
+-                new = results["arg"].clone()
+-            else:
+-                if in_special_context(node):
+-                    return None
+-                new = node.clone()
+-            new.set_prefix("")
+-            new = Call(Name("list"), [new])
+-        new.set_prefix(node.get_prefix())
+-        return new
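
The map_lambda branch turns a lambda-based map() into a list
comprehension rather than wrapping the call in list(). A sketch,
assuming stock lib2to3; seq is a placeholder:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_map"])
    # Expected (illustrative): result = [x * 2 for x in seq]
    print(rt.refactor_string("result = map(lambda x: x * 2, seq)\n",
                             "<example>"))
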
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_metaclass.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_metaclass.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,227 +0,0 @@
+-"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
+-
+-   The various forms of classdef (inherits nothing, inherits once, inherits
+-   many) don't parse the same in the CST so we look at ALL classes for
+-   a __metaclass__ and if we find one normalize the inherits to all be
+-   an arglist.
+-
+-   For one-liner classes ('class X: pass') there is no indent/dedent so
+-   we normalize those into having a suite.
+-
+-   Moving the __metaclass__ into the classdef can also cause the class
+-   body to be empty so there is some special casing for that as well.
+-
+-   This fixer also tries very hard to keep original indenting and spacing
+-   in all those corner cases.
+-
+-"""
+-# Author: Jack Diederich
+-
+-# Local imports
+-from .. import fixer_base
+-from ..pygram import token
+-from ..fixer_util import Name, syms, Node, Leaf
+-
+-
+-def has_metaclass(parent):
+-    """ we have to check the cls_node without changing it.
+-        There are two possibilities:
+-          1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
+-          2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
+-    """
+-    for node in parent.children:
+-        if node.type == syms.suite:
+-            return has_metaclass(node)
+-        elif node.type == syms.simple_stmt and node.children:
+-            expr_node = node.children[0]
+-            if expr_node.type == syms.expr_stmt and expr_node.children:
+-                left_side = expr_node.children[0]
+-                if isinstance(left_side, Leaf) and \
+-                        left_side.value == '__metaclass__':
+-                    return True
+-    return False
+-
+-
+-def fixup_parse_tree(cls_node):
+-    """ one-line classes don't get a suite in the parse tree so we add
+-        one to normalize the tree
+-    """
+-    for node in cls_node.children:
+-        if node.type == syms.suite:
+-            # already in the preferred format, do nothing
+-            return
+-
+-    # !%@#! oneliners have no suite node, we have to fake one up
+-    for i, node in enumerate(cls_node.children):
+-        if node.type == token.COLON:
+-            break
+-    else:
+-        raise ValueError("No class suite and no ':'!")
+-
+-    # move everything into a suite node
+-    suite = Node(syms.suite, [])
+-    while cls_node.children[i+1:]:
+-        move_node = cls_node.children[i+1]
+-        suite.append_child(move_node.clone())
+-        move_node.remove()
+-    cls_node.append_child(suite)
+-    node = suite
+-
+-
+-def fixup_simple_stmt(parent, i, stmt_node):
+-    """ if there is a semi-colon all the parts count as part of the same
+-        simple_stmt.  We just want the __metaclass__ part so we move
+-        everything efter the semi-colon into its own simple_stmt node
+-    """
+-    for semi_ind, node in enumerate(stmt_node.children):
+-        if node.type == token.SEMI: # *sigh*
+-            break
+-    else:
+-        return
+-
+-    node.remove() # kill the semicolon
+-    new_expr = Node(syms.expr_stmt, [])
+-    new_stmt = Node(syms.simple_stmt, [new_expr])
+-    while stmt_node.children[semi_ind:]:
+-        move_node = stmt_node.children[semi_ind]
+-        new_expr.append_child(move_node.clone())
+-        move_node.remove()
+-    parent.insert_child(i, new_stmt)
+-    new_leaf1 = new_stmt.children[0].children[0]
+-    old_leaf1 = stmt_node.children[0].children[0]
+-    new_leaf1.set_prefix(old_leaf1.get_prefix())
+-
+-
+-def remove_trailing_newline(node):
+-    if node.children and node.children[-1].type == token.NEWLINE:
+-        node.children[-1].remove()
+-
+-
+-def find_metas(cls_node):
+-    # find the suite node (Mmm, sweet nodes)
+-    for node in cls_node.children:
+-        if node.type == syms.suite:
+-            break
+-    else:
+-        raise ValueError("No class suite!")
+-
+-    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
+-    for i, simple_node in list(enumerate(node.children)):
+-        if simple_node.type == syms.simple_stmt and simple_node.children:
+-            expr_node = simple_node.children[0]
+-            if expr_node.type == syms.expr_stmt and expr_node.children:
+-                # Check if the expr_node is a simple assignment.
+-                left_node = expr_node.children[0]
+-                if isinstance(left_node, Leaf) and \
+-                        left_node.value == '__metaclass__':
+-                    # We found an assignment to __metaclass__.
+-                    fixup_simple_stmt(node, i, simple_node)
+-                    remove_trailing_newline(simple_node)
+-                    yield (node, i, simple_node)
+-
+-
+-def fixup_indent(suite):
+-    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
+-        Otherwise we get in trouble when removing __metaclass__ at suite start
+-    """
+-    kids = suite.children[::-1]
+-    # find the first indent
+-    while kids:
+-        node = kids.pop()
+-        if node.type == token.INDENT:
+-            break
+-
+-    # find the first Leaf
+-    while kids:
+-        node = kids.pop()
+-        if isinstance(node, Leaf) and node.type != token.DEDENT:
+-            if node.prefix:
+-                node.set_prefix('')
+-            return
+-        else:
+-            kids.extend(node.children[::-1])
+-
+-
+-class FixMetaclass(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    classdef<any*>
+-    """
+-
+-    def transform(self, node, results):
+-        if not has_metaclass(node):
+-            return node
+-
+-        fixup_parse_tree(node)
+-
+-        # find metaclasses, keep the last one
+-        last_metaclass = None
+-        for suite, i, stmt in find_metas(node):
+-            last_metaclass = stmt
+-            stmt.remove()
+-
+-        text_type = node.children[0].type # always Leaf(nnn, 'class')
+-
+-        # figure out what kind of classdef we have
+-        if len(node.children) == 7:
+-            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
+-            #                 0        1       2    3        4    5    6
+-            if node.children[3].type == syms.arglist:
+-                arglist = node.children[3]
+-            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
+-            else:
+-                parent = node.children[3].clone()
+-                arglist = Node(syms.arglist, [parent])
+-                node.set_child(3, arglist)
+-        elif len(node.children) == 6:
+-            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
+-            #                 0        1       2     3    4    5
+-            arglist = Node(syms.arglist, [])
+-            node.insert_child(3, arglist)
+-        elif len(node.children) == 4:
+-            # Node(classdef, ['class', 'name', ':', suite])
+-            #                 0        1       2    3
+-            arglist = Node(syms.arglist, [])
+-            node.insert_child(2, Leaf(token.RPAR, ')'))
+-            node.insert_child(2, arglist)
+-            node.insert_child(2, Leaf(token.LPAR, '('))
+-        else:
+-            raise ValueError("Unexpected class definition")
+-
+-        # now stick the metaclass in the arglist
+-        meta_txt = last_metaclass.children[0].children[0]
+-        meta_txt.value = 'metaclass'
+-        orig_meta_prefix = meta_txt.get_prefix()
+-
+-        if arglist.children:
+-            arglist.append_child(Leaf(token.COMMA, ','))
+-            meta_txt.set_prefix(' ')
+-        else:
+-            meta_txt.set_prefix('')
+-
+-        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
+-        expr_stmt = last_metaclass.children[0]
+-        assert expr_stmt.type == syms.expr_stmt
+-        expr_stmt.children[1].set_prefix('')
+-        expr_stmt.children[2].set_prefix('')
+-
+-        arglist.append_child(last_metaclass)
+-
+-        fixup_indent(suite)
+-
+-        # check for empty suite
+-        if not suite.children:
+-            # one-liner that was just __metaclass__
+-            suite.remove()
+-            pass_leaf = Leaf(text_type, 'pass')
+-            pass_leaf.set_prefix(orig_meta_prefix)
+-            node.append_child(pass_leaf)
+-            node.append_child(Leaf(token.NEWLINE, '\n'))
+-
+-        elif len(suite.children) > 1 and \
+-                 (suite.children[-2].type == token.INDENT and
+-                  suite.children[-1].type == token.DEDENT):
+-            # there was only one line in the class body and it was __metaclass__
+-            pass_leaf = Leaf(text_type, 'pass')
+-            suite.insert_child(-1, pass_leaf)
+-            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
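
After the normalization above, all three classdef shapes end up with an
arglist carrying the metaclass keyword. A sketch of the common case,
assuming stock lib2to3; Base and Meta are placeholders:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_metaclass"])
    src = (
        "class Spam(Base):\n"
        "    __metaclass__ = Meta\n"
        "    attr = 1\n"
    )
    # Expected (illustrative):
    #     class Spam(Base, metaclass=Meta):
    #         attr = 1
    print(rt.refactor_string(src, "<example>"))
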
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_methodattrs.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_methodattrs.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,23 +0,0 @@
+-"""Fix bound method attributes (method.im_? -> method.__?__).
+-"""
+-# Author: Christian Heimes
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-MAP = {
+-    "im_func" : "__func__",
+-    "im_self" : "__self__",
+-    "im_class" : "__self__.__class__"
+-    }
+-
+-class FixMethodattrs(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
+-    """
+-
+-    def transform(self, node, results):
+-        attr = results["attr"][0]
+-        new = MAP[attr.value]
+-        attr.replace(Name(new, prefix=attr.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_ne.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_ne.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,22 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that turns <> into !=."""
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-
+-
+-class FixNe(fixer_base.BaseFix):
+-    # This is so simple that we don't need the pattern compiler.
+-
+-    def match(self, node):
+-        # Override
+-        return node.type == token.NOTEQUAL and node.value == "<>"
+-
+-    def transform(self, node, results):
+-        new = pytree.Leaf(token.NOTEQUAL, "!=")
+-        new.set_prefix(node.get_prefix())
+-        return new
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_next.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_next.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,103 +0,0 @@
+-"""Fixer for it.next() -> next(it), per PEP 3114."""
+-# Author: Collin Winter
+-
+-# Things that currently aren't covered:
+-#   - listcomp "next" names aren't warned
+-#   - "with" statement targets aren't checked
+-
+-# Local imports
+-from ..pgen2 import token
+-from ..pygram import python_symbols as syms
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, find_binding
+-
+-bind_warning = "Calls to builtin next() possibly shadowed by global binding"
+-
+-
+-class FixNext(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
+-    |
+-    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
+-    |
+-    classdef< 'class' any+ ':'
+-              suite< any*
+-                     funcdef< 'def'
+-                              name='next'
+-                              parameters< '(' NAME ')' > any+ >
+-                     any* > >
+-    |
+-    global=global_stmt< 'global' any* 'next' any* >
+-    """
+-
+-    order = "pre" # Pre-order tree traversal
+-
+-    def start_tree(self, tree, filename):
+-        super(FixNext, self).start_tree(tree, filename)
+-
+-        n = find_binding('next', tree)
+-        if n:
+-            self.warning(n, bind_warning)
+-            self.shadowed_next = True
+-        else:
+-            self.shadowed_next = False
+-
+-    def transform(self, node, results):
+-        assert results
+-
+-        base = results.get("base")
+-        attr = results.get("attr")
+-        name = results.get("name")
+-        mod = results.get("mod")
+-
+-        if base:
+-            if self.shadowed_next:
+-                attr.replace(Name("__next__", prefix=attr.get_prefix()))
+-            else:
+-                base = [n.clone() for n in base]
+-                base[0].set_prefix("")
+-                node.replace(Call(Name("next", prefix=node.get_prefix()), base))
+-        elif name:
+-            n = Name("__next__", prefix=name.get_prefix())
+-            name.replace(n)
+-        elif attr:
+-            # We don't do this transformation if we're assigning to "x.next".
+-            # Unfortunately, it doesn't seem possible to do this in PATTERN,
+-            #  so it's being done here.
+-            if is_assign_target(node):
+-                head = results["head"]
+-                if "".join([str(n) for n in head]).strip() == '__builtin__':
+-                    self.warning(node, bind_warning)
+-                return
+-            attr.replace(Name("__next__"))
+-        elif "global" in results:
+-            self.warning(node, bind_warning)
+-            self.shadowed_next = True
+-
+-
+-### The following functions help test if node is part of an assignment
+-###  target.
+-
+-def is_assign_target(node):
+-    assign = find_assign(node)
+-    if assign is None:
+-        return False
+-
+-    for child in assign.children:
+-        if child.type == token.EQUAL:
+-            return False
+-        elif is_subtree(child, node):
+-            return True
+-    return False
+-
+-def find_assign(node):
+-    if node.type == syms.expr_stmt:
+-        return node
+-    if node.type == syms.simple_stmt or node.parent is None:
+-        return None
+-    return find_assign(node.parent)
+-
+-def is_subtree(root, node):
+-    if root == node:
+-        return True
+-    return any([is_subtree(c, node) for c in root.children])
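
When next is not shadowed by a global binding, the method call becomes
the builtin per PEP 3114. A sketch, assuming stock lib2to3; it is a
placeholder iterator:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_next"])
    # Expected (illustrative): item = next(it)
    print(rt.refactor_string("item = it.next()\n", "<example>"))
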
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_nonzero.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_nonzero.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,20 +0,0 @@
+-"""Fixer for __nonzero__ -> __bool__ methods."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, syms
+-
+-class FixNonzero(fixer_base.BaseFix):
+-    PATTERN = """
+-    classdef< 'class' any+ ':'
+-              suite< any*
+-                     funcdef< 'def' name='__nonzero__'
+-                              parameters< '(' NAME ')' > any+ >
+-                     any* > >
+-    """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        new = Name("__bool__", prefix=name.get_prefix())
+-        name.replace(new)
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_numliterals.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_numliterals.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,27 +0,0 @@
+-"""Fixer that turns 1L into 1, 0755 into 0o755.
+-"""
+-# Copyright 2007 Georg Brandl.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Number
+-
+-
+-class FixNumliterals(fixer_base.BaseFix):
+-    # This is so simple that we don't need the pattern compiler.
+-
+-    def match(self, node):
+-        # Override
+-        return (node.type == token.NUMBER and
+-                (node.value.startswith("0") or node.value[-1] in "Ll"))
+-
+-    def transform(self, node, results):
+-        val = node.value
+-        if val[-1] in 'Ll':
+-            val = val[:-1]
+-        elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
+-            val = "0o" + val[1:]
+-
+-        return Number(val, prefix=node.get_prefix())
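
Both literal forms from the docstring in one sketch (stock lib2to3
assumed; its tokenizer still accepts the Python 2 spellings):

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_numliterals"])
    # Expected (illustrative): x = 1 and mask = 0o755
    print(rt.refactor_string("x = 1L\nmask = 0755\n", "<example>"))
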
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_paren.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_paren.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,42 +0,0 @@
+-"""Fixer that addes parentheses where they are required
+-
+-This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
+-
+-# By Taek Joo Kim and Benjamin Peterson
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import LParen, RParen
+-
+-# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
+-class FixParen(fixer_base.BaseFix):
+-    PATTERN = """
+-        atom< ('[' | '(')
+-            (listmaker< any
+-                comp_for<
+-                    'for' NAME 'in'
+-                    target=testlist_safe< any (',' any)+ [',']
+-                     >
+-                    [any]
+-                >
+-            >
+-            |
+-            testlist_gexp< any
+-                comp_for<
+-                    'for' NAME 'in'
+-                    target=testlist_safe< any (',' any)+ [',']
+-                     >
+-                    [any]
+-                >
+-            >)
+-        (']' | ')') >
+-    """
+-
+-    def transform(self, node, results):
+-        target = results["target"]
+-
+-        lparen = LParen()
+-        lparen.set_prefix(target.get_prefix())
+-        target.set_prefix("") # Make it hug the parentheses
+-        target.insert_child(0, lparen)
+-        target.append_child(RParen())
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_print.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_print.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,90 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for print.
+-
+-Change:
+-    'print'          into 'print()'
+-    'print ...'      into 'print(...)'
+-    'print ... ,'    into 'print(..., end=" ")'
+-    'print >>x, ...' into 'print(..., file=x)'
+-
+-No changes are applied if print_function is imported from __future__
+-
+-"""
+-
+-# Local imports
+-from .. import patcomp
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, Comma, String, is_tuple
+-
+-
+-parend_expr = patcomp.compile_pattern(
+-              """atom< '(' [atom|STRING|NAME] ')' >"""
+-              )
+-
+-
+-class FixPrint(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-              simple_stmt< any* bare='print' any* > | print_stmt
+-              """
+-
+-    skip_on = '__future__.print_function'
+-
+-    def transform(self, node, results):
+-        assert results
+-
+-        if self.should_skip(node):
+-            return
+-
+-        bare_print = results.get("bare")
+-
+-        if bare_print:
+-            # Special-case print all by itself
+-            bare_print.replace(Call(Name("print"), [],
+-                               prefix=bare_print.get_prefix()))
+-            return
+-        assert node.children[0] == Name("print")
+-        args = node.children[1:]
+-        if len(args) == 1 and parend_expr.match(args[0]):
+-            # We don't want to keep sticking parens around an
+-            # already-parenthesised expression.
+-            return
+-
+-        sep = end = file = None
+-        if args and args[-1] == Comma():
+-            args = args[:-1]
+-            end = " "
+-        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
+-            assert len(args) >= 2
+-            file = args[1].clone()
+-            args = args[3:] # Strip a possible comma after the file expression
+-        # Now synthesize a print(args, sep=..., end=..., file=...) node.
+-        l_args = [arg.clone() for arg in args]
+-        if l_args:
+-            l_args[0].set_prefix("")
+-        if sep is not None or end is not None or file is not None:
+-            if sep is not None:
+-                self.add_kwarg(l_args, "sep", String(repr(sep)))
+-            if end is not None:
+-                self.add_kwarg(l_args, "end", String(repr(end)))
+-            if file is not None:
+-                self.add_kwarg(l_args, "file", file)
+-        n_stmt = Call(Name("print"), l_args)
+-        n_stmt.set_prefix(node.get_prefix())
+-        return n_stmt
+-
+-    def add_kwarg(self, l_nodes, s_kwd, n_expr):
+-        # XXX All this prefix-setting may lose comments (though rarely)
+-        n_expr.set_prefix("")
+-        n_argument = pytree.Node(self.syms.argument,
+-                                 (Name(s_kwd),
+-                                  pytree.Leaf(token.EQUAL, "="),
+-                                  n_expr))
+-        if l_nodes:
+-            l_nodes.append(Comma())
+-            n_argument.set_prefix(" ")
+-        l_nodes.append(n_argument)
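
The '>>' redirection form exercises the file= branch. A sketch,
assuming stock lib2to3 (whose grammar still parses the print
statement):

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_print"])
    # Expected (illustrative): print('oops', file=sys.stderr)
    print(rt.refactor_string("print >>sys.stderr, 'oops'\n", "<example>"))
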
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_raise.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_raise.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,82 +0,0 @@
+-"""Fixer for 'raise E, V, T'
+-
+-raise         -> raise
+-raise E       -> raise E
+-raise E, V    -> raise E(V)
+-raise E, V, T -> raise E(V).with_traceback(T)
+-
+-raise (((E, E'), E''), E'''), V -> raise E(V)
+-raise "foo", V, T               -> warns about string exceptions
+-
+-
+-CAVEATS:
+-1) "raise E, V" will be incorrectly translated if V is an exception
+-   instance. The correct Python 3 idiom is
+-
+-        raise E from V
+-
+-   but since we can't detect instance-hood by syntax alone and since
+-   any client code would have to be changed as well, we don't automate
+-   this.
+-"""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
+-
+-class FixRaise(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-
+-        exc = results["exc"].clone()
+-        if exc.type is token.STRING:
+-            self.cannot_convert(node, "Python 3 does not support string exceptions")
+-            return
+-
+-        # Python 2 supports
+-        #  raise ((((E1, E2), E3), E4), E5), V
+-        # as a synonym for
+-        #  raise E1, V
+-        # Since Python 3 will not support this, we recurse down any tuple
+-        # literals, always taking the first element.
+-        if is_tuple(exc):
+-            while is_tuple(exc):
+-                # exc.children[1:-1] is the unparenthesized tuple
+-                # exc.children[1].children[0] is the first element of the tuple
+-                exc = exc.children[1].children[0].clone()
+-            exc.set_prefix(" ")
+-
+-        if "val" not in results:
+-            # One-argument raise
+-            new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
+-            new.set_prefix(node.get_prefix())
+-            return new
+-
+-        val = results["val"].clone()
+-        if is_tuple(val):
+-            args = [c.clone() for c in val.children[1:-1]]
+-        else:
+-            val.set_prefix("")
+-            args = [val]
+-
+-        if "tb" in results:
+-            tb = results["tb"].clone()
+-            tb.set_prefix("")
+-
+-            e = Call(exc, args)
+-            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+-            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
+-            new.set_prefix(node.get_prefix())
+-            return new
+-        else:
+-            return pytree.Node(syms.raise_stmt,
+-                               [Name("raise"), Call(exc, args)],
+-                               prefix=node.get_prefix())
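
The two-argument form is the interesting case. A sketch, assuming
stock lib2to3 (whose grammar still accepts the comma form of raise):

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_raise"])
    # Expected (illustrative): raise ValueError('bad')
    print(rt.refactor_string("raise ValueError, 'bad'\n", "<example>"))
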
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_raw_input.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_raw_input.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,16 +0,0 @@
+-"""Fixer that changes raw_input(...) into input(...)."""
+-# Author: Andre Roberge
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixRawInput(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power< name='raw_input' trailer< '(' [any] ')' > any* >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("input", prefix=name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_reduce.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_reduce.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,33 +0,0 @@
+-# Copyright 2008 Armin Ronacher.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for reduce().
+-
+-Makes sure reduce() is imported from the functools module if reduce is
+-used in that module.
+-"""
+-
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Name, Attr, touch_import
+-
+-
+-
+-class FixReduce(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'reduce'
+-        trailer< '('
+-            arglist< (
+-                (not(argument<any '=' any>) any ','
+-                 not(argument<any '=' any>) any) |
+-                (not(argument<any '=' any>) any ','
+-                 not(argument<any '=' any>) any ','
+-                 not(argument<any '=' any>) any)
+-            ) >
+-        ')' >
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        touch_import('functools', 'reduce', node)
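
transform() returns nothing here because the call site is already
valid; the whole fix is the added import. A sketch, assuming stock
lib2to3; add and nums are placeholders:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_reduce"])
    # Expected (illustrative):
    #     from functools import reduce
    #     total = reduce(add, nums)
    print(rt.refactor_string("total = reduce(add, nums)\n", "<example>"))
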
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_renames.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_renames.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,69 +0,0 @@
+-"""Fix incompatible renames
+-
+-Fixes:
+-  * sys.maxint -> sys.maxsize
+-"""
+-# Author: Christian Heimes
+-# based on Collin Winter's fix_import
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, attr_chain
+-
+-MAPPING = {"sys":  {"maxint" : "maxsize"},
+-          }
+-LOOKUP = {}
+-
+-def alternates(members):
+-    return "(" + "|".join(map(repr, members)) + ")"
+-
+-
+-def build_pattern():
+-    #bare = set()
+-    for module, replace in MAPPING.items():
+-        for old_attr, new_attr in replace.items():
+-            LOOKUP[(module, old_attr)] = new_attr
+-            #bare.add(module)
+-            #bare.add(old_attr)
+-            #yield """
+-            #      import_name< 'import' (module=%r
+-            #          | dotted_as_names< any* module=%r any* >) >
+-            #      """ % (module, module)
+-            yield """
+-                  import_from< 'from' module_name=%r 'import'
+-                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
+-                  """ % (module, old_attr, old_attr)
+-            yield """
+-                  power< module_name=%r trailer< '.' attr_name=%r > any* >
+-                  """ % (module, old_attr)
+-    #yield """bare_name=%s""" % alternates(bare)
+-
+-
+-class FixRenames(fixer_base.BaseFix):
+-    PATTERN = "|".join(build_pattern())
+-
+-    order = "pre" # Pre-order tree traversal
+-
+-    # Don't match the node if it's within another match
+-    def match(self, node):
+-        match = super(FixRenames, self).match
+-        results = match(node)
+-        if results:
+-            if any([match(obj) for obj in attr_chain(node, "parent")]):
+-                return False
+-            return results
+-        return False
+-
+-    #def start_tree(self, tree, filename):
+-    #    super(FixRenames, self).start_tree(tree, filename)
+-    #    self.replace = {}
+-
+-    def transform(self, node, results):
+-        mod_name = results.get("module_name")
+-        attr_name = results.get("attr_name")
+-        #bare_name = results.get("bare_name")
+-        #import_mod = results.get("module")
+-
+-        if mod_name and attr_name:
+-            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
+-            attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_repr.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_repr.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,22 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Call, Name, parenthesize
+-
+-
+-class FixRepr(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              atom < '`' expr=any '`' >
+-              """
+-
+-    def transform(self, node, results):
+-        expr = results["expr"].clone()
+-
+-        if expr.type == self.syms.testlist1:
+-            expr = parenthesize(expr)
+-        return Call(Name("repr"), [expr], prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_set_literal.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_set_literal.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,52 +0,0 @@
+-"""
+-Optional fixer to transform set() calls to set literals.
+-"""
+-
+-# Author: Benjamin Peterson
+-
+-from lib2to3 import fixer_base, pytree
+-from lib2to3.fixer_util import token, syms
+-
+-
+-
+-class FixSetLiteral(fixer_base.BaseFix):
+-
+-    explicit = True
+-
+-    PATTERN = """power< 'set' trailer< '('
+-                     (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
+-                                |
+-                                single=any) ']' >
+-                     |
+-                     atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
+-                     )
+-                     ')' > >
+-              """
+-
+-    def transform(self, node, results):
+-        single = results.get("single")
+-        if single:
+-            # Make a fake listmaker
+-            fake = pytree.Node(syms.listmaker, [single.clone()])
+-            single.replace(fake)
+-            items = fake
+-        else:
+-            items = results["items"]
+-
+-        # Build the contents of the literal
+-        literal = [pytree.Leaf(token.LBRACE, "{")]
+-        literal.extend(n.clone() for n in items.children)
+-        literal.append(pytree.Leaf(token.RBRACE, "}"))
+-        # Set the prefix of the right brace to that of the ')' or ']'
+-        literal[-1].set_prefix(items.next_sibling.get_prefix())
+-        maker = pytree.Node(syms.dictsetmaker, literal)
+-        maker.set_prefix(node.get_prefix())
+-
+-        # If the original was a one-tuple, we need to remove the extra comma.
+-        if len(maker.children) == 4:
+-            n = maker.children[2]
+-            n.remove()
+-            maker.children[-1].set_prefix(n.get_prefix())
+-
+-        # Finally, replace the set call with our shiny new literal.
+-        return maker
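
Being an explicit fixer, this one only runs when asked for. A sketch,
assuming stock lib2to3:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_set_literal"], explicit=True)
    # Expected (illustrative): s = {1, 2, 3}
    print(rt.refactor_string("s = set([1, 2, 3])\n", "<example>"))
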
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_standarderror.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_standarderror.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,18 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for StandardError -> Exception."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixStandarderror(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              'StandardError'
+-              """
+-
+-    def transform(self, node, results):
+-        return Name("Exception", prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_sys_exc.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_sys_exc.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,29 +0,0 @@
+-"""Fixer for sys.exc_{type, value, traceback}
+-
+-sys.exc_type -> sys.exc_info()[0]
+-sys.exc_value -> sys.exc_info()[1]
+-sys.exc_traceback -> sys.exc_info()[2]
+-"""
+-
+-# By Jeff Balogh and Benjamin Peterson
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
+-
+-class FixSysExc(fixer_base.BaseFix):
+-    # This order matches the ordering of sys.exc_info().
+-    exc_info = ["exc_type", "exc_value", "exc_traceback"]
+-    PATTERN = """
+-              power< 'sys' trailer< dot='.' attribute=(%s) > >
+-              """ % '|'.join("'%s'" % e for e in exc_info)
+-
+-    def transform(self, node, results):
+-        sys_attr = results["attribute"][0]
+-        index = Number(self.exc_info.index(sys_attr.value))
+-
+-        call = Call(Name("exc_info"), prefix=sys_attr.get_prefix())
+-        attr = Attr(Name("sys"), call)
+-        attr[1].children[0].set_prefix(results["dot"].get_prefix())
+-        attr.append(Subscript(index))
+-        return Node(syms.power, attr, prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_throw.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_throw.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,56 +0,0 @@
+-"""Fixer for generator.throw(E, V, T).
+-
+-g.throw(E)       -> g.throw(E)
+-g.throw(E, V)    -> g.throw(E(V))
+-g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
+-
+-g.throw("foo"[, V[, T]]) will warn about string exceptions."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
+-
+-class FixThrow(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< any trailer< '.' 'throw' >
+-           trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
+-    >
+-    |
+-    power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-
+-        exc = results["exc"].clone()
+-        if exc.type is token.STRING:
+-            self.cannot_convert(node, "Python 3 does not support string exceptions")
+-            return
+-
+-        # Leave "g.throw(E)" alone
+-        val = results.get("val")
+-        if val is None:
+-            return
+-
+-        val = val.clone()
+-        if is_tuple(val):
+-            args = [c.clone() for c in val.children[1:-1]]
+-        else:
+-            val.set_prefix("")
+-            args = [val]
+-
+-        throw_args = results["args"]
+-
+-        if "tb" in results:
+-            tb = results["tb"].clone()
+-            tb.set_prefix("")
+-
+-            e = Call(exc, args)
+-            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+-            throw_args.replace(pytree.Node(syms.power, with_tb))
+-        else:
+-            throw_args.replace(Call(exc, args))
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_tuple_params.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_tuple_params.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,169 +0,0 @@
+-"""Fixer for function definitions with tuple parameters.
+-
+-def func(((a, b), c), d):
+-    ...
+-
+-    ->
+-
+-def func(x, d):
+-    ((a, b), c) = x
+-    ...
+-
+-It will also support lambdas:
+-
+-    lambda (x, y): x + y -> lambda t: t[0] + t[1]
+-
+-    # The parens are a syntax error in Python 3
+-    lambda (x): x + y -> lambda x: x + y
+-"""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
+-
+-def is_docstring(stmt):
+-    return isinstance(stmt, pytree.Node) and \
+-           stmt.children[0].type == token.STRING
+-
+-class FixTupleParams(fixer_base.BaseFix):
+-    PATTERN = """
+-              funcdef< 'def' any parameters< '(' args=any ')' >
+-                       ['->' any] ':' suite=any+ >
+-              |
+-              lambda=
+-              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
+-                       ':' body=any
+-              >
+-              """
+-
+-    def transform(self, node, results):
+-        if "lambda" in results:
+-            return self.transform_lambda(node, results)
+-
+-        new_lines = []
+-        suite = results["suite"]
+-        args = results["args"]
+-        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
+-        # TODO(cwinter): suite-cleanup
+-        if suite[0].children[1].type == token.INDENT:
+-            start = 2
+-            indent = suite[0].children[1].value
+-            end = Newline()
+-        else:
+-            start = 0
+-            indent = "; "
+-            end = pytree.Leaf(token.INDENT, "")
+-
+-        # We need access to self for new_name(), and making this a method
+-        #  doesn't feel right. Closing over self and new_lines makes the
+-        #  code below cleaner.
+-        def handle_tuple(tuple_arg, add_prefix=False):
+-            n = Name(self.new_name())
+-            arg = tuple_arg.clone()
+-            arg.set_prefix("")
+-            stmt = Assign(arg, n.clone())
+-            if add_prefix:
+-                n.set_prefix(" ")
+-            tuple_arg.replace(n)
+-            new_lines.append(pytree.Node(syms.simple_stmt,
+-                                         [stmt, end.clone()]))
+-
+-        if args.type == syms.tfpdef:
+-            handle_tuple(args)
+-        elif args.type == syms.typedargslist:
+-            for i, arg in enumerate(args.children):
+-                if arg.type == syms.tfpdef:
+-                    # Without add_prefix, the emitted code is correct,
+-                    #  just ugly.
+-                    handle_tuple(arg, add_prefix=(i > 0))
+-
+-        if not new_lines:
+-            return node
+-
+-        # This isn't strictly necessary, but it plays nicely with other fixers.
+-        # TODO(cwinter) get rid of this when children becomes a smart list
+-        for line in new_lines:
+-            line.parent = suite[0]
+-
+-        # TODO(cwinter) suite-cleanup
+-        after = start
+-        if start == 0:
+-            new_lines[0].set_prefix(" ")
+-        elif is_docstring(suite[0].children[start]):
+-            new_lines[0].set_prefix(indent)
+-            after = start + 1
+-
+-        suite[0].children[after:after] = new_lines
+-        for i in range(after+1, after+len(new_lines)+1):
+-            suite[0].children[i].set_prefix(indent)
+-        suite[0].changed()
+-
+-    def transform_lambda(self, node, results):
+-        args = results["args"]
+-        body = results["body"]
+-        inner = simplify_args(results["inner"])
+-
+-        # Replace lambda ((((x)))): x  with lambda x: x
+-        if inner.type == token.NAME:
+-            inner = inner.clone()
+-            inner.set_prefix(" ")
+-            args.replace(inner)
+-            return
+-
+-        params = find_params(args)
+-        to_index = map_to_index(params)
+-        tup_name = self.new_name(tuple_name(params))
+-
+-        new_param = Name(tup_name, prefix=" ")
+-        args.replace(new_param.clone())
+-        for n in body.post_order():
+-            if n.type == token.NAME and n.value in to_index:
+-                subscripts = [c.clone() for c in to_index[n.value]]
+-                new = pytree.Node(syms.power,
+-                                  [new_param.clone()] + subscripts)
+-                new.set_prefix(n.get_prefix())
+-                n.replace(new)
+-
+-
+-### Helper functions for transform_lambda()
+-
+-def simplify_args(node):
+-    if node.type in (syms.vfplist, token.NAME):
+-        return node
+-    elif node.type == syms.vfpdef:
+-        # These look like vfpdef< '(' x ')' > where x is NAME
+-        # or another vfpdef instance (leading to recursion).
+-        while node.type == syms.vfpdef:
+-            node = node.children[1]
+-        return node
+-    raise RuntimeError("Received unexpected node %s" % node)
+-
+-def find_params(node):
+-    if node.type == syms.vfpdef:
+-        return find_params(node.children[1])
+-    elif node.type == token.NAME:
+-        return node.value
+-    return [find_params(c) for c in node.children if c.type != token.COMMA]
+-
+-def map_to_index(param_list, prefix=[], d=None):
+-    if d is None:
+-        d = {}
+-    for i, obj in enumerate(param_list):
+-        trailer = [Subscript(Number(i))]
+-        if isinstance(obj, list):
+-            map_to_index(obj, trailer, d=d)
+-        else:
+-            d[obj] = prefix + trailer
+-    return d
+-
+-def tuple_name(param_list):
+-    l = []
+-    for obj in param_list:
+-        if isinstance(obj, list):
+-            l.append(tuple_name(obj))
+-        else:
+-            l.append(obj)
+-    return "_".join(l)
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_types.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_types.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,62 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for removing uses of the types module.
+-
+-These work only for the known names in the types module.  The matched forms
+-may include the 'types.' prefix or not; i.e., it is assumed the module is imported either as:
+-
+-    import types
+-    from types import ... # either * or specific types
+-
+-The import statements are not modified.
+-
+-There should be another fixer that handles at least the following constants:
+-
+-   type([]) -> list
+-   type(()) -> tuple
+-   type('') -> str
+-
+-"""
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-_TYPE_MAPPING = {
+-        'BooleanType' : 'bool',
+-        'BufferType' : 'memoryview',
+-        'ClassType' : 'type',
+-        'ComplexType' : 'complex',
+-        'DictType': 'dict',
+-        'DictionaryType' : 'dict',
+-        'EllipsisType' : 'type(Ellipsis)',
+-        #'FileType' : 'io.IOBase',
+-        'FloatType': 'float',
+-        'IntType': 'int',
+-        'ListType': 'list',
+-        'LongType': 'int',
+-        'ObjectType' : 'object',
+-        'NoneType': 'type(None)',
+-        'NotImplementedType' : 'type(NotImplemented)',
+-        'SliceType' : 'slice',
+-        'StringType': 'bytes', # XXX ?
+-        'StringTypes' : 'str', # XXX ?
+-        'TupleType': 'tuple',
+-        'TypeType' : 'type',
+-        'UnicodeType': 'str',
+-        'XRangeType' : 'range',
+-    }
+-
+-_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
+-
+-class FixTypes(fixer_base.BaseFix):
+-
+-    PATTERN = '|'.join(_pats)
+-
+-    def transform(self, node, results):
+-        new_value = _TYPE_MAPPING.get(results["name"].value)
+-        if new_value:
+-            return Name(new_value, prefix=node.get_prefix())
+-        return None
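
(A hypothetical before/after pair showing the mapping above; note that
the import statement itself is left untouched, as the docstring says.)

    # before (hypothetical; handle_int is an illustrative name)
    import types
    if isinstance(n, types.IntType):
        handle_int(n)
    # after running fix_types
    import types
    if isinstance(n, int):
        handle_int(n)
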
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_unicode.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_unicode.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,28 +0,0 @@
+-"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
+-
+-"""
+-
+-import re
+-from ..pgen2 import token
+-from .. import fixer_base
+-
+-class FixUnicode(fixer_base.BaseFix):
+-
+-    PATTERN = "STRING | NAME<'unicode' | 'unichr'>"
+-
+-    def transform(self, node, results):
+-        if node.type == token.NAME:
+-            if node.value == "unicode":
+-                new = node.clone()
+-                new.value = "str"
+-                return new
+-            if node.value == "unichr":
+-                new = node.clone()
+-                new.value = "chr"
+-                return new
+-            # XXX Warn when __unicode__ found?
+-        elif node.type == token.STRING:
+-            if re.match(r"[uU][rR]?[\'\"]", node.value):
+-                new = node.clone()
+-                new.value = new.value[1:]
+-                return new
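
(A hypothetical before/after pair covering both the name and the
string-literal cases handled above.)

    # before (hypothetical)
    label = unicode(raw)
    text = u"caf\xe9"
    # after running fix_unicode
    label = str(raw)
    text = "caf\xe9"
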
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_urllib.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_urllib.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,180 +0,0 @@
+-"""Fix changes imports of urllib which are now incompatible.
+-   This is rather similar to fix_imports, but because of the more
+-   complex nature of the fixing for urllib, it has its own fixer.
+-"""
+-# Author: Nick Edds
+-
+-# Local imports
+-from .fix_imports import alternates, FixImports
+-from .. import fixer_base
+-from ..fixer_util import Name, Comma, FromImport, Newline, attr_chain
+-
+-MAPPING = {'urllib':  [
+-                ('urllib.request',
+-                    ['URLOpener', 'FancyURLOpener', 'urlretrieve',
+-                     '_urlopener', 'urlcleanup']),
+-                ('urllib.parse',
+-                    ['quote', 'quote_plus', 'unquote', 'unquote_plus',
+-                     'urlencode', 'pathname2url', 'url2pathname', 'splitattr',
+-                     'splithost', 'splitnport', 'splitpasswd', 'splitport',
+-                     'splitquery', 'splittag', 'splittype', 'splituser',
+-                     'splitvalue', ]),
+-                ('urllib.error',
+-                    ['ContentTooShortError'])],
+-           'urllib2' : [
+-                ('urllib.request',
+-                    ['urlopen', 'install_opener', 'build_opener',
+-                     'Request', 'OpenerDirector', 'BaseHandler',
+-                     'HTTPDefaultErrorHandler', 'HTTPRedirectHandler',
+-                     'HTTPCookieProcessor', 'ProxyHandler',
+-                     'HTTPPasswordMgr',
+-                     'HTTPPasswordMgrWithDefaultRealm',
+-                     'AbstractBasicAuthHandler',
+-                     'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler',
+-                     'AbstractDigestAuthHandler',
+-                     'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler',
+-                     'HTTPHandler', 'HTTPSHandler', 'FileHandler',
+-                     'FTPHandler', 'CacheFTPHandler',
+-                     'UnknownHandler']),
+-                ('urllib.error',
+-                    ['URLError', 'HTTPError']),
+-           ]
+-}
+-
+-# Duplicate the url parsing functions for urllib2.
+-MAPPING["urllib2"].append(MAPPING["urllib"][1])
+-
+-
+-def build_pattern():
+-    bare = set()
+-    for old_module, changes in MAPPING.items():
+-        for change in changes:
+-            new_module, members = change
+-            members = alternates(members)
+-            yield """import_name< 'import' (module=%r
+-                                  | dotted_as_names< any* module=%r any* >) >
+-                  """ % (old_module, old_module)
+-            yield """import_from< 'from' mod_member=%r 'import'
+-                       ( member=%s | import_as_name< member=%s 'as' any > |
+-                         import_as_names< members=any*  >) >
+-                  """ % (old_module, members, members)
+-            yield """import_from< 'from' module_star=%r 'import' star='*' >
+-                  """ % old_module
+-            yield """import_name< 'import'
+-                                  dotted_as_name< module_as=%r 'as' any > >
+-                  """ % old_module
+-            yield """power< module_dot=%r trailer< '.' member=%s > any* >
+-                  """ % (old_module, members)
+-
+-
+-class FixUrllib(FixImports):
+-
+-    def build_pattern(self):
+-        return "|".join(build_pattern())
+-
+-    def transform_import(self, node, results):
+-        """Transform for the basic import case. Replaces the old
+-           import name with a comma separated list of its
+-           replacements.
+-        """
+-        import_mod = results.get('module')
+-        pref = import_mod.get_prefix()
+-
+-        names = []
+-
+-        # create a Node list of the replacement modules
+-        for name in MAPPING[import_mod.value][:-1]:
+-            names.extend([Name(name[0], prefix=pref), Comma()])
+-        names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
+-        import_mod.replace(names)
+-
+-    def transform_member(self, node, results):
+-        """Transform for imports of specific module elements. Replaces
+-           the module to be imported from with the appropriate new
+-           module.
+-        """
+-        mod_member = results.get('mod_member')
+-        pref = mod_member.get_prefix()
+-        member = results.get('member')
+-
+-        # Simple case with only a single member being imported
+-        if member:
+-            # this may be a list of length one, or just a node
+-            if isinstance(member, list):
+-                member = member[0]
+-            new_name = None
+-            for change in MAPPING[mod_member.value]:
+-                if member.value in change[1]:
+-                    new_name = change[0]
+-                    break
+-            if new_name:
+-                mod_member.replace(Name(new_name, prefix=pref))
+-            else:
+-                self.cannot_convert(node,
+-                                    'This is an invalid module element')
+-
+-        # Multiple members being imported
+-        else:
+-            # a dictionary for replacements, order matters
+-            modules = []
+-            mod_dict = {}
+-            members = results.get('members')
+-            for member in members:
+-                member = member.value
+-                # we only care about the actual members
+-                if member != ',':
+-                    for change in MAPPING[mod_member.value]:
+-                        if member in change[1]:
+-                            if change[0] in mod_dict:
+-                                mod_dict[change[0]].append(member)
+-                            else:
+-                                mod_dict[change[0]] = [member]
+-                                modules.append(change[0])
+-
+-            new_nodes = []
+-            for module in modules:
+-                elts = mod_dict[module]
+-                names = []
+-                for elt in elts[:-1]:
+-                    names.extend([Name(elt, prefix=pref), Comma()])
+-                names.append(Name(elts[-1], prefix=pref))
+-                new_nodes.append(FromImport(module, names))
+-            if new_nodes:
+-                nodes = []
+-                for new_node in new_nodes[:-1]:
+-                    nodes.extend([new_node, Newline()])
+-                nodes.append(new_nodes[-1])
+-                node.replace(nodes)
+-            else:
+-                self.cannot_convert(node, 'All module elements are invalid')
+-
+-    def transform_dot(self, node, results):
+-        """Transform for calls to module members in code."""
+-        module_dot = results.get('module_dot')
+-        member = results.get('member')
+-        # this may be a list of length one, or just a node
+-        if isinstance(member, list):
+-            member = member[0]
+-        new_name = None
+-        for change in MAPPING[module_dot.value]:
+-            if member.value in change[1]:
+-                new_name = change[0]
+-                break
+-        if new_name:
+-            module_dot.replace(Name(new_name,
+-                                    prefix=module_dot.get_prefix()))
+-        else:
+-            self.cannot_convert(node, 'This is an invalid module element')
+-
+-    def transform(self, node, results):
+-        if results.get('module'):
+-            self.transform_import(node, results)
+-        elif results.get('mod_member'):
+-            self.transform_member(node, results)
+-        elif results.get('module_dot'):
+-            self.transform_dot(node, results)
+-        # Renaming and star imports are not supported for these modules.
+-        elif results.get('module_star'):
+-            self.cannot_convert(node, 'Cannot handle star imports.')
+-        elif results.get('module_as'):
+-            self.cannot_convert(node, 'This module is now multiple modules')
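
(A hypothetical member import and the rewriting this fixer performs:
each imported name is regrouped under its new urllib submodule.)

    # before (hypothetical)
    from urllib import urlencode, urlretrieve
    # after running fix_urllib
    from urllib.parse import urlencode
    from urllib.request import urlretrieve
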
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_ws_comma.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_ws_comma.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,39 +0,0 @@
+-"""Fixer that changes 'a ,b' into 'a, b'.
+-
+-This also changes '{a :b}' into '{a: b}', but does not touch other
+-uses of colons.  It does not touch other uses of whitespace.
+-
+-"""
+-
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-
+-class FixWsComma(fixer_base.BaseFix):
+-
+-    explicit = True # The user must ask for this fixer
+-
+-    PATTERN = """
+-    any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
+-    """
+-
+-    COMMA = pytree.Leaf(token.COMMA, ",")
+-    COLON = pytree.Leaf(token.COLON, ":")
+-    SEPS = (COMMA, COLON)
+-
+-    def transform(self, node, results):
+-        new = node.clone()
+-        comma = False
+-        for child in new.children:
+-            if child in self.SEPS:
+-                prefix = child.get_prefix()
+-                if prefix.isspace() and "\n" not in prefix:
+-                    child.set_prefix("")
+-                comma = True
+-            else:
+-                if comma:
+-                    prefix = child.get_prefix()
+-                    if not prefix:
+-                        child.set_prefix(" ")
+-                comma = False
+-        return new
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_xrange.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_xrange.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,64 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes xrange(...) into range(...)."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, consuming_calls
+-from .. import patcomp
+-
+-
+-class FixXrange(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power<
+-                 (name='range'|name='xrange') trailer< '(' args=any ')' >
+-              rest=any* >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        if name.value == "xrange":
+-            return self.transform_xrange(node, results)
+-        elif name.value == "range":
+-            return self.transform_range(node, results)
+-        else:
+-            raise ValueError(repr(name))
+-
+-    def transform_xrange(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("range", prefix=name.get_prefix()))
+-
+-    def transform_range(self, node, results):
+-        if not self.in_special_context(node):
+-            range_call = Call(Name("range"), [results["args"].clone()])
+-            # Encase the range call in list().
+-            list_call = Call(Name("list"), [range_call],
+-                             prefix=node.get_prefix())
+-            # Put things that were after the range() call after the list call.
+-            for n in results["rest"]:
+-                list_call.append_child(n)
+-            return list_call
+-        return node
+-
+-    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+-    p1 = patcomp.compile_pattern(P1)
+-
+-    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+-            | comp_for< 'for' any 'in' node=any any* >
+-            | comparison< any 'in' node=any any*>
+-         """
+-    p2 = patcomp.compile_pattern(P2)
+-
+-    def in_special_context(self, node):
+-        if node.parent is None:
+-            return False
+-        results = {}
+-        if (node.parent.parent is not None and
+-               self.p1.match(node.parent.parent, results) and
+-               results["node"] is node):
+-            # list(d.keys()) -> list(d.keys()), etc.
+-            return results["func"].value in consuming_calls
+-        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+-        return self.p2.match(node.parent, results) and results["node"] is node
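
(A hypothetical before/after pair; a range() call consumed by a
special context such as a comprehension or for-loop is not wrapped
in list().)

    # before (hypothetical)
    squares = [n * n for n in xrange(10)]
    ids = range(5)
    # after running fix_xrange
    squares = [n * n for n in range(10)]
    ids = list(range(5))
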
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_xreadlines.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_xreadlines.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,24 +0,0 @@
+-"""Fix "for x in f.xreadlines()" -> "for x in f".
+-
+-This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixXreadlines(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
+-    |
+-    power< any+ trailer< '.' no_call='xreadlines' > >
+-    """
+-
+-    def transform(self, node, results):
+-        no_call = results.get("no_call")
+-
+-        if no_call:
+-            no_call.replace(Name("__iter__", prefix=no_call.get_prefix()))
+-        else:
+-            node.replace([x.clone() for x in results["call"]])
+diff -r 531f2e948299 lib2to3/fixes/.svn/text-base/fix_zip.py.svn-base
+--- a/lib2to3/fixes/.svn/text-base/fix_zip.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,34 +0,0 @@
+-"""
+-Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
+-unless there exists a 'from future_builtins import zip' statement in the
+-top-level namespace.
+-
+-We avoid the transformation if the zip() call is directly contained in
+-iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+-"""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, in_special_context
+-
+-class FixZip(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-    power< 'zip' args=trailer< '(' [any] ')' >
+-    >
+-    """
+-
+-    skip_on = "future_builtins.zip"
+-
+-    def transform(self, node, results):
+-        if self.should_skip(node):
+-            return
+-
+-        if in_special_context(node):
+-            return None
+-
+-        new = node.clone()
+-        new.set_prefix("")
+-        new = Call(Name("list"), [new])
+-        new.set_prefix(node.get_prefix())
+-        return new
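
(A hypothetical before/after pair; the for-loop is a special context,
so that call is left unwrapped. The names here are illustrative.)

    # before (hypothetical)
    pairs = zip(xs, ys)
    for x, y in zip(xs, ys):
        plot(x, y)
    # after running fix_zip
    pairs = list(zip(xs, ys))
    for x, y in zip(xs, ys):
        plot(x, y)
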
+diff -r 531f2e948299 lib2to3/fixes/__init__.py
+--- a/lib2to3/fixes/__init__.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/fixes/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,1 +1,2 @@
+-# Dummy file to make this directory a package.
++from refactor.fixes import from2
++from refactor.fixes.from2 import *
+diff -r 531f2e948299 lib2to3/fixes/fix_apply.py
+--- a/lib2to3/fixes/fix_apply.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,58 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for apply().
+-
+-This converts apply(func, v, k) into (func)(*v, **k)."""
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Call, Comma, parenthesize
+-
+-class FixApply(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'apply'
+-        trailer<
+-            '('
+-            arglist<
+-                (not argument<NAME '=' any>) func=any ','
+-                (not argument<NAME '=' any>) args=any [','
+-                (not argument<NAME '=' any>) kwds=any] [',']
+-            >
+-            ')'
+-        >
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-        assert results
+-        func = results["func"]
+-        args = results["args"]
+-        kwds = results.get("kwds")
+-        prefix = node.get_prefix()
+-        func = func.clone()
+-        if (func.type not in (token.NAME, syms.atom) and
+-            (func.type != syms.power or
+-             func.children[-2].type == token.DOUBLESTAR)):
+-            # Need to parenthesize
+-            func = parenthesize(func)
+-        func.set_prefix("")
+-        args = args.clone()
+-        args.set_prefix("")
+-        if kwds is not None:
+-            kwds = kwds.clone()
+-            kwds.set_prefix("")
+-        l_newargs = [pytree.Leaf(token.STAR, "*"), args]
+-        if kwds is not None:
+-            l_newargs.extend([Comma(),
+-                              pytree.Leaf(token.DOUBLESTAR, "**"),
+-                              kwds])
+-            l_newargs[-2].set_prefix(" ") # that's the ** token
+-        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
+-        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
+-        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
+-        return Call(func, l_newargs, prefix=prefix)
+diff -r 531f2e948299 lib2to3/fixes/fix_basestring.py
+--- a/lib2to3/fixes/fix_basestring.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,13 +0,0 @@
+-"""Fixer for basestring -> str."""
+-# Author: Christian Heimes
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixBasestring(fixer_base.BaseFix):
+-
+-    PATTERN = "'basestring'"
+-
+-    def transform(self, node, results):
+-        return Name("str", prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_buffer.py
+--- a/lib2to3/fixes/fix_buffer.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,21 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes buffer(...) into memoryview(...)."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixBuffer(fixer_base.BaseFix):
+-
+-    explicit = True # The user must ask for this fixer
+-
+-    PATTERN = """
+-              power< name='buffer' trailer< '(' [any] ')' > >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("memoryview", prefix=name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/fix_callable.py
+--- a/lib2to3/fixes/fix_callable.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,31 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for callable().
+-
+-This converts callable(obj) into hasattr(obj, '__call__')."""
+-
+-# Local imports
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Call, Name, String
+-
+-class FixCallable(fixer_base.BaseFix):
+-
+-    # Ignore callable(*args) or use of keywords.
+-    # Either could be a hint that the builtin callable() is not being used.
+-    PATTERN = """
+-    power< 'callable'
+-           trailer< lpar='('
+-                    ( not(arglist | argument<any '=' any>) func=any
+-                      | func=arglist<(not argument<any '=' any>) any ','> )
+-                    rpar=')' >
+-           after=any*
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        func = results["func"]
+-
+-        args = [func.clone(), String(', '), String("'__call__'")]
+-        return Call(Name("hasattr"), args, prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_dict.py
+--- a/lib2to3/fixes/fix_dict.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,99 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for dict methods.
+-
+-d.keys() -> list(d.keys())
+-d.items() -> list(d.items())
+-d.values() -> list(d.values())
+-
+-d.iterkeys() -> iter(d.keys())
+-d.iteritems() -> iter(d.items())
+-d.itervalues() -> iter(d.values())
+-
+-Except in certain very specific contexts: the iter() can be dropped
+-when the context is list(), sorted(), iter() or for...in; the list()
+-can be dropped when the context is list() or sorted() (but not iter()
+-or for...in!). Special contexts that apply to both: list(), sorted(), tuple(),
+-set(), any(), all(), sum().
+-
+-Note: iter(d.keys()) could be written as iter(d) but since the
+-original d.iterkeys() was also redundant we don't fix this.  And there
+-are (rare) contexts where it makes a difference (e.g. when passing it
+-as an argument to a function that introspects the argument).
+-"""
+-
+-# Local imports
+-from .. import pytree
+-from .. import patcomp
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
+-from .. import fixer_util
+-
+-
+-iter_exempt = fixer_util.consuming_calls | set(["iter"])
+-
+-
+-class FixDict(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< head=any+
+-         trailer< '.' method=('keys'|'items'|'values'|
+-                              'iterkeys'|'iteritems'|'itervalues') >
+-         parens=trailer< '(' ')' >
+-         tail=any*
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        head = results["head"]
+-        method = results["method"][0] # Extract node for method name
+-        tail = results["tail"]
+-        syms = self.syms
+-        method_name = method.value
+-        isiter = method_name.startswith("iter")
+-        if isiter:
+-            method_name = method_name[4:]
+-        assert method_name in ("keys", "items", "values"), repr(method)
+-        head = [n.clone() for n in head]
+-        tail = [n.clone() for n in tail]
+-        special = not tail and self.in_special_context(node, isiter)
+-        args = head + [pytree.Node(syms.trailer,
+-                                   [Dot(),
+-                                    Name(method_name,
+-                                         prefix=method.get_prefix())]),
+-                       results["parens"].clone()]
+-        new = pytree.Node(syms.power, args)
+-        if not special:
+-            new.set_prefix("")
+-            new = Call(Name(isiter and "iter" or "list"), [new])
+-        if tail:
+-            new = pytree.Node(syms.power, [new] + tail)
+-        new.set_prefix(node.get_prefix())
+-        return new
+-
+-    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+-    p1 = patcomp.compile_pattern(P1)
+-
+-    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+-            | comp_for< 'for' any 'in' node=any any* >
+-         """
+-    p2 = patcomp.compile_pattern(P2)
+-
+-    def in_special_context(self, node, isiter):
+-        if node.parent is None:
+-            return False
+-        results = {}
+-        if (node.parent.parent is not None and
+-               self.p1.match(node.parent.parent, results) and
+-               results["node"] is node):
+-            if isiter:
+-                # iter(d.iterkeys()) -> iter(d.keys()), etc.
+-                return results["func"].value in iter_exempt
+-            else:
+-                # list(d.keys()) -> list(d.keys()), etc.
+-                return results["func"].value in fixer_util.consuming_calls
+-        if not isiter:
+-            return False
+-        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+-        return self.p2.match(node.parent, results) and results["node"] is node
+diff -r 531f2e948299 lib2to3/fixes/fix_except.py
+--- a/lib2to3/fixes/fix_except.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,92 +0,0 @@
+-"""Fixer for except statements with named exceptions.
+-
+-The following cases will be converted:
+-
+-- "except E, T:" where T is a name:
+-
+-    except E as T:
+-
+-- "except E, T:" where T is not a name, tuple or list:
+-
+-        except E as t:
+-            T = t
+-
+-    This is done because the target of an "except" clause must be a
+-    name.
+-
+-- "except E, T:" where T is a tuple or list literal:
+-
+-        except E as t:
+-            T = t.args
+-"""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
+-
+-def find_excepts(nodes):
+-    for i, n in enumerate(nodes):
+-        if n.type == syms.except_clause:
+-            if n.children[0].value == 'except':
+-                yield (n, nodes[i+2])
+-
+-class FixExcept(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    try_stmt< 'try' ':' suite
+-                  cleanup=(except_clause ':' suite)+
+-                  tail=(['except' ':' suite]
+-                        ['else' ':' suite]
+-                        ['finally' ':' suite]) >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-
+-        tail = [n.clone() for n in results["tail"]]
+-
+-        try_cleanup = [ch.clone() for ch in results["cleanup"]]
+-        for except_clause, e_suite in find_excepts(try_cleanup):
+-            if len(except_clause.children) == 4:
+-                (E, comma, N) = except_clause.children[1:4]
+-                comma.replace(Name("as", prefix=" "))
+-
+-                if N.type != token.NAME:
+-                    # Generate a new N for the except clause
+-                    new_N = Name(self.new_name(), prefix=" ")
+-                    target = N.clone()
+-                    target.set_prefix("")
+-                    N.replace(new_N)
+-                    new_N = new_N.clone()
+-
+-                    # Insert "old_N = new_N" as the first statement in
+-                    #  the except body. This loop skips leading whitespace
+-                    #  and indents
+-                    #TODO(cwinter) suite-cleanup
+-                    suite_stmts = e_suite.children
+-                    for i, stmt in enumerate(suite_stmts):
+-                        if isinstance(stmt, pytree.Node):
+-                            break
+-
+-                    # The assignment is different if old_N is a tuple or list
+-                    # In that case, the assignment is old_N = new_N.args
+-                    if is_tuple(N) or is_list(N):
+-                        assign = Assign(target, Attr(new_N, Name('args')))
+-                    else:
+-                        assign = Assign(target, new_N)
+-
+-                    #TODO(cwinter) stopgap until children becomes a smart list
+-                    for child in reversed(suite_stmts[:i]):
+-                        e_suite.insert_child(0, child)
+-                    e_suite.insert_child(i, assign)
+-                elif N.get_prefix() == "":
+-                    # No space after a comma is legal; no space after "as",
+-                    # not so much.
+-                    N.set_prefix(" ")
+-
+-        #TODO(cwinter) fix this when children becomes a smart list
+-        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
+-        return pytree.Node(node.type, children)
+diff -r 531f2e948299 lib2to3/fixes/fix_exec.py
+--- a/lib2to3/fixes/fix_exec.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,39 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for exec.
+-
+-This converts usages of the exec statement into calls to a built-in
+-exec() function.
+-
+-exec code in ns1, ns2 -> exec(code, ns1, ns2)
+-"""
+-
+-# Local imports
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Comma, Name, Call
+-
+-
+-class FixExec(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
+-    |
+-    exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
+-    """
+-
+-    def transform(self, node, results):
+-        assert results
+-        syms = self.syms
+-        a = results["a"]
+-        b = results.get("b")
+-        c = results.get("c")
+-        args = [a.clone()]
+-        args[0].set_prefix("")
+-        if b is not None:
+-            args.extend([Comma(), b.clone()])
+-        if c is not None:
+-            args.extend([Comma(), c.clone()])
+-
+-        return Call(Name("exec"), args, prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_execfile.py
+--- a/lib2to3/fixes/fix_execfile.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,51 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for execfile.
+-
+-This converts usages of the execfile function into calls to the built-in
+-exec() function.
+-"""
+-
+-from .. import fixer_base
+-from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
+-                          ArgList, String, syms)
+-
+-
+-class FixExecfile(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
+-    |
+-    power< 'execfile' trailer< '(' filename=any ')' > >
+-    """
+-
+-    def transform(self, node, results):
+-        assert results
+-        filename = results["filename"]
+-        globals = results.get("globals")
+-        locals = results.get("locals")
+-
+-        # Copy over the prefix from the right parentheses end of the execfile
+-        # call.
+-        execfile_paren = node.children[-1].children[-1].clone()
+-        # Construct open().read().
+-        open_args = ArgList([filename.clone()], rparen=execfile_paren)
+-        open_call = Node(syms.power, [Name("open"), open_args])
+-        read = [Node(syms.trailer, [Dot(), Name('read')]),
+-                Node(syms.trailer, [LParen(), RParen()])]
+-        open_expr = [open_call] + read
+-        # Wrap the open call in a compile call. This is so the filename will be
+-        # preserved in the execed code.
+-        filename_arg = filename.clone()
+-        filename_arg.set_prefix(" ")
+-        exec_str = String("'exec'", " ")
+-        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
+-        compile_call = Call(Name("compile"), compile_args, "")
+-        # Finally, replace the execfile call with an exec call.
+-        args = [compile_call]
+-        if globals is not None:
+-            args.extend([Comma(), globals.clone()])
+-        if locals is not None:
+-            args.extend([Comma(), locals.clone()])
+-        return Call(Name("exec"), args, prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_filter.py
+--- a/lib2to3/fixes/fix_filter.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,75 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes filter(F, X) into list(filter(F, X)).
+-
+-We avoid the transformation if the filter() call is directly contained
+-in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
+-for V in <>:.
+-
+-NOTE: This is still not correct if the original code was depending on
+-filter(F, X) to return a string if X is a string and a tuple if X is a
+-tuple.  That would require type inference, which we don't do.  Let
+-Python 2.6 figure it out.
+-"""
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, ListComp, in_special_context
+-
+-class FixFilter(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-    filter_lambda=power<
+-        'filter'
+-        trailer<
+-            '('
+-            arglist<
+-                lambdef< 'lambda'
+-                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+-                >
+-                ','
+-                it=any
+-            >
+-            ')'
+-        >
+-    >
+-    |
+-    power<
+-        'filter'
+-        trailer< '(' arglist< none='None' ',' seq=any > ')' >
+-    >
+-    |
+-    power<
+-        'filter'
+-        args=trailer< '(' [any] ')' >
+-    >
+-    """
+-
+-    skip_on = "future_builtins.filter"
+-
+-    def transform(self, node, results):
+-        if self.should_skip(node):
+-            return
+-
+-        if "filter_lambda" in results:
+-            new = ListComp(results.get("fp").clone(),
+-                           results.get("fp").clone(),
+-                           results.get("it").clone(),
+-                           results.get("xp").clone())
+-
+-        elif "none" in results:
+-            new = ListComp(Name("_f"),
+-                           Name("_f"),
+-                           results["seq"].clone(),
+-                           Name("_f"))
+-
+-        else:
+-            if in_special_context(node):
+-                return None
+-            new = node.clone()
+-            new.set_prefix("")
+-            new = Call(Name("list"), [new])
+-        new.set_prefix(node.get_prefix())
+-        return new
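
(Hypothetical before/after pairs for the lambda and None branches of
the pattern above.)

    # before (hypothetical)
    positives = filter(lambda x: x > 0, nums)
    truthy = filter(None, nums)
    # after running fix_filter
    positives = [x for x in nums if x > 0]
    truthy = [_f for _f in nums if _f]
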
+diff -r 531f2e948299 lib2to3/fixes/fix_funcattrs.py
+--- a/lib2to3/fixes/fix_funcattrs.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,19 +0,0 @@
+-"""Fix function attribute names (f.func_x -> f.__x__)."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixFuncattrs(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
+-                                  | 'func_name' | 'func_defaults' | 'func_code'
+-                                  | 'func_dict') > any* >
+-    """
+-
+-    def transform(self, node, results):
+-        attr = results["attr"][0]
+-        attr.replace(Name(("__%s__" % attr.value[5:]),
+-                          prefix=attr.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/fix_future.py
+--- a/lib2to3/fixes/fix_future.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,20 +0,0 @@
+-"""Remove __future__ imports
+-
+-from __future__ import foo is replaced with an empty line.
+-"""
+-# Author: Christian Heimes
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import BlankLine
+-
+-class FixFuture(fixer_base.BaseFix):
+-    PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
+-
+-    # This should be run last -- some things check for the import
+-    run_order = 10
+-
+-    def transform(self, node, results):
+-        new = BlankLine()
+-        new.prefix = node.get_prefix()
+-        return new
+diff -r 531f2e948299 lib2to3/fixes/fix_getcwdu.py
+--- a/lib2to3/fixes/fix_getcwdu.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,18 +0,0 @@
+-"""
+-Fixer that changes os.getcwdu() to os.getcwd().
+-"""
+-# Author: Victor Stinner
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixGetcwdu(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power< 'os' trailer< dot='.' name='getcwdu' > any* >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("getcwd", prefix=name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/fix_has_key.py
+--- a/lib2to3/fixes/fix_has_key.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,109 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for has_key().
+-
+-Calls to .has_key() methods are expressed in terms of the 'in'
+-operator:
+-
+-    d.has_key(k) -> k in d
+-
+-CAVEATS:
+-1) While the primary target of this fixer is dict.has_key(), the
+-   fixer will change any has_key() method call, regardless of its
+-   class.
+-
+-2) Cases like this will not be converted:
+-
+-    m = d.has_key
+-    if m(k):
+-        ...
+-
+-   Only *calls* to has_key() are converted. While it is possible to
+-   convert the above to something like
+-
+-    m = d.__contains__
+-    if m(k):
+-        ...
+-
+-   this is currently not done.
+-"""
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, parenthesize
+-
+-
+-class FixHasKey(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    anchor=power<
+-        before=any+
+-        trailer< '.' 'has_key' >
+-        trailer<
+-            '('
+-            ( not(arglist | argument<any '=' any>) arg=any
+-            | arglist<(not argument<any '=' any>) arg=any ','>
+-            )
+-            ')'
+-        >
+-        after=any*
+-    >
+-    |
+-    negation=not_test<
+-        'not'
+-        anchor=power<
+-            before=any+
+-            trailer< '.' 'has_key' >
+-            trailer<
+-                '('
+-                ( not(arglist | argument<any '=' any>) arg=any
+-                | arglist<(not argument<any '=' any>) arg=any ','>
+-                )
+-                ')'
+-            >
+-        >
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        assert results
+-        syms = self.syms
+-        if (node.parent.type == syms.not_test and
+-            self.pattern.match(node.parent)):
+-            # Don't transform a node matching the first alternative of the
+-            # pattern when its parent matches the second alternative
+-            return None
+-        negation = results.get("negation")
+-        anchor = results["anchor"]
+-        prefix = node.get_prefix()
+-        before = [n.clone() for n in results["before"]]
+-        arg = results["arg"].clone()
+-        after = results.get("after")
+-        if after:
+-            after = [n.clone() for n in after]
+-        if arg.type in (syms.comparison, syms.not_test, syms.and_test,
+-                        syms.or_test, syms.test, syms.lambdef, syms.argument):
+-            arg = parenthesize(arg)
+-        if len(before) == 1:
+-            before = before[0]
+-        else:
+-            before = pytree.Node(syms.power, before)
+-        before.set_prefix(" ")
+-        n_op = Name("in", prefix=" ")
+-        if negation:
+-            n_not = Name("not", prefix=" ")
+-            n_op = pytree.Node(syms.comp_op, (n_not, n_op))
+-        new = pytree.Node(syms.comparison, (arg, n_op, before))
+-        if after:
+-            new = parenthesize(new)
+-            new = pytree.Node(syms.power, (new,) + tuple(after))
+-        if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
+-                                syms.and_expr, syms.shift_expr,
+-                                syms.arith_expr, syms.term,
+-                                syms.factor, syms.power):
+-            new = parenthesize(new)
+-        new.set_prefix(prefix)
+-        return new
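
(A minimal driver sketch, assuming the upstream lib2to3 refactor API;
the module path and variable names here are illustrative, not part of
the patch.)

    from lib2to3.refactor import RefactoringTool

    # Build a tool that applies only this fixer, then refactor a
    # one-line module given as a string (input must end in a newline).
    tool = RefactoringTool(["lib2to3.fixes.fix_has_key"])
    tree = tool.refactor_string("found = d.has_key(k)\n", "<example>")
    print(str(tree))  # prints: found = k in d
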
+diff -r 531f2e948299 lib2to3/fixes/fix_idioms.py
+--- a/lib2to3/fixes/fix_idioms.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,134 +0,0 @@
+-"""Adjust some old Python 2 idioms to their modern counterparts.
+-
+-* Change some type comparisons to isinstance() calls:
+-    type(x) == T -> isinstance(x, T)
+-    type(x) is T -> isinstance(x, T)
+-    type(x) != T -> not isinstance(x, T)
+-    type(x) is not T -> not isinstance(x, T)
+-
+-* Change "while 1:" into "while True:".
+-
+-* Change both
+-
+-    v = list(EXPR)
+-    v.sort()
+-    foo(v)
+-
+-and the more general
+-
+-    v = EXPR
+-    v.sort()
+-    foo(v)
+-
+-into
+-
+-    v = sorted(EXPR)
+-    foo(v)
+-"""
+-# Author: Jacques Frechet, Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Call, Comma, Name, Node, syms
+-
+-CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
+-TYPE = "power< 'type' trailer< '(' x=any ')' > >"
+-
+-class FixIdioms(fixer_base.BaseFix):
+-
+-    explicit = True # The user must ask for this fixer
+-
+-    PATTERN = r"""
+-        isinstance=comparison< %s %s T=any >
+-        |
+-        isinstance=comparison< T=any %s %s >
+-        |
+-        while_stmt< 'while' while='1' ':' any+ >
+-        |
+-        sorted=any<
+-            any*
+-            simple_stmt<
+-              expr_stmt< id1=any '='
+-                         power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
+-              >
+-              '\n'
+-            >
+-            sort=
+-            simple_stmt<
+-              power< id2=any
+-                     trailer< '.' 'sort' > trailer< '(' ')' >
+-              >
+-              '\n'
+-            >
+-            next=any*
+-        >
+-        |
+-        sorted=any<
+-            any*
+-            simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
+-            sort=
+-            simple_stmt<
+-              power< id2=any
+-                     trailer< '.' 'sort' > trailer< '(' ')' >
+-              >
+-              '\n'
+-            >
+-            next=any*
+-        >
+-    """ % (TYPE, CMP, CMP, TYPE)
+-
+-    def match(self, node):
+-        r = super(FixIdioms, self).match(node)
+-        # If we've matched one of the sort/sorted subpatterns above, we
+-        # want to reject matches where the initial assignment and the
+-        # subsequent .sort() call involve different identifiers.
+-        if r and "sorted" in r:
+-            if r["id1"] == r["id2"]:
+-                return r
+-            return None
+-        return r
+-
+-    def transform(self, node, results):
+-        if "isinstance" in results:
+-            return self.transform_isinstance(node, results)
+-        elif "while" in results:
+-            return self.transform_while(node, results)
+-        elif "sorted" in results:
+-            return self.transform_sort(node, results)
+-        else:
+-            raise RuntimeError("Invalid match")
+-
+-    def transform_isinstance(self, node, results):
+-        x = results["x"].clone() # The thing inside of type()
+-        T = results["T"].clone() # The type being compared against
+-        x.set_prefix("")
+-        T.set_prefix(" ")
+-        test = Call(Name("isinstance"), [x, Comma(), T])
+-        if "n" in results:
+-            test.set_prefix(" ")
+-            test = Node(syms.not_test, [Name("not"), test])
+-        test.set_prefix(node.get_prefix())
+-        return test
+-
+-    def transform_while(self, node, results):
+-        one = results["while"]
+-        one.replace(Name("True", prefix=one.get_prefix()))
+-
+-    def transform_sort(self, node, results):
+-        sort_stmt = results["sort"]
+-        next_stmt = results["next"]
+-        list_call = results.get("list")
+-        simple_expr = results.get("expr")
+-
+-        if list_call:
+-            list_call.replace(Name("sorted", prefix=list_call.get_prefix()))
+-        elif simple_expr:
+-            new = simple_expr.clone()
+-            new.set_prefix("")
+-            simple_expr.replace(Call(Name("sorted"), [new],
+-                                     prefix=simple_expr.get_prefix()))
+-        else:
+-            raise RuntimeError("should not have reached here")
+-        sort_stmt.remove()
+-        if next_stmt:
+-            next_stmt[0].set_prefix(sort_stmt.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_import.py
+--- a/lib2to3/fixes/fix_import.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,90 +0,0 @@
+-"""Fixer for import statements.
+-If spam is being imported from the local directory, this import:
+-    from spam import eggs
+-Becomes:
+-    from .spam import eggs
+-
+-And this import:
+-    import spam
+-Becomes:
+-    from . import spam
+-"""
+-
+-# Local imports
+-from .. import fixer_base
+-from os.path import dirname, join, exists, pathsep
+-from ..fixer_util import FromImport, syms, token
+-
+-
+-def traverse_imports(names):
+-    """
+-    Walks over all the names imported in a dotted_as_names node.
+-    """
+-    pending = [names]
+-    while pending:
+-        node = pending.pop()
+-        if node.type == token.NAME:
+-            yield node.value
+-        elif node.type == syms.dotted_name:
+-            yield "".join([ch.value for ch in node.children])
+-        elif node.type == syms.dotted_as_name:
+-            pending.append(node.children[0])
+-        elif node.type == syms.dotted_as_names:
+-            pending.extend(node.children[::-2])
+-        else:
+-            raise AssertionError("unknown node type")
+-
+-
+-class FixImport(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    import_from< 'from' imp=any 'import' ['('] any [')'] >
+-    |
+-    import_name< 'import' imp=any >
+-    """
+-
+-    def transform(self, node, results):
+-        imp = results['imp']
+-
+-        if node.type == syms.import_from:
+-            # Some imports are top-level (e.g. 'import ham'),
+-            # some are dotted (e.g. 'import ham.eggs'),
+-            # and some are aliased (e.g. 'import ham.eggs as spam').
+-            # Hence, the loop
+-            while not hasattr(imp, 'value'):
+-                imp = imp.children[0]
+-            if self.probably_a_local_import(imp.value):
+-                imp.value = "." + imp.value
+-                imp.changed()
+-                return node
+-        else:
+-            have_local = False
+-            have_absolute = False
+-            for mod_name in traverse_imports(imp):
+-                if self.probably_a_local_import(mod_name):
+-                    have_local = True
+-                else:
+-                    have_absolute = True
+-            if have_absolute:
+-                if have_local:
+-                    # We won't handle both sibling and absolute imports in the
+-                    # same statement at the moment.
+-                    self.warning(node, "absolute and local imports together")
+-                return
+-
+-            new = FromImport('.', [imp])
+-            new.set_prefix(node.get_prefix())
+-            return new
+-
+-    def probably_a_local_import(self, imp_name):
+-        imp_name = imp_name.split('.', 1)[0]
+-        base_path = dirname(self.filename)
+-        base_path = join(base_path, imp_name)
+-        # If there is no __init__.py next to the file, it's not in a
+-        # package, so it can't be a relative import.
+-        if not exists(join(dirname(base_path), '__init__.py')):
+-            return False
+-        for ext in ['.py', pathsep, '.pyc', '.so', '.sl', '.pyd']:
+-            if exists(base_path + ext):
+-                return True
+-        return False
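As a rough illustration of traverse_imports() above, a minimal sketch,
assuming this revision's lib2to3 is importable (the child indexing reflects
how pytree.convert collapses single-child nodes):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver
    from lib2to3.fixes.fix_import import traverse_imports

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("import ham.eggs, spam as bacon\n")
    # file_input -> simple_stmt -> import_name; child 1 is dotted_as_names
    names = tree.children[0].children[0].children[1]
    print(list(traverse_imports(names)))   # ['ham.eggs', 'spam']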
+diff -r 531f2e948299 lib2to3/fixes/fix_imports.py
+--- a/lib2to3/fixes/fix_imports.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,145 +0,0 @@
+-"""Fix incompatible imports and module references."""
+-# Authors: Collin Winter, Nick Edds
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, attr_chain
+-
+-MAPPING = {'StringIO':  'io',
+-           'cStringIO': 'io',
+-           'cPickle': 'pickle',
+-           '__builtin__' : 'builtins',
+-           'copy_reg': 'copyreg',
+-           'Queue': 'queue',
+-           'SocketServer': 'socketserver',
+-           'ConfigParser': 'configparser',
+-           'repr': 'reprlib',
+-           'FileDialog': 'tkinter.filedialog',
+-           'tkFileDialog': 'tkinter.filedialog',
+-           'SimpleDialog': 'tkinter.simpledialog',
+-           'tkSimpleDialog': 'tkinter.simpledialog',
+-           'tkColorChooser': 'tkinter.colorchooser',
+-           'tkCommonDialog': 'tkinter.commondialog',
+-           'Dialog': 'tkinter.dialog',
+-           'Tkdnd': 'tkinter.dnd',
+-           'tkFont': 'tkinter.font',
+-           'tkMessageBox': 'tkinter.messagebox',
+-           'ScrolledText': 'tkinter.scrolledtext',
+-           'Tkconstants': 'tkinter.constants',
+-           'Tix': 'tkinter.tix',
+-           'ttk': 'tkinter.ttk',
+-           'Tkinter': 'tkinter',
+-           'markupbase': '_markupbase',
+-           '_winreg': 'winreg',
+-           'thread': '_thread',
+-           'dummy_thread': '_dummy_thread',
+-           # anydbm and whichdb are handled by fix_imports2
+-           'dbhash': 'dbm.bsd',
+-           'dumbdbm': 'dbm.dumb',
+-           'dbm': 'dbm.ndbm',
+-           'gdbm': 'dbm.gnu',
+-           'xmlrpclib': 'xmlrpc.client',
+-           'DocXMLRPCServer': 'xmlrpc.server',
+-           'SimpleXMLRPCServer': 'xmlrpc.server',
+-           'httplib': 'http.client',
+-           'htmlentitydefs' : 'html.entities',
+-           'HTMLParser' : 'html.parser',
+-           'Cookie': 'http.cookies',
+-           'cookielib': 'http.cookiejar',
+-           'BaseHTTPServer': 'http.server',
+-           'SimpleHTTPServer': 'http.server',
+-           'CGIHTTPServer': 'http.server',
+-           #'test.test_support': 'test.support',
+-           'commands': 'subprocess',
+-           'UserString' : 'collections',
+-           'UserList' : 'collections',
+-           'urlparse' : 'urllib.parse',
+-           'robotparser' : 'urllib.robotparser',
+-}
+-
+-
+-def alternates(members):
+-    return "(" + "|".join(map(repr, members)) + ")"
+-
+-
+-def build_pattern(mapping=MAPPING):
+-    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
+-    bare_names = alternates(mapping.keys())
+-
+-    yield """name_import=import_name< 'import' ((%s) |
+-               multiple_imports=dotted_as_names< any* (%s) any* >) >
+-          """ % (mod_list, mod_list)
+-    yield """import_from< 'from' (%s) 'import' ['(']
+-              ( any | import_as_name< any 'as' any > |
+-                import_as_names< any* >)  [')'] >
+-          """ % mod_list
+-    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
+-               multiple_imports=dotted_as_names<
+-                 any* dotted_as_name< (%s) 'as' any > any* >) >
+-          """ % (mod_list, mod_list)
+-
+-    # Find usages of module members in code e.g. thread.foo(bar)
+-    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
+-
+-
+-class FixImports(fixer_base.BaseFix):
+-
+-    order = "pre" # Pre-order tree traversal
+-
+-    # This is overridden in fix_imports2.
+-    mapping = MAPPING
+-
+-    # We want to run this fixer late, so fix_import doesn't try to make stdlib
+-    # renames into relative imports.
+-    run_order = 6
+-
+-    def build_pattern(self):
+-        return "|".join(build_pattern(self.mapping))
+-
+-    def compile_pattern(self):
+-        # We override this so that MAPPING can be programmatically altered
+-        # and the changes will be reflected in PATTERN.
+-        self.PATTERN = self.build_pattern()
+-        super(FixImports, self).compile_pattern()
+-
+-    # Don't match the node if it's within another match.
+-    def match(self, node):
+-        match = super(FixImports, self).match
+-        results = match(node)
+-        if results:
+-            # Module usage could be in the trailer of an attribute lookup, so we
+-            # might have nested matches when "bare_with_attr" is present.
+-            if "bare_with_attr" not in results and \
+-                    any([match(obj) for obj in attr_chain(node, "parent")]):
+-                return False
+-            return results
+-        return False
+-
+-    def start_tree(self, tree, filename):
+-        super(FixImports, self).start_tree(tree, filename)
+-        self.replace = {}
+-
+-    def transform(self, node, results):
+-        import_mod = results.get("module_name")
+-        if import_mod:
+-            mod_name = import_mod.value
+-            new_name = self.mapping[mod_name]
+-            import_mod.replace(Name(new_name, prefix=import_mod.get_prefix()))
+-            if "name_import" in results:
+-                # If it's not a "from x import x, y" or "import x as y" import,
+-                # marked its usage to be replaced.
+-                self.replace[mod_name] = new_name
+-            if "multiple_imports" in results:
+-                # This is a nasty hack to fix multiple imports on a line (e.g.,
+-                # "import StringIO, urlparse"). The problem is that I can't
+-                # figure out an easy way to make a pattern recognize the keys of
+-                # MAPPING randomly sprinkled in an import statement.
+-                results = self.match(node)
+-                if results:
+-                    self.transform(node, results)
+-        else:
+-            # Replace usage of the module.
+-            bare_name = results["bare_with_attr"][0]
+-            new_name = self.replace.get(bare_name.value)
+-            if new_name:
+-                bare_name.replace(Name(new_name, prefix=bare_name.get_prefix()))
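A hedged sketch of the multiple-imports re-match above, driving just this
fixer through lib2to3's RefactoringTool (module path assumes this revision's
pre-rename lib2to3 layout); it should rewrite both names and the bare usage:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_imports"])
    src = "import StringIO, urlparse\ns = StringIO.StringIO()\n"
    print(rt.refactor_string(src, "<example>"))
    # import io, urllib.parse
    # s = io.StringIO()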
+diff -r 531f2e948299 lib2to3/fixes/fix_imports2.py
+--- a/lib2to3/fixes/fix_imports2.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,16 +0,0 @@
+-"""Fix incompatible imports and module references that must be fixed after
+-fix_imports."""
+-from . import fix_imports
+-
+-
+-MAPPING = {
+-            'whichdb': 'dbm',
+-            'anydbm': 'dbm',
+-          }
+-
+-
+-class FixImports2(fix_imports.FixImports):
+-
+-    run_order = 7
+-
+-    mapping = MAPPING
+diff -r 531f2e948299 lib2to3/fixes/fix_input.py
+--- a/lib2to3/fixes/fix_input.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,26 +0,0 @@
+-"""Fixer that changes input(...) into eval(input(...))."""
+-# Author: Andre Roberge
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Call, Name
+-from .. import patcomp
+-
+-
+-context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
+-
+-
+-class FixInput(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power< 'input' args=trailer< '(' [any] ')' > >
+-              """
+-
+-    def transform(self, node, results):
+-        # If we're already wrapped in a eval() call, we're done.
+-        if context.match(node.parent.parent):
+-            return
+-
+-        new = node.clone()
+-        new.set_prefix("")
+-        return Call(Name("eval"), [new], prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_intern.py
+--- a/lib2to3/fixes/fix_intern.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,44 +0,0 @@
+-# Copyright 2006 Georg Brandl.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for intern().
+-
+-intern(s) -> sys.intern(s)"""
+-
+-# Local imports
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Name, Attr, touch_import
+-
+-
+-class FixIntern(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'intern'
+-           trailer< lpar='('
+-                    ( not(arglist | argument<any '=' any>) obj=any
+-                      | obj=arglist<(not argument<any '=' any>) any ','> )
+-                    rpar=')' >
+-           after=any*
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-        obj = results["obj"].clone()
+-        if obj.type == syms.arglist:
+-            newarglist = obj.clone()
+-        else:
+-            newarglist = pytree.Node(syms.arglist, [obj.clone()])
+-        after = results["after"]
+-        if after:
+-            after = [n.clone() for n in after]
+-        new = pytree.Node(syms.power,
+-                          Attr(Name("sys"), Name("intern")) +
+-                          [pytree.Node(syms.trailer,
+-                                       [results["lpar"].clone(),
+-                                        newarglist,
+-                                        results["rpar"].clone()])] + after)
+-        new.set_prefix(node.get_prefix())
+-        touch_import(None, 'sys', node)
+-        return new
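A hedged before/after for the transform above; note that touch_import()
also inserts an "import sys" at the top of the module if one is missing:

    s = intern(name)
    # becomes
    import sys
    s = sys.intern(name)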
+diff -r 531f2e948299 lib2to3/fixes/fix_isinstance.py
+--- a/lib2to3/fixes/fix_isinstance.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,52 +0,0 @@
+-# Copyright 2008 Armin Ronacher.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that cleans up a tuple argument to isinstance after the tokens
+-in it were fixed.  This is mainly used to remove double occurrences of
+-tokens as a leftover of the long -> int / unicode -> str conversion.
+-
+-eg.  isinstance(x, (int, long)) -> isinstance(x, (int, int))
+-       -> isinstance(x, int)
+-"""
+-
+-from .. import fixer_base
+-from ..fixer_util import token
+-
+-
+-class FixIsinstance(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power<
+-        'isinstance'
+-        trailer< '(' arglist< any ',' atom< '('
+-            args=testlist_gexp< any+ >
+-        ')' > > ')' >
+-    >
+-    """
+-
+-    run_order = 6
+-
+-    def transform(self, node, results):
+-        names_inserted = set()
+-        testlist = results["args"]
+-        args = testlist.children
+-        new_args = []
+-        iterator = enumerate(args)
+-        for idx, arg in iterator:
+-            if arg.type == token.NAME and arg.value in names_inserted:
+-                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
+-                    iterator.next()
+-                    continue
+-            else:
+-                new_args.append(arg)
+-                if arg.type == token.NAME:
+-                    names_inserted.add(arg.value)
+-        if new_args and new_args[-1].type == token.COMMA:
+-            del new_args[-1]
+-        if len(new_args) == 1:
+-            atom = testlist.parent
+-            new_args[0].set_prefix(atom.get_prefix())
+-            atom.replace(new_args[0])
+-        else:
+-            args[:] = new_args
+-            node.changed()
+diff -r 531f2e948299 lib2to3/fixes/fix_itertools.py
+--- a/lib2to3/fixes/fix_itertools.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,41 +0,0 @@
+-""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
+-    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
+-
+-    imports from itertools are fixed in fix_itertools_imports.py
+-
+-    If itertools is imported as something else (e.g. import itertools as it;
+-    it.izip(spam, eggs)), method calls will not get fixed.
+-    """
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixItertools(fixer_base.BaseFix):
+-    it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
+-    PATTERN = """
+-              power< it='itertools'
+-                  trailer<
+-                     dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
+-              |
+-              power< func=%(it_funcs)s trailer< '(' [any] ')' > >
+-              """ %(locals())
+-
+-    # Needs to be run after fix_(map|zip|filter)
+-    run_order = 6
+-
+-    def transform(self, node, results):
+-        prefix = None
+-        func = results['func'][0]
+-        if 'it' in results and func.value != 'ifilterfalse':
+-            dot, it = (results['dot'], results['it'])
+-            # Remove the 'itertools'
+-            prefix = it.get_prefix()
+-            it.remove()
+-            # Replace the node which contains ('.', 'function') with the
+-            # function (to be consistent with the second part of the pattern)
+-            dot.remove()
+-            func.parent.replace(func)
+-
+-        prefix = prefix or func.get_prefix()
+-        func.replace(Name(func.value[1:], prefix=prefix))
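Roughly, the two pattern branches above should produce (ifilterfalse keeps
its module qualifier because it stays in itertools in Python 3):

    itertools.imap(f, x)          # -> map(f, x)
    izip(a, b)                    # -> zip(a, b)
    itertools.ifilterfalse(p, x)  # -> itertools.filterfalse(p, x)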
+diff -r 531f2e948299 lib2to3/fixes/fix_itertools_imports.py
+--- a/lib2to3/fixes/fix_itertools_imports.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,52 +0,0 @@
+-""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
+-
+-# Local imports
+-from lib2to3 import fixer_base
+-from lib2to3.fixer_util import BlankLine, syms, token
+-
+-
+-class FixItertoolsImports(fixer_base.BaseFix):
+-    PATTERN = """
+-              import_from< 'from' 'itertools' 'import' imports=any >
+-              """ %(locals())
+-
+-    def transform(self, node, results):
+-        imports = results['imports']
+-        if imports.type == syms.import_as_name or not imports.children:
+-            children = [imports]
+-        else:
+-            children = imports.children
+-        for child in children[::2]:
+-            if child.type == token.NAME:
+-                member = child.value
+-                name_node = child
+-            else:
+-                assert child.type == syms.import_as_name
+-                name_node = child.children[0]
+-            member_name = name_node.value
+-            if member_name in ('imap', 'izip', 'ifilter'):
+-                child.value = None
+-                child.remove()
+-            elif member_name == 'ifilterfalse':
+-                node.changed()
+-                name_node.value = 'filterfalse'
+-
+-        # Make sure the import statement is still sane
+-        children = imports.children[:] or [imports]
+-        remove_comma = True
+-        for child in children:
+-            if remove_comma and child.type == token.COMMA:
+-                child.remove()
+-            else:
+-                remove_comma ^= True
+-
+-        if children[-1].type == token.COMMA:
+-            children[-1].remove()
+-
+-        # If there are no imports left, just get rid of the entire statement
+-        if not (imports.children or getattr(imports, 'value', None)) or \
+-                imports.parent is None:
+-            p = node.get_prefix()
+-            node = BlankLine()
+-            node.prefix = p
+-        return node
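The docstring gives no example, so a hedged before/after of the renaming
and comma cleanup above:

    from itertools import imap, izip, ifilterfalse
    # becomes
    from itertools import filterfalse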
+diff -r 531f2e948299 lib2to3/fixes/fix_long.py
+--- a/lib2to3/fixes/fix_long.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,22 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that turns 'long' into 'int' everywhere.
+-"""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, Number, is_probably_builtin
+-
+-
+-class FixLong(fixer_base.BaseFix):
+-
+-    PATTERN = "'long'"
+-
+-    static_int = Name("int")
+-
+-    def transform(self, node, results):
+-        if is_probably_builtin(node):
+-            new = self.static_int.clone()
+-            new.set_prefix(node.get_prefix())
+-            return new
+diff -r 531f2e948299 lib2to3/fixes/fix_map.py
+--- a/lib2to3/fixes/fix_map.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,82 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
+-exists a 'from future_builtins import map' statement in the top-level
+-namespace.
+-
+-As a special case, map(None, X) is changed into list(X).  (This is
+-necessary because the semantics are changed in this case -- the new
+-map(None, X) is equivalent to [(x,) for x in X].)
+-
+-We avoid the transformation (except for the special case mentioned
+-above) if the map() call is directly contained in iter(<>), list(<>),
+-tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+-
+-NOTE: This is still not correct if the original code was depending on
+-map(F, X, Y, ...) to go on until the longest argument is exhausted,
+-substituting None for missing values -- like zip(), it now stops as
+-soon as the shortest argument is exhausted.
+-"""
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, ListComp, in_special_context
+-from ..pygram import python_symbols as syms
+-
+-class FixMap(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-    map_none=power<
+-        'map'
+-        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
+-    >
+-    |
+-    map_lambda=power<
+-        'map'
+-        trailer<
+-            '('
+-            arglist<
+-                lambdef< 'lambda'
+-                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+-                >
+-                ','
+-                it=any
+-            >
+-            ')'
+-        >
+-    >
+-    |
+-    power<
+-        'map'
+-        args=trailer< '(' [any] ')' >
+-    >
+-    """
+-
+-    skip_on = 'future_builtins.map'
+-
+-    def transform(self, node, results):
+-        if self.should_skip(node):
+-            return
+-
+-        if node.parent.type == syms.simple_stmt:
+-            self.warning(node, "You should use a for loop here")
+-            new = node.clone()
+-            new.set_prefix("")
+-            new = Call(Name("list"), [new])
+-        elif "map_lambda" in results:
+-            new = ListComp(results.get("xp").clone(),
+-                           results.get("fp").clone(),
+-                           results.get("it").clone())
+-        else:
+-            if "map_none" in results:
+-                new = results["arg"].clone()
+-            else:
+-                if in_special_context(node):
+-                    return None
+-                new = node.clone()
+-            new.set_prefix("")
+-            new = Call(Name("list"), [new])
+-        new.set_prefix(node.get_prefix())
+-        return new
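Summarizing the three pattern branches above as hedged before/after pairs:

    map(None, X)             # -> list(X)
    map(lambda x: x + 1, X)  # -> [x + 1 for x in X]
    map(f, X)                # -> list(map(f, X)), unless already inside
                             #    iter()/list()/sorted()/a for loop, etc.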
+diff -r 531f2e948299 lib2to3/fixes/fix_metaclass.py
+--- a/lib2to3/fixes/fix_metaclass.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,227 +0,0 @@
+-"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
+-
+-   The various forms of classdef (inherits nothing, inherits once, inherits
+-   many) don't parse the same in the CST, so we look at ALL classes for
+-   a __metaclass__ and, if we find one, normalize the inherits to all be
+-   an arglist.
+-
+-   For one-liner classes ('class X: pass') there is no indent/dedent so
+-   we normalize those into having a suite.
+-
+-   Moving the __metaclass__ into the classdef can also cause the class
+-   body to be empty so there is some special casing for that as well.
+-
+-   This fixer also tries very hard to keep original indenting and spacing
+-   in all those corner cases.
+-
+-"""
+-# Author: Jack Diederich
+-
+-# Local imports
+-from .. import fixer_base
+-from ..pygram import token
+-from ..fixer_util import Name, syms, Node, Leaf
+-
+-
+-def has_metaclass(parent):
+-    """ we have to check the cls_node without changing it.
+-        There are two possibilities:
+-          1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
+-          2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
+-    """
+-    for node in parent.children:
+-        if node.type == syms.suite:
+-            return has_metaclass(node)
+-        elif node.type == syms.simple_stmt and node.children:
+-            expr_node = node.children[0]
+-            if expr_node.type == syms.expr_stmt and expr_node.children:
+-                left_side = expr_node.children[0]
+-                if isinstance(left_side, Leaf) and \
+-                        left_side.value == '__metaclass__':
+-                    return True
+-    return False
+-
+-
+-def fixup_parse_tree(cls_node):
+-    """ one-line classes don't get a suite in the parse tree so we add
+-        one to normalize the tree
+-    """
+-    for node in cls_node.children:
+-        if node.type == syms.suite:
+-            # already in the preferred format, do nothing
+-            return
+-
+-    # !%@#! oneliners have no suite node, we have to fake one up
+-    for i, node in enumerate(cls_node.children):
+-        if node.type == token.COLON:
+-            break
+-    else:
+-        raise ValueError("No class suite and no ':'!")
+-
+-    # move everything into a suite node
+-    suite = Node(syms.suite, [])
+-    while cls_node.children[i+1:]:
+-        move_node = cls_node.children[i+1]
+-        suite.append_child(move_node.clone())
+-        move_node.remove()
+-    cls_node.append_child(suite)
+-    node = suite
+-
+-
+-def fixup_simple_stmt(parent, i, stmt_node):
+-    """ if there is a semi-colon all the parts count as part of the same
+-        simple_stmt.  We just want the __metaclass__ part so we move
+-        everything efter the semi-colon into its own simple_stmt node
+-    """
+-    for semi_ind, node in enumerate(stmt_node.children):
+-        if node.type == token.SEMI: # *sigh*
+-            break
+-    else:
+-        return
+-
+-    node.remove() # kill the semicolon
+-    new_expr = Node(syms.expr_stmt, [])
+-    new_stmt = Node(syms.simple_stmt, [new_expr])
+-    while stmt_node.children[semi_ind:]:
+-        move_node = stmt_node.children[semi_ind]
+-        new_expr.append_child(move_node.clone())
+-        move_node.remove()
+-    parent.insert_child(i, new_stmt)
+-    new_leaf1 = new_stmt.children[0].children[0]
+-    old_leaf1 = stmt_node.children[0].children[0]
+-    new_leaf1.set_prefix(old_leaf1.get_prefix())
+-
+-
+-def remove_trailing_newline(node):
+-    if node.children and node.children[-1].type == token.NEWLINE:
+-        node.children[-1].remove()
+-
+-
+-def find_metas(cls_node):
+-    # find the suite node (Mmm, sweet nodes)
+-    for node in cls_node.children:
+-        if node.type == syms.suite:
+-            break
+-    else:
+-        raise ValueError("No class suite!")
+-
+-    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
+-    for i, simple_node in list(enumerate(node.children)):
+-        if simple_node.type == syms.simple_stmt and simple_node.children:
+-            expr_node = simple_node.children[0]
+-            if expr_node.type == syms.expr_stmt and expr_node.children:
+-                # Check if the expr_node is a simple assignment.
+-                left_node = expr_node.children[0]
+-                if isinstance(left_node, Leaf) and \
+-                        left_node.value == '__metaclass__':
+-                    # We found an assignment to __metaclass__.
+-                    fixup_simple_stmt(node, i, simple_node)
+-                    remove_trailing_newline(simple_node)
+-                    yield (node, i, simple_node)
+-
+-
+-def fixup_indent(suite):
+-    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
+-        Otherwise we get in trouble when removing __metaclass__ at suite start
+-    """
+-    kids = suite.children[::-1]
+-    # find the first indent
+-    while kids:
+-        node = kids.pop()
+-        if node.type == token.INDENT:
+-            break
+-
+-    # find the first Leaf
+-    while kids:
+-        node = kids.pop()
+-        if isinstance(node, Leaf) and node.type != token.DEDENT:
+-            if node.prefix:
+-                node.set_prefix('')
+-            return
+-        else:
+-            kids.extend(node.children[::-1])
+-
+-
+-class FixMetaclass(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    classdef<any*>
+-    """
+-
+-    def transform(self, node, results):
+-        if not has_metaclass(node):
+-            return node
+-
+-        fixup_parse_tree(node)
+-
+-        # find metaclasses, keep the last one
+-        last_metaclass = None
+-        for suite, i, stmt in find_metas(node):
+-            last_metaclass = stmt
+-            stmt.remove()
+-
+-        text_type = node.children[0].type # always Leaf(nnn, 'class')
+-
+-        # figure out what kind of classdef we have
+-        if len(node.children) == 7:
+-            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
+-            #                 0        1       2    3        4    5    6
+-            if node.children[3].type == syms.arglist:
+-                arglist = node.children[3]
+-            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
+-            else:
+-                parent = node.children[3].clone()
+-                arglist = Node(syms.arglist, [parent])
+-                node.set_child(3, arglist)
+-        elif len(node.children) == 6:
+-            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
+-            #                 0        1       2     3    4    5
+-            arglist = Node(syms.arglist, [])
+-            node.insert_child(3, arglist)
+-        elif len(node.children) == 4:
+-            # Node(classdef, ['class', 'name', ':', suite])
+-            #                 0        1       2    3
+-            arglist = Node(syms.arglist, [])
+-            node.insert_child(2, Leaf(token.RPAR, ')'))
+-            node.insert_child(2, arglist)
+-            node.insert_child(2, Leaf(token.LPAR, '('))
+-        else:
+-            raise ValueError("Unexpected class definition")
+-
+-        # now stick the metaclass in the arglist
+-        meta_txt = last_metaclass.children[0].children[0]
+-        meta_txt.value = 'metaclass'
+-        orig_meta_prefix = meta_txt.get_prefix()
+-
+-        if arglist.children:
+-            arglist.append_child(Leaf(token.COMMA, ','))
+-            meta_txt.set_prefix(' ')
+-        else:
+-            meta_txt.set_prefix('')
+-
+-        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
+-        expr_stmt = last_metaclass.children[0]
+-        assert expr_stmt.type == syms.expr_stmt
+-        expr_stmt.children[1].set_prefix('')
+-        expr_stmt.children[2].set_prefix('')
+-
+-        arglist.append_child(last_metaclass)
+-
+-        fixup_indent(suite)
+-
+-        # check for empty suite
+-        if not suite.children:
+-            # one-liner that was just __metaclass__
+-            suite.remove()
+-            pass_leaf = Leaf(text_type, 'pass')
+-            pass_leaf.set_prefix(orig_meta_prefix)
+-            node.append_child(pass_leaf)
+-            node.append_child(Leaf(token.NEWLINE, '\n'))
+-
+-        elif len(suite.children) > 1 and \
+-                 (suite.children[-2].type == token.INDENT and
+-                  suite.children[-1].type == token.DEDENT):
+-            # there was only one line in the class body and it was __metaclass__
+-            pass_leaf = Leaf(text_type, 'pass')
+-            suite.insert_child(-1, pass_leaf)
+-            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
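Putting the pieces above together, a hedged before/after (the 'pass' paths
fire only when __metaclass__ was the sole statement in the class body):

    class Spam(Base):
        __metaclass__ = Meta
        attr = 1
    # becomes
    class Spam(Base, metaclass=Meta):
        attr = 1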
+diff -r 531f2e948299 lib2to3/fixes/fix_methodattrs.py
+--- a/lib2to3/fixes/fix_methodattrs.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,23 +0,0 @@
+-"""Fix bound method attributes (method.im_? -> method.__?__).
+-"""
+-# Author: Christian Heimes
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-MAP = {
+-    "im_func" : "__func__",
+-    "im_self" : "__self__",
+-    "im_class" : "__self__.__class__"
+-    }
+-
+-class FixMethodattrs(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
+-    """
+-
+-    def transform(self, node, results):
+-        attr = results["attr"][0]
+-        new = MAP[attr.value]
+-        attr.replace(Name(new, prefix=attr.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/fix_ne.py
+--- a/lib2to3/fixes/fix_ne.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,22 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that turns <> into !=."""
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-
+-
+-class FixNe(fixer_base.BaseFix):
+-    # This is so simple that we don't need the pattern compiler.
+-
+-    def match(self, node):
+-        # Override
+-        return node.type == token.NOTEQUAL and node.value == "<>"
+-
+-    def transform(self, node, results):
+-        new = pytree.Leaf(token.NOTEQUAL, "!=")
+-        new.set_prefix(node.get_prefix())
+-        return new
+diff -r 531f2e948299 lib2to3/fixes/fix_next.py
+--- a/lib2to3/fixes/fix_next.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,103 +0,0 @@
+-"""Fixer for it.next() -> next(it), per PEP 3114."""
+-# Author: Collin Winter
+-
+-# Things that currently aren't covered:
+-#   - listcomp "next" names aren't warned
+-#   - "with" statement targets aren't checked
+-
+-# Local imports
+-from ..pgen2 import token
+-from ..pygram import python_symbols as syms
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, find_binding
+-
+-bind_warning = "Calls to builtin next() possibly shadowed by global binding"
+-
+-
+-class FixNext(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
+-    |
+-    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
+-    |
+-    classdef< 'class' any+ ':'
+-              suite< any*
+-                     funcdef< 'def'
+-                              name='next'
+-                              parameters< '(' NAME ')' > any+ >
+-                     any* > >
+-    |
+-    global=global_stmt< 'global' any* 'next' any* >
+-    """
+-
+-    order = "pre" # Pre-order tree traversal
+-
+-    def start_tree(self, tree, filename):
+-        super(FixNext, self).start_tree(tree, filename)
+-
+-        n = find_binding('next', tree)
+-        if n:
+-            self.warning(n, bind_warning)
+-            self.shadowed_next = True
+-        else:
+-            self.shadowed_next = False
+-
+-    def transform(self, node, results):
+-        assert results
+-
+-        base = results.get("base")
+-        attr = results.get("attr")
+-        name = results.get("name")
+-        mod = results.get("mod")
+-
+-        if base:
+-            if self.shadowed_next:
+-                attr.replace(Name("__next__", prefix=attr.get_prefix()))
+-            else:
+-                base = [n.clone() for n in base]
+-                base[0].set_prefix("")
+-                node.replace(Call(Name("next", prefix=node.get_prefix()), base))
+-        elif name:
+-            n = Name("__next__", prefix=name.get_prefix())
+-            name.replace(n)
+-        elif attr:
+-            # We don't do this transformation if we're assigning to "x.next".
+-            # Unfortunately, it doesn't seem possible to do this in PATTERN,
+-            #  so it's being done here.
+-            if is_assign_target(node):
+-                head = results["head"]
+-                if "".join([str(n) for n in head]).strip() == '__builtin__':
+-                    self.warning(node, bind_warning)
+-                return
+-            attr.replace(Name("__next__"))
+-        elif "global" in results:
+-            self.warning(node, bind_warning)
+-            self.shadowed_next = True
+-
+-
+-### The following functions help test if node is part of an assignment
+-###  target.
+-
+-def is_assign_target(node):
+-    assign = find_assign(node)
+-    if assign is None:
+-        return False
+-
+-    for child in assign.children:
+-        if child.type == token.EQUAL:
+-            return False
+-        elif is_subtree(child, node):
+-            return True
+-    return False
+-
+-def find_assign(node):
+-    if node.type == syms.expr_stmt:
+-        return node
+-    if node.type == syms.simple_stmt or node.parent is None:
+-        return None
+-    return find_assign(node.parent)
+-
+-def is_subtree(root, node):
+-    if root == node:
+-        return True
+-    return any([is_subtree(c, node) for c in root.children])
+diff -r 531f2e948299 lib2to3/fixes/fix_nonzero.py
+--- a/lib2to3/fixes/fix_nonzero.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,20 +0,0 @@
+-"""Fixer for __nonzero__ -> __bool__ methods."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, syms
+-
+-class FixNonzero(fixer_base.BaseFix):
+-    PATTERN = """
+-    classdef< 'class' any+ ':'
+-              suite< any*
+-                     funcdef< 'def' name='__nonzero__'
+-                              parameters< '(' NAME ')' > any+ >
+-                     any* > >
+-    """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        new = Name("__bool__", prefix=name.get_prefix())
+-        name.replace(new)
+diff -r 531f2e948299 lib2to3/fixes/fix_numliterals.py
+--- a/lib2to3/fixes/fix_numliterals.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,27 +0,0 @@
+-"""Fixer that turns 1L into 1, 0755 into 0o755.
+-"""
+-# Copyright 2007 Georg Brandl.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Number
+-
+-
+-class FixNumliterals(fixer_base.BaseFix):
+-    # This is so simple that we don't need the pattern compiler.
+-
+-    def match(self, node):
+-        # Override
+-        return (node.type == token.NUMBER and
+-                (node.value.startswith("0") or node.value[-1] in "Ll"))
+-
+-    def transform(self, node, results):
+-        val = node.value
+-        if val[-1] in 'Ll':
+-            val = val[:-1]
+-        elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
+-            val = "0o" + val[1:]
+-
+-        return Number(val, prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_paren.py
+--- a/lib2to3/fixes/fix_paren.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,42 +0,0 @@
+-"""Fixer that addes parentheses where they are required
+-
+-This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
+-
+-# By Taek Joo Kim and Benjamin Peterson
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import LParen, RParen
+-
+-# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
+-class FixParen(fixer_base.BaseFix):
+-    PATTERN = """
+-        atom< ('[' | '(')
+-            (listmaker< any
+-                comp_for<
+-                    'for' NAME 'in'
+-                    target=testlist_safe< any (',' any)+ [',']
+-                     >
+-                    [any]
+-                >
+-            >
+-            |
+-            testlist_gexp< any
+-                comp_for<
+-                    'for' NAME 'in'
+-                    target=testlist_safe< any (',' any)+ [',']
+-                     >
+-                    [any]
+-                >
+-            >)
+-        (']' | ')') >
+-    """
+-
+-    def transform(self, node, results):
+-        target = results["target"]
+-
+-        lparen = LParen()
+-        lparen.set_prefix(target.get_prefix())
+-        target.set_prefix("") # Make it hug the parentheses
+-        target.insert_child(0, lparen)
+-        target.append_child(RParen())
+diff -r 531f2e948299 lib2to3/fixes/fix_print.py
+--- a/lib2to3/fixes/fix_print.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,90 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for print.
+-
+-Change:
+-    'print'          into 'print()'
+-    'print ...'      into 'print(...)'
+-    'print ... ,'    into 'print(..., end=" ")'
+-    'print >>x, ...' into 'print(..., file=x)'
+-
+-No changes are applied if print_function is imported from __future__
+-
+-"""
+-
+-# Local imports
+-from .. import patcomp
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, Comma, String, is_tuple
+-
+-
+-parend_expr = patcomp.compile_pattern(
+-              """atom< '(' [atom|STRING|NAME] ')' >"""
+-              )
+-
+-
+-class FixPrint(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-              simple_stmt< any* bare='print' any* > | print_stmt
+-              """
+-
+-    skip_on = '__future__.print_function'
+-
+-    def transform(self, node, results):
+-        assert results
+-
+-        if self.should_skip(node):
+-            return
+-
+-        bare_print = results.get("bare")
+-
+-        if bare_print:
+-            # Special-case print all by itself
+-            bare_print.replace(Call(Name("print"), [],
+-                               prefix=bare_print.get_prefix()))
+-            return
+-        assert node.children[0] == Name("print")
+-        args = node.children[1:]
+-        if len(args) == 1 and parend_expr.match(args[0]):
+-            # We don't want to keep sticking parens around an
+-            # already-parenthesised expression.
+-            return
+-
+-        sep = end = file = None
+-        if args and args[-1] == Comma():
+-            args = args[:-1]
+-            end = " "
+-        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
+-            assert len(args) >= 2
+-            file = args[1].clone()
+-            args = args[3:] # Strip a possible comma after the file expression
+-        # Now synthesize a print(args, sep=..., end=..., file=...) node.
+-        l_args = [arg.clone() for arg in args]
+-        if l_args:
+-            l_args[0].set_prefix("")
+-        if sep is not None or end is not None or file is not None:
+-            if sep is not None:
+-                self.add_kwarg(l_args, "sep", String(repr(sep)))
+-            if end is not None:
+-                self.add_kwarg(l_args, "end", String(repr(end)))
+-            if file is not None:
+-                self.add_kwarg(l_args, "file", file)
+-        n_stmt = Call(Name("print"), l_args)
+-        n_stmt.set_prefix(node.get_prefix())
+-        return n_stmt
+-
+-    def add_kwarg(self, l_nodes, s_kwd, n_expr):
+-        # XXX All this prefix-setting may lose comments (though rarely)
+-        n_expr.set_prefix("")
+-        n_argument = pytree.Node(self.syms.argument,
+-                                 (Name(s_kwd),
+-                                  pytree.Leaf(token.EQUAL, "="),
+-                                  n_expr))
+-        if l_nodes:
+-            l_nodes.append(Comma())
+-            n_argument.set_prefix(" ")
+-        l_nodes.append(n_argument)
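Combining the end/file cases above, a hedged sketch of what add_kwarg()
synthesizes for a redirected print statement with a trailing comma:

    print >> log, "warning:", msg,
    # becomes
    print("warning:", msg, end=' ', file=log)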
+diff -r 531f2e948299 lib2to3/fixes/fix_raise.py
+--- a/lib2to3/fixes/fix_raise.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,82 +0,0 @@
+-"""Fixer for 'raise E, V, T'
+-
+-raise         -> raise
+-raise E       -> raise E
+-raise E, V    -> raise E(V)
+-raise E, V, T -> raise E(V).with_traceback(T)
+-
+-raise (((E, E'), E''), E'''), V -> raise E(V)
+-raise "foo", V, T               -> warns about string exceptions
+-
+-
+-CAVEATS:
+-1) "raise E, V" will be incorrectly translated if V is an exception
+-   instance. The correct Python 3 idiom is
+-
+-        raise E from V
+-
+-   but since we can't detect instance-hood by syntax alone and since
+-   any client code would have to be changed as well, we don't automate
+-   this.
+-"""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
+-
+-class FixRaise(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-
+-        exc = results["exc"].clone()
+-        if exc.type is token.STRING:
+-            self.cannot_convert(node, "Python 3 does not support string exceptions")
+-            return
+-
+-        # Python 2 supports
+-        #  raise ((((E1, E2), E3), E4), E5), V
+-        # as a synonym for
+-        #  raise E1, V
+-        # Since Python 3 will not support this, we recurse down any tuple
+-        # literals, always taking the first element.
+-        if is_tuple(exc):
+-            while is_tuple(exc):
+-                # exc.children[1:-1] is the unparenthesized tuple
+-                # exc.children[1].children[0] is the first element of the tuple
+-                exc = exc.children[1].children[0].clone()
+-            exc.set_prefix(" ")
+-
+-        if "val" not in results:
+-            # One-argument raise
+-            new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
+-            new.set_prefix(node.get_prefix())
+-            return new
+-
+-        val = results["val"].clone()
+-        if is_tuple(val):
+-            args = [c.clone() for c in val.children[1:-1]]
+-        else:
+-            val.set_prefix("")
+-            args = [val]
+-
+-        if "tb" in results:
+-            tb = results["tb"].clone()
+-            tb.set_prefix("")
+-
+-            e = Call(exc, args)
+-            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+-            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
+-            new.set_prefix(node.get_prefix())
+-            return new
+-        else:
+-            return pytree.Node(syms.raise_stmt,
+-                               [Name("raise"), Call(exc, args)],
+-                               prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_raw_input.py
+--- a/lib2to3/fixes/fix_raw_input.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,16 +0,0 @@
+-"""Fixer that changes raw_input(...) into input(...)."""
+-# Author: Andre Roberge
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-class FixRawInput(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power< name='raw_input' trailer< '(' [any] ')' > any* >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("input", prefix=name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/fix_reduce.py
+--- a/lib2to3/fixes/fix_reduce.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,33 +0,0 @@
+-# Copyright 2008 Armin Ronacher.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for reduce().
+-
+-Makes sure reduce() is imported from the functools module if reduce is
+-used in the module being fixed.
+-"""
+-
+-from .. import pytree
+-from .. import fixer_base
+-from ..fixer_util import Name, Attr, touch_import
+-
+-
+-
+-class FixReduce(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< 'reduce'
+-        trailer< '('
+-            arglist< (
+-                (not(argument<any '=' any>) any ','
+-                 not(argument<any '=' any>) any) |
+-                (not(argument<any '=' any>) any ','
+-                 not(argument<any '=' any>) any ','
+-                 not(argument<any '=' any>) any)
+-            ) >
+-        ')' >
+-    >
+-    """
+-
+-    def transform(self, node, results):
+-        touch_import('functools', 'reduce', node)
+diff -r 531f2e948299 lib2to3/fixes/fix_renames.py
+--- a/lib2to3/fixes/fix_renames.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,69 +0,0 @@
+-"""Fix incompatible renames
+-
+-Fixes:
+-  * sys.maxint -> sys.maxsize
+-"""
+-# Author: Christian Heimes
+-# based on Collin Winter's fix_import
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, attr_chain
+-
+-MAPPING = {"sys":  {"maxint" : "maxsize"},
+-          }
+-LOOKUP = {}
+-
+-def alternates(members):
+-    return "(" + "|".join(map(repr, members)) + ")"
+-
+-
+-def build_pattern():
+-    #bare = set()
+-    for module, replace in MAPPING.items():
+-        for old_attr, new_attr in replace.items():
+-            LOOKUP[(module, old_attr)] = new_attr
+-            #bare.add(module)
+-            #bare.add(old_attr)
+-            #yield """
+-            #      import_name< 'import' (module=%r
+-            #          | dotted_as_names< any* module=%r any* >) >
+-            #      """ % (module, module)
+-            yield """
+-                  import_from< 'from' module_name=%r 'import'
+-                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
+-                  """ % (module, old_attr, old_attr)
+-            yield """
+-                  power< module_name=%r trailer< '.' attr_name=%r > any* >
+-                  """ % (module, old_attr)
+-    #yield """bare_name=%s""" % alternates(bare)
+-
+-
+-class FixRenames(fixer_base.BaseFix):
+-    PATTERN = "|".join(build_pattern())
+-
+-    order = "pre" # Pre-order tree traversal
+-
+-    # Don't match the node if it's within another match
+-    def match(self, node):
+-        match = super(FixRenames, self).match
+-        results = match(node)
+-        if results:
+-            if any([match(obj) for obj in attr_chain(node, "parent")]):
+-                return False
+-            return results
+-        return False
+-
+-    #def start_tree(self, tree, filename):
+-    #    super(FixRenames, self).start_tree(tree, filename)
+-    #    self.replace = {}
+-
+-    def transform(self, node, results):
+-        mod_name = results.get("module_name")
+-        attr_name = results.get("attr_name")
+-        #bare_name = results.get("bare_name")
+-        #import_mod = results.get("module")
+-
+-        if mod_name and attr_name:
+-            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
+-            attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))
+diff -r 531f2e948299 lib2to3/fixes/fix_repr.py
+--- a/lib2to3/fixes/fix_repr.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,22 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Call, Name, parenthesize
+-
+-
+-class FixRepr(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              atom < '`' expr=any '`' >
+-              """
+-
+-    def transform(self, node, results):
+-        expr = results["expr"].clone()
+-
+-        if expr.type == self.syms.testlist1:
+-            expr = parenthesize(expr)
+-        return Call(Name("repr"), [expr], prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_set_literal.py
+--- a/lib2to3/fixes/fix_set_literal.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,52 +0,0 @@
+-"""
+-Optional fixer to transform set() calls to set literals.
+-"""
+-
+-# Author: Benjamin Peterson
+-
+-from lib2to3 import fixer_base, pytree
+-from lib2to3.fixer_util import token, syms
+-
+-
+-
+-class FixSetLiteral(fixer_base.BaseFix):
+-
+-    explicit = True
+-
+-    PATTERN = """power< 'set' trailer< '('
+-                     (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
+-                                |
+-                                single=any) ']' >
+-                     |
+-                     atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
+-                     )
+-                     ')' > >
+-              """
+-
+-    def transform(self, node, results):
+-        single = results.get("single")
+-        if single:
+-            # Make a fake listmaker
+-            fake = pytree.Node(syms.listmaker, [single.clone()])
+-            single.replace(fake)
+-            items = fake
+-        else:
+-            items = results["items"]
+-
+-        # Build the contents of the literal
+-        literal = [pytree.Leaf(token.LBRACE, "{")]
+-        literal.extend(n.clone() for n in items.children)
+-        literal.append(pytree.Leaf(token.RBRACE, "}"))
+-        # Set the prefix of the right brace to that of the ')' or ']'
+-        literal[-1].set_prefix(items.next_sibling.get_prefix())
+-        maker = pytree.Node(syms.dictsetmaker, literal)
+-        maker.set_prefix(node.get_prefix())
+-
+-        # If the original was a one-tuple, we need to remove the extra comma.
+-        if len(maker.children) == 4:
+-            n = maker.children[2]
+-            n.remove()
+-            maker.children[-1].set_prefix(n.get_prefix())
+-
+-        # Finally, replace the set call with our shiny new literal.
+-        return maker
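A hedged before/after of the literal construction above, including the
one-tuple case that triggers the extra-comma removal:

    set([1, 2, 3])   # -> {1, 2, 3}
    set((x,))        # -> {x}
    set([x])         # -> {x} (a fake listmaker is built around the item)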
+diff -r 531f2e948299 lib2to3/fixes/fix_standarderror.py
+--- a/lib2to3/fixes/fix_standarderror.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,18 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for StandardError -> Exception."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixStandarderror(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              'StandardError'
+-              """
+-
+-    def transform(self, node, results):
+-        return Name("Exception", prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_sys_exc.py
+--- a/lib2to3/fixes/fix_sys_exc.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,29 +0,0 @@
+-"""Fixer for sys.exc_{type, value, traceback}
+-
+-sys.exc_type -> sys.exc_info()[0]
+-sys.exc_value -> sys.exc_info()[1]
+-sys.exc_traceback -> sys.exc_info()[2]
+-"""
+-
+-# By Jeff Balogh and Benjamin Peterson
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
+-
+-class FixSysExc(fixer_base.BaseFix):
+-    # This order matches the ordering of sys.exc_info().
+-    exc_info = ["exc_type", "exc_value", "exc_traceback"]
+-    PATTERN = """
+-              power< 'sys' trailer< dot='.' attribute=(%s) > >
+-              """ % '|'.join("'%s'" % e for e in exc_info)
+-
+-    def transform(self, node, results):
+-        sys_attr = results["attribute"][0]
+-        index = Number(self.exc_info.index(sys_attr.value))
+-
+-        call = Call(Name("exc_info"), prefix=sys_attr.get_prefix())
+-        attr = Attr(Name("sys"), call)
+-        attr[1].children[0].set_prefix(results["dot"].get_prefix())
+-        attr.append(Subscript(index))
+-        return Node(syms.power, attr, prefix=node.get_prefix())
+diff -r 531f2e948299 lib2to3/fixes/fix_throw.py
+--- a/lib2to3/fixes/fix_throw.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,56 +0,0 @@
+-"""Fixer for generator.throw(E, V, T).
+-
+-g.throw(E)       -> g.throw(E)
+-g.throw(E, V)    -> g.throw(E(V))
+-g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
+-
+-g.throw("foo"[, V[, T]]) will warn about string exceptions."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
+-
+-class FixThrow(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-    power< any trailer< '.' 'throw' >
+-           trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
+-    >
+-    |
+-    power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
+-    """
+-
+-    def transform(self, node, results):
+-        syms = self.syms
+-
+-        exc = results["exc"].clone()
+-        if exc.type is token.STRING:
+-            self.cannot_convert(node, "Python 3 does not support string exceptions")
+-            return
+-
+-        # Leave "g.throw(E)" alone
+-        val = results.get("val")
+-        if val is None:
+-            return
+-
+-        val = val.clone()
+-        if is_tuple(val):
+-            args = [c.clone() for c in val.children[1:-1]]
+-        else:
+-            val.set_prefix("")
+-            args = [val]
+-
+-        throw_args = results["args"]
+-
+-        if "tb" in results:
+-            tb = results["tb"].clone()
+-            tb.set_prefix("")
+-
+-            e = Call(exc, args)
+-            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+-            throw_args.replace(pytree.Node(syms.power, with_tb))
+-        else:
+-            throw_args.replace(Call(exc, args))
+diff -r 531f2e948299 lib2to3/fixes/fix_tuple_params.py
+--- a/lib2to3/fixes/fix_tuple_params.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,169 +0,0 @@
+-"""Fixer for function definitions with tuple parameters.
+-
+-def func(((a, b), c), d):
+-    ...
+-
+-    ->
+-
+-def func(x, d):
+-    ((a, b), c) = x
+-    ...
+-
+-It will also support lambdas:
+-
+-    lambda (x, y): x + y -> lambda t: t[0] + t[1]
+-
+-    # The parens are a syntax error in Python 3
+-    lambda (x): x + y -> lambda x: x + y
+-"""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
+-
+-def is_docstring(stmt):
+-    return isinstance(stmt, pytree.Node) and \
+-           stmt.children[0].type == token.STRING
+-
+-class FixTupleParams(fixer_base.BaseFix):
+-    PATTERN = """
+-              funcdef< 'def' any parameters< '(' args=any ')' >
+-                       ['->' any] ':' suite=any+ >
+-              |
+-              lambda=
+-              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
+-                       ':' body=any
+-              >
+-              """
+-
+-    def transform(self, node, results):
+-        if "lambda" in results:
+-            return self.transform_lambda(node, results)
+-
+-        new_lines = []
+-        suite = results["suite"]
+-        args = results["args"]
+-        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
+-        # TODO(cwinter): suite-cleanup
+-        if suite[0].children[1].type == token.INDENT:
+-            start = 2
+-            indent = suite[0].children[1].value
+-            end = Newline()
+-        else:
+-            start = 0
+-            indent = "; "
+-            end = pytree.Leaf(token.INDENT, "")
+-
+-        # We need access to self for new_name(), and making this a method
+-        #  doesn't feel right. Closing over self and new_lines makes the
+-        #  code below cleaner.
+-        def handle_tuple(tuple_arg, add_prefix=False):
+-            n = Name(self.new_name())
+-            arg = tuple_arg.clone()
+-            arg.set_prefix("")
+-            stmt = Assign(arg, n.clone())
+-            if add_prefix:
+-                n.set_prefix(" ")
+-            tuple_arg.replace(n)
+-            new_lines.append(pytree.Node(syms.simple_stmt,
+-                                         [stmt, end.clone()]))
+-
+-        if args.type == syms.tfpdef:
+-            handle_tuple(args)
+-        elif args.type == syms.typedargslist:
+-            for i, arg in enumerate(args.children):
+-                if arg.type == syms.tfpdef:
+-                    # Without add_prefix, the emitted code is correct,
+-                    #  just ugly.
+-                    handle_tuple(arg, add_prefix=(i > 0))
+-
+-        if not new_lines:
+-            return node
+-
+-        # This isn't strictly necessary, but it plays nicely with other fixers.
+-        # TODO(cwinter) get rid of this when children becomes a smart list
+-        for line in new_lines:
+-            line.parent = suite[0]
+-
+-        # TODO(cwinter) suite-cleanup
+-        after = start
+-        if start == 0:
+-            new_lines[0].set_prefix(" ")
+-        elif is_docstring(suite[0].children[start]):
+-            new_lines[0].set_prefix(indent)
+-            after = start + 1
+-
+-        suite[0].children[after:after] = new_lines
+-        for i in range(after+1, after+len(new_lines)+1):
+-            suite[0].children[i].set_prefix(indent)
+-        suite[0].changed()
+-
+-    def transform_lambda(self, node, results):
+-        args = results["args"]
+-        body = results["body"]
+-        inner = simplify_args(results["inner"])
+-
+-        # Replace lambda ((((x)))): x  with lambda x: x
+-        if inner.type == token.NAME:
+-            inner = inner.clone()
+-            inner.set_prefix(" ")
+-            args.replace(inner)
+-            return
+-
+-        params = find_params(args)
+-        to_index = map_to_index(params)
+-        tup_name = self.new_name(tuple_name(params))
+-
+-        new_param = Name(tup_name, prefix=" ")
+-        args.replace(new_param.clone())
+-        for n in body.post_order():
+-            if n.type == token.NAME and n.value in to_index:
+-                subscripts = [c.clone() for c in to_index[n.value]]
+-                new = pytree.Node(syms.power,
+-                                  [new_param.clone()] + subscripts)
+-                new.set_prefix(n.get_prefix())
+-                n.replace(new)
+-
+-
+-### Helper functions for transform_lambda()
+-
+-def simplify_args(node):
+-    if node.type in (syms.vfplist, token.NAME):
+-        return node
+-    elif node.type == syms.vfpdef:
+-        # These look like vfpdef< '(' x ')' > where x is NAME
+-        # or another vfpdef instance (leading to recursion).
+-        while node.type == syms.vfpdef:
+-            node = node.children[1]
+-        return node
+-    raise RuntimeError("Received unexpected node %s" % node)
+-
+-def find_params(node):
+-    if node.type == syms.vfpdef:
+-        return find_params(node.children[1])
+-    elif node.type == token.NAME:
+-        return node.value
+-    return [find_params(c) for c in node.children if c.type != token.COMMA]
+-
+-def map_to_index(param_list, prefix=[], d=None):
+-    if d is None:
+-        d = {}
+-    for i, obj in enumerate(param_list):
+-        trailer = [Subscript(Number(i))]
+-        if isinstance(obj, list):
+-            map_to_index(obj, trailer, d=d)
+-        else:
+-            d[obj] = prefix + trailer
+-    return d
+-
+-def tuple_name(param_list):
+-    l = []
+-    for obj in param_list:
+-        if isinstance(obj, list):
+-            l.append(tuple_name(obj))
+-        else:
+-            l.append(obj)
+-    return "_".join(l)
+diff -r 531f2e948299 lib2to3/fixes/fix_types.py
+--- a/lib2to3/fixes/fix_types.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,62 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer for removing uses of the types module.
+-
+-These work only for the known names in the types module.  Each name may
+-appear with or without the "types." prefix; it is assumed the module is imported either as:
+-
+-    import types
+-    from types import ... # either * or specific types
+-
+-The import statements are not modified.
+-
+-There should be another fixer that handles at least the following constants:
+-
+-   type([]) -> list
+-   type(()) -> tuple
+-   type('') -> str
+-
+-"""
+-
+-# Local imports
+-from ..pgen2 import token
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-_TYPE_MAPPING = {
+-        'BooleanType' : 'bool',
+-        'BufferType' : 'memoryview',
+-        'ClassType' : 'type',
+-        'ComplexType' : 'complex',
+-        'DictType': 'dict',
+-        'DictionaryType' : 'dict',
+-        'EllipsisType' : 'type(Ellipsis)',
+-        #'FileType' : 'io.IOBase',
+-        'FloatType': 'float',
+-        'IntType': 'int',
+-        'ListType': 'list',
+-        'LongType': 'int',
+-        'ObjectType' : 'object',
+-        'NoneType': 'type(None)',
+-        'NotImplementedType' : 'type(NotImplemented)',
+-        'SliceType' : 'slice',
+-        'StringType': 'bytes', # XXX ?
+-        'StringTypes' : 'str', # XXX ?
+-        'TupleType': 'tuple',
+-        'TypeType' : 'type',
+-        'UnicodeType': 'str',
+-        'XRangeType' : 'range',
+-    }
+-
+-_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
+-
+-class FixTypes(fixer_base.BaseFix):
+-
+-    PATTERN = '|'.join(_pats)
+-
+-    def transform(self, node, results):
+-        new_value = _TYPE_MAPPING.get(results["name"].value)
+-        if new_value:
+-            return Name(new_value, prefix=node.get_prefix())
+-        return None
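
A sketch (not part of the patch) of the mapping in action; note that the
import statement itself is deliberately left untouched:

    from lib2to3 import refactor

    rt = refactor.RefactoringTool(["lib2to3.fixes.fix_types"])
    print rt.refactor_string("import types\nx = types.ListType\n", "<example>")
    # -> import types
    #    x = list
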
+diff -r 531f2e948299 lib2to3/fixes/fix_unicode.py
+--- a/lib2to3/fixes/fix_unicode.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,28 +0,0 @@
+-"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
+-
+-"""
+-
+-import re
+-from ..pgen2 import token
+-from .. import fixer_base
+-
+-class FixUnicode(fixer_base.BaseFix):
+-
+-    PATTERN = "STRING | NAME<'unicode' | 'unichr'>"
+-
+-    def transform(self, node, results):
+-        if node.type == token.NAME:
+-            if node.value == "unicode":
+-                new = node.clone()
+-                new.value = "str"
+-                return new
+-            if node.value == "unichr":
+-                new = node.clone()
+-                new.value = "chr"
+-                return new
+-            # XXX Warn when __unicode__ found?
+-        elif node.type == token.STRING:
+-            if re.match(r"[uU][rR]?[\'\"]", node.value):
+-                new = node.clone()
+-                new.value = new.value[1:]
+-                return new
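
A sketch (not part of the patch) covering all three rewrites this fixer
performs in one statement:

    from lib2to3 import refactor

    rt = refactor.RefactoringTool(["lib2to3.fixes.fix_unicode"])
    print rt.refactor_string("s = unicode(u'x') + unichr(65)\n", "<example>")
    # -> s = str('x') + chr(65)
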
+diff -r 531f2e948299 lib2to3/fixes/fix_urllib.py
+--- a/lib2to3/fixes/fix_urllib.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,180 +0,0 @@
+-"""Fix changes imports of urllib which are now incompatible.
+-   This is rather similar to fix_imports, but because of the more
+-   complex nature of the fixing for urllib, it has its own fixer.
+-"""
+-# Author: Nick Edds
+-
+-# Local imports
+-from .fix_imports import alternates, FixImports
+-from .. import fixer_base
+-from ..fixer_util import Name, Comma, FromImport, Newline, attr_chain
+-
+-MAPPING = {'urllib':  [
+-                ('urllib.request',
+-                    ['URLOpener', 'FancyURLOpener', 'urlretrieve',
+-                     '_urlopener', 'urlcleanup']),
+-                ('urllib.parse',
+-                    ['quote', 'quote_plus', 'unquote', 'unquote_plus',
+-                     'urlencode', 'pathname2url', 'url2pathname', 'splitattr',
+-                     'splithost', 'splitnport', 'splitpasswd', 'splitport',
+-                     'splitquery', 'splittag', 'splittype', 'splituser',
+-                     'splitvalue', ]),
+-                ('urllib.error',
+-                    ['ContentTooShortError'])],
+-           'urllib2' : [
+-                ('urllib.request',
+-                    ['urlopen', 'install_opener', 'build_opener',
+-                     'Request', 'OpenerDirector', 'BaseHandler',
+-                     'HTTPDefaultErrorHandler', 'HTTPRedirectHandler',
+-                     'HTTPCookieProcessor', 'ProxyHandler',
+-                     'HTTPPasswordMgr',
+-                     'HTTPPasswordMgrWithDefaultRealm',
+-                     'AbstractBasicAuthHandler',
+-                     'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler',
+-                     'AbstractDigestAuthHandler',
+-                     'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler',
+-                     'HTTPHandler', 'HTTPSHandler', 'FileHandler',
+-                     'FTPHandler', 'CacheFTPHandler',
+-                     'UnknownHandler']),
+-                ('urllib.error',
+-                    ['URLError', 'HTTPError']),
+-           ]
+-}
+-
+-# Duplicate the url parsing functions for urllib2.
+-MAPPING["urllib2"].append(MAPPING["urllib"][1])
+-
+-
+-def build_pattern():
+-    bare = set()
+-    for old_module, changes in MAPPING.items():
+-        for change in changes:
+-            new_module, members = change
+-            members = alternates(members)
+-            yield """import_name< 'import' (module=%r
+-                                  | dotted_as_names< any* module=%r any* >) >
+-                  """ % (old_module, old_module)
+-            yield """import_from< 'from' mod_member=%r 'import'
+-                       ( member=%s | import_as_name< member=%s 'as' any > |
+-                         import_as_names< members=any*  >) >
+-                  """ % (old_module, members, members)
+-            yield """import_from< 'from' module_star=%r 'import' star='*' >
+-                  """ % old_module
+-            yield """import_name< 'import'
+-                                  dotted_as_name< module_as=%r 'as' any > >
+-                  """ % old_module
+-            yield """power< module_dot=%r trailer< '.' member=%s > any* >
+-                  """ % (old_module, members)
+-
+-
+-class FixUrllib(FixImports):
+-
+-    def build_pattern(self):
+-        return "|".join(build_pattern())
+-
+-    def transform_import(self, node, results):
+-        """Transform for the basic import case. Replaces the old
+-           import name with a comma separated list of its
+-           replacements.
+-        """
+-        import_mod = results.get('module')
+-        pref = import_mod.get_prefix()
+-
+-        names = []
+-
+-        # create a Node list of the replacement modules
+-        for name in MAPPING[import_mod.value][:-1]:
+-            names.extend([Name(name[0], prefix=pref), Comma()])
+-        names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
+-        import_mod.replace(names)
+-
+-    def transform_member(self, node, results):
+-        """Transform for imports of specific module elements. Replaces
+-           the module to be imported from with the appropriate new
+-           module.
+-        """
+-        mod_member = results.get('mod_member')
+-        pref = mod_member.get_prefix()
+-        member = results.get('member')
+-
+-        # Simple case with only a single member being imported
+-        if member:
+-            # this may be a list of length one, or just a node
+-            if isinstance(member, list):
+-                member = member[0]
+-            new_name = None
+-            for change in MAPPING[mod_member.value]:
+-                if member.value in change[1]:
+-                    new_name = change[0]
+-                    break
+-            if new_name:
+-                mod_member.replace(Name(new_name, prefix=pref))
+-            else:
+-                self.cannot_convert(node,
+-                                    'This is an invalid module element')
+-
+-        # Multiple members being imported
+-        else:
+-            # a dictionary for replacements, order matters
+-            modules = []
+-            mod_dict = {}
+-            members = results.get('members')
+-            for member in members:
+-                member = member.value
+-                # we only care about the actual members
+-                if member != ',':
+-                    for change in MAPPING[mod_member.value]:
+-                        if member in change[1]:
+-                            if change[0] in mod_dict:
+-                                mod_dict[change[0]].append(member)
+-                            else:
+-                                mod_dict[change[0]] = [member]
+-                                modules.append(change[0])
+-
+-            new_nodes = []
+-            for module in modules:
+-                elts = mod_dict[module]
+-                names = []
+-                for elt in elts[:-1]:
+-                    names.extend([Name(elt, prefix=pref), Comma()])
+-                names.append(Name(elts[-1], prefix=pref))
+-                new_nodes.append(FromImport(module, names))
+-            if new_nodes:
+-                nodes = []
+-                for new_node in new_nodes[:-1]:
+-                    nodes.extend([new_node, Newline()])
+-                nodes.append(new_nodes[-1])
+-                node.replace(nodes)
+-            else:
+-                self.cannot_convert(node, 'All module elements are invalid')
+-
+-    def transform_dot(self, node, results):
+-        """Transform for calls to module members in code."""
+-        module_dot = results.get('module_dot')
+-        member = results.get('member')
+-        # this may be a list of length one, or just a node
+-        if isinstance(member, list):
+-            member = member[0]
+-        new_name = None
+-        for change in MAPPING[module_dot.value]:
+-            if member.value in change[1]:
+-                new_name = change[0]
+-                break
+-        if new_name:
+-            module_dot.replace(Name(new_name,
+-                                    prefix=module_dot.get_prefix()))
+-        else:
+-            self.cannot_convert(node, 'This is an invalid module element')
+-
+-    def transform(self, node, results):
+-        if results.get('module'):
+-            self.transform_import(node, results)
+-        elif results.get('mod_member'):
+-            self.transform_member(node, results)
+-        elif results.get('module_dot'):
+-            self.transform_dot(node, results)
+-        # Renaming and star imports are not supported for these modules.
+-        elif results.get('module_star'):
+-            self.cannot_convert(node, 'Cannot handle star imports.')
+-        elif results.get('module_as'):
+-            self.cannot_convert(node, 'This module is now multiple modules')
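
A sketch (not part of the patch): members imported from one old module may
land in several new ones, which is why this fixer can emit multiple
from-imports for a single statement:

    from lib2to3 import refactor

    rt = refactor.RefactoringTool(["lib2to3.fixes.fix_urllib"])
    src = "from urllib2 import urlopen, URLError\n"
    print rt.refactor_string(src, "<example>")
    # -> from urllib.request import urlopen
    #    from urllib.error import URLError
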
+diff -r 531f2e948299 lib2to3/fixes/fix_ws_comma.py
+--- a/lib2to3/fixes/fix_ws_comma.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,39 +0,0 @@
+-"""Fixer that changes 'a ,b' into 'a, b'.
+-
+-This also changes '{a :b}' into '{a: b}', but does not touch other
+-uses of colons.  It does not touch other uses of whitespace.
+-
+-"""
+-
+-from .. import pytree
+-from ..pgen2 import token
+-from .. import fixer_base
+-
+-class FixWsComma(fixer_base.BaseFix):
+-
+-    explicit = True # The user must ask for this fixer
+-
+-    PATTERN = """
+-    any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
+-    """
+-
+-    COMMA = pytree.Leaf(token.COMMA, ",")
+-    COLON = pytree.Leaf(token.COLON, ":")
+-    SEPS = (COMMA, COLON)
+-
+-    def transform(self, node, results):
+-        new = node.clone()
+-        comma = False
+-        for child in new.children:
+-            if child in self.SEPS:
+-                prefix = child.get_prefix()
+-                if prefix.isspace() and "\n" not in prefix:
+-                    child.set_prefix("")
+-                comma = True
+-            else:
+-                if comma:
+-                    prefix = child.get_prefix()
+-                    if not prefix:
+-                        child.set_prefix(" ")
+-                comma = False
+-        return new
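
Because the fixer is marked explicit, it only runs when requested by name; a
sketch (not part of the patch), assuming the explicit argument accepts a
list of fixer module names as in the RefactoringTool constructor above:

    from lib2to3 import refactor

    name = "lib2to3.fixes.fix_ws_comma"
    rt = refactor.RefactoringTool([name], explicit=[name])
    print rt.refactor_string("f(a ,b ,c)\n", "<example>")
    # -> f(a, b, c)
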
+diff -r 531f2e948299 lib2to3/fixes/fix_xrange.py
+--- a/lib2to3/fixes/fix_xrange.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,64 +0,0 @@
+-# Copyright 2007 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Fixer that changes xrange(...) into range(...)."""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, consuming_calls
+-from .. import patcomp
+-
+-
+-class FixXrange(fixer_base.BaseFix):
+-
+-    PATTERN = """
+-              power<
+-                 (name='range'|name='xrange') trailer< '(' args=any ')' >
+-              rest=any* >
+-              """
+-
+-    def transform(self, node, results):
+-        name = results["name"]
+-        if name.value == "xrange":
+-            return self.transform_xrange(node, results)
+-        elif name.value == "range":
+-            return self.transform_range(node, results)
+-        else:
+-            raise ValueError(repr(name))
+-
+-    def transform_xrange(self, node, results):
+-        name = results["name"]
+-        name.replace(Name("range", prefix=name.get_prefix()))
+-
+-    def transform_range(self, node, results):
+-        if not self.in_special_context(node):
+-            range_call = Call(Name("range"), [results["args"].clone()])
+-            # Encase the range call in list().
+-            list_call = Call(Name("list"), [range_call],
+-                             prefix=node.get_prefix())
+-            # Put things that were after the range() call after the list call.
+-            for n in results["rest"]:
+-                list_call.append_child(n)
+-            return list_call
+-        return node
+-
+-    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+-    p1 = patcomp.compile_pattern(P1)
+-
+-    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+-            | comp_for< 'for' any 'in' node=any any* >
+-            | comparison< any 'in' node=any any*>
+-         """
+-    p2 = patcomp.compile_pattern(P2)
+-
+-    def in_special_context(self, node):
+-        if node.parent is None:
+-            return False
+-        results = {}
+-        if (node.parent.parent is not None and
+-               self.p1.match(node.parent.parent, results) and
+-               results["node"] is node):
+-            # list(d.keys()) -> list(d.keys()), etc.
+-            return results["func"].value in consuming_calls
+-        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+-        return self.p2.match(node.parent, results) and results["node"] is node
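
A sketch (not part of the patch) showing both branches: xrange() is renamed,
a range() whose result escapes gets wrapped in list(), and iteration
contexts are left unwrapped:

    from lib2to3 import refactor

    rt = refactor.RefactoringTool(["lib2to3.fixes.fix_xrange"])
    src = "x = range(5)\nfor i in xrange(5): pass\n"
    print rt.refactor_string(src, "<example>")
    # -> x = list(range(5))
    #    for i in range(5): pass
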
+diff -r 531f2e948299 lib2to3/fixes/fix_xreadlines.py
+--- a/lib2to3/fixes/fix_xreadlines.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,24 +0,0 @@
+-"""Fix "for x in f.xreadlines()" -> "for x in f".
+-
+-This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
+-# Author: Collin Winter
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name
+-
+-
+-class FixXreadlines(fixer_base.BaseFix):
+-    PATTERN = """
+-    power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
+-    |
+-    power< any+ trailer< '.' no_call='xreadlines' > >
+-    """
+-
+-    def transform(self, node, results):
+-        no_call = results.get("no_call")
+-
+-        if no_call:
+-            no_call.replace(Name("__iter__", prefix=no_call.get_prefix()))
+-        else:
+-            node.replace([x.clone() for x in results["call"]])
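
A sketch (not part of the patch) exercising the two patterns above, the
called and the uncalled form:

    from lib2to3 import refactor

    rt = refactor.RefactoringTool(["lib2to3.fixes.fix_xreadlines"])
    src = "for line in f.xreadlines():\n    pass\ng(f.xreadlines)\n"
    print rt.refactor_string(src, "<example>")
    # -> for line in f:
    #        pass
    #    g(f.__iter__)
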
+diff -r 531f2e948299 lib2to3/fixes/fix_zip.py
+--- a/lib2to3/fixes/fix_zip.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,34 +0,0 @@
+-"""
+-Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
+-unless there exists a 'from future_builtins import zip' statement in the
+-top-level namespace.
+-
+-We avoid the transformation if the zip() call is directly contained in
+-iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+-"""
+-
+-# Local imports
+-from .. import fixer_base
+-from ..fixer_util import Name, Call, in_special_context
+-
+-class FixZip(fixer_base.ConditionalFix):
+-
+-    PATTERN = """
+-    power< 'zip' args=trailer< '(' [any] ')' >
+-    >
+-    """
+-
+-    skip_on = "future_builtins.zip"
+-
+-    def transform(self, node, results):
+-        if self.should_skip(node):
+-            return
+-
+-        if in_special_context(node):
+-            return None
+-
+-        new = node.clone()
+-        new.set_prefix("")
+-        new = Call(Name("list"), [new])
+-        new.set_prefix(node.get_prefix())
+-        return new
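
A sketch (not part of the patch); the special-context guard means only the
first zip() call below is wrapped:

    from lib2to3 import refactor

    rt = refactor.RefactoringTool(["lib2to3.fixes.fix_zip"])
    src = "pairs = zip(a, b)\nfor p in zip(a, b): pass\n"
    print rt.refactor_string(src, "<example>")
    # -> pairs = list(zip(a, b))
    #    for p in zip(a, b): pass
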
+diff -r 531f2e948299 lib2to3/main.py
+--- a/lib2to3/main.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,133 +0,0 @@
+-"""
+-Main program for 2to3.
+-"""
+-
+-import sys
+-import os
+-import logging
+-import shutil
+-import optparse
+-
+-from . import refactor
+-
+-
+-class StdoutRefactoringTool(refactor.RefactoringTool):
+-    """
+-    Prints output to stdout.
+-    """
+-
+-    def __init__(self, fixers, options, explicit, nobackups):
+-        self.nobackups = nobackups
+-        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
+-
+-    def log_error(self, msg, *args, **kwargs):
+-        self.errors.append((msg, args, kwargs))
+-        self.logger.error(msg, *args, **kwargs)
+-
+-    def write_file(self, new_text, filename, old_text):
+-        if not self.nobackups:
+-            # Make backup
+-            backup = filename + ".bak"
+-            if os.path.lexists(backup):
+-                try:
+-                    os.remove(backup)
+-                except os.error, err:
+-                    self.log_message("Can't remove backup %s", backup)
+-            try:
+-                os.rename(filename, backup)
+-            except os.error, err:
+-                self.log_message("Can't rename %s to %s", filename, backup)
+-        # Actually write the new file
+-        super(StdoutRefactoringTool, self).write_file(new_text,
+-                                                      filename, old_text)
+-        if not self.nobackups:
+-            shutil.copymode(backup, filename)
+-
+-    def print_output(self, lines):
+-        for line in lines:
+-            print line
+-
+-
+-def main(fixer_pkg, args=None):
+-    """Main program.
+-
+-    Args:
+-        fixer_pkg: the name of a package where the fixers are located.
+-        args: optional; a list of command line arguments. If omitted,
+-              sys.argv[1:] is used.
+-
+-    Returns a suggested exit status (0, 1, 2).
+-    """
+-    # Set up option parser
+-    parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
+-    parser.add_option("-d", "--doctests_only", action="store_true",
+-                      help="Fix up doctests only")
+-    parser.add_option("-f", "--fix", action="append", default=[],
+-                      help="Each FIX specifies a transformation; default: all")
+-    parser.add_option("-x", "--nofix", action="append", default=[],
+-                      help="Prevent a fixer from being run.")
+-    parser.add_option("-l", "--list-fixes", action="store_true",
+-                      help="List available transformations (fixes/fix_*.py)")
+-    parser.add_option("-p", "--print-function", action="store_true",
+-                      help="Modify the grammar so that print() is a function")
+-    parser.add_option("-v", "--verbose", action="store_true",
+-                      help="More verbose logging")
+-    parser.add_option("-w", "--write", action="store_true",
+-                      help="Write back modified files")
+-    parser.add_option("-n", "--nobackups", action="store_true", default=False,
+-                      help="Don't write backups for modified files.")
+-
+-    # Parse command line arguments
+-    refactor_stdin = False
+-    options, args = parser.parse_args(args)
+-    if not options.write and options.nobackups:
+-        parser.error("Can't use -n without -w")
+-    if options.list_fixes:
+-        print "Available transformations for the -f/--fix option:"
+-        for fixname in refactor.get_all_fix_names(fixer_pkg):
+-            print fixname
+-        if not args:
+-            return 0
+-    if not args:
+-        print >>sys.stderr, "At least one file or directory argument required."
+-        print >>sys.stderr, "Use --help to show usage."
+-        return 2
+-    if "-" in args:
+-        refactor_stdin = True
+-        if options.write:
+-            print >>sys.stderr, "Can't write to stdin."
+-            return 2
+-
+-    # Set up logging handler
+-    level = logging.DEBUG if options.verbose else logging.INFO
+-    logging.basicConfig(format='%(name)s: %(message)s', level=level)
+-
+-    # Initialize the refactoring tool
+-    rt_opts = {"print_function" : options.print_function}
+-    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
+-    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
+-    explicit = set()
+-    if options.fix:
+-        all_present = False
+-        for fix in options.fix:
+-            if fix == "all":
+-                all_present = True
+-            else:
+-                explicit.add(fixer_pkg + ".fix_" + fix)
+-        requested = avail_fixes.union(explicit) if all_present else explicit
+-    else:
+-        requested = avail_fixes.union(explicit)
+-    fixer_names = requested.difference(unwanted_fixes)
+-    rt = StdoutRefactoringTool(sorted(fixer_names), rt_opts, sorted(explicit),
+-                               options.nobackups)
+-
+-    # Refactor all files and directories passed as arguments
+-    if not rt.errors:
+-        if refactor_stdin:
+-            rt.refactor_stdin()
+-        else:
+-            rt.refactor(args, options.write, options.doctests_only)
+-        rt.summarize()
+-
+-    # Return error status (0 if rt.errors is zero)
+-    return int(bool(rt.errors))
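
main() is what console launchers delegate to; a 2to3-style launcher can be
as small as the following sketch (not part of the patch; the real scripts in
this commit may differ):

    import sys
    from lib2to3.main import main

    sys.exit(main("lib2to3.fixes"))
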
+diff -r 531f2e948299 lib2to3/patcomp.py
+--- a/lib2to3/patcomp.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,186 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Pattern compiler.
+-
+-The grammar is taken from PatternGrammar.txt.
+-
+-The compiler compiles a pattern to a pytree.*Pattern instance.
+-"""
+-
+-__author__ = "Guido van Rossum <guido at python.org>"
+-
+-# Python imports
+-import os
+-
+-# Fairly local imports
+-from .pgen2 import driver
+-from .pgen2 import literals
+-from .pgen2 import token
+-from .pgen2 import tokenize
+-
+-# Really local imports
+-from . import pytree
+-from . import pygram
+-
+-# The pattern grammar file
+-_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
+-                                     "PatternGrammar.txt")
+-
+-
+-def tokenize_wrapper(input):
+-    """Tokenizes a string suppressing significant whitespace."""
+-    skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
+-    tokens = tokenize.generate_tokens(driver.generate_lines(input).next)
+-    for quintuple in tokens:
+-        type, value, start, end, line_text = quintuple
+-        if type not in skip:
+-            yield quintuple
+-
+-
+-class PatternCompiler(object):
+-
+-    def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
+-        """Initializer.
+-
+-        Takes an optional alternative filename for the pattern grammar.
+-        """
+-        self.grammar = driver.load_grammar(grammar_file)
+-        self.syms = pygram.Symbols(self.grammar)
+-        self.pygrammar = pygram.python_grammar
+-        self.pysyms = pygram.python_symbols
+-        self.driver = driver.Driver(self.grammar, convert=pattern_convert)
+-
+-    def compile_pattern(self, input, debug=False):
+-        """Compiles a pattern string to a nested pytree.*Pattern object."""
+-        tokens = tokenize_wrapper(input)
+-        root = self.driver.parse_tokens(tokens, debug=debug)
+-        return self.compile_node(root)
+-
+-    def compile_node(self, node):
+-        """Compiles a node, recursively.
+-
+-        This is one big switch on the node type.
+-        """
+-        # XXX Optimize certain Wildcard-containing-Wildcard patterns
+-        # that can be merged
+-        if node.type == self.syms.Matcher:
+-            node = node.children[0] # Avoid unneeded recursion
+-
+-        if node.type == self.syms.Alternatives:
+-            # Skip the odd children since they are just '|' tokens
+-            alts = [self.compile_node(ch) for ch in node.children[::2]]
+-            if len(alts) == 1:
+-                return alts[0]
+-            p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
+-            return p.optimize()
+-
+-        if node.type == self.syms.Alternative:
+-            units = [self.compile_node(ch) for ch in node.children]
+-            if len(units) == 1:
+-                return units[0]
+-            p = pytree.WildcardPattern([units], min=1, max=1)
+-            return p.optimize()
+-
+-        if node.type == self.syms.NegatedUnit:
+-            pattern = self.compile_basic(node.children[1:])
+-            p = pytree.NegatedPattern(pattern)
+-            return p.optimize()
+-
+-        assert node.type == self.syms.Unit
+-
+-        name = None
+-        nodes = node.children
+-        if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
+-            name = nodes[0].value
+-            nodes = nodes[2:]
+-        repeat = None
+-        if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
+-            repeat = nodes[-1]
+-            nodes = nodes[:-1]
+-
+-        # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
+-        pattern = self.compile_basic(nodes, repeat)
+-
+-        if repeat is not None:
+-            assert repeat.type == self.syms.Repeater
+-            children = repeat.children
+-            child = children[0]
+-            if child.type == token.STAR:
+-                min = 0
+-                max = pytree.HUGE
+-            elif child.type == token.PLUS:
+-                min = 1
+-                max = pytree.HUGE
+-            elif child.type == token.LBRACE:
+-                assert children[-1].type == token.RBRACE
+-                assert  len(children) in (3, 5)
+-                min = max = self.get_int(children[1])
+-                if len(children) == 5:
+-                    max = self.get_int(children[3])
+-            else:
+-                assert False
+-            if min != 1 or max != 1:
+-                pattern = pattern.optimize()
+-                pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
+-
+-        if name is not None:
+-            pattern.name = name
+-        return pattern.optimize()
+-
+-    def compile_basic(self, nodes, repeat=None):
+-        # Compile STRING | NAME [Details] | (...) | [...]
+-        assert len(nodes) >= 1
+-        node = nodes[0]
+-        if node.type == token.STRING:
+-            value = literals.evalString(node.value)
+-            return pytree.LeafPattern(content=value)
+-        elif node.type == token.NAME:
+-            value = node.value
+-            if value.isupper():
+-                if value not in TOKEN_MAP:
+-                    raise SyntaxError("Invalid token: %r" % value)
+-                return pytree.LeafPattern(TOKEN_MAP[value])
+-            else:
+-                if value == "any":
+-                    type = None
+-                elif not value.startswith("_"):
+-                    type = getattr(self.pysyms, value, None)
+-                    if type is None:
+-                        raise SyntaxError("Invalid symbol: %r" % value)
+-                if nodes[1:]: # Details present
+-                    content = [self.compile_node(nodes[1].children[1])]
+-                else:
+-                    content = None
+-                return pytree.NodePattern(type, content)
+-        elif node.value == "(":
+-            return self.compile_node(nodes[1])
+-        elif node.value == "[":
+-            assert repeat is None
+-            subpattern = self.compile_node(nodes[1])
+-            return pytree.WildcardPattern([[subpattern]], min=0, max=1)
+-        assert False, node
+-
+-    def get_int(self, node):
+-        assert node.type == token.NUMBER
+-        return int(node.value)
+-
+-
+-# Map named tokens to the type value for a LeafPattern
+-TOKEN_MAP = {"NAME": token.NAME,
+-             "STRING": token.STRING,
+-             "NUMBER": token.NUMBER,
+-             "TOKEN": None}
+-
+-
+-def pattern_convert(grammar, raw_node_info):
+-    """Converts raw node information to a Node or Leaf instance."""
+-    type, value, context, children = raw_node_info
+-    if children or type in grammar.number2symbol:
+-        return pytree.Node(type, children, context=context)
+-    else:
+-        return pytree.Leaf(type, value, context=context)
+-
+-
+-def compile_pattern(pattern):
+-    return PatternCompiler().compile_pattern(pattern)
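
A sketch (not part of the patch) of the compiler's public entry point,
matching a compiled pattern against a parsed tree:

    from lib2to3 import patcomp, pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("total = sum(xrange(10))\n")
    pattern = patcomp.compile_pattern(
        "power< 'xrange' trailer< '(' args=any ')' > >")
    results = {}
    for node in tree.pre_order():
        if pattern.match(node, results):
            print "matched xrange(%s)" % results["args"]
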
+diff -r 531f2e948299 lib2to3/pgen2/.svn/all-wcprops
+--- a/lib2to3/pgen2/.svn/all-wcprops	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,59 +0,0 @@
+-K 25
+-svn:wc:ra_dav:version-url
+-V 57
+-/projects/!svn/ver/68340/sandbox/trunk/2to3/lib2to3/pgen2
+-END
+-tokenize.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/tokenize.py
+-END
+-pgen.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 65
+-/projects/!svn/ver/61629/sandbox/trunk/2to3/lib2to3/pgen2/pgen.py
+-END
+-parse.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 66
+-/projects/!svn/ver/67389/sandbox/trunk/2to3/lib2to3/pgen2/parse.py
+-END
+-driver.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 67
+-/projects/!svn/ver/68340/sandbox/trunk/2to3/lib2to3/pgen2/driver.py
+-END
+-__init__.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/__init__.py
+-END
+-literals.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 69
+-/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/literals.py
+-END
+-token.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 66
+-/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/token.py
+-END
+-conv.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 65
+-/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/conv.py
+-END
+-grammar.py
+-K 25
+-svn:wc:ra_dav:version-url
+-V 68
+-/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/grammar.py
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/dir-prop-base
+--- a/lib2to3/pgen2/.svn/dir-prop-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,8 +0,0 @@
+-K 10
+-svn:ignore
+-V 13
+-*.pyc
+-*.pyo
+-
+-
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/entries
+--- a/lib2to3/pgen2/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,334 +0,0 @@
+-9
+-
+-dir
+-70785
+-http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/pgen2
+-http://svn.python.org/projects
+-
+-
+-
+-2009-01-05T08:11:39.704315Z
+-68340
+-georg.brandl
+-has-props
+-
+-svn:special svn:externals svn:needs-lock
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-6015fed2-1504-0410-9fe1-9d1591cc4771
+-
+-tokenize.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-06aea8121aa7b0fc71345d011813d4b4
+-2008-03-17T16:59:51.273602Z
+-61441
+-martin.v.loewis
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-16184
+-
+-pgen.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-40f1eec8af5247a511bf6acc34eac994
+-2008-03-19T16:58:19.069158Z
+-61629
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-13740
+-
+-parse.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-80c0ee069eab8de116e1c13572d6cd4b
+-2008-11-25T23:13:17.968453Z
+-67389
+-benjamin.peterson
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-8053
+-
+-driver.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-e2c063aca0163f8f47fefeab1a5cdff7
+-2009-01-05T08:11:39.704315Z
+-68340
+-georg.brandl
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-4809
+-
+-__init__.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-5cb6bc9b6c96e165df87b615f2df9f1a
+-2006-11-29T17:38:40.278528Z
+-52858
+-guido.van.rossum
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-143
+-
+-literals.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-e3b1d03cade5fa0c3a1a5324e0b1e539
+-2006-11-29T17:38:40.278528Z
+-52858
+-guido.van.rossum
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1614
+-
+-token.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-8fd1f5c3fc2ad1b2afa7e17064b0ba04
+-2007-02-12T23:59:44.048119Z
+-53758
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-1244
+-
+-conv.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-942a8910f37b9e5d202806ea05f7b2f1
+-2007-02-12T23:59:44.048119Z
+-53758
+-collin.winter
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-9625
+-
+-grammar.py
+-file
+-
+-
+-
+-
+-2009-03-31T00:29:32.000000Z
+-612ee8e1a84660a7c44f7d5af3e7db69
+-2008-03-17T16:59:51.273602Z
+-61441
+-martin.v.loewis
+-has-props
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-4947
+-
+diff -r 531f2e948299 lib2to3/pgen2/.svn/format
+--- a/lib2to3/pgen2/.svn/format	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,1 +0,0 @@
+-9
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/__init__.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/__init__.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/conv.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/conv.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/driver.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/driver.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/grammar.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/grammar.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/literals.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/literals.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/parse.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/parse.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/pgen.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/pgen.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/token.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/token.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,13 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 14
+-svn:executable
+-V 1
+-*
+-K 12
+-svn:keywords
+-V 2
+-Id
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/prop-base/tokenize.py.svn-base
+--- a/lib2to3/pgen2/.svn/prop-base/tokenize.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,9 +0,0 @@
+-K 13
+-svn:eol-style
+-V 6
+-native
+-K 12
+-svn:keywords
+-V 23
+-Author Date Id Revision
+-END
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/__init__.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/__init__.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,4 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""The pgen2 package."""
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/conv.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/conv.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,257 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Convert graminit.[ch] spit out by pgen to Python code.
+-
+-Pgen is the Python parser generator.  It is useful to quickly create a
+-parser from a grammar file in Python's grammar notation.  But I don't
+-want my parsers to be written in C (yet), so I'm translating the
+-parsing tables to Python data structures and writing a Python parse
+-engine.
+-
+-Note that the token numbers are constants determined by the standard
+-Python tokenizer.  The standard token module defines these numbers and
+-their names (the names are not used much).  The token numbers are
+-hardcoded into the Python tokenizer and into pgen.  A Python
+-implementation of the Python tokenizer is also available, in the
+-standard tokenize module.
+-
+-On the other hand, symbol numbers (representing the grammar's
+-non-terminals) are assigned by pgen based on the actual grammar
+-input.
+-
+-Note: this module is pretty much obsolete; the pgen module generates
+-equivalent grammar tables directly from the Grammar.txt input file
+-without having to invoke the Python pgen C program.
+-
+-"""
+-
+-# Python imports
+-import re
+-
+-# Local imports
+-from pgen2 import grammar, token
+-
+-
+-class Converter(grammar.Grammar):
+-    """Grammar subclass that reads classic pgen output files.
+-
+-    The run() method reads the tables as produced by the pgen parser
+-    generator, typically contained in two C files, graminit.h and
+-    graminit.c.  The other methods are for internal use only.
+-
+-    See the base class for more documentation.
+-
+-    """
+-
+-    def run(self, graminit_h, graminit_c):
+-        """Load the grammar tables from the text files written by pgen."""
+-        self.parse_graminit_h(graminit_h)
+-        self.parse_graminit_c(graminit_c)
+-        self.finish_off()
+-
+-    def parse_graminit_h(self, filename):
+-        """Parse the .h file writen by pgen.  (Internal)
+-
+-        This file is a sequence of #define statements defining the
+-        nonterminals of the grammar as numbers.  We build two tables
+-        mapping the numbers to names and back.
+-
+-        """
+-        try:
+-            f = open(filename)
+-        except IOError, err:
+-            print "Can't open %s: %s" % (filename, err)
+-            return False
+-        self.symbol2number = {}
+-        self.number2symbol = {}
+-        lineno = 0
+-        for line in f:
+-            lineno += 1
+-            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
+-            if not mo and line.strip():
+-                print "%s(%s): can't parse %s" % (filename, lineno,
+-                                                  line.strip())
+-            else:
+-                symbol, number = mo.groups()
+-                number = int(number)
+-                assert symbol not in self.symbol2number
+-                assert number not in self.number2symbol
+-                self.symbol2number[symbol] = number
+-                self.number2symbol[number] = symbol
+-        return True
+-
+-    def parse_graminit_c(self, filename):
+-        """Parse the .c file writen by pgen.  (Internal)
+-
+-        The file looks as follows.  The first two lines are always this:
+-
+-        #include "pgenheaders.h"
+-        #include "grammar.h"
+-
+-        After that come four blocks:
+-
+-        1) one or more state definitions
+-        2) a table defining dfas
+-        3) a table defining labels
+-        4) a struct defining the grammar
+-
+-        A state definition has the following form:
+-        - one or more arc arrays, each of the form:
+-          static arc arcs_<n>_<m>[<k>] = {
+-                  {<i>, <j>},
+-                  ...
+-          };
+-        - followed by a state array, of the form:
+-          static state states_<s>[<t>] = {
+-                  {<k>, arcs_<n>_<m>},
+-                  ...
+-          };
+-
+-        """
+-        try:
+-            f = open(filename)
+-        except IOError, err:
+-            print "Can't open %s: %s" % (filename, err)
+-            return False
+-        # The code below essentially uses f's iterator-ness!
+-        lineno = 0
+-
+-        # Expect the two #include lines
+-        lineno, line = lineno+1, f.next()
+-        assert line == '#include "pgenheaders.h"\n', (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        assert line == '#include "grammar.h"\n', (lineno, line)
+-
+-        # Parse the state definitions
+-        lineno, line = lineno+1, f.next()
+-        allarcs = {}
+-        states = []
+-        while line.startswith("static arc "):
+-            while line.startswith("static arc "):
+-                mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
+-                              line)
+-                assert mo, (lineno, line)
+-                n, m, k = map(int, mo.groups())
+-                arcs = []
+-                for _ in range(k):
+-                    lineno, line = lineno+1, f.next()
+-                    mo = re.match(r"\s+{(\d+), (\d+)},$", line)
+-                    assert mo, (lineno, line)
+-                    i, j = map(int, mo.groups())
+-                    arcs.append((i, j))
+-                lineno, line = lineno+1, f.next()
+-                assert line == "};\n", (lineno, line)
+-                allarcs[(n, m)] = arcs
+-                lineno, line = lineno+1, f.next()
+-            mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
+-            assert mo, (lineno, line)
+-            s, t = map(int, mo.groups())
+-            assert s == len(states), (lineno, line)
+-            state = []
+-            for _ in range(t):
+-                lineno, line = lineno+1, f.next()
+-                mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
+-                assert mo, (lineno, line)
+-                k, n, m = map(int, mo.groups())
+-                arcs = allarcs[n, m]
+-                assert k == len(arcs), (lineno, line)
+-                state.append(arcs)
+-            states.append(state)
+-            lineno, line = lineno+1, f.next()
+-            assert line == "};\n", (lineno, line)
+-            lineno, line = lineno+1, f.next()
+-        self.states = states
+-
+-        # Parse the dfas
+-        dfas = {}
+-        mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
+-        assert mo, (lineno, line)
+-        ndfas = int(mo.group(1))
+-        for i in range(ndfas):
+-            lineno, line = lineno+1, f.next()
+-            mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
+-                          line)
+-            assert mo, (lineno, line)
+-            symbol = mo.group(2)
+-            number, x, y, z = map(int, mo.group(1, 3, 4, 5))
+-            assert self.symbol2number[symbol] == number, (lineno, line)
+-            assert self.number2symbol[number] == symbol, (lineno, line)
+-            assert x == 0, (lineno, line)
+-            state = states[z]
+-            assert y == len(state), (lineno, line)
+-            lineno, line = lineno+1, f.next()
+-            mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
+-            assert mo, (lineno, line)
+-            first = {}
+-            rawbitset = eval(mo.group(1))
+-            for i, c in enumerate(rawbitset):
+-                byte = ord(c)
+-                for j in range(8):
+-                    if byte & (1<<j):
+-                        first[i*8 + j] = 1
+-            dfas[number] = (state, first)
+-        lineno, line = lineno+1, f.next()
+-        assert line == "};\n", (lineno, line)
+-        self.dfas = dfas
+-
+-        # Parse the labels
+-        labels = []
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"static label labels\[(\d+)\] = {$", line)
+-        assert mo, (lineno, line)
+-        nlabels = int(mo.group(1))
+-        for i in range(nlabels):
+-            lineno, line = lineno+1, f.next()
+-            mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
+-            assert mo, (lineno, line)
+-            x, y = mo.groups()
+-            x = int(x)
+-            if y == "0":
+-                y = None
+-            else:
+-                y = eval(y)
+-            labels.append((x, y))
+-        lineno, line = lineno+1, f.next()
+-        assert line == "};\n", (lineno, line)
+-        self.labels = labels
+-
+-        # Parse the grammar struct
+-        lineno, line = lineno+1, f.next()
+-        assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"\s+(\d+),$", line)
+-        assert mo, (lineno, line)
+-        ndfas = int(mo.group(1))
+-        assert ndfas == len(self.dfas)
+-        lineno, line = lineno+1, f.next()
+-        assert line == "\tdfas,\n", (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"\s+{(\d+), labels},$", line)
+-        assert mo, (lineno, line)
+-        nlabels = int(mo.group(1))
+-        assert nlabels == len(self.labels), (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"\s+(\d+)$", line)
+-        assert mo, (lineno, line)
+-        start = int(mo.group(1))
+-        assert start in self.number2symbol, (lineno, line)
+-        self.start = start
+-        lineno, line = lineno+1, f.next()
+-        assert line == "};\n", (lineno, line)
+-        try:
+-            lineno, line = lineno+1, f.next()
+-        except StopIteration:
+-            pass
+-        else:
+-            assert 0, (lineno, line)
+-
+-    def finish_off(self):
+-        """Create additional useful structures.  (Internal)."""
+-        self.keywords = {} # map from keyword strings to arc labels
+-        self.tokens = {}   # map from numeric token values to arc labels
+-        for ilabel, (type, value) in enumerate(self.labels):
+-            if type == token.NAME and value is not None:
+-                self.keywords[value] = ilabel
+-            elif value is None:
+-                self.tokens[type] = ilabel
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/driver.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/driver.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,146 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-# Modifications:
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Parser driver.
+-
+-This provides a high-level interface to parse a file into a syntax tree.
+-
+-"""
+-
+-__author__ = "Guido van Rossum <guido at python.org>"
+-
+-__all__ = ["Driver", "load_grammar"]
+-
+-# Python imports
+-import os
+-import logging
+-import sys
+-
+-# Pgen imports
+-from . import grammar, parse, token, tokenize, pgen
+-
+-
+-class Driver(object):
+-
+-    def __init__(self, grammar, convert=None, logger=None):
+-        self.grammar = grammar
+-        if logger is None:
+-            logger = logging.getLogger()
+-        self.logger = logger
+-        self.convert = convert
+-
+-    def parse_tokens(self, tokens, debug=False):
+-        """Parse a series of tokens and return the syntax tree."""
+-        # XXX Move the prefix computation into a wrapper around tokenize.
+-        p = parse.Parser(self.grammar, self.convert)
+-        p.setup()
+-        lineno = 1
+-        column = 0
+-        type = value = start = end = line_text = None
+-        prefix = ""
+-        for quintuple in tokens:
+-            type, value, start, end, line_text = quintuple
+-            if start != (lineno, column):
+-                assert (lineno, column) <= start, ((lineno, column), start)
+-                s_lineno, s_column = start
+-                if lineno < s_lineno:
+-                    prefix += "\n" * (s_lineno - lineno)
+-                    lineno = s_lineno
+-                    column = 0
+-                if column < s_column:
+-                    prefix += line_text[column:s_column]
+-                    column = s_column
+-            if type in (tokenize.COMMENT, tokenize.NL):
+-                prefix += value
+-                lineno, column = end
+-                if value.endswith("\n"):
+-                    lineno += 1
+-                    column = 0
+-                continue
+-            if type == token.OP:
+-                type = grammar.opmap[value]
+-            if debug:
+-                self.logger.debug("%s %r (prefix=%r)",
+-                                  token.tok_name[type], value, prefix)
+-            if p.addtoken(type, value, (prefix, start)):
+-                if debug:
+-                    self.logger.debug("Stop.")
+-                break
+-            prefix = ""
+-            lineno, column = end
+-            if value.endswith("\n"):
+-                lineno += 1
+-                column = 0
+-        else:
+-            # We never broke out -- EOF is too soon (how can this happen???)
+-            raise parse.ParseError("incomplete input",
+-                                   type, value, (prefix, start))
+-        return p.rootnode
+-
+-    def parse_stream_raw(self, stream, debug=False):
+-        """Parse a stream and return the syntax tree."""
+-        tokens = tokenize.generate_tokens(stream.readline)
+-        return self.parse_tokens(tokens, debug)
+-
+-    def parse_stream(self, stream, debug=False):
+-        """Parse a stream and return the syntax tree."""
+-        return self.parse_stream_raw(stream, debug)
+-
+-    def parse_file(self, filename, debug=False):
+-        """Parse a file and return the syntax tree."""
+-        stream = open(filename)
+-        try:
+-            return self.parse_stream(stream, debug)
+-        finally:
+-            stream.close()
+-
+-    def parse_string(self, text, debug=False):
+-        """Parse a string and return the syntax tree."""
+-        tokens = tokenize.generate_tokens(generate_lines(text).next)
+-        return self.parse_tokens(tokens, debug)
+-
+-
+-def generate_lines(text):
+-    """Generator that behaves like readline without using StringIO."""
+-    for line in text.splitlines(True):
+-        yield line
+-    while True:
+-        yield ""
+-
+-
+-def load_grammar(gt="Grammar.txt", gp=None,
+-                 save=True, force=False, logger=None):
+-    """Load the grammar (maybe from a pickle)."""
+-    if logger is None:
+-        logger = logging.getLogger()
+-    if gp is None:
+-        head, tail = os.path.splitext(gt)
+-        if tail == ".txt":
+-            tail = ""
+-        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
+-    if force or not _newer(gp, gt):
+-        logger.info("Generating grammar tables from %s", gt)
+-        g = pgen.generate_grammar(gt)
+-        if save:
+-            logger.info("Writing grammar tables to %s", gp)
+-            try:
+-                g.dump(gp)
+-            except IOError, e:
+-                logger.info("Writing failed:"+str(e))
+-    else:
+-        g = grammar.Grammar()
+-        g.load(gp)
+-    return g
+-
+-
+-def _newer(a, b):
+-    """Inquire whether file a was written since file b."""
+-    if not os.path.exists(a):
+-        return False
+-    if not os.path.exists(b):
+-        return True
+-    return os.path.getmtime(a) >= os.path.getmtime(b)
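
A sketch (not part of the patch) of the typical parse pipeline; per _newer()
above, load_grammar() only regenerates the pickle when Grammar.txt is newer
(the grammar path here is illustrative):

    from lib2to3 import pytree
    from lib2to3.pgen2 import driver

    grammar = driver.load_grammar("lib2to3/Grammar.txt")
    d = driver.Driver(grammar, convert=pytree.convert)
    src = "x = 1  # comment\n"
    tree = d.parse_string(src)
    print str(tree) == src  # True: prefixes preserve whitespace and comments
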
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/grammar.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/grammar.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,171 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""This module defines the data structures used to represent a grammar.
+-
+-These are a bit arcane because they are derived from the data
+-structures used by Python's 'pgen' parser generator.
+-
+-There's also a table here mapping operators to their names in the
+-token module; the Python tokenize module reports all operators as the
+-fallback token code OP, but the parser needs the actual token code.
+-
+-"""
+-
+-# Python imports
+-import pickle
+-
+-# Local imports
+-from . import token, tokenize
+-
+-
+-class Grammar(object):
+-    """Pgen parsing tables tables conversion class.
+-
+-    Once initialized, this class supplies the grammar tables for the
+-    parsing engine implemented by parse.py.  The parsing engine
+-    accesses the instance variables directly.  The class here does not
+-    provide initialization of the tables; several subclasses exist to
+-    do this (see the conv and pgen modules).
+-
+-    The load() method reads the tables from a pickle file, which is
+-    much faster than the other ways offered by subclasses.  The pickle
+-    file is written by calling dump() (after loading the grammar
+-    tables using a subclass).  The report() method prints a readable
+-    representation of the tables to stdout, for debugging.
+-
+-    The instance variables are as follows:
+-
+-    symbol2number -- a dict mapping symbol names to numbers.  Symbol
+-                     numbers are always 256 or higher, to distinguish
+-                     them from token numbers, which are between 0 and
+-                     255 (inclusive).
+-
+-    number2symbol -- a dict mapping numbers to symbol names;
+-                     these two are each other's inverse.
+-
+-    states        -- a list of DFAs, where each DFA is a list of
+-                     states, each state is a list of arcs, and each
+-                     arc is an (i, j) pair where i is a label and j is
+-                     a state number.  The DFA number is the index into
+-                     this list.  (This name is slightly confusing.)
+-                     Final states are represented by a special arc of
+-                     the form (0, j) where j is its own state number.
+-
+-    dfas          -- a dict mapping symbol numbers to (DFA, first)
+-                     pairs, where DFA is an item from the states list
+-                     above, and first is a set of tokens that can
+-                     begin this grammar rule (represented by a dict
+-                     whose values are always 1).
+-
+-    labels        -- a list of (x, y) pairs where x is either a token
+-                     number or a symbol number, and y is either None
+-                     or a string; the strings are keywords.  The label
+-                     number is the index in this list; label numbers
+-                     are used to mark state transitions (arcs) in the
+-                     DFAs.
+-
+-    start         -- the number of the grammar's start symbol.
+-
+-    keywords      -- a dict mapping keyword strings to arc labels.
+-
+-    tokens        -- a dict mapping token numbers to arc labels.
+-
+-    """
+-
+-    def __init__(self):
+-        self.symbol2number = {}
+-        self.number2symbol = {}
+-        self.states = []
+-        self.dfas = {}
+-        self.labels = [(0, "EMPTY")]
+-        self.keywords = {}
+-        self.tokens = {}
+-        self.symbol2label = {}
+-        self.start = 256
+-
+-    def dump(self, filename):
+-        """Dump the grammar tables to a pickle file."""
+-        f = open(filename, "wb")
+-        pickle.dump(self.__dict__, f, 2)
+-        f.close()
+-
+-    def load(self, filename):
+-        """Load the grammar tables from a pickle file."""
+-        f = open(filename, "rb")
+-        d = pickle.load(f)
+-        f.close()
+-        self.__dict__.update(d)
+-
+-    def report(self):
+-        """Dump the grammar tables to standard output, for debugging."""
+-        from pprint import pprint
+-        print "s2n"
+-        pprint(self.symbol2number)
+-        print "n2s"
+-        pprint(self.number2symbol)
+-        print "states"
+-        pprint(self.states)
+-        print "dfas"
+-        pprint(self.dfas)
+-        print "labels"
+-        pprint(self.labels)
+-        print "start", self.start
+-
+-
+-# Map from operator to number (since tokenize doesn't do this)
+-
+-opmap_raw = """
+-( LPAR
+-) RPAR
+-[ LSQB
+-] RSQB
+-: COLON
+-, COMMA
+-; SEMI
+-+ PLUS
+-- MINUS
+-* STAR
+-/ SLASH
+-| VBAR
+-& AMPER
+-< LESS
+-> GREATER
+-= EQUAL
+-. DOT
+-% PERCENT
+-` BACKQUOTE
+-{ LBRACE
+-} RBRACE
+-@ AT
+-== EQEQUAL
+-!= NOTEQUAL
+-<> NOTEQUAL
+-<= LESSEQUAL
+->= GREATEREQUAL
+-~ TILDE
+-^ CIRCUMFLEX
+-<< LEFTSHIFT
+->> RIGHTSHIFT
+-** DOUBLESTAR
+-+= PLUSEQUAL
+--= MINEQUAL
+-*= STAREQUAL
+-/= SLASHEQUAL
+-%= PERCENTEQUAL
+-&= AMPEREQUAL
+-|= VBAREQUAL
+-^= CIRCUMFLEXEQUAL
+-<<= LEFTSHIFTEQUAL
+->>= RIGHTSHIFTEQUAL
+-**= DOUBLESTAREQUAL
+-// DOUBLESLASH
+-//= DOUBLESLASHEQUAL
+--> RARROW
+-"""
+-
+-opmap = {}
+-for line in opmap_raw.splitlines():
+-    if line:
+-        op, name = line.split()
+-        opmap[op] = getattr(token, name)
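
For reference on the class removed above: Grammar is plain state plus
pickle I/O -- dump() and load() simply round-trip __dict__ with pickle
protocol 2 -- and opmap resolves operator strings to token codes. A
small sketch under the same pre-refactor layout (the pickle path is
illustrative):

    from lib2to3.pgen2 import grammar, token

    g = grammar.Grammar()
    g.dump("/tmp/demo.pickle")      # pickles g.__dict__
    h = grammar.Grammar()
    h.load("/tmp/demo.pickle")      # restores the same tables
    assert h.start == g.start == 256

    assert grammar.opmap["=="] == token.EQEQUAL
    assert grammar.opmap["//="] == token.DOUBLESLASHEQUAL
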
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/literals.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/literals.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,60 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Safely evaluate Python string literals without using eval()."""
+-
+-import re
+-
+-simple_escapes = {"a": "\a",
+-                  "b": "\b",
+-                  "f": "\f",
+-                  "n": "\n",
+-                  "r": "\r",
+-                  "t": "\t",
+-                  "v": "\v",
+-                  "'": "'",
+-                  '"': '"',
+-                  "\\": "\\"}
+-
+-def escape(m):
+-    all, tail = m.group(0, 1)
+-    assert all.startswith("\\")
+-    esc = simple_escapes.get(tail)
+-    if esc is not None:
+-        return esc
+-    if tail.startswith("x"):
+-        hexes = tail[1:]
+-        if len(hexes) < 2:
+-            raise ValueError("invalid hex string escape ('\\%s')" % tail)
+-        try:
+-            i = int(hexes, 16)
+-        except ValueError:
+-            raise ValueError("invalid hex string escape ('\\%s')" % tail)
+-    else:
+-        try:
+-            i = int(tail, 8)
+-        except ValueError:
+-            raise ValueError("invalid octal string escape ('\\%s')" % tail)
+-    return chr(i)
+-
+-def evalString(s):
+-    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
+-    q = s[0]
+-    if s[:3] == q*3:
+-        q = q*3
+-    assert s.endswith(q), repr(s[-len(q):])
+-    assert len(s) >= 2*len(q)
+-    s = s[len(q):-len(q)]
+-    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
+-
+-def test():
+-    for i in range(256):
+-        c = chr(i)
+-        s = repr(c)
+-        e = evalString(s)
+-        if e != c:
+-            print i, c, s, e
+-
+-
+-if __name__ == "__main__":
+-    test()
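
The module removed above reverses Python 2 string-literal quoting
without eval(): evalString() strips the quotes and escape() expands
\x, octal, and the simple backslash escapes. A quick sketch of the
expected behavior:

    from lib2to3.pgen2 import literals

    assert literals.evalString("'a\\tb'") == "a\tb"
    assert literals.evalString('"\\x41"') == "A"
    assert literals.evalString("'''abc'''") == "abc"
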
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/parse.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/parse.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,201 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Parser engine for the grammar tables generated by pgen.
+-
+-The grammar table must be loaded first.
+-
+-See Parser/parser.c in the Python distribution for additional info on
+-how this parsing engine works.
+-
+-"""
+-
+-# Local imports
+-from . import token
+-
+-class ParseError(Exception):
+-    """Exception to signal the parser is stuck."""
+-
+-    def __init__(self, msg, type, value, context):
+-        Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
+-                           (msg, type, value, context))
+-        self.msg = msg
+-        self.type = type
+-        self.value = value
+-        self.context = context
+-
+-class Parser(object):
+-    """Parser engine.
+-
+-    The proper usage sequence is:
+-
+-    p = Parser(grammar, [converter])  # create instance
+-    p.setup([start])                  # prepare for parsing
+-    <for each input token>:
+-        if p.addtoken(...):           # parse a token; may raise ParseError
+-            break
+-    root = p.rootnode                 # root of abstract syntax tree
+-
+-    A Parser instance may be reused by calling setup() repeatedly.
+-
+-    A Parser instance contains state pertaining to the current token
+-    sequence, and should not be used concurrently by different threads
+-    to parse separate token sequences.
+-
+-    See driver.py for how to get input tokens by tokenizing a file or
+-    string.
+-
+-    Parsing is complete when addtoken() returns True; the root of the
+-    abstract syntax tree can then be retrieved from the rootnode
+-    instance variable.  When a syntax error occurs, addtoken() raises
+-    the ParseError exception.  There is no error recovery; the parser
+-    cannot be used after a syntax error was reported (but it can be
+-    reinitialized by calling setup()).
+-
+-    """
+-
+-    def __init__(self, grammar, convert=None):
+-        """Constructor.
+-
+-        The grammar argument is a grammar.Grammar instance; see the
+-        grammar module for more information.
+-
+-        The parser is not ready yet for parsing; you must call the
+-        setup() method to get it started.
+-
+-        The optional convert argument is a function mapping concrete
+-        syntax tree nodes to abstract syntax tree nodes.  If not
+-        given, no conversion is done and the syntax tree produced is
+-        the concrete syntax tree.  If given, it must be a function of
+-        two arguments, the first being the grammar (a grammar.Grammar
+-        instance), and the second being the concrete syntax tree node
+-        to be converted.  The syntax tree is converted from the bottom
+-        up.
+-
+-        A concrete syntax tree node is a (type, value, context, nodes)
+-        tuple, where type is the node type (a token or symbol number),
+-        value is None for symbols and a string for tokens, context is
+-        None or an opaque value used for error reporting (typically a
+-        (lineno, offset) pair), and nodes is a list of children for
+-        symbols, and None for tokens.
+-
+-        An abstract syntax tree node may be anything; this is entirely
+-        up to the converter function.
+-
+-        """
+-        self.grammar = grammar
+-        self.convert = convert or (lambda grammar, node: node)
+-
+-    def setup(self, start=None):
+-        """Prepare for parsing.
+-
+-        This *must* be called before starting to parse.
+-
+-        The optional argument is an alternative start symbol; it
+-        defaults to the grammar's start symbol.
+-
+-        You can use a Parser instance to parse any number of programs;
+-        each time you call setup() the parser is reset to an initial
+-        state determined by the (implicit or explicit) start symbol.
+-
+-        """
+-        if start is None:
+-            start = self.grammar.start
+-        # Each stack entry is a tuple: (dfa, state, node).
+-        # A node is a tuple: (type, value, context, children),
+-        # where children is a list of nodes or None, and context may be None.
+-        newnode = (start, None, None, [])
+-        stackentry = (self.grammar.dfas[start], 0, newnode)
+-        self.stack = [stackentry]
+-        self.rootnode = None
+-        self.used_names = set() # Aliased to self.rootnode.used_names in pop()
+-
+-    def addtoken(self, type, value, context):
+-        """Add a token; return True iff this is the end of the program."""
+-        # Map from token to label
+-        ilabel = self.classify(type, value, context)
+-        # Loop until the token is shifted; may raise exceptions
+-        while True:
+-            dfa, state, node = self.stack[-1]
+-            states, first = dfa
+-            arcs = states[state]
+-            # Look for a state with this label
+-            for i, newstate in arcs:
+-                t, v = self.grammar.labels[i]
+-                if ilabel == i:
+-                    # Look it up in the list of labels
+-                    assert t < 256
+-                    # Shift a token; we're done with it
+-                    self.shift(type, value, newstate, context)
+-                    # Pop while we are in an accept-only state
+-                    state = newstate
+-                    while states[state] == [(0, state)]:
+-                        self.pop()
+-                        if not self.stack:
+-                            # Done parsing!
+-                            return True
+-                        dfa, state, node = self.stack[-1]
+-                        states, first = dfa
+-                    # Done with this token
+-                    return False
+-                elif t >= 256:
+-                    # See if it's a symbol and if we're in its first set
+-                    itsdfa = self.grammar.dfas[t]
+-                    itsstates, itsfirst = itsdfa
+-                    if ilabel in itsfirst:
+-                        # Push a symbol
+-                        self.push(t, self.grammar.dfas[t], newstate, context)
+-                        break # To continue the outer while loop
+-            else:
+-                if (0, state) in arcs:
+-                    # An accepting state, pop it and try something else
+-                    self.pop()
+-                    if not self.stack:
+-                        # Done parsing, but another token is input
+-                        raise ParseError("too much input",
+-                                         type, value, context)
+-                else:
+-                    # No success finding a transition
+-                    raise ParseError("bad input", type, value, context)
+-
+-    def classify(self, type, value, context):
+-        """Turn a token into a label.  (Internal)"""
+-        if type == token.NAME:
+-            # Keep a listing of all used names
+-            self.used_names.add(value)
+-            # Check for reserved words
+-            ilabel = self.grammar.keywords.get(value)
+-            if ilabel is not None:
+-                return ilabel
+-        ilabel = self.grammar.tokens.get(type)
+-        if ilabel is None:
+-            raise ParseError("bad token", type, value, context)
+-        return ilabel
+-
+-    def shift(self, type, value, newstate, context):
+-        """Shift a token.  (Internal)"""
+-        dfa, state, node = self.stack[-1]
+-        newnode = (type, value, context, None)
+-        newnode = self.convert(self.grammar, newnode)
+-        if newnode is not None:
+-            node[-1].append(newnode)
+-        self.stack[-1] = (dfa, newstate, node)
+-
+-    def push(self, type, newdfa, newstate, context):
+-        """Push a nonterminal.  (Internal)"""
+-        dfa, state, node = self.stack[-1]
+-        newnode = (type, None, context, [])
+-        self.stack[-1] = (dfa, newstate, node)
+-        self.stack.append((newdfa, 0, newnode))
+-
+-    def pop(self):
+-        """Pop a nonterminal.  (Internal)"""
+-        popdfa, popstate, popnode = self.stack.pop()
+-        newnode = self.convert(self.grammar, popnode)
+-        if newnode is not None:
+-            if self.stack:
+-                dfa, state, node = self.stack[-1]
+-                node[-1].append(newnode)
+-            else:
+-                self.rootnode = newnode
+-                self.rootnode.used_names = self.used_names
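
The "proper usage sequence" in the Parser docstring above maps directly
onto tokenize output. A compressed sketch of that loop, under the same
layout assumptions as before and with pytree.convert assumed for the
conversion step; note the real Driver.parse_tokens() also threads each
token's prefix (preceding whitespace and comments) into the context,
which is elided here:

    from lib2to3 import pytree
    from lib2to3.pgen2 import driver, parse, tokenize

    g = driver.load_grammar("lib2to3/Grammar.txt")
    p = parse.Parser(g, convert=pytree.convert)
    p.setup()
    lines = iter(["x = 1\n"])
    for type, value, start, end, line in tokenize.generate_tokens(lines.next):
        if type in (tokenize.COMMENT, tokenize.NL):
            continue                      # the driver skips these too
        if p.addtoken(type, value, ("", start)):
            break                         # True: start symbol fully matched
    print str(p.rootnode)
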
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/pgen.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/pgen.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,384 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-# Pgen imports
+-from . import grammar, token, tokenize
+-
+-class PgenGrammar(grammar.Grammar):
+-    pass
+-
+-class ParserGenerator(object):
+-
+-    def __init__(self, filename, stream=None):
+-        close_stream = None
+-        if stream is None:
+-            stream = open(filename)
+-            close_stream = stream.close
+-        self.filename = filename
+-        self.stream = stream
+-        self.generator = tokenize.generate_tokens(stream.readline)
+-        self.gettoken() # Initialize lookahead
+-        self.dfas, self.startsymbol = self.parse()
+-        if close_stream is not None:
+-            close_stream()
+-        self.first = {} # map from symbol name to set of tokens
+-        self.addfirstsets()
+-
+-    def make_grammar(self):
+-        c = PgenGrammar()
+-        names = self.dfas.keys()
+-        names.sort()
+-        names.remove(self.startsymbol)
+-        names.insert(0, self.startsymbol)
+-        for name in names:
+-            i = 256 + len(c.symbol2number)
+-            c.symbol2number[name] = i
+-            c.number2symbol[i] = name
+-        for name in names:
+-            dfa = self.dfas[name]
+-            states = []
+-            for state in dfa:
+-                arcs = []
+-                for label, next in state.arcs.iteritems():
+-                    arcs.append((self.make_label(c, label), dfa.index(next)))
+-                if state.isfinal:
+-                    arcs.append((0, dfa.index(state)))
+-                states.append(arcs)
+-            c.states.append(states)
+-            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
+-        c.start = c.symbol2number[self.startsymbol]
+-        return c
+-
+-    def make_first(self, c, name):
+-        rawfirst = self.first[name]
+-        first = {}
+-        for label in rawfirst:
+-            ilabel = self.make_label(c, label)
+-            ##assert ilabel not in first # XXX failed on <> ... !=
+-            first[ilabel] = 1
+-        return first
+-
+-    def make_label(self, c, label):
+-        # XXX Maybe this should be a method on a subclass of converter?
+-        ilabel = len(c.labels)
+-        if label[0].isalpha():
+-            # Either a symbol name or a named token
+-            if label in c.symbol2number:
+-                # A symbol name (a non-terminal)
+-                if label in c.symbol2label:
+-                    return c.symbol2label[label]
+-                else:
+-                    c.labels.append((c.symbol2number[label], None))
+-                    c.symbol2label[label] = ilabel
+-                    return ilabel
+-            else:
+-                # A named token (NAME, NUMBER, STRING)
+-                itoken = getattr(token, label, None)
+-                assert isinstance(itoken, int), label
+-                assert itoken in token.tok_name, label
+-                if itoken in c.tokens:
+-                    return c.tokens[itoken]
+-                else:
+-                    c.labels.append((itoken, None))
+-                    c.tokens[itoken] = ilabel
+-                    return ilabel
+-        else:
+-            # Either a keyword or an operator
+-            assert label[0] in ('"', "'"), label
+-            value = eval(label)
+-            if value[0].isalpha():
+-                # A keyword
+-                if value in c.keywords:
+-                    return c.keywords[value]
+-                else:
+-                    c.labels.append((token.NAME, value))
+-                    c.keywords[value] = ilabel
+-                    return ilabel
+-            else:
+-                # An operator (any non-numeric token)
+-                itoken = grammar.opmap[value] # Fails if unknown token
+-                if itoken in c.tokens:
+-                    return c.tokens[itoken]
+-                else:
+-                    c.labels.append((itoken, None))
+-                    c.tokens[itoken] = ilabel
+-                    return ilabel
+-
+-    def addfirstsets(self):
+-        names = self.dfas.keys()
+-        names.sort()
+-        for name in names:
+-            if name not in self.first:
+-                self.calcfirst(name)
+-            #print name, self.first[name].keys()
+-
+-    def calcfirst(self, name):
+-        dfa = self.dfas[name]
+-        self.first[name] = None # dummy to detect left recursion
+-        state = dfa[0]
+-        totalset = {}
+-        overlapcheck = {}
+-        for label, next in state.arcs.iteritems():
+-            if label in self.dfas:
+-                if label in self.first:
+-                    fset = self.first[label]
+-                    if fset is None:
+-                        raise ValueError("recursion for rule %r" % name)
+-                else:
+-                    self.calcfirst(label)
+-                    fset = self.first[label]
+-                totalset.update(fset)
+-                overlapcheck[label] = fset
+-            else:
+-                totalset[label] = 1
+-                overlapcheck[label] = {label: 1}
+-        inverse = {}
+-        for label, itsfirst in overlapcheck.iteritems():
+-            for symbol in itsfirst:
+-                if symbol in inverse:
+-                    raise ValueError("rule %s is ambiguous; %s is in the"
+-                                     " first sets of %s as well as %s" %
+-                                     (name, symbol, label, inverse[symbol]))
+-                inverse[symbol] = label
+-        self.first[name] = totalset
+-
+-    def parse(self):
+-        dfas = {}
+-        startsymbol = None
+-        # MSTART: (NEWLINE | RULE)* ENDMARKER
+-        while self.type != token.ENDMARKER:
+-            while self.type == token.NEWLINE:
+-                self.gettoken()
+-            # RULE: NAME ':' RHS NEWLINE
+-            name = self.expect(token.NAME)
+-            self.expect(token.OP, ":")
+-            a, z = self.parse_rhs()
+-            self.expect(token.NEWLINE)
+-            #self.dump_nfa(name, a, z)
+-            dfa = self.make_dfa(a, z)
+-            #self.dump_dfa(name, dfa)
+-            oldlen = len(dfa)
+-            self.simplify_dfa(dfa)
+-            newlen = len(dfa)
+-            dfas[name] = dfa
+-            #print name, oldlen, newlen
+-            if startsymbol is None:
+-                startsymbol = name
+-        return dfas, startsymbol
+-
+-    def make_dfa(self, start, finish):
+-        # To turn an NFA into a DFA, we define the states of the DFA
+-        # to correspond to *sets* of states of the NFA.  Then do some
+-        # state reduction.  Let's represent sets as dicts with 1 for
+-        # values.
+-        assert isinstance(start, NFAState)
+-        assert isinstance(finish, NFAState)
+-        def closure(state):
+-            base = {}
+-            addclosure(state, base)
+-            return base
+-        def addclosure(state, base):
+-            assert isinstance(state, NFAState)
+-            if state in base:
+-                return
+-            base[state] = 1
+-            for label, next in state.arcs:
+-                if label is None:
+-                    addclosure(next, base)
+-        states = [DFAState(closure(start), finish)]
+-        for state in states: # NB states grows while we're iterating
+-            arcs = {}
+-            for nfastate in state.nfaset:
+-                for label, next in nfastate.arcs:
+-                    if label is not None:
+-                        addclosure(next, arcs.setdefault(label, {}))
+-            for label, nfaset in arcs.iteritems():
+-                for st in states:
+-                    if st.nfaset == nfaset:
+-                        break
+-                else:
+-                    st = DFAState(nfaset, finish)
+-                    states.append(st)
+-                state.addarc(st, label)
+-        return states # List of DFAState instances; first one is start
+-
+-    def dump_nfa(self, name, start, finish):
+-        print "Dump of NFA for", name
+-        todo = [start]
+-        for i, state in enumerate(todo):
+-            print "  State", i, state is finish and "(final)" or ""
+-            for label, next in state.arcs:
+-                if next in todo:
+-                    j = todo.index(next)
+-                else:
+-                    j = len(todo)
+-                    todo.append(next)
+-                if label is None:
+-                    print "    -> %d" % j
+-                else:
+-                    print "    %s -> %d" % (label, j)
+-
+-    def dump_dfa(self, name, dfa):
+-        print "Dump of DFA for", name
+-        for i, state in enumerate(dfa):
+-            print "  State", i, state.isfinal and "(final)" or ""
+-            for label, next in state.arcs.iteritems():
+-                print "    %s -> %d" % (label, dfa.index(next))
+-
+-    def simplify_dfa(self, dfa):
+-        # This is not theoretically optimal, but works well enough.
+-        # Algorithm: repeatedly look for two states that have the same
+-        # set of arcs (same labels pointing to the same nodes) and
+-        # unify them, until things stop changing.
+-
+-        # dfa is a list of DFAState instances
+-        changes = True
+-        while changes:
+-            changes = False
+-            for i, state_i in enumerate(dfa):
+-                for j in range(i+1, len(dfa)):
+-                    state_j = dfa[j]
+-                    if state_i == state_j:
+-                        #print "  unify", i, j
+-                        del dfa[j]
+-                        for state in dfa:
+-                            state.unifystate(state_j, state_i)
+-                        changes = True
+-                        break
+-
+-    def parse_rhs(self):
+-        # RHS: ALT ('|' ALT)*
+-        a, z = self.parse_alt()
+-        if self.value != "|":
+-            return a, z
+-        else:
+-            aa = NFAState()
+-            zz = NFAState()
+-            aa.addarc(a)
+-            z.addarc(zz)
+-            while self.value == "|":
+-                self.gettoken()
+-                a, z = self.parse_alt()
+-                aa.addarc(a)
+-                z.addarc(zz)
+-            return aa, zz
+-
+-    def parse_alt(self):
+-        # ALT: ITEM+
+-        a, b = self.parse_item()
+-        while (self.value in ("(", "[") or
+-               self.type in (token.NAME, token.STRING)):
+-            c, d = self.parse_item()
+-            b.addarc(c)
+-            b = d
+-        return a, b
+-
+-    def parse_item(self):
+-        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
+-        if self.value == "[":
+-            self.gettoken()
+-            a, z = self.parse_rhs()
+-            self.expect(token.OP, "]")
+-            a.addarc(z)
+-            return a, z
+-        else:
+-            a, z = self.parse_atom()
+-            value = self.value
+-            if value not in ("+", "*"):
+-                return a, z
+-            self.gettoken()
+-            z.addarc(a)
+-            if value == "+":
+-                return a, z
+-            else:
+-                return a, a
+-
+-    def parse_atom(self):
+-        # ATOM: '(' RHS ')' | NAME | STRING
+-        if self.value == "(":
+-            self.gettoken()
+-            a, z = self.parse_rhs()
+-            self.expect(token.OP, ")")
+-            return a, z
+-        elif self.type in (token.NAME, token.STRING):
+-            a = NFAState()
+-            z = NFAState()
+-            a.addarc(z, self.value)
+-            self.gettoken()
+-            return a, z
+-        else:
+-            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
+-                             self.type, self.value)
+-
+-    def expect(self, type, value=None):
+-        if self.type != type or (value is not None and self.value != value):
+-            self.raise_error("expected %s/%s, got %s/%s",
+-                             type, value, self.type, self.value)
+-        value = self.value
+-        self.gettoken()
+-        return value
+-
+-    def gettoken(self):
+-        tup = self.generator.next()
+-        while tup[0] in (tokenize.COMMENT, tokenize.NL):
+-            tup = self.generator.next()
+-        self.type, self.value, self.begin, self.end, self.line = tup
+-        #print token.tok_name[self.type], repr(self.value)
+-
+-    def raise_error(self, msg, *args):
+-        if args:
+-            try:
+-                msg = msg % args
+-            except:
+-                msg = " ".join([msg] + map(str, args))
+-        raise SyntaxError(msg, (self.filename, self.end[0],
+-                                self.end[1], self.line))
+-
+-class NFAState(object):
+-
+-    def __init__(self):
+-        self.arcs = [] # list of (label, NFAState) pairs
+-
+-    def addarc(self, next, label=None):
+-        assert label is None or isinstance(label, str)
+-        assert isinstance(next, NFAState)
+-        self.arcs.append((label, next))
+-
+-class DFAState(object):
+-
+-    def __init__(self, nfaset, final):
+-        assert isinstance(nfaset, dict)
+-        assert isinstance(iter(nfaset).next(), NFAState)
+-        assert isinstance(final, NFAState)
+-        self.nfaset = nfaset
+-        self.isfinal = final in nfaset
+-        self.arcs = {} # map from label to DFAState
+-
+-    def addarc(self, next, label):
+-        assert isinstance(label, str)
+-        assert label not in self.arcs
+-        assert isinstance(next, DFAState)
+-        self.arcs[label] = next
+-
+-    def unifystate(self, old, new):
+-        for label, next in self.arcs.iteritems():
+-            if next is old:
+-                self.arcs[label] = new
+-
+-    def __eq__(self, other):
+-        # Equality test -- ignore the nfaset instance variable
+-        assert isinstance(other, DFAState)
+-        if self.isfinal != other.isfinal:
+-            return False
+-        # Can't just return self.arcs == other.arcs, because that
+-        # would invoke this method recursively, with cycles...
+-        if len(self.arcs) != len(other.arcs):
+-            return False
+-        for label, next in self.arcs.iteritems():
+-            if next is not other.arcs.get(label):
+-                return False
+-        return True
+-
+-def generate_grammar(filename="Grammar.txt"):
+-    p = ParserGenerator(filename)
+-    return p.make_grammar()
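
generate_grammar() above is what driver.load_grammar() falls back to
when the pickle is stale: ParserGenerator tokenizes Grammar.txt, builds
an NFA per rule, converts each NFA to a DFA, simplifies it, and emits a
PgenGrammar. A sketch, same illustrative path as before:

    from lib2to3.pgen2 import pgen

    g = pgen.generate_grammar("lib2to3/Grammar.txt")
    print len(g.dfas), "rules;", len(g.labels), "labels"
    g.report()   # dumps the full tables to stdout; very verbose
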
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/token.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/token.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,82 +0,0 @@
+-#! /usr/bin/env python
+-
+-"""Token constants (from "token.h")."""
+-
+-#  Taken from Python (r53757) and modified to include some tokens
+-#   originally monkeypatched in by pgen2.tokenize
+-
+-#--start constants--
+-ENDMARKER = 0
+-NAME = 1
+-NUMBER = 2
+-STRING = 3
+-NEWLINE = 4
+-INDENT = 5
+-DEDENT = 6
+-LPAR = 7
+-RPAR = 8
+-LSQB = 9
+-RSQB = 10
+-COLON = 11
+-COMMA = 12
+-SEMI = 13
+-PLUS = 14
+-MINUS = 15
+-STAR = 16
+-SLASH = 17
+-VBAR = 18
+-AMPER = 19
+-LESS = 20
+-GREATER = 21
+-EQUAL = 22
+-DOT = 23
+-PERCENT = 24
+-BACKQUOTE = 25
+-LBRACE = 26
+-RBRACE = 27
+-EQEQUAL = 28
+-NOTEQUAL = 29
+-LESSEQUAL = 30
+-GREATEREQUAL = 31
+-TILDE = 32
+-CIRCUMFLEX = 33
+-LEFTSHIFT = 34
+-RIGHTSHIFT = 35
+-DOUBLESTAR = 36
+-PLUSEQUAL = 37
+-MINEQUAL = 38
+-STAREQUAL = 39
+-SLASHEQUAL = 40
+-PERCENTEQUAL = 41
+-AMPEREQUAL = 42
+-VBAREQUAL = 43
+-CIRCUMFLEXEQUAL = 44
+-LEFTSHIFTEQUAL = 45
+-RIGHTSHIFTEQUAL = 46
+-DOUBLESTAREQUAL = 47
+-DOUBLESLASH = 48
+-DOUBLESLASHEQUAL = 49
+-AT = 50
+-OP = 51
+-COMMENT = 52
+-NL = 53
+-RARROW = 54
+-ERRORTOKEN = 55
+-N_TOKENS = 56
+-NT_OFFSET = 256
+-#--end constants--
+-
+-tok_name = {}
+-for _name, _value in globals().items():
+-    if type(_value) is type(0):
+-        tok_name[_value] = _name
+-
+-
+-def ISTERMINAL(x):
+-    return x < NT_OFFSET
+-
+-def ISNONTERMINAL(x):
+-    return x >= NT_OFFSET
+-
+-def ISEOF(x):
+-    return x == ENDMARKER
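
The token module above is a frozen copy of the stdlib constants plus
the COMMENT, NL, and RARROW codes pgen2 needs; the loop at the bottom
builds tok_name by inverting the integer globals. For example:

    from lib2to3.pgen2 import token

    assert token.tok_name[token.NAME] == "NAME"
    assert token.ISTERMINAL(token.OP)
    assert token.ISNONTERMINAL(token.NT_OFFSET)
    assert token.ISEOF(token.ENDMARKER)
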
+diff -r 531f2e948299 lib2to3/pgen2/.svn/text-base/tokenize.py.svn-base
+--- a/lib2to3/pgen2/.svn/text-base/tokenize.py.svn-base	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,405 +0,0 @@
+-# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
+-# All rights reserved.
+-
+-"""Tokenization help for Python programs.
+-
+-generate_tokens(readline) is a generator that breaks a stream of
+-text into Python tokens.  It accepts a readline-like method which is called
+-repeatedly to get the next line of input (or "" for EOF).  It generates
+-5-tuples with these members:
+-
+-    the token type (see token.py)
+-    the token (a string)
+-    the starting (row, column) indices of the token (a 2-tuple of ints)
+-    the ending (row, column) indices of the token (a 2-tuple of ints)
+-    the original line (string)
+-
+-It is designed to match the working of the Python tokenizer exactly, except
+-that it produces COMMENT tokens for comments and gives type OP for all
+-operators.
+-
+-Older entry points
+-    tokenize_loop(readline, tokeneater)
+-    tokenize(readline, tokeneater=printtoken)
+-are the same, except instead of generating tokens, tokeneater is a callback
+-function to which the 5 fields described above are passed as 5 arguments,
+-each time a new token is found."""
+-
+-__author__ = 'Ka-Ping Yee <ping at lfw.org>'
+-__credits__ = \
+-    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
+-
+-import string, re
+-from lib2to3.pgen2.token import *
+-
+-from . import token
+-__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
+-           "generate_tokens", "untokenize"]
+-del token
+-
+-def group(*choices): return '(' + '|'.join(choices) + ')'
+-def any(*choices): return group(*choices) + '*'
+-def maybe(*choices): return group(*choices) + '?'
+-
+-Whitespace = r'[ \f\t]*'
+-Comment = r'#[^\r\n]*'
+-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+-Name = r'[a-zA-Z_]\w*'
+-
+-Binnumber = r'0[bB][01]*'
+-Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
+-Octnumber = r'0[oO]?[0-7]*[lL]?'
+-Decnumber = r'[1-9]\d*[lL]?'
+-Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
+-Exponent = r'[eE][-+]?\d+'
+-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+-Expfloat = r'\d+' + Exponent
+-Floatnumber = group(Pointfloat, Expfloat)
+-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
+-Number = group(Imagnumber, Floatnumber, Intnumber)
+-
+-# Tail end of ' string.
+-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+-# Tail end of " string.
+-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+-# Tail end of ''' string.
+-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+-# Tail end of """ string.
+-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+-Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
+-# Single-line ' or " string.
+-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+-               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+-
+-# Because of leftmost-then-longest match semantics, be sure to put the
+-# longest operators first (e.g., if = came before ==, == would get
+-# recognized as two instances of =).
+-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
+-                 r"//=?", r"->",
+-                 r"[+\-*/%&|^=<>]=?",
+-                 r"~")
+-
+-Bracket = '[][(){}]'
+-Special = group(r'\r?\n', r'[:;.,`@]')
+-Funny = group(Operator, Bracket, Special)
+-
+-PlainToken = group(Number, Funny, String, Name)
+-Token = Ignore + PlainToken
+-
+-# First (or only) line of ' or " string.
+-ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+-                group("'", r'\\\r?\n'),
+-                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+-                group('"', r'\\\r?\n'))
+-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+-
+-tokenprog, pseudoprog, single3prog, double3prog = map(
+-    re.compile, (Token, PseudoToken, Single3, Double3))
+-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+-            "'''": single3prog, '"""': double3prog,
+-            "r'''": single3prog, 'r"""': double3prog,
+-            "u'''": single3prog, 'u"""': double3prog,
+-            "b'''": single3prog, 'b"""': double3prog,
+-            "ur'''": single3prog, 'ur"""': double3prog,
+-            "br'''": single3prog, 'br"""': double3prog,
+-            "R'''": single3prog, 'R"""': double3prog,
+-            "U'''": single3prog, 'U"""': double3prog,
+-            "B'''": single3prog, 'B"""': double3prog,
+-            "uR'''": single3prog, 'uR"""': double3prog,
+-            "Ur'''": single3prog, 'Ur"""': double3prog,
+-            "UR'''": single3prog, 'UR"""': double3prog,
+-            "bR'''": single3prog, 'bR"""': double3prog,
+-            "Br'''": single3prog, 'Br"""': double3prog,
+-            "BR'''": single3prog, 'BR"""': double3prog,
+-            'r': None, 'R': None,
+-            'u': None, 'U': None,
+-            'b': None, 'B': None}
+-
+-triple_quoted = {}
+-for t in ("'''", '"""',
+-          "r'''", 'r"""', "R'''", 'R"""',
+-          "u'''", 'u"""', "U'''", 'U"""',
+-          "b'''", 'b"""', "B'''", 'B"""',
+-          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+-          "uR'''", 'uR"""', "UR'''", 'UR"""',
+-          "br'''", 'br"""', "Br'''", 'Br"""',
+-          "bR'''", 'bR"""', "BR'''", 'BR"""',):
+-    triple_quoted[t] = t
+-single_quoted = {}
+-for t in ("'", '"',
+-          "r'", 'r"', "R'", 'R"',
+-          "u'", 'u"', "U'", 'U"',
+-          "b'", 'b"', "B'", 'B"',
+-          "ur'", 'ur"', "Ur'", 'Ur"',
+-          "uR'", 'uR"', "UR'", 'UR"',
+-          "br'", 'br"', "Br'", 'Br"',
+-          "bR'", 'bR"', "BR'", 'BR"', ):
+-    single_quoted[t] = t
+-
+-tabsize = 8
+-
+-class TokenError(Exception): pass
+-
+-class StopTokenizing(Exception): pass
+-
+-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
+-    print "%d,%d-%d,%d:\t%s\t%s" % \
+-        (srow, scol, erow, ecol, tok_name[type], repr(token))
+-
+-def tokenize(readline, tokeneater=printtoken):
+-    """
+-    The tokenize() function accepts two parameters: one representing the
+-    input stream, and one providing an output mechanism for tokenize().
+-
+-    The first parameter, readline, must be a callable object which provides
+-    the same interface as the readline() method of built-in file objects.
+-    Each call to the function should return one line of input as a string.
+-
+-    The second parameter, tokeneater, must also be a callable object. It is
+-    called once for each token, with five arguments, corresponding to the
+-    tuples generated by generate_tokens().
+-    """
+-    try:
+-        tokenize_loop(readline, tokeneater)
+-    except StopTokenizing:
+-        pass
+-
+-# backwards compatible interface
+-def tokenize_loop(readline, tokeneater):
+-    for token_info in generate_tokens(readline):
+-        tokeneater(*token_info)
+-
+-class Untokenizer:
+-
+-    def __init__(self):
+-        self.tokens = []
+-        self.prev_row = 1
+-        self.prev_col = 0
+-
+-    def add_whitespace(self, start):
+-        row, col = start
+-        assert row <= self.prev_row
+-        col_offset = col - self.prev_col
+-        if col_offset:
+-            self.tokens.append(" " * col_offset)
+-
+-    def untokenize(self, iterable):
+-        for t in iterable:
+-            if len(t) == 2:
+-                self.compat(t, iterable)
+-                break
+-            tok_type, token, start, end, line = t
+-            self.add_whitespace(start)
+-            self.tokens.append(token)
+-            self.prev_row, self.prev_col = end
+-            if tok_type in (NEWLINE, NL):
+-                self.prev_row += 1
+-                self.prev_col = 0
+-        return "".join(self.tokens)
+-
+-    def compat(self, token, iterable):
+-        startline = False
+-        indents = []
+-        toks_append = self.tokens.append
+-        toknum, tokval = token
+-        if toknum in (NAME, NUMBER):
+-            tokval += ' '
+-        if toknum in (NEWLINE, NL):
+-            startline = True
+-        for tok in iterable:
+-            toknum, tokval = tok[:2]
+-
+-            if toknum in (NAME, NUMBER):
+-                tokval += ' '
+-
+-            if toknum == INDENT:
+-                indents.append(tokval)
+-                continue
+-            elif toknum == DEDENT:
+-                indents.pop()
+-                continue
+-            elif toknum in (NEWLINE, NL):
+-                startline = True
+-            elif startline and indents:
+-                toks_append(indents[-1])
+-                startline = False
+-            toks_append(tokval)
+-
+-def untokenize(iterable):
+-    """Transform tokens back into Python source code.
+-
+-    Each element returned by the iterable must be a token sequence
+-    with at least two elements, a token number and token value.  If
+-    only two tokens are passed, the resulting output is poor.
+-
+-    Round-trip invariant for full input:
+-        Untokenized source will match input source exactly
+-
+-    Round-trip invariant for limited input:
+-        # Output text will tokenize back to the input
+-        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+-        newcode = untokenize(t1)
+-        readline = iter(newcode.splitlines(1)).next
+-        t2 = [tok[:2] for tok in generate_tokens(readline)]
+-        assert t1 == t2
+-    """
+-    ut = Untokenizer()
+-    return ut.untokenize(iterable)
+-
+-def generate_tokens(readline):
+-    """
+-    The generate_tokens() generator requires one argument, readline, which
+-    must be a callable object which provides the same interface as the
+-    readline() method of built-in file objects. Each call to the function
+-    should return one line of input as a string.  Alternatively, readline
+-    can be any callable that signals end of input by raising StopIteration:
+-        readline = open(myfile).next    # Example of alternate readline
+-
+-    The generator produces 5-tuples with these members: the token type; the
+-    token string; a 2-tuple (srow, scol) of ints specifying the row and
+-    column where the token begins in the source; a 2-tuple (erow, ecol) of
+-    ints specifying the row and column where the token ends in the source;
+-    and the line on which the token was found. The line passed is the
+-    logical line; continuation lines are included.
+-    """
+-    lnum = parenlev = continued = 0
+-    namechars, numchars = string.ascii_letters + '_', '0123456789'
+-    contstr, needcont = '', 0
+-    contline = None
+-    indents = [0]
+-
+-    while 1:                                   # loop over lines in stream
+-        try:
+-            line = readline()
+-        except StopIteration:
+-            line = ''
+-        lnum = lnum + 1
+-        pos, max = 0, len(line)
+-
+-        if contstr:                            # continued string
+-            if not line:
+-                raise TokenError, ("EOF in multi-line string", strstart)
+-            endmatch = endprog.match(line)
+-            if endmatch:
+-                pos = end = endmatch.end(0)
+-                yield (STRING, contstr + line[:end],
+-                       strstart, (lnum, end), contline + line)
+-                contstr, needcont = '', 0
+-                contline = None
+-            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+-                yield (ERRORTOKEN, contstr + line,
+-                           strstart, (lnum, len(line)), contline)
+-                contstr = ''
+-                contline = None
+-                continue
+-            else:
+-                contstr = contstr + line
+-                contline = contline + line
+-                continue
+-
+-        elif parenlev == 0 and not continued:  # new statement
+-            if not line: break
+-            column = 0
+-            while pos < max:                   # measure leading whitespace
+-                if line[pos] == ' ': column = column + 1
+-                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
+-                elif line[pos] == '\f': column = 0
+-                else: break
+-                pos = pos + 1
+-            if pos == max: break
+-
+-            if line[pos] in '#\r\n':           # skip comments or blank lines
+-                if line[pos] == '#':
+-                    comment_token = line[pos:].rstrip('\r\n')
+-                    nl_pos = pos + len(comment_token)
+-                    yield (COMMENT, comment_token,
+-                           (lnum, pos), (lnum, pos + len(comment_token)), line)
+-                    yield (NL, line[nl_pos:],
+-                           (lnum, nl_pos), (lnum, len(line)), line)
+-                else:
+-                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+-                           (lnum, pos), (lnum, len(line)), line)
+-                continue
+-
+-            if column > indents[-1]:           # count indents or dedents
+-                indents.append(column)
+-                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+-            while column < indents[-1]:
+-                if column not in indents:
+-                    raise IndentationError(
+-                        "unindent does not match any outer indentation level",
+-                        ("<tokenize>", lnum, pos, line))
+-                indents = indents[:-1]
+-                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
+-
+-        else:                                  # continued statement
+-            if not line:
+-                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
+-            continued = 0
+-
+-        while pos < max:
+-            pseudomatch = pseudoprog.match(line, pos)
+-            if pseudomatch:                                # scan for tokens
+-                start, end = pseudomatch.span(1)
+-                spos, epos, pos = (lnum, start), (lnum, end), end
+-                token, initial = line[start:end], line[start]
+-
+-                if initial in numchars or \
+-                   (initial == '.' and token != '.'):      # ordinary number
+-                    yield (NUMBER, token, spos, epos, line)
+-                elif initial in '\r\n':
+-                    newline = NEWLINE
+-                    if parenlev > 0:
+-                        newline = NL
+-                    yield (newline, token, spos, epos, line)
+-                elif initial == '#':
+-                    assert not token.endswith("\n")
+-                    yield (COMMENT, token, spos, epos, line)
+-                elif token in triple_quoted:
+-                    endprog = endprogs[token]
+-                    endmatch = endprog.match(line, pos)
+-                    if endmatch:                           # all on one line
+-                        pos = endmatch.end(0)
+-                        token = line[start:pos]
+-                        yield (STRING, token, spos, (lnum, pos), line)
+-                    else:
+-                        strstart = (lnum, start)           # multiple lines
+-                        contstr = line[start:]
+-                        contline = line
+-                        break
+-                elif initial in single_quoted or \
+-                    token[:2] in single_quoted or \
+-                    token[:3] in single_quoted:
+-                    if token[-1] == '\n':                  # continued string
+-                        strstart = (lnum, start)
+-                        endprog = (endprogs[initial] or endprogs[token[1]] or
+-                                   endprogs[token[2]])
+-                        contstr, needcont = line[start:], 1
+-                        contline = line
+-                        break
+-                    else:                                  # ordinary string
+-                        yield (STRING, token, spos, epos, line)
+-                elif initial in namechars:                 # ordinary name
+-                    yield (NAME, token, spos, epos, line)
+-                elif initial == '\\':                      # continued stmt
+-                    # This yield is new; needed for better idempotency:
+-                    yield (NL, token, spos, (lnum, pos), line)
+-                    continued = 1
+-                else:
+-                    if initial in '([{': parenlev = parenlev + 1
+-                    elif initial in ')]}': parenlev = parenlev - 1
+-                    yield (OP, token, spos, epos, line)
+-            else:
+-                yield (ERRORTOKEN, line[pos],
+-                           (lnum, pos), (lnum, pos+1), line)
+-                pos = pos + 1
+-
+-    for indent in indents[1:]:                 # pop remaining indent levels
+-        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
+-    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+-
+-if __name__ == '__main__':                     # testing
+-    import sys
+-    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
+-    else: tokenize(sys.stdin.readline)
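
generate_tokens() above accepts any readline-like callable, so a list
iterator's next method works as well as a file's readline. A short
sketch that prints the 5-tuples for a two-line function:

    from lib2to3.pgen2 import tokenize
    from lib2to3.pgen2.token import tok_name

    src = iter(["def f():\n", "    return 42\n"])
    for type, value, start, end, line in tokenize.generate_tokens(src.next):
        print tok_name[type], repr(value), start, end
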
+diff -r 531f2e948299 lib2to3/pgen2/__init__.py
+--- a/lib2to3/pgen2/__init__.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/pgen2/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,4 +1,1 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""The pgen2 package."""
++from refactor.pgen2 import *
+diff -r 531f2e948299 lib2to3/pgen2/conv.py
+--- a/lib2to3/pgen2/conv.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,257 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Convert graminit.[ch] spit out by pgen to Python code.
+-
+-Pgen is the Python parser generator.  It is useful to quickly create a
+-parser from a grammar file in Python's grammar notation.  But I don't
+-want my parsers to be written in C (yet), so I'm translating the
+-parsing tables to Python data structures and writing a Python parse
+-engine.
+-
+-Note that the token numbers are constants determined by the standard
+-Python tokenizer.  The standard token module defines these numbers and
+-their names (the names are not used much).  The token numbers are
+-hardcoded into the Python tokenizer and into pgen.  A Python
+-implementation of the Python tokenizer is also available, in the
+-standard tokenize module.
+-
+-On the other hand, symbol numbers (representing the grammar's
+-non-terminals) are assigned by pgen based on the actual grammar
+-input.
+-
+-Note: this module is pretty much obsolete; the pgen module generates
+-equivalent grammar tables directly from the Grammar.txt input file
+-without having to invoke the Python pgen C program.
+-
+-"""
+-
+-# Python imports
+-import re
+-
+-# Local imports
+-from pgen2 import grammar, token
+-
+-
+-class Converter(grammar.Grammar):
+-    """Grammar subclass that reads classic pgen output files.
+-
+-    The run() method reads the tables as produced by the pgen parser
+-    generator, typically contained in two C files, graminit.h and
+-    graminit.c.  The other methods are for internal use only.
+-
+-    See the base class for more documentation.
+-
+-    """
+-
+-    def run(self, graminit_h, graminit_c):
+-        """Load the grammar tables from the text files written by pgen."""
+-        self.parse_graminit_h(graminit_h)
+-        self.parse_graminit_c(graminit_c)
+-        self.finish_off()
+-
+-    def parse_graminit_h(self, filename):
+-        """Parse the .h file writen by pgen.  (Internal)
+-
+-        This file is a sequence of #define statements defining the
+-        nonterminals of the grammar as numbers.  We build two tables
+-        mapping the numbers to names and back.
+-
+-        """
+-        try:
+-            f = open(filename)
+-        except IOError, err:
+-            print "Can't open %s: %s" % (filename, err)
+-            return False
+-        self.symbol2number = {}
+-        self.number2symbol = {}
+-        lineno = 0
+-        for line in f:
+-            lineno += 1
+-            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
+-            if not mo and line.strip():
+-                print "%s(%s): can't parse %s" % (filename, lineno,
+-                                                  line.strip())
+-            else:
+-                symbol, number = mo.groups()
+-                number = int(number)
+-                assert symbol not in self.symbol2number
+-                assert number not in self.number2symbol
+-                self.symbol2number[symbol] = number
+-                self.number2symbol[number] = symbol
+-        return True
+-
+-    def parse_graminit_c(self, filename):
+-        """Parse the .c file writen by pgen.  (Internal)
+-
+-        The file looks as follows.  The first two lines are always this:
+-
+-        #include "pgenheaders.h"
+-        #include "grammar.h"
+-
+-        After that come four blocks:
+-
+-        1) one or more state definitions
+-        2) a table defining dfas
+-        3) a table defining labels
+-        4) a struct defining the grammar
+-
+-        A state definition has the following form:
+-        - one or more arc arrays, each of the form:
+-          static arc arcs_<n>_<m>[<k>] = {
+-                  {<i>, <j>},
+-                  ...
+-          };
+-        - followed by a state array, of the form:
+-          static state states_<s>[<t>] = {
+-                  {<k>, arcs_<n>_<m>},
+-                  ...
+-          };
+-
+-        """
+-        try:
+-            f = open(filename)
+-        except IOError, err:
+-            print "Can't open %s: %s" % (filename, err)
+-            return False
+-        # The code below relies on f being an iterator.
+-        lineno = 0
+-
+-        # Expect the two #include lines
+-        lineno, line = lineno+1, f.next()
+-        assert line == '#include "pgenheaders.h"\n', (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        assert line == '#include "grammar.h"\n', (lineno, line)
+-
+-        # Parse the state definitions
+-        lineno, line = lineno+1, f.next()
+-        allarcs = {}
+-        states = []
+-        while line.startswith("static arc "):
+-            while line.startswith("static arc "):
+-                mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
+-                              line)
+-                assert mo, (lineno, line)
+-                n, m, k = map(int, mo.groups())
+-                arcs = []
+-                for _ in range(k):
+-                    lineno, line = lineno+1, f.next()
+-                    mo = re.match(r"\s+{(\d+), (\d+)},$", line)
+-                    assert mo, (lineno, line)
+-                    i, j = map(int, mo.groups())
+-                    arcs.append((i, j))
+-                lineno, line = lineno+1, f.next()
+-                assert line == "};\n", (lineno, line)
+-                allarcs[(n, m)] = arcs
+-                lineno, line = lineno+1, f.next()
+-            mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
+-            assert mo, (lineno, line)
+-            s, t = map(int, mo.groups())
+-            assert s == len(states), (lineno, line)
+-            state = []
+-            for _ in range(t):
+-                lineno, line = lineno+1, f.next()
+-                mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
+-                assert mo, (lineno, line)
+-                k, n, m = map(int, mo.groups())
+-                arcs = allarcs[n, m]
+-                assert k == len(arcs), (lineno, line)
+-                state.append(arcs)
+-            states.append(state)
+-            lineno, line = lineno+1, f.next()
+-            assert line == "};\n", (lineno, line)
+-            lineno, line = lineno+1, f.next()
+-        self.states = states
+-
+-        # Parse the dfas
+-        dfas = {}
+-        mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
+-        assert mo, (lineno, line)
+-        ndfas = int(mo.group(1))
+-        for i in range(ndfas):
+-            lineno, line = lineno+1, f.next()
+-            mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
+-                          line)
+-            assert mo, (lineno, line)
+-            symbol = mo.group(2)
+-            number, x, y, z = map(int, mo.group(1, 3, 4, 5))
+-            assert self.symbol2number[symbol] == number, (lineno, line)
+-            assert self.number2symbol[number] == symbol, (lineno, line)
+-            assert x == 0, (lineno, line)
+-            state = states[z]
+-            assert y == len(state), (lineno, line)
+-            lineno, line = lineno+1, f.next()
+-            mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
+-            assert mo, (lineno, line)
+-            first = {}
+-            rawbitset = eval(mo.group(1))
+-            for i, c in enumerate(rawbitset):
+-                byte = ord(c)
+-                for j in range(8):
+-                    if byte & (1<<j):
+-                        first[i*8 + j] = 1
+-            dfas[number] = (state, first)
+-        lineno, line = lineno+1, f.next()
+-        assert line == "};\n", (lineno, line)
+-        self.dfas = dfas
+-
+-        # Parse the labels
+-        labels = []
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"static label labels\[(\d+)\] = {$", line)
+-        assert mo, (lineno, line)
+-        nlabels = int(mo.group(1))
+-        for i in range(nlabels):
+-            lineno, line = lineno+1, f.next()
+-            mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
+-            assert mo, (lineno, line)
+-            x, y = mo.groups()
+-            x = int(x)
+-            if y == "0":
+-                y = None
+-            else:
+-                y = eval(y)
+-            labels.append((x, y))
+-        lineno, line = lineno+1, f.next()
+-        assert line == "};\n", (lineno, line)
+-        self.labels = labels
+-
+-        # Parse the grammar struct
+-        lineno, line = lineno+1, f.next()
+-        assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"\s+(\d+),$", line)
+-        assert mo, (lineno, line)
+-        ndfas = int(mo.group(1))
+-        assert ndfas == len(self.dfas)
+-        lineno, line = lineno+1, f.next()
+-        assert line == "\tdfas,\n", (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"\s+{(\d+), labels},$", line)
+-        assert mo, (lineno, line)
+-        nlabels = int(mo.group(1))
+-        assert nlabels == len(self.labels), (lineno, line)
+-        lineno, line = lineno+1, f.next()
+-        mo = re.match(r"\s+(\d+)$", line)
+-        assert mo, (lineno, line)
+-        start = int(mo.group(1))
+-        assert start in self.number2symbol, (lineno, line)
+-        self.start = start
+-        lineno, line = lineno+1, f.next()
+-        assert line == "};\n", (lineno, line)
+-        try:
+-            lineno, line = lineno+1, f.next()
+-        except StopIteration:
+-            pass
+-        else:
+-            assert 0, (lineno, line)
+-
+-    def finish_off(self):
+-        """Create additional useful structures.  (Internal)."""
+-        self.keywords = {} # map from keyword strings to arc labels
+-        self.tokens = {}   # map from numeric token values to arc labels
+-        for ilabel, (type, value) in enumerate(self.labels):
+-            if type == token.NAME and value is not None:
+-                self.keywords[value] = ilabel
+-            elif value is None:
+-                self.tokens[type] = ilabel
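
The first-set decoding in parse_graminit_c() above deserves a closer look: graminit.c stores each DFA's first set as a packed byte string, one bit per label. A minimal sketch of that expansion (decode_bitset is a hypothetical helper, not part of conv.py):

    def decode_bitset(rawbitset):
        """Expand a packed byte string into a dict-as-set of label indices."""
        first = {}
        for i, c in enumerate(rawbitset):
            byte = ord(c)
            for j in range(8):
                if byte & (1 << j):
                    first[i*8 + j] = 1    # same dict-of-1s shape conv.py builds
        return first

    # "\003\200" sets bits 0 and 1 of byte 0 and bit 7 of byte 1:
    assert sorted(decode_bitset("\003\200")) == [0, 1, 15]
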
+diff -r 531f2e948299 lib2to3/pgen2/driver.py
+--- a/lib2to3/pgen2/driver.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,146 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-# Modifications:
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Parser driver.
+-
+-This provides a high-level interface to parse a file into a syntax tree.
+-
+-"""
+-
+-__author__ = "Guido van Rossum <guido at python.org>"
+-
+-__all__ = ["Driver", "load_grammar"]
+-
+-# Python imports
+-import os
+-import logging
+-import sys
+-
+-# Pgen imports
+-from . import grammar, parse, token, tokenize, pgen
+-
+-
+-class Driver(object):
+-
+-    def __init__(self, grammar, convert=None, logger=None):
+-        self.grammar = grammar
+-        if logger is None:
+-            logger = logging.getLogger()
+-        self.logger = logger
+-        self.convert = convert
+-
+-    def parse_tokens(self, tokens, debug=False):
+-        """Parse a series of tokens and return the syntax tree."""
+-        # XXX Move the prefix computation into a wrapper around tokenize.
+-        p = parse.Parser(self.grammar, self.convert)
+-        p.setup()
+-        lineno = 1
+-        column = 0
+-        type = value = start = end = line_text = None
+-        prefix = ""
+-        for quintuple in tokens:
+-            type, value, start, end, line_text = quintuple
+-            if start != (lineno, column):
+-                assert (lineno, column) <= start, ((lineno, column), start)
+-                s_lineno, s_column = start
+-                if lineno < s_lineno:
+-                    prefix += "\n" * (s_lineno - lineno)
+-                    lineno = s_lineno
+-                    column = 0
+-                if column < s_column:
+-                    prefix += line_text[column:s_column]
+-                    column = s_column
+-            if type in (tokenize.COMMENT, tokenize.NL):
+-                prefix += value
+-                lineno, column = end
+-                if value.endswith("\n"):
+-                    lineno += 1
+-                    column = 0
+-                continue
+-            if type == token.OP:
+-                type = grammar.opmap[value]
+-            if debug:
+-                self.logger.debug("%s %r (prefix=%r)",
+-                                  token.tok_name[type], value, prefix)
+-            if p.addtoken(type, value, (prefix, start)):
+-                if debug:
+-                    self.logger.debug("Stop.")
+-                break
+-            prefix = ""
+-            lineno, column = end
+-            if value.endswith("\n"):
+-                lineno += 1
+-                column = 0
+-        else:
+-            # We never broke out -- EOF is too soon (how can this happen???)
+-            raise parse.ParseError("incomplete input",
+-                                   type, value, (prefix, start))
+-        return p.rootnode
+-
+-    def parse_stream_raw(self, stream, debug=False):
+-        """Parse a stream and return the syntax tree."""
+-        tokens = tokenize.generate_tokens(stream.readline)
+-        return self.parse_tokens(tokens, debug)
+-
+-    def parse_stream(self, stream, debug=False):
+-        """Parse a stream and return the syntax tree."""
+-        return self.parse_stream_raw(stream, debug)
+-
+-    def parse_file(self, filename, debug=False):
+-        """Parse a file and return the syntax tree."""
+-        stream = open(filename)
+-        try:
+-            return self.parse_stream(stream, debug)
+-        finally:
+-            stream.close()
+-
+-    def parse_string(self, text, debug=False):
+-        """Parse a string and return the syntax tree."""
+-        tokens = tokenize.generate_tokens(generate_lines(text).next)
+-        return self.parse_tokens(tokens, debug)
+-
+-
+-def generate_lines(text):
+-    """Generator that behaves like readline without using StringIO."""
+-    for line in text.splitlines(True):
+-        yield line
+-    while True:
+-        yield ""
+-
+-
+-def load_grammar(gt="Grammar.txt", gp=None,
+-                 save=True, force=False, logger=None):
+-    """Load the grammar (maybe from a pickle)."""
+-    if logger is None:
+-        logger = logging.getLogger()
+-    if gp is None:
+-        head, tail = os.path.splitext(gt)
+-        if tail == ".txt":
+-            tail = ""
+-        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
+-    if force or not _newer(gp, gt):
+-        logger.info("Generating grammar tables from %s", gt)
+-        g = pgen.generate_grammar(gt)
+-        if save:
+-            logger.info("Writing grammar tables to %s", gp)
+-            try:
+-                g.dump(gp)
+-            except IOError, e:
+-                logger.info("Writing failed:"+str(e))
+-    else:
+-        g = grammar.Grammar()
+-        g.load(gp)
+-    return g
+-
+-
+-def _newer(a, b):
+-    """Inquire whether file a was written since file b."""
+-    if not os.path.exists(a):
+-        return False
+-    if not os.path.exists(b):
+-        return True
+-    return os.path.getmtime(a) >= os.path.getmtime(b)
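
Putting load_grammar() and Driver together, a minimal parse of a string looks like this (the grammar path is an assumption; pytree.convert is the converter lib2to3 itself uses):

    from lib2to3 import pytree
    from lib2to3.pgen2 import driver

    g = driver.load_grammar("Grammar.txt")    # generates and pickles tables on first use
    d = driver.Driver(g, convert=pytree.convert)
    tree = d.parse_string("x = 1\n")          # Python 2 source, per this grammar
    print repr(str(tree))                     # the tree stringifies back to source
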
+diff -r 531f2e948299 lib2to3/pgen2/grammar.py
+--- a/lib2to3/pgen2/grammar.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,171 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""This module defines the data structures used to represent a grammar.
+-
+-These are a bit arcane because they are derived from the data
+-structures used by Python's 'pgen' parser generator.
+-
+-There's also a table here mapping operators to their names in the
+-token module; the Python tokenize module reports all operators as the
+-fallback token code OP, but the parser needs the actual token code.
+-
+-"""
+-
+-# Python imports
+-import pickle
+-
+-# Local imports
+-from . import token, tokenize
+-
+-
+-class Grammar(object):
+-    """Pgen parsing tables tables conversion class.
+-
+-    Once initialized, this class supplies the grammar tables for the
+-    parsing engine implemented by parse.py.  The parsing engine
+-    accesses the instance variables directly.  The class here does not
+-    provide initialization of the tables; several subclasses exist to
+-    do this (see the conv and pgen modules).
+-
+-    The load() method reads the tables from a pickle file, which is
+-    much faster than the other ways offered by subclasses.  The pickle
+-    file is written by calling dump() (after loading the grammar
+-    tables using a subclass).  The report() method prints a readable
+-    representation of the tables to stdout, for debugging.
+-
+-    The instance variables are as follows:
+-
+-    symbol2number -- a dict mapping symbol names to numbers.  Symbol
+-                     numbers are always 256 or higher, to distinguish
+-                     them from token numbers, which are between 0 and
+-                     255 (inclusive).
+-
+-    number2symbol -- a dict mapping numbers to symbol names;
+-                     these two are each other's inverse.
+-
+-    states        -- a list of DFAs, where each DFA is a list of
+-                     states, each state is a list of arcs, and each
+-                     arc is an (i, j) pair where i is a label and j is
+-                     a state number.  The DFA number is the index into
+-                     this list.  (This name is slightly confusing.)
+-                     Final states are represented by a special arc of
+-                     the form (0, j) where j is its own state number.
+-
+-    dfas          -- a dict mapping symbol numbers to (DFA, first)
+-                     pairs, where DFA is an item from the states list
+-                     above, and first is a set of tokens that can
+-                     begin this grammar rule (represented by a dict
+-                     whose values are always 1).
+-
+-    labels        -- a list of (x, y) pairs where x is either a token
+-                     number or a symbol number, and y is either None
+-                     or a string; the strings are keywords.  The label
+-                     number is the index in this list; label numbers
+-                     are used to mark state transitions (arcs) in the
+-                     DFAs.
+-
+-    start         -- the number of the grammar's start symbol.
+-
+-    keywords      -- a dict mapping keyword strings to arc labels.
+-
+-    tokens        -- a dict mapping token numbers to arc labels.
+-
+-    """
+-
+-    def __init__(self):
+-        self.symbol2number = {}
+-        self.number2symbol = {}
+-        self.states = []
+-        self.dfas = {}
+-        self.labels = [(0, "EMPTY")]
+-        self.keywords = {}
+-        self.tokens = {}
+-        self.symbol2label = {}
+-        self.start = 256
+-
+-    def dump(self, filename):
+-        """Dump the grammar tables to a pickle file."""
+-        f = open(filename, "wb")
+-        pickle.dump(self.__dict__, f, 2)
+-        f.close()
+-
+-    def load(self, filename):
+-        """Load the grammar tables from a pickle file."""
+-        f = open(filename, "rb")
+-        d = pickle.load(f)
+-        f.close()
+-        self.__dict__.update(d)
+-
+-    def report(self):
+-        """Dump the grammar tables to standard output, for debugging."""
+-        from pprint import pprint
+-        print "s2n"
+-        pprint(self.symbol2number)
+-        print "n2s"
+-        pprint(self.number2symbol)
+-        print "states"
+-        pprint(self.states)
+-        print "dfas"
+-        pprint(self.dfas)
+-        print "labels"
+-        pprint(self.labels)
+-        print "start", self.start
+-
+-
+-# Map from operator to number (since tokenize doesn't do this)
+-
+-opmap_raw = """
+-( LPAR
+-) RPAR
+-[ LSQB
+-] RSQB
+-: COLON
+-, COMMA
+-; SEMI
+-+ PLUS
+-- MINUS
+-* STAR
+-/ SLASH
+-| VBAR
+-& AMPER
+-< LESS
+-> GREATER
+-= EQUAL
+-. DOT
+-% PERCENT
+-` BACKQUOTE
+-{ LBRACE
+-} RBRACE
+-@ AT
+-== EQEQUAL
+-!= NOTEQUAL
+-<> NOTEQUAL
+-<= LESSEQUAL
+->= GREATEREQUAL
+-~ TILDE
+-^ CIRCUMFLEX
+-<< LEFTSHIFT
+->> RIGHTSHIFT
+-** DOUBLESTAR
+-+= PLUSEQUAL
+--= MINEQUAL
+-*= STAREQUAL
+-/= SLASHEQUAL
+-%= PERCENTEQUAL
+-&= AMPEREQUAL
+-|= VBAREQUAL
+-^= CIRCUMFLEXEQUAL
+-<<= LEFTSHIFTEQUAL
+->>= RIGHTSHIFTEQUAL
+-**= DOUBLESTAREQUAL
+-// DOUBLESLASH
+-//= DOUBLESLASHEQUAL
+--> RARROW
+-"""
+-
+-opmap = {}
+-for line in opmap_raw.splitlines():
+-    if line:
+-        op, name = line.split()
+-        opmap[op] = getattr(token, name)
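
Since dump() and load() just pickle the instance __dict__, a round trip through a file restores every table verbatim (the file name is an invented example):

    g = Grammar()
    g.dump("grammar.pickle")     # pickle.dump(self.__dict__, f, 2)
    h = Grammar()
    h.load("grammar.pickle")
    assert h.labels == [(0, "EMPTY")] and h.start == 256
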
+diff -r 531f2e948299 lib2to3/pgen2/literals.py
+--- a/lib2to3/pgen2/literals.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,60 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Safely evaluate Python string literals without using eval()."""
+-
+-import re
+-
+-simple_escapes = {"a": "\a",
+-                  "b": "\b",
+-                  "f": "\f",
+-                  "n": "\n",
+-                  "r": "\r",
+-                  "t": "\t",
+-                  "v": "\v",
+-                  "'": "'",
+-                  '"': '"',
+-                  "\\": "\\"}
+-
+-def escape(m):
+-    all, tail = m.group(0, 1)
+-    assert all.startswith("\\")
+-    esc = simple_escapes.get(tail)
+-    if esc is not None:
+-        return esc
+-    if tail.startswith("x"):
+-        hexes = tail[1:]
+-        if len(hexes) < 2:
+-            raise ValueError("invalid hex string escape ('\\%s')" % tail)
+-        try:
+-            i = int(hexes, 16)
+-        except ValueError:
+-            raise ValueError("invalid hex string escape ('\\%s')" % tail)
+-    else:
+-        try:
+-            i = int(tail, 8)
+-        except ValueError:
+-            raise ValueError("invalid octal string escape ('\\%s')" % tail)
+-    return chr(i)
+-
+-def evalString(s):
+-    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
+-    q = s[0]
+-    if s[:3] == q*3:
+-        q = q*3
+-    assert s.endswith(q), repr(s[-len(q):])
+-    assert len(s) >= 2*len(q)
+-    s = s[len(q):-len(q)]
+-    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
+-
+-def test():
+-    for i in range(256):
+-        c = chr(i)
+-        s = repr(c)
+-        e = evalString(s)
+-        if e != c:
+-            print i, c, s, e
+-
+-
+-if __name__ == "__main__":
+-    test()
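
A few spot checks of evalString(), in the spirit of test() above: it must invert repr() for ordinary literals, including hex and octal escapes:

    assert evalString("'a'") == "a"
    assert evalString(r"'\n'") == "\n"
    assert evalString(r"'\x41'") == "A"        # hex escape
    assert evalString(r"'\101'") == "A"        # octal escape
    assert evalString('"""abc"""') == "abc"    # triple-quoted form
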
+diff -r 531f2e948299 lib2to3/pgen2/parse.py
+--- a/lib2to3/pgen2/parse.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,201 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Parser engine for the grammar tables generated by pgen.
+-
+-The grammar table must be loaded first.
+-
+-See Parser/parser.c in the Python distribution for additional info on
+-how this parsing engine works.
+-
+-"""
+-
+-# Local imports
+-from . import token
+-
+-class ParseError(Exception):
+-    """Exception to signal the parser is stuck."""
+-
+-    def __init__(self, msg, type, value, context):
+-        Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
+-                           (msg, type, value, context))
+-        self.msg = msg
+-        self.type = type
+-        self.value = value
+-        self.context = context
+-
+-class Parser(object):
+-    """Parser engine.
+-
+-    The proper usage sequence is:
+-
+-    p = Parser(grammar, [converter])  # create instance
+-    p.setup([start])                  # prepare for parsing
+-    <for each input token>:
+-        if p.addtoken(...):           # parse a token; may raise ParseError
+-            break
+-    root = p.rootnode                 # root of abstract syntax tree
+-
+-    A Parser instance may be reused by calling setup() repeatedly.
+-
+-    A Parser instance contains state pertaining to the current token
+-    sequence, and should not be used concurrently by different threads
+-    to parse separate token sequences.
+-
+-    See driver.py for how to get input tokens by tokenizing a file or
+-    string.
+-
+-    Parsing is complete when addtoken() returns True; the root of the
+-    abstract syntax tree can then be retrieved from the rootnode
+-    instance variable.  When a syntax error occurs, addtoken() raises
+-    the ParseError exception.  There is no error recovery; the parser
+-    cannot be used after a syntax error was reported (but it can be
+-    reinitialized by calling setup()).
+-
+-    """
+-
+-    def __init__(self, grammar, convert=None):
+-        """Constructor.
+-
+-        The grammar argument is a grammar.Grammar instance; see the
+-        grammar module for more information.
+-
+-        The parser is not ready yet for parsing; you must call the
+-        setup() method to get it started.
+-
+-        The optional convert argument is a function mapping concrete
+-        syntax tree nodes to abstract syntax tree nodes.  If not
+-        given, no conversion is done and the syntax tree produced is
+-        the concrete syntax tree.  If given, it must be a function of
+-        two arguments, the first being the grammar (a grammar.Grammar
+-        instance), and the second being the concrete syntax tree node
+-        to be converted.  The syntax tree is converted from the bottom
+-        up.
+-
+-        A concrete syntax tree node is a (type, value, context, nodes)
+-        tuple, where type is the node type (a token or symbol number),
+-        value is None for symbols and a string for tokens, context is
+-        None or an opaque value used for error reporting (typically a
+-        (lineno, offset) pair), and nodes is a list of children for
+-        symbols, and None for tokens.
+-
+-        An abstract syntax tree node may be anything; this is entirely
+-        up to the converter function.
+-
+-        """
+-        self.grammar = grammar
+-        self.convert = convert or (lambda grammar, node: node)
+-
+-    def setup(self, start=None):
+-        """Prepare for parsing.
+-
+-        This *must* be called before starting to parse.
+-
+-        The optional argument is an alternative start symbol; it
+-        defaults to the grammar's start symbol.
+-
+-        You can use a Parser instance to parse any number of programs;
+-        each time you call setup() the parser is reset to an initial
+-        state determined by the (implicit or explicit) start symbol.
+-
+-        """
+-        if start is None:
+-            start = self.grammar.start
+-        # Each stack entry is a tuple: (dfa, state, node).
+-        # A node is a tuple: (type, value, context, children),
+-        # where children is a list of nodes or None, and context may be None.
+-        newnode = (start, None, None, [])
+-        stackentry = (self.grammar.dfas[start], 0, newnode)
+-        self.stack = [stackentry]
+-        self.rootnode = None
+-        self.used_names = set() # Aliased to self.rootnode.used_names in pop()
+-
+-    def addtoken(self, type, value, context):
+-        """Add a token; return True iff this is the end of the program."""
+-        # Map from token to label
+-        ilabel = self.classify(type, value, context)
+-        # Loop until the token is shifted; may raise exceptions
+-        while True:
+-            dfa, state, node = self.stack[-1]
+-            states, first = dfa
+-            arcs = states[state]
+-            # Look for a state with this label
+-            for i, newstate in arcs:
+-                t, v = self.grammar.labels[i]
+-                if ilabel == i:
+-                    # Look it up in the list of labels
+-                    assert t < 256
+-                    # Shift a token; we're done with it
+-                    self.shift(type, value, newstate, context)
+-                    # Pop while we are in an accept-only state
+-                    state = newstate
+-                    while states[state] == [(0, state)]:
+-                        self.pop()
+-                        if not self.stack:
+-                            # Done parsing!
+-                            return True
+-                        dfa, state, node = self.stack[-1]
+-                        states, first = dfa
+-                    # Done with this token
+-                    return False
+-                elif t >= 256:
+-                    # See if it's a symbol and if we're in its first set
+-                    itsdfa = self.grammar.dfas[t]
+-                    itsstates, itsfirst = itsdfa
+-                    if ilabel in itsfirst:
+-                        # Push a symbol
+-                        self.push(t, self.grammar.dfas[t], newstate, context)
+-                        break # To continue the outer while loop
+-            else:
+-                if (0, state) in arcs:
+-                    # An accepting state, pop it and try something else
+-                    self.pop()
+-                    if not self.stack:
+-                        # Done parsing, but another token is input
+-                        raise ParseError("too much input",
+-                                         type, value, context)
+-                else:
+-                    # No success finding a transition
+-                    raise ParseError("bad input", type, value, context)
+-
+-    def classify(self, type, value, context):
+-        """Turn a token into a label.  (Internal)"""
+-        if type == token.NAME:
+-            # Keep a listing of all used names
+-            self.used_names.add(value)
+-            # Check for reserved words
+-            ilabel = self.grammar.keywords.get(value)
+-            if ilabel is not None:
+-                return ilabel
+-        ilabel = self.grammar.tokens.get(type)
+-        if ilabel is None:
+-            raise ParseError("bad token", type, value, context)
+-        return ilabel
+-
+-    def shift(self, type, value, newstate, context):
+-        """Shift a token.  (Internal)"""
+-        dfa, state, node = self.stack[-1]
+-        newnode = (type, value, context, None)
+-        newnode = self.convert(self.grammar, newnode)
+-        if newnode is not None:
+-            node[-1].append(newnode)
+-        self.stack[-1] = (dfa, newstate, node)
+-
+-    def push(self, type, newdfa, newstate, context):
+-        """Push a nonterminal.  (Internal)"""
+-        dfa, state, node = self.stack[-1]
+-        newnode = (type, None, context, [])
+-        self.stack[-1] = (dfa, newstate, node)
+-        self.stack.append((newdfa, 0, newnode))
+-
+-    def pop(self):
+-        """Pop a nonterminal.  (Internal)"""
+-        popdfa, popstate, popnode = self.stack.pop()
+-        newnode = self.convert(self.grammar, popnode)
+-        if newnode is not None:
+-            if self.stack:
+-                dfa, state, node = self.stack[-1]
+-                node[-1].append(newnode)
+-            else:
+-                self.rootnode = newnode
+-                self.rootnode.used_names = self.used_names
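
Fleshing out the usage sequence from the Parser docstring with a real token stream (the converter and the OP remapping mirror what driver.py does; comment and prefix handling are simplified here):

    from lib2to3 import pytree
    from lib2to3.pgen2 import driver, grammar, parse, token, tokenize

    g = driver.load_grammar("Grammar.txt")
    p = parse.Parser(g, pytree.convert)    # converter builds pytree Nodes/Leaves
    p.setup()                              # start symbol defaults to g.start
    readline = iter(["x = 1\n", ""]).next
    for type, value, start, end, line in tokenize.generate_tokens(readline):
        if type in (tokenize.COMMENT, tokenize.NL):
            continue                       # driver.py folds these into prefixes
        if type == token.OP:
            type = grammar.opmap[value]    # the parser needs exact operator codes
        if p.addtoken(type, value, ("", start)):   # empty prefix for brevity
            break                          # True: start symbol fully parsed
    tree = p.rootnode
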
+diff -r 531f2e948299 lib2to3/pgen2/pgen.py
+--- a/lib2to3/pgen2/pgen.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,384 +0,0 @@
+-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-# Pgen imports
+-from . import grammar, token, tokenize
+-
+-class PgenGrammar(grammar.Grammar):
+-    pass
+-
+-class ParserGenerator(object):
+-
+-    def __init__(self, filename, stream=None):
+-        close_stream = None
+-        if stream is None:
+-            stream = open(filename)
+-            close_stream = stream.close
+-        self.filename = filename
+-        self.stream = stream
+-        self.generator = tokenize.generate_tokens(stream.readline)
+-        self.gettoken() # Initialize lookahead
+-        self.dfas, self.startsymbol = self.parse()
+-        if close_stream is not None:
+-            close_stream()
+-        self.first = {} # map from symbol name to set of tokens
+-        self.addfirstsets()
+-
+-    def make_grammar(self):
+-        c = PgenGrammar()
+-        names = self.dfas.keys()
+-        names.sort()
+-        names.remove(self.startsymbol)
+-        names.insert(0, self.startsymbol)
+-        for name in names:
+-            i = 256 + len(c.symbol2number)
+-            c.symbol2number[name] = i
+-            c.number2symbol[i] = name
+-        for name in names:
+-            dfa = self.dfas[name]
+-            states = []
+-            for state in dfa:
+-                arcs = []
+-                for label, next in state.arcs.iteritems():
+-                    arcs.append((self.make_label(c, label), dfa.index(next)))
+-                if state.isfinal:
+-                    arcs.append((0, dfa.index(state)))
+-                states.append(arcs)
+-            c.states.append(states)
+-            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
+-        c.start = c.symbol2number[self.startsymbol]
+-        return c
+-
+-    def make_first(self, c, name):
+-        rawfirst = self.first[name]
+-        first = {}
+-        for label in rawfirst:
+-            ilabel = self.make_label(c, label)
+-            ##assert ilabel not in first # XXX failed on <> ... !=
+-            first[ilabel] = 1
+-        return first
+-
+-    def make_label(self, c, label):
+-        # XXX Maybe this should be a method on a subclass of converter?
+-        ilabel = len(c.labels)
+-        if label[0].isalpha():
+-            # Either a symbol name or a named token
+-            if label in c.symbol2number:
+-                # A symbol name (a non-terminal)
+-                if label in c.symbol2label:
+-                    return c.symbol2label[label]
+-                else:
+-                    c.labels.append((c.symbol2number[label], None))
+-                    c.symbol2label[label] = ilabel
+-                    return ilabel
+-            else:
+-                # A named token (NAME, NUMBER, STRING)
+-                itoken = getattr(token, label, None)
+-                assert isinstance(itoken, int), label
+-                assert itoken in token.tok_name, label
+-                if itoken in c.tokens:
+-                    return c.tokens[itoken]
+-                else:
+-                    c.labels.append((itoken, None))
+-                    c.tokens[itoken] = ilabel
+-                    return ilabel
+-        else:
+-            # Either a keyword or an operator
+-            assert label[0] in ('"', "'"), label
+-            value = eval(label)
+-            if value[0].isalpha():
+-                # A keyword
+-                if value in c.keywords:
+-                    return c.keywords[value]
+-                else:
+-                    c.labels.append((token.NAME, value))
+-                    c.keywords[value] = ilabel
+-                    return ilabel
+-            else:
+-                # An operator (any non-numeric token)
+-                itoken = grammar.opmap[value] # Fails if unknown token
+-                if itoken in c.tokens:
+-                    return c.tokens[itoken]
+-                else:
+-                    c.labels.append((itoken, None))
+-                    c.tokens[itoken] = ilabel
+-                    return ilabel
+-
+-    def addfirstsets(self):
+-        names = self.dfas.keys()
+-        names.sort()
+-        for name in names:
+-            if name not in self.first:
+-                self.calcfirst(name)
+-            #print name, self.first[name].keys()
+-
+-    def calcfirst(self, name):
+-        dfa = self.dfas[name]
+-        self.first[name] = None # dummy to detect left recursion
+-        state = dfa[0]
+-        totalset = {}
+-        overlapcheck = {}
+-        for label, next in state.arcs.iteritems():
+-            if label in self.dfas:
+-                if label in self.first:
+-                    fset = self.first[label]
+-                    if fset is None:
+-                        raise ValueError("recursion for rule %r" % name)
+-                else:
+-                    self.calcfirst(label)
+-                    fset = self.first[label]
+-                totalset.update(fset)
+-                overlapcheck[label] = fset
+-            else:
+-                totalset[label] = 1
+-                overlapcheck[label] = {label: 1}
+-        inverse = {}
+-        for label, itsfirst in overlapcheck.iteritems():
+-            for symbol in itsfirst:
+-                if symbol in inverse:
+-                    raise ValueError("rule %s is ambiguous; %s is in the"
+-                                     " first sets of %s as well as %s" %
+-                                     (name, symbol, label, inverse[symbol]))
+-                inverse[symbol] = label
+-        self.first[name] = totalset
+-
+-    def parse(self):
+-        dfas = {}
+-        startsymbol = None
+-        # MSTART: (NEWLINE | RULE)* ENDMARKER
+-        while self.type != token.ENDMARKER:
+-            while self.type == token.NEWLINE:
+-                self.gettoken()
+-            # RULE: NAME ':' RHS NEWLINE
+-            name = self.expect(token.NAME)
+-            self.expect(token.OP, ":")
+-            a, z = self.parse_rhs()
+-            self.expect(token.NEWLINE)
+-            #self.dump_nfa(name, a, z)
+-            dfa = self.make_dfa(a, z)
+-            #self.dump_dfa(name, dfa)
+-            oldlen = len(dfa)
+-            self.simplify_dfa(dfa)
+-            newlen = len(dfa)
+-            dfas[name] = dfa
+-            #print name, oldlen, newlen
+-            if startsymbol is None:
+-                startsymbol = name
+-        return dfas, startsymbol
+-
+-    def make_dfa(self, start, finish):
+-        # To turn an NFA into a DFA, we define the states of the DFA
+-        # to correspond to *sets* of states of the NFA.  Then do some
+-        # state reduction.  Let's represent sets as dicts with 1 for
+-        # values.
+-        assert isinstance(start, NFAState)
+-        assert isinstance(finish, NFAState)
+-        def closure(state):
+-            base = {}
+-            addclosure(state, base)
+-            return base
+-        def addclosure(state, base):
+-            assert isinstance(state, NFAState)
+-            if state in base:
+-                return
+-            base[state] = 1
+-            for label, next in state.arcs:
+-                if label is None:
+-                    addclosure(next, base)
+-        states = [DFAState(closure(start), finish)]
+-        for state in states: # NB states grows while we're iterating
+-            arcs = {}
+-            for nfastate in state.nfaset:
+-                for label, next in nfastate.arcs:
+-                    if label is not None:
+-                        addclosure(next, arcs.setdefault(label, {}))
+-            for label, nfaset in arcs.iteritems():
+-                for st in states:
+-                    if st.nfaset == nfaset:
+-                        break
+-                else:
+-                    st = DFAState(nfaset, finish)
+-                    states.append(st)
+-                state.addarc(st, label)
+-        return states # List of DFAState instances; first one is start
+-
+-    def dump_nfa(self, name, start, finish):
+-        print "Dump of NFA for", name
+-        todo = [start]
+-        for i, state in enumerate(todo):
+-            print "  State", i, state is finish and "(final)" or ""
+-            for label, next in state.arcs:
+-                if next in todo:
+-                    j = todo.index(next)
+-                else:
+-                    j = len(todo)
+-                    todo.append(next)
+-                if label is None:
+-                    print "    -> %d" % j
+-                else:
+-                    print "    %s -> %d" % (label, j)
+-
+-    def dump_dfa(self, name, dfa):
+-        print "Dump of DFA for", name
+-        for i, state in enumerate(dfa):
+-            print "  State", i, state.isfinal and "(final)" or ""
+-            for label, next in state.arcs.iteritems():
+-                print "    %s -> %d" % (label, dfa.index(next))
+-
+-    def simplify_dfa(self, dfa):
+-        # This is not theoretically optimal, but works well enough.
+-        # Algorithm: repeatedly look for two states that have the same
+-        # set of arcs (same labels pointing to the same nodes) and
+-        # unify them, until things stop changing.
+-
+-        # dfa is a list of DFAState instances
+-        changes = True
+-        while changes:
+-            changes = False
+-            for i, state_i in enumerate(dfa):
+-                for j in range(i+1, len(dfa)):
+-                    state_j = dfa[j]
+-                    if state_i == state_j:
+-                        #print "  unify", i, j
+-                        del dfa[j]
+-                        for state in dfa:
+-                            state.unifystate(state_j, state_i)
+-                        changes = True
+-                        break
+-
+-    def parse_rhs(self):
+-        # RHS: ALT ('|' ALT)*
+-        a, z = self.parse_alt()
+-        if self.value != "|":
+-            return a, z
+-        else:
+-            aa = NFAState()
+-            zz = NFAState()
+-            aa.addarc(a)
+-            z.addarc(zz)
+-            while self.value == "|":
+-                self.gettoken()
+-                a, z = self.parse_alt()
+-                aa.addarc(a)
+-                z.addarc(zz)
+-            return aa, zz
+-
+-    def parse_alt(self):
+-        # ALT: ITEM+
+-        a, b = self.parse_item()
+-        while (self.value in ("(", "[") or
+-               self.type in (token.NAME, token.STRING)):
+-            c, d = self.parse_item()
+-            b.addarc(c)
+-            b = d
+-        return a, b
+-
+-    def parse_item(self):
+-        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
+-        if self.value == "[":
+-            self.gettoken()
+-            a, z = self.parse_rhs()
+-            self.expect(token.OP, "]")
+-            a.addarc(z)
+-            return a, z
+-        else:
+-            a, z = self.parse_atom()
+-            value = self.value
+-            if value not in ("+", "*"):
+-                return a, z
+-            self.gettoken()
+-            z.addarc(a)
+-            if value == "+":
+-                return a, z
+-            else:
+-                return a, a
+-
+-    def parse_atom(self):
+-        # ATOM: '(' RHS ')' | NAME | STRING
+-        if self.value == "(":
+-            self.gettoken()
+-            a, z = self.parse_rhs()
+-            self.expect(token.OP, ")")
+-            return a, z
+-        elif self.type in (token.NAME, token.STRING):
+-            a = NFAState()
+-            z = NFAState()
+-            a.addarc(z, self.value)
+-            self.gettoken()
+-            return a, z
+-        else:
+-            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
+-                             self.type, self.value)
+-
+-    def expect(self, type, value=None):
+-        if self.type != type or (value is not None and self.value != value):
+-            self.raise_error("expected %s/%s, got %s/%s",
+-                             type, value, self.type, self.value)
+-        value = self.value
+-        self.gettoken()
+-        return value
+-
+-    def gettoken(self):
+-        tup = self.generator.next()
+-        while tup[0] in (tokenize.COMMENT, tokenize.NL):
+-            tup = self.generator.next()
+-        self.type, self.value, self.begin, self.end, self.line = tup
+-        #print token.tok_name[self.type], repr(self.value)
+-
+-    def raise_error(self, msg, *args):
+-        if args:
+-            try:
+-                msg = msg % args
+-            except:
+-                msg = " ".join([msg] + map(str, args))
+-        raise SyntaxError(msg, (self.filename, self.end[0],
+-                                self.end[1], self.line))
+-
+-class NFAState(object):
+-
+-    def __init__(self):
+-        self.arcs = [] # list of (label, NFAState) pairs
+-
+-    def addarc(self, next, label=None):
+-        assert label is None or isinstance(label, str)
+-        assert isinstance(next, NFAState)
+-        self.arcs.append((label, next))
+-
+-class DFAState(object):
+-
+-    def __init__(self, nfaset, final):
+-        assert isinstance(nfaset, dict)
+-        assert isinstance(iter(nfaset).next(), NFAState)
+-        assert isinstance(final, NFAState)
+-        self.nfaset = nfaset
+-        self.isfinal = final in nfaset
+-        self.arcs = {} # map from label to DFAState
+-
+-    def addarc(self, next, label):
+-        assert isinstance(label, str)
+-        assert label not in self.arcs
+-        assert isinstance(next, DFAState)
+-        self.arcs[label] = next
+-
+-    def unifystate(self, old, new):
+-        for label, next in self.arcs.iteritems():
+-            if next is old:
+-                self.arcs[label] = new
+-
+-    def __eq__(self, other):
+-        # Equality test -- ignore the nfaset instance variable
+-        assert isinstance(other, DFAState)
+-        if self.isfinal != other.isfinal:
+-            return False
+-        # Can't just return self.arcs == other.arcs, because that
+-        # would invoke this method recursively, with cycles...
+-        if len(self.arcs) != len(other.arcs):
+-            return False
+-        for label, next in self.arcs.iteritems():
+-            if next is not other.arcs.get(label):
+-                return False
+-        return True
+-
+-def generate_grammar(filename="Grammar.txt"):
+-    p = ParserGenerator(filename)
+-    return p.make_grammar()
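
ParserGenerator can also be exercised on a toy grammar entirely in memory (the one-rule grammar is an invented example; token is the module imported at the top of pgen.py):

    from StringIO import StringIO

    src = "start: NAME '=' NUMBER NEWLINE\n"
    pg = ParserGenerator("<toy>", StringIO(src))
    g = pg.make_grammar()
    assert g.symbol2number == {"start": 256}
    assert g.start == 256
    # the first set of 'start' is the single token NAME
    first_labels = [g.labels[i] for i in g.dfas[256][1]]
    assert first_labels == [(token.NAME, None)]
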
+diff -r 531f2e948299 lib2to3/pgen2/token.py
+--- a/lib2to3/pgen2/token.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,82 +0,0 @@
+-#! /usr/bin/env python
+-
+-"""Token constants (from "token.h")."""
+-
+-#  Taken from Python (r53757) and modified to include some tokens
+-#   originally monkeypatched in by pgen2.tokenize
+-
+-#--start constants--
+-ENDMARKER = 0
+-NAME = 1
+-NUMBER = 2
+-STRING = 3
+-NEWLINE = 4
+-INDENT = 5
+-DEDENT = 6
+-LPAR = 7
+-RPAR = 8
+-LSQB = 9
+-RSQB = 10
+-COLON = 11
+-COMMA = 12
+-SEMI = 13
+-PLUS = 14
+-MINUS = 15
+-STAR = 16
+-SLASH = 17
+-VBAR = 18
+-AMPER = 19
+-LESS = 20
+-GREATER = 21
+-EQUAL = 22
+-DOT = 23
+-PERCENT = 24
+-BACKQUOTE = 25
+-LBRACE = 26
+-RBRACE = 27
+-EQEQUAL = 28
+-NOTEQUAL = 29
+-LESSEQUAL = 30
+-GREATEREQUAL = 31
+-TILDE = 32
+-CIRCUMFLEX = 33
+-LEFTSHIFT = 34
+-RIGHTSHIFT = 35
+-DOUBLESTAR = 36
+-PLUSEQUAL = 37
+-MINEQUAL = 38
+-STAREQUAL = 39
+-SLASHEQUAL = 40
+-PERCENTEQUAL = 41
+-AMPEREQUAL = 42
+-VBAREQUAL = 43
+-CIRCUMFLEXEQUAL = 44
+-LEFTSHIFTEQUAL = 45
+-RIGHTSHIFTEQUAL = 46
+-DOUBLESTAREQUAL = 47
+-DOUBLESLASH = 48
+-DOUBLESLASHEQUAL = 49
+-AT = 50
+-OP = 51
+-COMMENT = 52
+-NL = 53
+-RARROW = 54
+-ERRORTOKEN = 55
+-N_TOKENS = 56
+-NT_OFFSET = 256
+-#--end constants--
+-
+-tok_name = {}
+-for _name, _value in globals().items():
+-    if type(_value) is type(0):
+-        tok_name[_value] = _name
+-
+-
+-def ISTERMINAL(x):
+-    return x < NT_OFFSET
+-
+-def ISNONTERMINAL(x):
+-    return x >= NT_OFFSET
+-
+-def ISEOF(x):
+-    return x == ENDMARKER
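
The tok_name table built above inverts the constants, which makes diagnostics readable:

    assert tok_name[NAME] == "NAME"
    assert tok_name[NT_OFFSET] == "NT_OFFSET"
    assert ISTERMINAL(NAME) and not ISNONTERMINAL(NAME)
    assert ISNONTERMINAL(256)      # symbol numbers live at NT_OFFSET and above
    assert ISEOF(ENDMARKER)
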
+diff -r 531f2e948299 lib2to3/pgen2/tokenize.py
+--- a/lib2to3/pgen2/tokenize.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,405 +0,0 @@
+-# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
+-# All rights reserved.
+-
+-"""Tokenization help for Python programs.
+-
+-generate_tokens(readline) is a generator that breaks a stream of
+-text into Python tokens.  It accepts a readline-like method which is called
+-repeatedly to get the next line of input (or "" for EOF).  It generates
+-5-tuples with these members:
+-
+-    the token type (see token.py)
+-    the token (a string)
+-    the starting (row, column) indices of the token (a 2-tuple of ints)
+-    the ending (row, column) indices of the token (a 2-tuple of ints)
+-    the original line (string)
+-
+-It is designed to match the working of the Python tokenizer exactly, except
+-that it produces COMMENT tokens for comments and gives type OP for all
+-operators.
+-
+-Older entry points
+-    tokenize_loop(readline, tokeneater)
+-    tokenize(readline, tokeneater=printtoken)
+-are the same, except instead of generating tokens, tokeneater is a callback
+-function to which the 5 fields described above are passed as 5 arguments,
+-each time a new token is found."""
+-
+-__author__ = 'Ka-Ping Yee <ping at lfw.org>'
+-__credits__ = \
+-    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
+-
+-import string, re
+-from lib2to3.pgen2.token import *
+-
+-from . import token
+-__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
+-           "generate_tokens", "untokenize"]
+-del token
+-
+-def group(*choices): return '(' + '|'.join(choices) + ')'
+-def any(*choices): return group(*choices) + '*'
+-def maybe(*choices): return group(*choices) + '?'
+-
+-Whitespace = r'[ \f\t]*'
+-Comment = r'#[^\r\n]*'
+-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+-Name = r'[a-zA-Z_]\w*'
+-
+-Binnumber = r'0[bB][01]*'
+-Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
+-Octnumber = r'0[oO]?[0-7]*[lL]?'
+-Decnumber = r'[1-9]\d*[lL]?'
+-Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
+-Exponent = r'[eE][-+]?\d+'
+-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+-Expfloat = r'\d+' + Exponent
+-Floatnumber = group(Pointfloat, Expfloat)
+-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
+-Number = group(Imagnumber, Floatnumber, Intnumber)
+-
+-# Tail end of ' string.
+-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+-# Tail end of " string.
+-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+-# Tail end of ''' string.
+-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+-# Tail end of """ string.
+-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+-Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
+-# Single-line ' or " string.
+-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+-               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+-
+-# Because of leftmost-then-longest match semantics, be sure to put the
+-# longest operators first (e.g., if = came before ==, == would get
+-# recognized as two instances of =).
+-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
+-                 r"//=?", r"->",
+-                 r"[+\-*/%&|^=<>]=?",
+-                 r"~")
+-
+-Bracket = '[][(){}]'
+-Special = group(r'\r?\n', r'[:;.,`@]')
+-Funny = group(Operator, Bracket, Special)
+-
+-PlainToken = group(Number, Funny, String, Name)
+-Token = Ignore + PlainToken
+-
+-# First (or only) line of ' or " string.
+-ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+-                group("'", r'\\\r?\n'),
+-                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+-                group('"', r'\\\r?\n'))
+-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+-
+-tokenprog, pseudoprog, single3prog, double3prog = map(
+-    re.compile, (Token, PseudoToken, Single3, Double3))
+-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+-            "'''": single3prog, '"""': double3prog,
+-            "r'''": single3prog, 'r"""': double3prog,
+-            "u'''": single3prog, 'u"""': double3prog,
+-            "b'''": single3prog, 'b"""': double3prog,
+-            "ur'''": single3prog, 'ur"""': double3prog,
+-            "br'''": single3prog, 'br"""': double3prog,
+-            "R'''": single3prog, 'R"""': double3prog,
+-            "U'''": single3prog, 'U"""': double3prog,
+-            "B'''": single3prog, 'B"""': double3prog,
+-            "uR'''": single3prog, 'uR"""': double3prog,
+-            "Ur'''": single3prog, 'Ur"""': double3prog,
+-            "UR'''": single3prog, 'UR"""': double3prog,
+-            "bR'''": single3prog, 'bR"""': double3prog,
+-            "Br'''": single3prog, 'Br"""': double3prog,
+-            "BR'''": single3prog, 'BR"""': double3prog,
+-            'r': None, 'R': None,
+-            'u': None, 'U': None,
+-            'b': None, 'B': None}
+-
+-triple_quoted = {}
+-for t in ("'''", '"""',
+-          "r'''", 'r"""', "R'''", 'R"""',
+-          "u'''", 'u"""', "U'''", 'U"""',
+-          "b'''", 'b"""', "B'''", 'B"""',
+-          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+-          "uR'''", 'uR"""', "UR'''", 'UR"""',
+-          "br'''", 'br"""', "Br'''", 'Br"""',
+-          "bR'''", 'bR"""', "BR'''", 'BR"""',):
+-    triple_quoted[t] = t
+-single_quoted = {}
+-for t in ("'", '"',
+-          "r'", 'r"', "R'", 'R"',
+-          "u'", 'u"', "U'", 'U"',
+-          "b'", 'b"', "B'", 'B"',
+-          "ur'", 'ur"', "Ur'", 'Ur"',
+-          "uR'", 'uR"', "UR'", 'UR"',
+-          "br'", 'br"', "Br'", 'Br"',
+-          "bR'", 'bR"', "BR'", 'BR"', ):
+-    single_quoted[t] = t
+-
+-tabsize = 8
+-
+-class TokenError(Exception): pass
+-
+-class StopTokenizing(Exception): pass
+-
+-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
+-    print "%d,%d-%d,%d:\t%s\t%s" % \
+-        (srow, scol, erow, ecol, tok_name[type], repr(token))
+-
+-def tokenize(readline, tokeneater=printtoken):
+-    """
+-    The tokenize() function accepts two parameters: one representing the
+-    input stream, and one providing an output mechanism for tokenize().
+-
+-    The first parameter, readline, must be a callable object which provides
+-    the same interface as the readline() method of built-in file objects.
+-    Each call to the function should return one line of input as a string.
+-
+-    The second parameter, tokeneater, must also be a callable object. It is
+-    called once for each token, with five arguments, corresponding to the
+-    tuples generated by generate_tokens().
+-    """
+-    try:
+-        tokenize_loop(readline, tokeneater)
+-    except StopTokenizing:
+-        pass
+-
+-# backwards compatible interface
+-def tokenize_loop(readline, tokeneater):
+-    for token_info in generate_tokens(readline):
+-        tokeneater(*token_info)
+-
+-class Untokenizer:
+-
+-    def __init__(self):
+-        self.tokens = []
+-        self.prev_row = 1
+-        self.prev_col = 0
+-
+-    def add_whitespace(self, start):
+-        row, col = start
+-        assert row <= self.prev_row
+-        col_offset = col - self.prev_col
+-        if col_offset:
+-            self.tokens.append(" " * col_offset)
+-
+-    def untokenize(self, iterable):
+-        for t in iterable:
+-            if len(t) == 2:
+-                self.compat(t, iterable)
+-                break
+-            tok_type, token, start, end, line = t
+-            self.add_whitespace(start)
+-            self.tokens.append(token)
+-            self.prev_row, self.prev_col = end
+-            if tok_type in (NEWLINE, NL):
+-                self.prev_row += 1
+-                self.prev_col = 0
+-        return "".join(self.tokens)
+-
+-    def compat(self, token, iterable):
+-        startline = False
+-        indents = []
+-        toks_append = self.tokens.append
+-        toknum, tokval = token
+-        if toknum in (NAME, NUMBER):
+-            tokval += ' '
+-        if toknum in (NEWLINE, NL):
+-            startline = True
+-        for tok in iterable:
+-            toknum, tokval = tok[:2]
+-
+-            if toknum in (NAME, NUMBER):
+-                tokval += ' '
+-
+-            if toknum == INDENT:
+-                indents.append(tokval)
+-                continue
+-            elif toknum == DEDENT:
+-                indents.pop()
+-                continue
+-            elif toknum in (NEWLINE, NL):
+-                startline = True
+-            elif startline and indents:
+-                toks_append(indents[-1])
+-                startline = False
+-            toks_append(tokval)
+-
+-def untokenize(iterable):
+-    """Transform tokens back into Python source code.
+-
+-    Each element returned by the iterable must be a token sequence
+-    with at least two elements, a token number and token value.  If
+-    only two tokens are passed, the resulting output is poor.
+-
+-    Round-trip invariant for full input:
+-        Untokenized source will match input source exactly
+-
+-    Round-trip invariant for limited input:
+-        # Output text will tokenize back to the input
+-        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+-        newcode = untokenize(t1)
+-        readline = iter(newcode.splitlines(1)).next
+-        t2 = [tok[:2] for tok in generate_tokens(readline)]
+-        assert t1 == t2
+-    """
+-    ut = Untokenizer()
+-    return ut.untokenize(iterable)
+-
+-def generate_tokens(readline):
+-    """
+-    The generate_tokens() generator requires one argument, readline, which
+-    must be a callable object which provides the same interface as the
+-    readline() method of built-in file objects. Each call to the function
+-    should return one line of input as a string.  Alternately, readline
+-    can be a callable function terminating with StopIteration:
+-        readline = open(myfile).next    # Example of alternate readline
+-
+-    The generator produces 5-tuples with these members: the token type; the
+-    token string; a 2-tuple (srow, scol) of ints specifying the row and
+-    column where the token begins in the source; a 2-tuple (erow, ecol) of
+-    ints specifying the row and column where the token ends in the source;
+-    and the line on which the token was found. The line passed is the
+-    logical line; continuation lines are included.
+-    """
+-    lnum = parenlev = continued = 0
+-    namechars, numchars = string.ascii_letters + '_', '0123456789'
+-    contstr, needcont = '', 0
+-    contline = None
+-    indents = [0]
+-
+-    while 1:                                   # loop over lines in stream
+-        try:
+-            line = readline()
+-        except StopIteration:
+-            line = ''
+-        lnum = lnum + 1
+-        pos, max = 0, len(line)
+-
+-        if contstr:                            # continued string
+-            if not line:
+-                raise TokenError, ("EOF in multi-line string", strstart)
+-            endmatch = endprog.match(line)
+-            if endmatch:
+-                pos = end = endmatch.end(0)
+-                yield (STRING, contstr + line[:end],
+-                       strstart, (lnum, end), contline + line)
+-                contstr, needcont = '', 0
+-                contline = None
+-            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+-                yield (ERRORTOKEN, contstr + line,
+-                           strstart, (lnum, len(line)), contline)
+-                contstr = ''
+-                contline = None
+-                continue
+-            else:
+-                contstr = contstr + line
+-                contline = contline + line
+-                continue
+-
+-        elif parenlev == 0 and not continued:  # new statement
+-            if not line: break
+-            column = 0
+-            while pos < max:                   # measure leading whitespace
+-                if line[pos] == ' ': column = column + 1
+-                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
+-                elif line[pos] == '\f': column = 0
+-                else: break
+-                pos = pos + 1
+-            if pos == max: break
+-
+-            if line[pos] in '#\r\n':           # skip comments or blank lines
+-                if line[pos] == '#':
+-                    comment_token = line[pos:].rstrip('\r\n')
+-                    nl_pos = pos + len(comment_token)
+-                    yield (COMMENT, comment_token,
+-                           (lnum, pos), (lnum, pos + len(comment_token)), line)
+-                    yield (NL, line[nl_pos:],
+-                           (lnum, nl_pos), (lnum, len(line)), line)
+-                else:
+-                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+-                           (lnum, pos), (lnum, len(line)), line)
+-                continue
+-
+-            if column > indents[-1]:           # count indents or dedents
+-                indents.append(column)
+-                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+-            while column < indents[-1]:
+-                if column not in indents:
+-                    raise IndentationError(
+-                        "unindent does not match any outer indentation level",
+-                        ("<tokenize>", lnum, pos, line))
+-                indents = indents[:-1]
+-                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
+-
+-        else:                                  # continued statement
+-            if not line:
+-                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
+-            continued = 0
+-
+-        while pos < max:
+-            pseudomatch = pseudoprog.match(line, pos)
+-            if pseudomatch:                                # scan for tokens
+-                start, end = pseudomatch.span(1)
+-                spos, epos, pos = (lnum, start), (lnum, end), end
+-                token, initial = line[start:end], line[start]
+-
+-                if initial in numchars or \
+-                   (initial == '.' and token != '.'):      # ordinary number
+-                    yield (NUMBER, token, spos, epos, line)
+-                elif initial in '\r\n':
+-                    newline = NEWLINE
+-                    if parenlev > 0:
+-                        newline = NL
+-                    yield (newline, token, spos, epos, line)
+-                elif initial == '#':
+-                    assert not token.endswith("\n")
+-                    yield (COMMENT, token, spos, epos, line)
+-                elif token in triple_quoted:
+-                    endprog = endprogs[token]
+-                    endmatch = endprog.match(line, pos)
+-                    if endmatch:                           # all on one line
+-                        pos = endmatch.end(0)
+-                        token = line[start:pos]
+-                        yield (STRING, token, spos, (lnum, pos), line)
+-                    else:
+-                        strstart = (lnum, start)           # multiple lines
+-                        contstr = line[start:]
+-                        contline = line
+-                        break
+-                elif initial in single_quoted or \
+-                    token[:2] in single_quoted or \
+-                    token[:3] in single_quoted:
+-                    if token[-1] == '\n':                  # continued string
+-                        strstart = (lnum, start)
+-                        endprog = (endprogs[initial] or endprogs[token[1]] or
+-                                   endprogs[token[2]])
+-                        contstr, needcont = line[start:], 1
+-                        contline = line
+-                        break
+-                    else:                                  # ordinary string
+-                        yield (STRING, token, spos, epos, line)
+-                elif initial in namechars:                 # ordinary name
+-                    yield (NAME, token, spos, epos, line)
+-                elif initial == '\\':                      # continued stmt
+-                    # This yield is new; needed for better idempotency:
+-                    yield (NL, token, spos, (lnum, pos), line)
+-                    continued = 1
+-                else:
+-                    if initial in '([{': parenlev = parenlev + 1
+-                    elif initial in ')]}': parenlev = parenlev - 1
+-                    yield (OP, token, spos, epos, line)
+-            else:
+-                yield (ERRORTOKEN, line[pos],
+-                           (lnum, pos), (lnum, pos+1), line)
+-                pos = pos + 1
+-
+-    for indent in indents[1:]:                 # pop remaining indent levels
+-        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
+-    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+-
+-if __name__ == '__main__':                     # testing
+-    import sys
+-    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
+-    else: tokenize(sys.stdin.readline)
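
For reference, the generator deleted above is the pgen2 tokenizer: it is driven by handing generate_tokens() a readline callable and consuming 5-tuples. A minimal sketch, assuming the pre-move lib2to3.pgen2.tokenize path (the source string here is made up):

    from StringIO import StringIO
    from lib2to3.pgen2 import tokenize

    source = StringIO("x = 1\n")
    # Each token is (type, string, (srow, scol), (erow, ecol), logical_line).
    for tok_type, tok_str, start, end, line in tokenize.generate_tokens(source.readline):
        print tok_type, repr(tok_str), start, end
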
+diff -r 531f2e948299 lib2to3/pygram.py
+--- a/lib2to3/pygram.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,31 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Export the Python grammar and symbols."""
+-
+-# Python imports
+-import os
+-
+-# Local imports
+-from .pgen2 import token
+-from .pgen2 import driver
+-from . import pytree
+-
+-# The grammar file
+-_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
+-
+-
+-class Symbols(object):
+-
+-    def __init__(self, grammar):
+-        """Initializer.
+-
+-        Creates an attribute for each grammar symbol (nonterminal),
+-        whose value is the symbol's type (an int >= 256).
+-        """
+-        for name, symbol in grammar.symbol2number.iteritems():
+-            setattr(self, name, symbol)
+-
+-
+-python_grammar = driver.load_grammar(_GRAMMAR_FILE)
+-python_symbols = Symbols(python_grammar)
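
The deleted pygram module is tiny but load-bearing: everything else looks up grammar nonterminals through python_symbols. A minimal sketch of that lookup, assuming the pre-move lib2to3 package name:

    from lib2to3 import pygram

    syms = pygram.python_symbols
    # Every nonterminal in Grammar.txt becomes an int attribute (>= 256),
    # so fixers can compare node.type against e.g. syms.funcdef.
    assert syms.funcdef >= 256
    print syms.funcdef, syms.file_input
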
+diff -r 531f2e948299 lib2to3/pytree.py
+--- a/lib2to3/pytree.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,846 +0,0 @@
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""
+-Python parse tree definitions.
+-
+-This is a very concrete parse tree; we need to keep every token and
+-even the comments and whitespace between tokens.
+-
+-There's also a pattern matching implementation here.
+-"""
+-
+-__author__ = "Guido van Rossum <guido at python.org>"
+-
+-import sys
+-from StringIO import StringIO
+-
+-
+-HUGE = 0x7FFFFFFF  # maximum repeat count, default max
+-
+-_type_reprs = {}
+-def type_repr(type_num):
+-    global _type_reprs
+-    if not _type_reprs:
+-        from .pygram import python_symbols
+-        # printing tokens is possible but not as useful
+-        # from .pgen2 import token // token.__dict__.items():
+-        for name, val in python_symbols.__dict__.items():
+-            if type(val) == int: _type_reprs[val] = name
+-    return _type_reprs.setdefault(type_num, type_num)
+-
+-
+-class Base(object):
+-
+-    """
+-    Abstract base class for Node and Leaf.
+-
+-    This provides some default functionality and boilerplate using the
+-    template pattern.
+-
+-    A node may be a subnode of at most one parent.
+-    """
+-
+-    # Default values for instance variables
+-    type = None    # int: token number (< 256) or symbol number (>= 256)
+-    parent = None  # Parent node pointer, or None
+-    children = ()  # Tuple of subnodes
+-    was_changed = False
+-
+-    def __new__(cls, *args, **kwds):
+-        """Constructor that prevents Base from being instantiated."""
+-        assert cls is not Base, "Cannot instantiate Base"
+-        return object.__new__(cls)
+-
+-    def __eq__(self, other):
+-        """
+-        Compare two nodes for equality.
+-
+-        This calls the method _eq().
+-        """
+-        if self.__class__ is not other.__class__:
+-            return NotImplemented
+-        return self._eq(other)
+-
+-    def __ne__(self, other):
+-        """
+-        Compare two nodes for inequality.
+-
+-        This calls the method _eq().
+-        """
+-        if self.__class__ is not other.__class__:
+-            return NotImplemented
+-        return not self._eq(other)
+-
+-    def _eq(self, other):
+-        """
+-        Compare two nodes for equality.
+-
+-        This is called by __eq__ and __ne__.  It is only called if the two nodes
+-        have the same type.  This must be implemented by the concrete subclass.
+-        Nodes should be considered equal if they have the same structure,
+-        ignoring the prefix string and other context information.
+-        """
+-        raise NotImplementedError
+-
+-    def clone(self):
+-        """
+-        Return a cloned (deep) copy of self.
+-
+-        This must be implemented by the concrete subclass.
+-        """
+-        raise NotImplementedError
+-
+-    def post_order(self):
+-        """
+-        Return a post-order iterator for the tree.
+-
+-        This must be implemented by the concrete subclass.
+-        """
+-        raise NotImplementedError
+-
+-    def pre_order(self):
+-        """
+-        Return a pre-order iterator for the tree.
+-
+-        This must be implemented by the concrete subclass.
+-        """
+-        raise NotImplementedError
+-
+-    def set_prefix(self, prefix):
+-        """
+-        Set the prefix for the node (see Leaf class).
+-
+-        This must be implemented by the concrete subclass.
+-        """
+-        raise NotImplementedError
+-
+-    def get_prefix(self):
+-        """
+-        Return the prefix for the node (see Leaf class).
+-
+-        This must be implemented by the concrete subclass.
+-        """
+-        raise NotImplementedError
+-
+-    def replace(self, new):
+-        """Replace this node with a new one in the parent."""
+-        assert self.parent is not None, str(self)
+-        assert new is not None
+-        if not isinstance(new, list):
+-            new = [new]
+-        l_children = []
+-        found = False
+-        for ch in self.parent.children:
+-            if ch is self:
+-                assert not found, (self.parent.children, self, new)
+-                if new is not None:
+-                    l_children.extend(new)
+-                found = True
+-            else:
+-                l_children.append(ch)
+-        assert found, (self.children, self, new)
+-        self.parent.changed()
+-        self.parent.children = l_children
+-        for x in new:
+-            x.parent = self.parent
+-        self.parent = None
+-
+-    def get_lineno(self):
+-        """Return the line number which generated the invocant node."""
+-        node = self
+-        while not isinstance(node, Leaf):
+-            if not node.children:
+-                return
+-            node = node.children[0]
+-        return node.lineno
+-
+-    def changed(self):
+-        if self.parent:
+-            self.parent.changed()
+-        self.was_changed = True
+-
+-    def remove(self):
+-        """
+-        Remove the node from the tree. Returns the position of the node in its
+-        parent's children before it was removed.
+-        """
+-        if self.parent:
+-            for i, node in enumerate(self.parent.children):
+-                if node is self:
+-                    self.parent.changed()
+-                    del self.parent.children[i]
+-                    self.parent = None
+-                    return i
+-
+-    @property
+-    def next_sibling(self):
+-        """
+-        The node immediately following the invocant in their parent's children
+-        list. If the invocant does not have a next sibling, it is None.
+-        """
+-        if self.parent is None:
+-            return None
+-
+-        # Can't use index(); we need to test by identity
+-        for i, child in enumerate(self.parent.children):
+-            if child is self:
+-                try:
+-                    return self.parent.children[i+1]
+-                except IndexError:
+-                    return None
+-
+-    @property
+-    def prev_sibling(self):
+-        """
+-        The node immediately preceding the invocant in their parent's children
+-        list. If the invocant does not have a previous sibling, it is None.
+-        """
+-        if self.parent is None:
+-            return None
+-
+-        # Can't use index(); we need to test by identity
+-        for i, child in enumerate(self.parent.children):
+-            if child is self:
+-                if i == 0:
+-                    return None
+-                return self.parent.children[i-1]
+-
+-    def get_suffix(self):
+-        """
+-        Return the string immediately following the invocant node. This is
+-        effectively equivalent to node.next_sibling.get_prefix()
+-        """
+-        next_sib = self.next_sibling
+-        if next_sib is None:
+-            return ""
+-        return next_sib.get_prefix()
+-
+-
+-class Node(Base):
+-
+-    """Concrete implementation for interior nodes."""
+-
+-    def __init__(self, type, children, context=None, prefix=None):
+-        """
+-        Initializer.
+-
+-        Takes a type constant (a symbol number >= 256), a sequence of
+-        child nodes, and an optional context keyword argument.
+-
+-        As a side effect, the parent pointers of the children are updated.
+-        """
+-        assert type >= 256, type
+-        self.type = type
+-        self.children = list(children)
+-        for ch in self.children:
+-            assert ch.parent is None, repr(ch)
+-            ch.parent = self
+-        if prefix is not None:
+-            self.set_prefix(prefix)
+-
+-    def __repr__(self):
+-        """Return a canonical string representation."""
+-        return "%s(%s, %r)" % (self.__class__.__name__,
+-                               type_repr(self.type),
+-                               self.children)
+-
+-    def __str__(self):
+-        """
+-        Return a pretty string representation.
+-
+-        This reproduces the input source exactly.
+-        """
+-        return "".join(map(str, self.children))
+-
+-    def _eq(self, other):
+-        """Compare two nodes for equality."""
+-        return (self.type, self.children) == (other.type, other.children)
+-
+-    def clone(self):
+-        """Return a cloned (deep) copy of self."""
+-        return Node(self.type, [ch.clone() for ch in self.children])
+-
+-    def post_order(self):
+-        """Return a post-order iterator for the tree."""
+-        for child in self.children:
+-            for node in child.post_order():
+-                yield node
+-        yield self
+-
+-    def pre_order(self):
+-        """Return a pre-order iterator for the tree."""
+-        yield self
+-        for child in self.children:
+-            for node in child.pre_order():
+-                yield node
+-
+-    def set_prefix(self, prefix):
+-        """
+-        Set the prefix for the node.
+-
+-        This passes the responsibility on to the first child.
+-        """
+-        if self.children:
+-            self.children[0].set_prefix(prefix)
+-
+-    def get_prefix(self):
+-        """
+-        Return the prefix for the node.
+-
+-        This passes the call on to the first child.
+-        """
+-        if not self.children:
+-            return ""
+-        return self.children[0].get_prefix()
+-
+-    def set_child(self, i, child):
+-        """
+-        Equivalent to 'node.children[i] = child'. This method also sets the
+-        child's parent attribute appropriately.
+-        """
+-        child.parent = self
+-        self.children[i].parent = None
+-        self.children[i] = child
+-        self.changed()
+-
+-    def insert_child(self, i, child):
+-        """
+-        Equivalent to 'node.children.insert(i, child)'. This method also sets
+-        the child's parent attribute appropriately.
+-        """
+-        child.parent = self
+-        self.children.insert(i, child)
+-        self.changed()
+-
+-    def append_child(self, child):
+-        """
+-        Equivalent to 'node.children.append(child)'. This method also sets the
+-        child's parent attribute appropriately.
+-        """
+-        child.parent = self
+-        self.children.append(child)
+-        self.changed()
+-
+-
+-class Leaf(Base):
+-
+-    """Concrete implementation for leaf nodes."""
+-
+-    # Default values for instance variables
+-    prefix = ""  # Whitespace and comments preceding this token in the input
+-    lineno = 0   # Line where this token starts in the input
+-    column = 0   # Column where this token starts in the input
+-
+-    def __init__(self, type, value, context=None, prefix=None):
+-        """
+-        Initializer.
+-
+-        Takes a type constant (a token number < 256), a string value, and an
+-        optional context keyword argument.
+-        """
+-        assert 0 <= type < 256, type
+-        if context is not None:
+-            self.prefix, (self.lineno, self.column) = context
+-        self.type = type
+-        self.value = value
+-        if prefix is not None:
+-            self.prefix = prefix
+-
+-    def __repr__(self):
+-        """Return a canonical string representation."""
+-        return "%s(%r, %r)" % (self.__class__.__name__,
+-                               self.type,
+-                               self.value)
+-
+-    def __str__(self):
+-        """
+-        Return a pretty string representation.
+-
+-        This reproduces the input source exactly.
+-        """
+-        return self.prefix + str(self.value)
+-
+-    def _eq(self, other):
+-        """Compare two nodes for equality."""
+-        return (self.type, self.value) == (other.type, other.value)
+-
+-    def clone(self):
+-        """Return a cloned (deep) copy of self."""
+-        return Leaf(self.type, self.value,
+-                    (self.prefix, (self.lineno, self.column)))
+-
+-    def post_order(self):
+-        """Return a post-order iterator for the tree."""
+-        yield self
+-
+-    def pre_order(self):
+-        """Return a pre-order iterator for the tree."""
+-        yield self
+-
+-    def set_prefix(self, prefix):
+-        """Set the prefix for the node."""
+-        self.changed()
+-        self.prefix = prefix
+-
+-    def get_prefix(self):
+-        """Return the prefix for the node."""
+-        return self.prefix
+-
+-
+-def convert(gr, raw_node):
+-    """
+-    Convert raw node information to a Node or Leaf instance.
+-
+-    This is passed to the parser driver which calls it whenever a reduction of a
+-    grammar rule produces a new complete node, so that the tree is built
+-    strictly bottom-up.
+-    """
+-    type, value, context, children = raw_node
+-    if children or type in gr.number2symbol:
+-        # If there's exactly one child, return that child instead of
+-        # creating a new node.
+-        if len(children) == 1:
+-            return children[0]
+-        return Node(type, children, context=context)
+-    else:
+-        return Leaf(type, value, context=context)
+-
+-
+-class BasePattern(object):
+-
+-    """
+-    A pattern is a tree matching pattern.
+-
+-    It looks for a specific node type (token or symbol), and
+-    optionally for a specific content.
+-
+-    This is an abstract base class.  There are three concrete
+-    subclasses:
+-
+-    - LeafPattern matches a single leaf node;
+-    - NodePattern matches a single node (usually non-leaf);
+-    - WildcardPattern matches a sequence of nodes of variable length.
+-    """
+-
+-    # Defaults for instance variables
+-    type = None     # Node type (token if < 256, symbol if >= 256)
+-    content = None  # Optional content matching pattern
+-    name = None     # Optional name used to store match in results dict
+-
+-    def __new__(cls, *args, **kwds):
+-        """Constructor that prevents BasePattern from being instantiated."""
+-        assert cls is not BasePattern, "Cannot instantiate BasePattern"
+-        return object.__new__(cls)
+-
+-    def __repr__(self):
+-        args = [type_repr(self.type), self.content, self.name]
+-        while args and args[-1] is None:
+-            del args[-1]
+-        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
+-
+-    def optimize(self):
+-        """
+-        A subclass can define this as a hook for optimizations.
+-
+-        Returns either self or another node with the same effect.
+-        """
+-        return self
+-
+-    def match(self, node, results=None):
+-        """
+-        Does this pattern exactly match a node?
+-
+-        Returns True if it matches, False if not.
+-
+-        If results is not None, it must be a dict which will be
+-        updated with the nodes matching named subpatterns.
+-
+-        Default implementation for non-wildcard patterns.
+-        """
+-        if self.type is not None and node.type != self.type:
+-            return False
+-        if self.content is not None:
+-            r = None
+-            if results is not None:
+-                r = {}
+-            if not self._submatch(node, r):
+-                return False
+-            if r:
+-                results.update(r)
+-        if results is not None and self.name:
+-            results[self.name] = node
+-        return True
+-
+-    def match_seq(self, nodes, results=None):
+-        """
+-        Does this pattern exactly match a sequence of nodes?
+-
+-        Default implementation for non-wildcard patterns.
+-        """
+-        if len(nodes) != 1:
+-            return False
+-        return self.match(nodes[0], results)
+-
+-    def generate_matches(self, nodes):
+-        """
+-        Generator yielding all matches for this pattern.
+-
+-        Default implementation for non-wildcard patterns.
+-        """
+-        r = {}
+-        if nodes and self.match(nodes[0], r):
+-            yield 1, r
+-
+-
+-class LeafPattern(BasePattern):
+-
+-    def __init__(self, type=None, content=None, name=None):
+-        """
+-        Initializer.  Takes optional type, content, and name.
+-
+-        The type, if given must be a token type (< 256).  If not given,
+-        this matches any *leaf* node; the content may still be required.
+-
+-        The content, if given, must be a string.
+-
+-        If a name is given, the matching node is stored in the results
+-        dict under that key.
+-        """
+-        if type is not None:
+-            assert 0 <= type < 256, type
+-        if content is not None:
+-            assert isinstance(content, basestring), repr(content)
+-        self.type = type
+-        self.content = content
+-        self.name = name
+-
+-    def match(self, node, results=None):
+-        """Override match() to insist on a leaf node."""
+-        if not isinstance(node, Leaf):
+-            return False
+-        return BasePattern.match(self, node, results)
+-
+-    def _submatch(self, node, results=None):
+-        """
+-        Match the pattern's content to the node's children.
+-
+-        This assumes the node type matches and self.content is not None.
+-
+-        Returns True if it matches, False if not.
+-
+-        If results is not None, it must be a dict which will be
+-        updated with the nodes matching named subpatterns.
+-
+-        When returning False, the results dict may still be updated.
+-        """
+-        return self.content == node.value
+-
+-
+-class NodePattern(BasePattern):
+-
+-    wildcards = False
+-
+-    def __init__(self, type=None, content=None, name=None):
+-        """
+-        Initializer.  Takes optional type, content, and name.
+-
+-        The type, if given, must be a symbol type (>= 256).  If the
+-        type is None this matches *any* single node (leaf or not),
+-        except if content is not None, in which case it only matches
+-        non-leaf nodes that also match the content pattern.
+-
+-        The content, if not None, must be a sequence of Patterns that
+-        must match the node's children exactly.  If the content is
+-        given, the type must not be None.
+-
+-        If a name is given, the matching node is stored in the results
+-        dict under that key.
+-        """
+-        if type is not None:
+-            assert type >= 256, type
+-        if content is not None:
+-            assert not isinstance(content, basestring), repr(content)
+-            content = list(content)
+-            for i, item in enumerate(content):
+-                assert isinstance(item, BasePattern), (i, item)
+-                if isinstance(item, WildcardPattern):
+-                    self.wildcards = True
+-        self.type = type
+-        self.content = content
+-        self.name = name
+-
+-    def _submatch(self, node, results=None):
+-        """
+-        Match the pattern's content to the node's children.
+-
+-        This assumes the node type matches and self.content is not None.
+-
+-        Returns True if it matches, False if not.
+-
+-        If results is not None, it must be a dict which will be
+-        updated with the nodes matching named subpatterns.
+-
+-        When returning False, the results dict may still be updated.
+-        """
+-        if self.wildcards:
+-            for c, r in generate_matches(self.content, node.children):
+-                if c == len(node.children):
+-                    if results is not None:
+-                        results.update(r)
+-                    return True
+-            return False
+-        if len(self.content) != len(node.children):
+-            return False
+-        for subpattern, child in zip(self.content, node.children):
+-            if not subpattern.match(child, results):
+-                return False
+-        return True
+-
+-
+-class WildcardPattern(BasePattern):
+-
+-    """
+-    A wildcard pattern can match zero or more nodes.
+-
+-    This has all the flexibility needed to implement patterns like:
+-
+-    .*      .+      .?      .{m,n}
+-    (a b c | d e | f)
+-    (...)*  (...)+  (...)?  (...){m,n}
+-
+-    except it always uses non-greedy matching.
+-    """
+-
+-    def __init__(self, content=None, min=0, max=HUGE, name=None):
+-        """
+-        Initializer.
+-
+-        Args:
+-            content: optional sequence of subsequences of patterns;
+-                     if absent, matches one node;
+-                     if present, each subsequence is an alternative [*]
+-            min: optional minimum number of times to match, default 0
+-            max: optional maximum number of times to match, default HUGE
+-            name: optional name assigned to this match
+-
+-        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
+-            equivalent to (a b c | d e | f g h); if content is None,
+-            this is equivalent to '.' in regular expression terms.
+-            The min and max parameters work as follows:
+-                min=0, max=maxint: .*
+-                min=1, max=maxint: .+
+-                min=0, max=1: .?
+-                min=1, max=1: .
+-            If content is not None, replace the dot with the parenthesized
+-            list of alternatives, e.g. (a b c | d e | f g h)*
+-        """
+-        assert 0 <= min <= max <= HUGE, (min, max)
+-        if content is not None:
+-            content = tuple(map(tuple, content))  # Protect against alterations
+-            # Check sanity of alternatives
+-            assert len(content), repr(content)  # Can't have zero alternatives
+-            for alt in content:
+-                assert len(alt), repr(alt)  # Can't have empty alternatives
+-        self.content = content
+-        self.min = min
+-        self.max = max
+-        self.name = name
+-
+-    def optimize(self):
+-        """Optimize certain stacked wildcard patterns."""
+-        subpattern = None
+-        if (self.content is not None and
+-            len(self.content) == 1 and len(self.content[0]) == 1):
+-            subpattern = self.content[0][0]
+-        if self.min == 1 and self.max == 1:
+-            if self.content is None:
+-                return NodePattern(name=self.name)
+-            if subpattern is not None and self.name == subpattern.name:
+-                return subpattern.optimize()
+-        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
+-            subpattern.min <= 1 and self.name == subpattern.name):
+-            return WildcardPattern(subpattern.content,
+-                                   self.min*subpattern.min,
+-                                   self.max*subpattern.max,
+-                                   subpattern.name)
+-        return self
+-
+-    def match(self, node, results=None):
+-        """Does this pattern exactly match a node?"""
+-        return self.match_seq([node], results)
+-
+-    def match_seq(self, nodes, results=None):
+-        """Does this pattern exactly match a sequence of nodes?"""
+-        for c, r in self.generate_matches(nodes):
+-            if c == len(nodes):
+-                if results is not None:
+-                    results.update(r)
+-                    if self.name:
+-                        results[self.name] = list(nodes)
+-                return True
+-        return False
+-
+-    def generate_matches(self, nodes):
+-        """
+-        Generator yielding matches for a sequence of nodes.
+-
+-        Args:
+-            nodes: sequence of nodes
+-
+-        Yields:
+-            (count, results) tuples where:
+-            count: the match comprises nodes[:count];
+-            results: dict containing named submatches.
+-        """
+-        if self.content is None:
+-            # Shortcut for special case (see __init__.__doc__)
+-            for count in xrange(self.min, 1 + min(len(nodes), self.max)):
+-                r = {}
+-                if self.name:
+-                    r[self.name] = nodes[:count]
+-                yield count, r
+-        elif self.name == "bare_name":
+-            yield self._bare_name_matches(nodes)
+-        else:
+-            # The reason for this is that hitting the recursion limit usually
+-            # results in some ugly messages about how RuntimeErrors are being
+-            # ignored.
+-            save_stderr = sys.stderr
+-            sys.stderr = StringIO()
+-            try:
+-                for count, r in self._recursive_matches(nodes, 0):
+-                    if self.name:
+-                        r[self.name] = nodes[:count]
+-                    yield count, r
+-            except RuntimeError:
+-                # We fall back to the iterative pattern matching scheme if the recursive
+-                # scheme hits the recursion limit.
+-                for count, r in self._iterative_matches(nodes):
+-                    if self.name:
+-                        r[self.name] = nodes[:count]
+-                    yield count, r
+-            finally:
+-                sys.stderr = save_stderr
+-
+-    def _iterative_matches(self, nodes):
+-        """Helper to iteratively yield the matches."""
+-        nodelen = len(nodes)
+-        if self.min <= 0:
+-            yield 0, {}
+-
+-        results = []
+-        # generate matches that use just one alt from self.content
+-        for alt in self.content:
+-            for c, r in generate_matches(alt, nodes):
+-                yield c, r
+-                results.append((c, r))
+-
+-        # for each match, iterate down the nodes
+-        while results:
+-            new_results = []
+-            for c0, r0 in results:
+-                # stop if the entire set of nodes has been matched
+-                if c0 < nodelen and c0 <= self.max:
+-                    for alt in self.content:
+-                        for c1, r1 in generate_matches(alt, nodes[c0:]):
+-                            if c1 > 0:
+-                                r = {}
+-                                r.update(r0)
+-                                r.update(r1)
+-                                yield c0 + c1, r
+-                                new_results.append((c0 + c1, r))
+-            results = new_results
+-
+-    def _bare_name_matches(self, nodes):
+-        """Special optimized matcher for bare_name."""
+-        count = 0
+-        r = {}
+-        done = False
+-        max = len(nodes)
+-        while not done and count < max:
+-            done = True
+-            for leaf in self.content:
+-                if leaf[0].match(nodes[count], r):
+-                    count += 1
+-                    done = False
+-                    break
+-        r[self.name] = nodes[:count]
+-        return count, r
+-
+-    def _recursive_matches(self, nodes, count):
+-        """Helper to recursively yield the matches."""
+-        assert self.content is not None
+-        if count >= self.min:
+-            yield 0, {}
+-        if count < self.max:
+-            for alt in self.content:
+-                for c0, r0 in generate_matches(alt, nodes):
+-                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
+-                        r = {}
+-                        r.update(r0)
+-                        r.update(r1)
+-                        yield c0 + c1, r
+-
+-
+-class NegatedPattern(BasePattern):
+-
+-    def __init__(self, content=None):
+-        """
+-        Initializer.
+-
+-        The argument is either a pattern or None.  If it is None, this
+-        only matches an empty sequence (effectively '$' in regex
+-        lingo).  If it is not None, this matches whenever the argument
+-        pattern doesn't have any matches.
+-        """
+-        if content is not None:
+-            assert isinstance(content, BasePattern), repr(content)
+-        self.content = content
+-
+-    def match(self, node):
+-        # We never match a node in its entirety
+-        return False
+-
+-    def match_seq(self, nodes):
+-        # We only match an empty sequence of nodes in its entirety
+-        return len(nodes) == 0
+-
+-    def generate_matches(self, nodes):
+-        if self.content is None:
+-            # Return a match if there is an empty sequence
+-            if len(nodes) == 0:
+-                yield 0, {}
+-        else:
+-            # Return a match if the argument pattern has no matches
+-            for c, r in self.content.generate_matches(nodes):
+-                return
+-            yield 0, {}
+-
+-
+-def generate_matches(patterns, nodes):
+-    """
+-    Generator yielding matches for a sequence of patterns and nodes.
+-
+-    Args:
+-        patterns: a sequence of patterns
+-        nodes: a sequence of nodes
+-
+-    Yields:
+-        (count, results) tuples where:
+-        count: the entire sequence of patterns matches nodes[:count];
+-        results: dict containing named submatches.
+-        """
+-    if not patterns:
+-        yield 0, {}
+-    else:
+-        p, rest = patterns[0], patterns[1:]
+-        for c0, r0 in p.generate_matches(nodes):
+-            if not rest:
+-                yield c0, r0
+-            else:
+-                for c1, r1 in generate_matches(rest, nodes[c0:]):
+-                    r = {}
+-                    r.update(r0)
+-                    r.update(r1)
+-                    yield c0 + c1, r
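
Since pytree.py above defines both the tree and the pattern matcher, a minimal sketch of how the two halves meet may help; it assumes the pre-move lib2to3 paths, with token numbers taken from pgen2.token:

    from lib2to3 import pytree
    from lib2to3.pgen2 import token

    leaf = pytree.Leaf(token.NAME, "x")
    # LeafPattern(type, content, name): match a NAME leaf whose value is "x"
    # and record the matching node in the results dict under "target".
    pat = pytree.LeafPattern(token.NAME, "x", name="target")
    results = {}
    assert pat.match(leaf, results)
    assert results["target"] is leaf
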
+diff -r 531f2e948299 lib2to3/refactor.py
+--- a/lib2to3/refactor.py	Mon Mar 30 20:02:09 2009 -0500
++++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
+@@ -1,515 +0,0 @@
+-#!/usr/bin/env python2.5
+-# Copyright 2006 Google, Inc. All Rights Reserved.
+-# Licensed to PSF under a Contributor Agreement.
+-
+-"""Refactoring framework.
+-
+-Used as a main program, this can refactor any number of files and/or
+-recursively descend down directories.  Imported as a module, this
+-provides infrastructure to write your own refactoring tool.
+-"""
+-
+-__author__ = "Guido van Rossum <guido at python.org>"
+-
+-
+-# Python imports
+-import os
+-import sys
+-import difflib
+-import logging
+-import operator
+-from collections import defaultdict
+-from itertools import chain
+-
+-# Local imports
+-from .pgen2 import driver
+-from .pgen2 import tokenize
+-
+-from . import pytree
+-from . import patcomp
+-from . import fixes
+-from . import pygram
+-
+-
+-def get_all_fix_names(fixer_pkg, remove_prefix=True):
+-    """Return a sorted list of all available fix names in the given package."""
+-    pkg = __import__(fixer_pkg, [], [], ["*"])
+-    fixer_dir = os.path.dirname(pkg.__file__)
+-    fix_names = []
+-    for name in sorted(os.listdir(fixer_dir)):
+-        if name.startswith("fix_") and name.endswith(".py"):
+-            if remove_prefix:
+-                name = name[4:]
+-            fix_names.append(name[:-3])
+-    return fix_names
+-
+-def get_head_types(pat):
+-    """ Accepts a pytree Pattern Node and returns a set
+-        of the pattern types which will match first. """
+-
+-    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
+-        # NodePatterns must either have no type and no content
+-        #   or a type and content -- so they don't get any further
+-        # Always return leaves
+-        return set([pat.type])
+-
+-    if isinstance(pat, pytree.NegatedPattern):
+-        if pat.content:
+-            return get_head_types(pat.content)
+-        return set([None]) # Negated Patterns don't have a type
+-
+-    if isinstance(pat, pytree.WildcardPattern):
+-        # Recurse on each node in content
+-        r = set()
+-        for p in pat.content:
+-            for x in p:
+-                r.update(get_head_types(x))
+-        return r
+-
+-    raise Exception("Oh no! I don't understand pattern %s" %(pat))
+-
+-def get_headnode_dict(fixer_list):
+-    """ Accepts a list of fixers and returns a dictionary
+-        of head node type --> fixer list.  """
+-    head_nodes = defaultdict(list)
+-    for fixer in fixer_list:
+-        if not fixer.pattern:
+-            head_nodes[None].append(fixer)
+-            continue
+-        for t in get_head_types(fixer.pattern):
+-            head_nodes[t].append(fixer)
+-    return head_nodes
+-
+-def get_fixers_from_package(pkg_name):
+-    """
+-    Return the fully qualified names for fixers in the package pkg_name.
+-    """
+-    return [pkg_name + "." + fix_name
+-            for fix_name in get_all_fix_names(pkg_name, False)]
+-
+-
+-class FixerError(Exception):
+-    """A fixer could not be loaded."""
+-
+-
+-class RefactoringTool(object):
+-
+-    _default_options = {"print_function": False}
+-
+-    CLASS_PREFIX = "Fix" # The prefix for fixer classes
+-    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
+-
+-    def __init__(self, fixer_names, options=None, explicit=None):
+-        """Initializer.
+-
+-        Args:
+-            fixer_names: a list of fixers to import
+-            options: a dict with configuration.
+-            explicit: a list of fixers to run even if they are marked explicit.
+-        """
+-        self.fixers = fixer_names
+-        self.explicit = explicit or []
+-        self.options = self._default_options.copy()
+-        if options is not None:
+-            self.options.update(options)
+-        self.errors = []
+-        self.logger = logging.getLogger("RefactoringTool")
+-        self.fixer_log = []
+-        self.wrote = False
+-        if self.options["print_function"]:
+-            del pygram.python_grammar.keywords["print"]
+-        self.driver = driver.Driver(pygram.python_grammar,
+-                                    convert=pytree.convert,
+-                                    logger=self.logger)
+-        self.pre_order, self.post_order = self.get_fixers()
+-
+-        self.pre_order_heads = get_headnode_dict(self.pre_order)
+-        self.post_order_heads = get_headnode_dict(self.post_order)
+-
+-        self.files = []  # List of files that were or should be modified
+-
+-    def get_fixers(self):
+-        """Inspects the options to load the requested patterns and handlers.
+-
+-        Returns:
+-          (pre_order, post_order), where pre_order is the list of fixers that
+-          want a pre-order AST traversal, and post_order is the list that want
+-          post-order traversal.
+-        """
+-        pre_order_fixers = []
+-        post_order_fixers = []
+-        for fix_mod_path in self.fixers:
+-            mod = __import__(fix_mod_path, {}, {}, ["*"])
+-            fix_name = fix_mod_path.rsplit(".", 1)[-1]
+-            if fix_name.startswith(self.FILE_PREFIX):
+-                fix_name = fix_name[len(self.FILE_PREFIX):]
+-            parts = fix_name.split("_")
+-            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
+-            try:
+-                fix_class = getattr(mod, class_name)
+-            except AttributeError:
+-                raise FixerError("Can't find %s.%s" % (fix_name, class_name))
+-            fixer = fix_class(self.options, self.fixer_log)
+-            if fixer.explicit and self.explicit is not True and \
+-                    fix_mod_path not in self.explicit:
+-                self.log_message("Skipping implicit fixer: %s", fix_name)
+-                continue
+-
+-            self.log_debug("Adding transformation: %s", fix_name)
+-            if fixer.order == "pre":
+-                pre_order_fixers.append(fixer)
+-            elif fixer.order == "post":
+-                post_order_fixers.append(fixer)
+-            else:
+-                raise FixerError("Illegal fixer order: %r" % fixer.order)
+-
+-        key_func = operator.attrgetter("run_order")
+-        pre_order_fixers.sort(key=key_func)
+-        post_order_fixers.sort(key=key_func)
+-        return (pre_order_fixers, post_order_fixers)
+-
+-    def log_error(self, msg, *args, **kwds):
+-        """Called when an error occurs."""
+-        raise
+-
+-    def log_message(self, msg, *args):
+-        """Hook to log a message."""
+-        if args:
+-            msg = msg % args
+-        self.logger.info(msg)
+-
+-    def log_debug(self, msg, *args):
+-        if args:
+-            msg = msg % args
+-        self.logger.debug(msg)
+-
+-    def print_output(self, lines):
+-        """Called with lines of output to give to the user."""
+-        pass
+-
+-    def refactor(self, items, write=False, doctests_only=False):
+-        """Refactor a list of files and directories."""
+-        for dir_or_file in items:
+-            if os.path.isdir(dir_or_file):
+-                self.refactor_dir(dir_or_file, write, doctests_only)
+-            else:
+-                self.refactor_file(dir_or_file, write, doctests_only)
+-
+-    def refactor_dir(self, dir_name, write=False, doctests_only=False):
+-        """Descends down a directory and refactor every Python file found.
+-
+-        Python files are assumed to have a .py extension.
+-
+-        Files and subdirectories starting with '.' are skipped.
+-        """
+-        for dirpath, dirnames, filenames in os.walk(dir_name):
+-            self.log_debug("Descending into %s", dirpath)
+-            dirnames.sort()
+-            filenames.sort()
+-            for name in filenames:
+-                if not name.startswith(".") and name.endswith("py"):
+-                    fullname = os.path.join(dirpath, name)
+-                    self.refactor_file(fullname, write, doctests_only)
+-            # Modify dirnames in-place to remove subdirs with leading dots
+-            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
+-
+-    def refactor_file(self, filename, write=False, doctests_only=False):
+-        """Refactors a file."""
+-        try:
+-            f = open(filename)
+-        except IOError, err:
+-            self.log_error("Can't open %s: %s", filename, err)
+-            return
+-        try:
+-            input = f.read() + "\n" # Silence certain parse errors
+-        finally:
+-            f.close()
+-        if doctests_only:
+-            self.log_debug("Refactoring doctests in %s", filename)
+-            output = self.refactor_docstring(input, filename)
+-            if output != input:
+-                self.processed_file(output, filename, input, write=write)
+-            else:
+-                self.log_debug("No doctest changes in %s", filename)
+-        else:
+-            tree = self.refactor_string(input, filename)
+-            if tree and tree.was_changed:
+-                # The [:-1] is to take off the \n we added earlier
+-                self.processed_file(str(tree)[:-1], filename, write=write)
+-            else:
+-                self.log_debug("No changes in %s", filename)
+-
+-    def refactor_string(self, data, name):
+-        """Refactor a given input string.
+-
+-        Args:
+-            data: a string holding the code to be refactored.
+-            name: a human-readable name for use in error/log messages.
+-
+-        Returns:
+-            An AST corresponding to the refactored input stream; None if
+-            there were errors during the parse.
+-        """
+-        try:
+-            tree = self.driver.parse_string(data)
+-        except Exception, err:
+-            self.log_error("Can't parse %s: %s: %s",
+-                           name, err.__class__.__name__, err)
+-            return
+-        self.log_debug("Refactoring %s", name)
+-        self.refactor_tree(tree, name)
+-        return tree
+-
+-    def refactor_stdin(self, doctests_only=False):
+-        input = sys.stdin.read()
+-        if doctests_only:
+-            self.log_debug("Refactoring doctests in stdin")
+-            output = self.refactor_docstring(input, "<stdin>")
+-            if output != input:
+-                self.processed_file(output, "<stdin>", input)
+-            else:
+-                self.log_debug("No doctest changes in stdin")
+-        else:
+-            tree = self.refactor_string(input, "<stdin>")
+-            if tree and tree.was_changed:
+-                self.processed_file(str(tree), "<stdin>", input)
+-            else:
+-                self.log_debug("No changes in stdin")
+-
+-    def refactor_tree(self, tree, name):
+-        """Refactors a parse tree (modifying the tree in place).
+-
+-        Args:
+-            tree: a pytree.Node instance representing the root of the tree
+-                  to be refactored.
+-            name: a human-readable name for this tree.
+-
+-        Returns:
+-            True if the tree was modified, False otherwise.
+-        """
+-        for fixer in chain(self.pre_order, self.post_order):
+-            fixer.start_tree(tree, name)
+-
+-        self.traverse_by(self.pre_order_heads, tree.pre_order())
+-        self.traverse_by(self.post_order_heads, tree.post_order())
+-
+-        for fixer in chain(self.pre_order, self.post_order):
+-            fixer.finish_tree(tree, name)
+-        return tree.was_changed
+-
+-    def traverse_by(self, fixers, traversal):
+-        """Traverse an AST, applying a set of fixers to each node.
+-
+-        This is a helper method for refactor_tree().
+-
+-        Args:
+-            fixers: a list of fixer instances.
+-            traversal: a generator that yields AST nodes.
+-
+-        Returns:
+-            None
+-        """
+-        if not fixers:
+-            return
+-        for node in traversal:
+-            for fixer in fixers[node.type] + fixers[None]:
+-                results = fixer.match(node)
+-                if results:
+-                    new = fixer.transform(node, results)
+-                    if new is not None and (new != node or
+-                                            str(new) != str(node)):
+-                        node.replace(new)
+-                        node = new
+-
+-    def processed_file(self, new_text, filename, old_text=None, write=False):
+-        """
+-        Called when a file has been refactored, and there are changes.
+-        """
+-        self.files.append(filename)
+-        if old_text is None:
+-            try:
+-                f = open(filename, "r")
+-            except IOError, err:
+-                self.log_error("Can't read %s: %s", filename, err)
+-                return
+-            try:
+-                old_text = f.read()
+-            finally:
+-                f.close()
+-        if old_text == new_text:
+-            self.log_debug("No changes to %s", filename)
+-            return
+-        self.print_output(diff_texts(old_text, new_text, filename))
+-        if write:
+-            self.write_file(new_text, filename, old_text)
+-        else:
+-            self.log_debug("Not writing changes to %s", filename)
+-
+-    def write_file(self, new_text, filename, old_text):
+-        """Writes a string to a file.
+-
+-        The unified diff between the old and the new text has already been
+-        shown by processed_file(), which also checks the write option; this
+-        method simply rewrites the file with the new text.
+-        """
+-        try:
+-            f = open(filename, "w")
+-        except os.error, err:
+-            self.log_error("Can't create %s: %s", filename, err)
+-            return
+-        try:
+-            f.write(new_text)
+-        except os.error, err:
+-            self.log_error("Can't write %s: %s", filename, err)
+-        finally:
+-            f.close()
+-        self.log_debug("Wrote changes to %s", filename)
+-        self.wrote = True
+-
+-    PS1 = ">>> "
+-    PS2 = "... "
+-
+-    def refactor_docstring(self, input, filename):
+-        """Refactors a docstring, looking for doctests.
+-
+-        This returns a modified version of the input string.  It looks
+-        for doctests, which start with a ">>>" prompt, and may be
+-        continued with "..." prompts, as long as the "..." is indented
+-        the same as the ">>>".
+-
+-        (Unfortunately we can't use the doctest module's parser,
+-        since, like most parsers, it is not geared towards preserving
+-        the original source.)
+-        """
+-        result = []
+-        block = None
+-        block_lineno = None
+-        indent = None
+-        lineno = 0
+-        for line in input.splitlines(True):
+-            lineno += 1
+-            if line.lstrip().startswith(self.PS1):
+-                if block is not None:
+-                    result.extend(self.refactor_doctest(block, block_lineno,
+-                                                        indent, filename))
+-                block_lineno = lineno
+-                block = [line]
+-                i = line.find(self.PS1)
+-                indent = line[:i]
+-            elif (indent is not None and
+-                  (line.startswith(indent + self.PS2) or
+-                   line == indent + self.PS2.rstrip() + "\n")):
+-                block.append(line)
+-            else:
+-                if block is not None:
+-                    result.extend(self.refactor_doctest(block, block_lineno,
+-                                                        indent, filename))
+-                block = None
+-                indent = None
+-                result.append(line)
+-        if block is not None:
+-            result.extend(self.refactor_doctest(block, block_lineno,
+-                                                indent, filename))
+-        return "".join(result)
+-
+-    def refactor_doctest(self, block, lineno, indent, filename):
+-        """Refactors one doctest.
+-
+-        A doctest is given as a block of lines, the first of which starts
+-        with ">>>" (possibly indented), while the remaining lines start
+-        with "..." (identically indented).
+-
+-        """
+-        try:
+-            tree = self.parse_block(block, lineno, indent)
+-        except Exception, err:
+-            if self.logger.isEnabledFor(logging.DEBUG):
+-                for line in block:
+-                    self.log_debug("Source: %s", line.rstrip("\n"))
+-            self.log_error("Can't parse docstring in %s line %s: %s: %s",
+-                           filename, lineno, err.__class__.__name__, err)
+-            return block
+-        if self.refactor_tree(tree, filename):
+-            new = str(tree).splitlines(True)
+-            # Undo the adjustment of the line numbers in wrap_toks() below.
+-            clipped, new = new[:lineno-1], new[lineno-1:]
+-            assert clipped == ["\n"] * (lineno-1), clipped
+-            if not new[-1].endswith("\n"):
+-                new[-1] += "\n"
+-            block = [indent + self.PS1 + new.pop(0)]
+-            if new:
+-                block += [indent + self.PS2 + line for line in new]
+-        return block
+-
+-    def summarize(self):
+-        if self.wrote:
+-            were = "were"
+-        else:
+-            were = "need to be"
+-        if not self.files:
+-            self.log_message("No files %s modified.", were)
+-        else:
+-            self.log_message("Files that %s modified:", were)
+-            for file in self.files:
+-                self.log_message(file)
+-        if self.fixer_log:
+-            self.log_message("Warnings/messages while refactoring:")
+-            for message in self.fixer_log:
+-                self.log_message(message)
+-        if self.errors:
+-            if len(self.errors) == 1:
+-                self.log_message("There was 1 error:")
+-            else:
+-                self.log_message("There were %d errors:", len(self.errors))
+-            for msg, args, kwds in self.errors:
+-                self.log_message(msg, *args, **kwds)
+-
+-    def parse_block(self, block, lineno, indent):
+-        """Parses a block into a tree.
+-
+-        This is necessary to get correct line number / offset information
+-        in the parser diagnostics and embedded into the parse tree.
+-        """
+-        return self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
+-
+-    def wrap_toks(self, block, lineno, indent):
+-        """Wraps a tokenize stream to systematically modify start/end."""
+-        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
+-        for type, value, (line0, col0), (line1, col1), line_text in tokens:
+-            line0 += lineno - 1
+-            line1 += lineno - 1
+-            # Don't bother updating the columns; this is too complicated
+-            # since line_text would also have to be updated and it would
+-            # still break for tokens spanning lines.  Let the user guess
+-            # that the column numbers for doctests are relative to the
+-            # end of the prompt string (PS1 or PS2).
+-            yield type, value, (line0, col0), (line1, col1), line_text
+-
+-
+-    def gen_lines(self, block, indent):
+-        """Generates lines as expected by tokenize from a list of lines.
+-
+-        This strips the first len(indent + self.PS1) characters off each line.
+-        """
+-        prefix1 = indent + self.PS1
+-        prefix2 = indent + self.PS2
+-        prefix = prefix1
+-        for line in block:
+-            if line.startswith(prefix):
+-                yield line[len(prefix):]
+-            elif line == prefix.rstrip() + "\n":
+-                yield "\n"
+-            else:
+-                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
+-            prefix = prefix2
+-        while True:
+-            yield ""
+-
+-
+-def diff_texts(a, b, filename):
+-    """Return a unified diff of two strings."""
+-    a = a.splitlines()
+-    b = b.splitlines()
+-    return difflib.unified_diff(a, b, filename, filename,
+-                                "(original)", "(refactored)",
+-                                lineterm="")
+diff -r 531f2e948299 lib2to3/tests/.svn/entries
+--- a/lib2to3/tests/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,7 @@
+ 9
+ 
+ dir
+-70785
++70822
+ http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/tests
+ http://svn.python.org/projects
+ 
+diff -r 531f2e948299 lib2to3/tests/data/.svn/entries
+--- a/lib2to3/tests/data/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,7 @@
+ 9
+ 
+ dir
+-70785
++70822
+ http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/tests/data
+ http://svn.python.org/projects
+ 
+diff -r 531f2e948299 lib2to3/tests/data/fixers/.svn/entries
+--- a/lib2to3/tests/data/fixers/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,7 @@
+ 9
+ 
+ dir
+-70785
++70822
+ http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/tests/data/fixers
+ http://svn.python.org/projects
+ 
+diff -r 531f2e948299 lib2to3/tests/data/fixers/bad_order.py
+--- a/lib2to3/tests/data/fixers/bad_order.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/bad_order.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,4 +1,4 @@
+-from lib2to3.fixer_base import BaseFix
++from refactor.fixer_base import BaseFix
+ 
+ class FixBadOrder(BaseFix):
+ 
+diff -r 531f2e948299 lib2to3/tests/data/fixers/myfixes/.svn/entries
+--- a/lib2to3/tests/data/fixers/myfixes/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/myfixes/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,7 @@
+ 9
+ 
+ dir
+-70785
++70822
+ http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/tests/data/fixers/myfixes
+ http://svn.python.org/projects
+ 
+diff -r 531f2e948299 lib2to3/tests/data/fixers/myfixes/fix_explicit.py
+--- a/lib2to3/tests/data/fixers/myfixes/fix_explicit.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/myfixes/fix_explicit.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,4 +1,4 @@
+-from lib2to3.fixer_base import BaseFix
++from refactor.fixer_base import BaseFix
+ 
+ class FixExplicit(BaseFix):
+     explicit = True
+diff -r 531f2e948299 lib2to3/tests/data/fixers/myfixes/fix_first.py
+--- a/lib2to3/tests/data/fixers/myfixes/fix_first.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/myfixes/fix_first.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,4 +1,4 @@
+-from lib2to3.fixer_base import BaseFix
++from refactor.fixer_base import BaseFix
+ 
+ class FixFirst(BaseFix):
+     run_order = 1
+diff -r 531f2e948299 lib2to3/tests/data/fixers/myfixes/fix_last.py
+--- a/lib2to3/tests/data/fixers/myfixes/fix_last.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/myfixes/fix_last.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,4 +1,4 @@
+-from lib2to3.fixer_base import BaseFix
++from refactor.fixer_base import BaseFix
+ 
+ class FixLast(BaseFix):
+ 
+diff -r 531f2e948299 lib2to3/tests/data/fixers/myfixes/fix_parrot.py
+--- a/lib2to3/tests/data/fixers/myfixes/fix_parrot.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/myfixes/fix_parrot.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,5 +1,5 @@
+-from lib2to3.fixer_base import BaseFix
+-from lib2to3.fixer_util import Name
++from refactor.fixer_base import BaseFix
++from refactor.fixer_util import Name
+ 
+ class FixParrot(BaseFix):
+     """
+diff -r 531f2e948299 lib2to3/tests/data/fixers/myfixes/fix_preorder.py
+--- a/lib2to3/tests/data/fixers/myfixes/fix_preorder.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/data/fixers/myfixes/fix_preorder.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,4 +1,4 @@
+-from lib2to3.fixer_base import BaseFix
++from refactor.fixer_base import BaseFix
+ 
+ class FixPreorder(BaseFix):
+     order = "pre"
+diff -r 531f2e948299 lib2to3/tests/support.py
+--- a/lib2to3/tests/support.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/support.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,5 +1,5 @@
+ """Support code for test_*.py files"""
+-# Author: Collin Winter
++# Original Author: Collin Winter
+ 
+ # Python imports
+ import unittest
+@@ -16,12 +16,26 @@
+ from .. import refactor
+ from ..pgen2 import driver
+ 
++test_pkg = "refactor.fixes"
+ test_dir = os.path.dirname(__file__)
+ proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
+ grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
+ grammar = driver.load_grammar(grammar_path)
+ driver = driver.Driver(grammar, convert=pytree.convert)
+ 
++def parse_version(version_string):
++    """Returns a version tuple matching input version_string."""
++    if not version_string:
++        return ()
++
++    version_list = []
++    for token in version_string.split('.'):
++        try:
++            version_list.append(int(token))
++        except ValueError:
++            version_list.append(token)
++    return tuple(version_list)
++
+ def parse_string(string):
+     return driver.parse_string(reformat(string), debug=True)
+ 
+@@ -39,18 +53,19 @@
+ def reformat(string):
+     return dedent(string) + "\n\n"
+ 
+-def get_refactorer(fixers=None, options=None):
++def get_refactorer(fixers=None, options=None, pkg_name=None):
+     """
+     A convenience function for creating a RefactoringTool for tests.
+ 
+     fixers is a list of fixers for the RefactoringTool to use. By default
+-    "lib2to3.fixes.*" is used. options is an optional dictionary of options to
++    "refactor.fixes.*" is used. options is an optional dictionary of options to
+     be passed to the RefactoringTool.
+     """
++    pkg_name = pkg_name or test_pkg
+     if fixers is not None:
+-        fixers = ["lib2to3.fixes.fix_" + fix for fix in fixers]
++        fixers = [pkg_name + ".fix_" + fix for fix in fixers]
+     else:
+-        fixers = refactor.get_fixers_from_package("lib2to3.fixes")
++        fixers = refactor.get_fixers_from_package(pkg_name)
+     options = options or {}
+     return refactor.RefactoringTool(fixers, options, explicit=True)
+ 
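The parse_version() helper added above deserves a quick illustration. From
the function body, numeric tokens become ints and anything non-numeric is
kept as a string (the values below follow directly from the code, not from
the commit):

    parse_version("2.6.1")   # -> (2, 6, 1)
    parse_version("3.1a1")   # -> (3, '1a1')
    parse_version("")        # -> ()
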
+diff -r 531f2e948299 lib2to3/tests/test_fixers.py
+--- a/lib2to3/tests/test_fixers.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/test_fixers.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -23,7 +23,7 @@
+         if fix_list is None:
+             fix_list = [self.fixer]
+         options = {"print_function" : False}
+-        self.refactor = support.get_refactorer(fix_list, options)
++        self.refactor = support.get_refactorer(fix_list, options, "refactor.fixes.from2")
+         self.fixer_log = []
+         self.filename = "<string>"
+ 
+@@ -1625,7 +1625,7 @@
+ 
+ class Test_imports(FixerTestCase, ImportsFixerTests):
+     fixer = "imports"
+-    from ..fixes.fix_imports import MAPPING as modules
++    from refactor.fixes.from2.fix_imports import MAPPING as modules
+ 
+     def test_multiple_imports(self):
+         b = """import urlparse, cStringIO"""
+@@ -1646,23 +1646,23 @@
+ 
+ class Test_imports2(FixerTestCase, ImportsFixerTests):
+     fixer = "imports2"
+-    from ..fixes.fix_imports2 import MAPPING as modules
++    from refactor.fixes.from2.fix_imports2 import MAPPING as modules
+ 
+ 
+ class Test_imports_fixer_order(FixerTestCase, ImportsFixerTests):
+ 
+     def setUp(self):
+         super(Test_imports_fixer_order, self).setUp(['imports', 'imports2'])
+-        from ..fixes.fix_imports2 import MAPPING as mapping2
++        from refactor.fixes.from2.fix_imports2 import MAPPING as mapping2
+         self.modules = mapping2.copy()
+-        from ..fixes.fix_imports import MAPPING as mapping1
++        from refactor.fixes.from2.fix_imports import MAPPING as mapping1
+         for key in ('dbhash', 'dumbdbm', 'dbm', 'gdbm'):
+             self.modules[key] = mapping1[key]
+ 
+ 
+ class Test_urllib(FixerTestCase):
+     fixer = "urllib"
+-    from ..fixes.fix_urllib import MAPPING as modules
++    from refactor.fixes.from2.fix_urllib import MAPPING as modules
+ 
+     def test_import_module(self):
+         for old, changes in self.modules.items():
+@@ -3449,7 +3449,7 @@
+             self.files_checked.append(name)
+             return self.always_exists or (name in self.present_files)
+ 
+-        from ..fixes import fix_import
++        from refactor.fixes.from2 import fix_import
+         fix_import.exists = fake_exists
+ 
+     def tearDown(self):
+diff -r 531f2e948299 lib2to3/tests/test_parser.py
+--- a/lib2to3/tests/test_parser.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/test_parser.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -17,7 +17,7 @@
+ import os.path
+ 
+ # Local imports
+-from ..pgen2.parse import ParseError
++from refactor.pgen2.parse import ParseError
+ 
+ 
+ class GrammarTest(support.TestCase):
+diff -r 531f2e948299 lib2to3/tests/test_util.py
+--- a/lib2to3/tests/test_util.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/lib2to3/tests/test_util.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -11,7 +11,7 @@
+ # Local imports
+ from .. import pytree
+ from .. import fixer_util
+-from ..fixer_util import Attr, Name
++from refactor.fixer_util import Attr, Name
+ 
+ 
+ def parse(code, strip_levels=0):
+diff -r 531f2e948299 refactor/.svn/all-wcprops
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/all-wcprops	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,65 @@
++K 25
++svn:wc:ra_dav:version-url
++V 51
++/projects/!svn/ver/69681/sandbox/trunk/2to3/lib2to3
++END
++pytree.py
++K 25
++svn:wc:ra_dav:version-url
++V 61
++/projects/!svn/ver/69680/sandbox/trunk/2to3/lib2to3/pytree.py
++END
++fixer_util.py
++K 25
++svn:wc:ra_dav:version-url
++V 65
++/projects/!svn/ver/69679/sandbox/trunk/2to3/lib2to3/fixer_util.py
++END
++PatternGrammar.txt
++K 25
++svn:wc:ra_dav:version-url
++V 70
++/projects/!svn/ver/61428/sandbox/trunk/2to3/lib2to3/PatternGrammar.txt
++END
++Grammar.txt
++K 25
++svn:wc:ra_dav:version-url
++V 63
++/projects/!svn/ver/66191/sandbox/trunk/2to3/lib2to3/Grammar.txt
++END
++__init__.py
++K 25
++svn:wc:ra_dav:version-url
++V 63
++/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/__init__.py
++END
++pygram.py
++K 25
++svn:wc:ra_dav:version-url
++V 61
++/projects/!svn/ver/67769/sandbox/trunk/2to3/lib2to3/pygram.py
++END
++patcomp.py
++K 25
++svn:wc:ra_dav:version-url
++V 62
++/projects/!svn/ver/69681/sandbox/trunk/2to3/lib2to3/patcomp.py
++END
++main.py
++K 25
++svn:wc:ra_dav:version-url
++V 59
++/projects/!svn/ver/67919/sandbox/trunk/2to3/lib2to3/main.py
++END
++refactor.py
++K 25
++svn:wc:ra_dav:version-url
++V 63
++/projects/!svn/ver/67991/sandbox/trunk/2to3/lib2to3/refactor.py
++END
++fixer_base.py
++K 25
++svn:wc:ra_dav:version-url
++V 65
++/projects/!svn/ver/67769/sandbox/trunk/2to3/lib2to3/fixer_base.py
++END
+diff -r 531f2e948299 refactor/.svn/dir-prop-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/dir-prop-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 10
++svn:ignore
++V 24
++*.pyc
++*.pyo
++*.pickle
++@*
++
++END
+diff -r 531f2e948299 refactor/.svn/entries
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,377 @@
++9
++
++dir
++70822
++http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3
++http://svn.python.org/projects
++
++
++
++2009-02-16T17:43:09.878955Z
++69681
++benjamin.peterson
++has-props
++
++svn:special svn:externals svn:needs-lock
++
++
++
++
++
++
++
++
++
++
++
++6015fed2-1504-0410-9fe1-9d1591cc4771
++
++pytree.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++04e7d771f65eaa3bd221cb8451652cad
++2009-02-16T17:41:48.036309Z
++69680
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++27575
++
++fixer_util.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++ceed30974a584bd7ca461077b735aa61
++2009-02-16T17:36:06.789054Z
++69679
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++14348
++
++tests
++dir
++
++PatternGrammar.txt
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++4b47e92dafaedf0ea24c8097b65797c4
++2008-03-16T19:36:15.363093Z
++61428
++martin.v.loewis
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++793
++
++Grammar.txt
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++a6aa611a634ddccccf9fef17bbbbeadb
++2008-09-03T22:00:52.351755Z
++66191
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++6331
++
++__init__.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++191142a35d9dceef524b32c6d9676e51
++2008-11-28T23:18:48.744865Z
++67433
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++7
++
++pygram.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++3a20d757a7db6a30ec477352f7c9cf6a
++2008-12-14T20:59:10.846867Z
++67769
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++774
++
++patcomp.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++e1a5a1fa70f5b518e4dfe6abb24adf1a
++2009-02-16T17:43:09.878955Z
++69681
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++6529
++
++pgen2
++dir
++
++main.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++85da2f9b910a7b8af322a872949da4e1
++2008-12-23T19:12:22.717389Z
++67919
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++4921
++
++refactor.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++93cfbef5d9bcae247382e78984d5b8e5
++2008-12-28T20:30:26.284113Z
++67991
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++19094
++
++fixes
++dir
++
++fixer_base.py
++file
++
++
++
++
++2009-03-31T00:29:37.000000Z
++decf389028d0e267eb33ff8a0a69285c
++2008-12-14T20:59:10.846867Z
++67769
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++6215
++
+diff -r 531f2e948299 refactor/.svn/format
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/format	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,1 @@
++9
+diff -r 531f2e948299 refactor/.svn/prop-base/Grammar.txt.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/Grammar.txt.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 23
++Author Date Id Revision
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/PatternGrammar.txt.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/PatternGrammar.txt.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 23
++Author Date Id Revision
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/__init__.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/__init__.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,5 @@
++K 13
++svn:eol-style
++V 6
++native
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/fixer_base.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/fixer_base.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/fixer_util.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/fixer_util.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/main.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/main.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/patcomp.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/patcomp.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/pygram.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/pygram.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/pytree.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/pytree.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/.svn/prop-base/refactor.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/prop-base/refactor.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,13 @@
++K 13
++svn:eol-style
++V 6
++native
++K 14
++svn:executable
++V 1
++*
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/.svn/text-base/Grammar.txt.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/Grammar.txt.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,155 @@
++# Grammar for Python
++
++# Note:  Changing the grammar specified in this file will most likely
++#        require corresponding changes in the parser module
++#        (../Modules/parsermodule.c).  If you can't make the changes to
++#        that module yourself, please co-ordinate the required changes
++#        with someone who can; ask around on python-dev for help.  Fred
++#        Drake <fdrake at acm.org> will probably be listening there.
++
++# NOTE WELL: You should also follow all the steps listed in PEP 306,
++# "How to Change Python's Grammar"
++
++# Commands for Kees Blom's railroad program
++#diagram:token NAME
++#diagram:token NUMBER
++#diagram:token STRING
++#diagram:token NEWLINE
++#diagram:token ENDMARKER
++#diagram:token INDENT
++#diagram:output\input python.bla
++#diagram:token DEDENT
++#diagram:output\textwidth 20.04cm\oddsidemargin  0.0cm\evensidemargin 0.0cm
++#diagram:rules
++
++# Start symbols for the grammar:
++#	file_input is a module or sequence of commands read from an input file;
++#	single_input is a single interactive statement;
++#	eval_input is the input for the eval() and input() functions.
++# NB: compound_stmt in single_input is followed by extra NEWLINE!
++file_input: (NEWLINE | stmt)* ENDMARKER
++single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
++eval_input: testlist NEWLINE* ENDMARKER
++
++decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++decorators: decorator+
++decorated: decorators (classdef | funcdef)
++funcdef: 'def' NAME parameters ['->' test] ':' suite
++parameters: '(' [typedargslist] ')'
++typedargslist: ((tfpdef ['=' test] ',')*
++                ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname)
++                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
++tname: NAME [':' test]
++tfpdef: tname | '(' tfplist ')'
++tfplist: tfpdef (',' tfpdef)* [',']
++varargslist: ((vfpdef ['=' test] ',')*
++              ('*' [vname] (',' vname ['=' test])*  [',' '**' vname] | '**' vname)
++              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
++vname: NAME
++vfpdef: vname | '(' vfplist ')'
++vfplist: vfpdef (',' vfpdef)* [',']
++
++stmt: simple_stmt | compound_stmt
++simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
++small_stmt: (expr_stmt | print_stmt  | del_stmt | pass_stmt | flow_stmt |
++             import_stmt | global_stmt | exec_stmt | assert_stmt)
++expr_stmt: testlist (augassign (yield_expr|testlist) |
++                     ('=' (yield_expr|testlist))*)
++augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
++            '<<=' | '>>=' | '**=' | '//=')
++# For normal assignments, additional restrictions enforced by the interpreter
++print_stmt: 'print' ( [ test (',' test)* [','] ] |
++                      '>>' test [ (',' test)+ [','] ] )
++del_stmt: 'del' exprlist
++pass_stmt: 'pass'
++flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
++break_stmt: 'break'
++continue_stmt: 'continue'
++return_stmt: 'return' [testlist]
++yield_stmt: yield_expr
++raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
++import_stmt: import_name | import_from
++import_name: 'import' dotted_as_names
++import_from: ('from' ('.'* dotted_name | '.'+)
++              'import' ('*' | '(' import_as_names ')' | import_as_names))
++import_as_name: NAME ['as' NAME]
++dotted_as_name: dotted_name ['as' NAME]
++import_as_names: import_as_name (',' import_as_name)* [',']
++dotted_as_names: dotted_as_name (',' dotted_as_name)*
++dotted_name: NAME ('.' NAME)*
++global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
++exec_stmt: 'exec' expr ['in' test [',' test]]
++assert_stmt: 'assert' test [',' test]
++
++compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
++if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
++while_stmt: 'while' test ':' suite ['else' ':' suite]
++for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
++try_stmt: ('try' ':' suite
++           ((except_clause ':' suite)+
++	    ['else' ':' suite]
++	    ['finally' ':' suite] |
++	   'finally' ':' suite))
++with_stmt: 'with' test [ with_var ] ':' suite
++with_var: 'as' expr
++# NB compile.c makes sure that the default except clause is last
++except_clause: 'except' [test [(',' | 'as') test]]
++suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
++
++# Backward compatibility cruft to support:
++# [ x for x in lambda: True, lambda: False if x() ]
++# even while also allowing:
++# lambda x: 5 if x else 2
++# (But not a mix of the two)
++testlist_safe: old_test [(',' old_test)+ [',']]
++old_test: or_test | old_lambdef
++old_lambdef: 'lambda' [varargslist] ':' old_test
++
++test: or_test ['if' or_test 'else' test] | lambdef
++or_test: and_test ('or' and_test)*
++and_test: not_test ('and' not_test)*
++not_test: 'not' not_test | comparison
++comparison: expr (comp_op expr)*
++comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
++expr: xor_expr ('|' xor_expr)*
++xor_expr: and_expr ('^' and_expr)*
++and_expr: shift_expr ('&' shift_expr)*
++shift_expr: arith_expr (('<<'|'>>') arith_expr)*
++arith_expr: term (('+'|'-') term)*
++term: factor (('*'|'/'|'%'|'//') factor)*
++factor: ('+'|'-'|'~') factor | power
++power: atom trailer* ['**' factor]
++atom: ('(' [yield_expr|testlist_gexp] ')' |
++       '[' [listmaker] ']' |
++       '{' [dictsetmaker] '}' |
++       '`' testlist1 '`' |
++       NAME | NUMBER | STRING+ | '.' '.' '.')
++listmaker: test ( comp_for | (',' test)* [','] )
++testlist_gexp: test ( comp_for | (',' test)* [','] )
++lambdef: 'lambda' [varargslist] ':' test
++trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
++subscriptlist: subscript (',' subscript)* [',']
++subscript: test | [test] ':' [test] [sliceop]
++sliceop: ':' [test]
++exprlist: expr (',' expr)* [',']
++testlist: test (',' test)* [',']
++dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
++                (test (comp_for | (',' test)* [','])) )
++
++classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
++
++arglist: (argument ',')* (argument [',']
++                         |'*' test (',' argument)* [',' '**' test] 
++                         |'**' test)
++argument: test [comp_for] | test '=' test  # Really [keyword '='] test
++
++comp_iter: comp_for | comp_if
++comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
++comp_if: 'if' old_test [comp_iter]
++
++testlist1: test (',' test)*
++
++# not used in grammar, but may appear in "node" passed from Parser to Compiler
++encoding_decl: NAME
++
++yield_expr: 'yield' [testlist]
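Two worked examples may help when reading the raise_stmt rule above; it
deliberately accepts both the 2.x comma form and the 3.x chained form (the
exception names are illustrative):

    raise ValueError, "msg"            # 'raise' test ',' test
    raise ValueError("msg") from exc   # 'raise' test 'from' test
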
+diff -r 531f2e948299 refactor/.svn/text-base/PatternGrammar.txt.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/PatternGrammar.txt.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,28 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++# A grammar to describe tree matching patterns.
++# Not shown here:
++# - 'TOKEN' stands for any token (leaf node)
++# - 'any' stands for any node (leaf or interior)
++# With 'any' we can still specify the sub-structure.
++
++# The start symbol is 'Matcher'.
++
++Matcher: Alternatives ENDMARKER
++
++Alternatives: Alternative ('|' Alternative)*
++
++Alternative: (Unit | NegatedUnit)+
++
++Unit: [NAME '='] ( STRING [Repeater]
++                 | NAME [Details] [Repeater]
++                 | '(' Alternatives ')' [Repeater]
++                 | '[' Alternatives ']'
++		 )
++
++NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
++
++Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
++
++Details: '<' Alternatives '>'
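Concrete patterns written against this grammar appear later in this diff
(the p0/p1/p2 strings in fixer_util.py); one representative pattern, which
binds a sub-node to the name "node":

    power<
        'sorted'
        trailer< '(' arglist<node=any any*> ')' >
        any*
    >
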
+diff -r 531f2e948299 refactor/.svn/text-base/__init__.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/__init__.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,1 @@
++#empty
+diff -r 531f2e948299 refactor/.svn/text-base/fixer_base.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/fixer_base.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,178 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Base class for fixers (optional, but recommended)."""
++
++# Python imports
++import logging
++import itertools
++
++# Local imports
++from .patcomp import PatternCompiler
++from . import pygram
++from .fixer_util import does_tree_import
++
++class BaseFix(object):
++
++    """Optional base class for fixers.
++
++    The subclass name must be FixFooBar where FooBar is the result of
++    removing underscores and capitalizing the words of the fix name.
++    For example, the class name for a fixer named 'has_key' should be
++    FixHasKey.
++    """
++
++    PATTERN = None  # Most subclasses should override with a string literal
++    pattern = None  # Compiled pattern, set by compile_pattern()
++    options = None  # Options object passed to initializer
++    filename = None # The filename (set by set_filename)
++    logger = None   # A logger (set by set_filename)
++    numbers = itertools.count(1) # For new_name()
++    used_names = set() # A set of all used NAMEs
++    order = "post" # Does the fixer prefer pre- or post-order traversal
++    explicit = False # Is this ignored by refactor.py -f all?
++    run_order = 5   # Fixers will be sorted by run order before execution
++                    # Lower numbers will be run first.
++
++    # Shortcut for access to Python grammar symbols
++    syms = pygram.python_symbols
++
++    def __init__(self, options, log):
++        """Initializer.  Subclass may override.
++
++        Args:
++            options: a dict containing the options passed to RefactoringTool
++            that could be used to customize the fixer through the command line.
++            log: a list to append warnings and other messages to.
++        """
++        self.options = options
++        self.log = log
++        self.compile_pattern()
++
++    def compile_pattern(self):
++        """Compiles self.PATTERN into self.pattern.
++
++        Subclass may override if it doesn't want to use
++        self.{pattern,PATTERN} in .match().
++        """
++        if self.PATTERN is not None:
++            self.pattern = PatternCompiler().compile_pattern(self.PATTERN)
++
++    def set_filename(self, filename):
++        """Set the filename, and a logger derived from it.
++
++        The main refactoring tool should call this.
++        """
++        self.filename = filename
++        self.logger = logging.getLogger(filename)
++
++    def match(self, node):
++        """Returns match for a given parse tree node.
++
++        Should return a true or false object (not necessarily a bool).
++        It may return a non-empty dict of matching sub-nodes as
++        returned by a matching pattern.
++
++        Subclass may override.
++        """
++        results = {"node": node}
++        return self.pattern.match(node, results) and results
++
++    def transform(self, node, results):
++        """Returns the transformation for a given parse tree node.
++
++        Args:
++          node: the root of the parse tree that matched the fixer.
++          results: a dict mapping symbolic names to part of the match.
++
++        Returns:
++          None, or a node that is a modified copy of the
++          argument node.  The node argument may also be modified in-place to
++          effect the same change.
++
++        Subclass *must* override.
++        """
++        raise NotImplementedError()
++
++    def new_name(self, template="xxx_todo_changeme"):
++        """Return a string suitable for use as an identifier
++
++        The new name is guaranteed not to conflict with other identifiers.
++        """
++        name = template
++        while name in self.used_names:
++            name = template + str(self.numbers.next())
++        self.used_names.add(name)
++        return name
++
++    def log_message(self, message):
++        if self.first_log:
++            self.first_log = False
++            self.log.append("### In file %s ###" % self.filename)
++        self.log.append(message)
++
++    def cannot_convert(self, node, reason=None):
++        """Warn the user that a given chunk of code is not valid Python 3,
++        but that it cannot be converted automatically.
++
++        First argument is the top-level node for the code in question.
++        Optional second argument is why it can't be converted.
++        """
++        lineno = node.get_lineno()
++        for_output = node.clone()
++        for_output.set_prefix("")
++        msg = "Line %d: could not convert: %s"
++        self.log_message(msg % (lineno, for_output))
++        if reason:
++            self.log_message(reason)
++
++    def warning(self, node, reason):
++        """Used for warning the user about possible uncertainty in the
++        translation.
++
++        First argument is the top-level node for the code in question.
++        Second argument is the reason for the warning.
++        """
++        lineno = node.get_lineno()
++        self.log_message("Line %d: %s" % (lineno, reason))
++
++    def start_tree(self, tree, filename):
++        """Some fixers need to maintain tree-wide state.
++        This method is called once, at the start of tree fix-up.
++
++        tree - the root node of the tree to be processed.
++        filename - the name of the file the tree came from.
++        """
++        self.used_names = tree.used_names
++        self.set_filename(filename)
++        self.numbers = itertools.count(1)
++        self.first_log = True
++
++    def finish_tree(self, tree, filename):
++        """Some fixers need to maintain tree-wide state.
++        This method is called once, at the conclusion of tree fix-up.
++
++        tree - the root node of the tree to be processed.
++        filename - the name of the file the tree came from.
++        """
++        pass
++
++
++class ConditionalFix(BaseFix):
++    """ Base class for fixers which not execute if an import is found. """
++
++    # This is the name of the import which, if found, will cause the test to be skipped
++    skip_on = None
++
++    def start_tree(self, *args):
++        super(ConditionalFix, self).start_tree(*args)
++        self._should_skip = None
++
++    def should_skip(self, node):
++        if self._should_skip is not None:
++            return self._should_skip
++        pkg = self.skip_on.split(".")
++        name = pkg[-1]
++        pkg = ".".join(pkg[:-1])
++        self._should_skip = does_tree_import(pkg, name, node)
++        return self._should_skip
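For orientation, here is a minimal fixer built on BaseFix, in the spirit of
the myfixes/fix_parrot.py test fixer touched earlier in this diff (the class
and names are illustrative, not part of the commit):

    from refactor.fixer_base import BaseFix
    from refactor.fixer_util import Name

    class FixParrot(BaseFix):
        """Rewrites parrot(...) calls to cheese(...)."""
        PATTERN = "power< name='parrot' trailer< '(' any* ')' > >"

        def transform(self, node, results):
            # Modify the tree in place; returning None is allowed when the
            # argument node is changed in place (see transform() above).
            name = results["name"]
            name.replace(Name("cheese", prefix=name.get_prefix()))
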
+diff -r 531f2e948299 refactor/.svn/text-base/fixer_util.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/fixer_util.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,425 @@
++"""Utility functions, node construction macros, etc."""
++# Author: Collin Winter
++
++# Local imports
++from .pgen2 import token
++from .pytree import Leaf, Node
++from .pygram import python_symbols as syms
++from . import patcomp
++
++
++###########################################################
++### Common node-construction "macros"
++###########################################################
++
++def KeywordArg(keyword, value):
++    return Node(syms.argument,
++                [keyword, Leaf(token.EQUAL, '='), value])
++
++def LParen():
++    return Leaf(token.LPAR, "(")
++
++def RParen():
++    return Leaf(token.RPAR, ")")
++
++def Assign(target, source):
++    """Build an assignment statement"""
++    if not isinstance(target, list):
++        target = [target]
++    if not isinstance(source, list):
++        source.set_prefix(" ")
++        source = [source]
++
++    return Node(syms.atom,
++                target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
++
++def Name(name, prefix=None):
++    """Return a NAME leaf"""
++    return Leaf(token.NAME, name, prefix=prefix)
++
++def Attr(obj, attr):
++    """A node tuple for obj.attr"""
++    return [obj, Node(syms.trailer, [Dot(), attr])]
++
++def Comma():
++    """A comma leaf"""
++    return Leaf(token.COMMA, ",")
++
++def Dot():
++    """A period (.) leaf"""
++    return Leaf(token.DOT, ".")
++
++def ArgList(args, lparen=LParen(), rparen=RParen()):
++    """A parenthesised argument list, used by Call()"""
++    node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
++    if args:
++        node.insert_child(1, Node(syms.arglist, args))
++    return node
++
++def Call(func_name, args=None, prefix=None):
++    """A function call"""
++    node = Node(syms.power, [func_name, ArgList(args)])
++    if prefix is not None:
++        node.set_prefix(prefix)
++    return node
++
++def Newline():
++    """A newline literal"""
++    return Leaf(token.NEWLINE, "\n")
++
++def BlankLine():
++    """A blank line"""
++    return Leaf(token.NEWLINE, "")
++
++def Number(n, prefix=None):
++    return Leaf(token.NUMBER, n, prefix=prefix)
++
++def Subscript(index_node):
++    """A numeric or string subscript"""
++    return Node(syms.trailer, [Leaf(token.LBRACE, '['),
++                               index_node,
++                               Leaf(token.RBRACE, ']')])
++
++def String(string, prefix=None):
++    """A string leaf"""
++    return Leaf(token.STRING, string, prefix=prefix)
++
++def ListComp(xp, fp, it, test=None):
++    """A list comprehension of the form [xp for fp in it if test].
++
++    If test is None, the "if test" part is omitted.
++    """
++    xp.set_prefix("")
++    fp.set_prefix(" ")
++    it.set_prefix(" ")
++    for_leaf = Leaf(token.NAME, "for")
++    for_leaf.set_prefix(" ")
++    in_leaf = Leaf(token.NAME, "in")
++    in_leaf.set_prefix(" ")
++    inner_args = [for_leaf, fp, in_leaf, it]
++    if test:
++        test.set_prefix(" ")
++        if_leaf = Leaf(token.NAME, "if")
++        if_leaf.set_prefix(" ")
++        inner_args.append(Node(syms.comp_if, [if_leaf, test]))
++    inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
++    return Node(syms.atom,
++                       [Leaf(token.LBRACE, "["),
++                        inner,
++                        Leaf(token.RBRACE, "]")])
++
++def FromImport(package_name, name_leafs):
++    """ Return an import statement in the form:
++        from package import name_leafs"""
++    # XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
++    #assert package_name == '.' or '.' not in package_name, "FromImport has "\
++    #       "not been tested with dotted package names -- use at your own "\
++    #       "peril!"
++
++    for leaf in name_leafs:
++        # Pull the leaves out of their old tree
++        leaf.remove()
++
++    children = [Leaf(token.NAME, 'from'),
++                Leaf(token.NAME, package_name, prefix=" "),
++                Leaf(token.NAME, 'import', prefix=" "),
++                Node(syms.import_as_names, name_leafs)]
++    imp = Node(syms.import_from, children)
++    return imp
++
++
++###########################################################
++### Determine whether a node represents a given literal
++###########################################################
++
++def is_tuple(node):
++    """Does the node represent a tuple literal?"""
++    if isinstance(node, Node) and node.children == [LParen(), RParen()]:
++        return True
++    return (isinstance(node, Node)
++            and len(node.children) == 3
++            and isinstance(node.children[0], Leaf)
++            and isinstance(node.children[1], Node)
++            and isinstance(node.children[2], Leaf)
++            and node.children[0].value == "("
++            and node.children[2].value == ")")
++
++def is_list(node):
++    """Does the node represent a list literal?"""
++    return (isinstance(node, Node)
++            and len(node.children) > 1
++            and isinstance(node.children[0], Leaf)
++            and isinstance(node.children[-1], Leaf)
++            and node.children[0].value == "["
++            and node.children[-1].value == "]")
++
++
++###########################################################
++### Misc
++###########################################################
++
++def parenthesize(node):
++    return Node(syms.atom, [LParen(), node, RParen()])
++
++
++consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
++                       "min", "max"])
++
++def attr_chain(obj, attr):
++    """Follow an attribute chain.
++
++    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
++    use this to iterate over all objects in the chain. Iteration is
++    terminated when getattr(x, attr) is None.
++
++    Args:
++        obj: the starting object
++        attr: the name of the chaining attribute
++
++    Yields:
++        Each successive object in the chain.
++    """
++    next = getattr(obj, attr)
++    while next:
++        yield next
++        next = getattr(next, attr)
++
++p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
++        | comp_for< 'for' any 'in' node=any any* >
++     """
++p1 = """
++power<
++    ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
++      'any' | 'all' | (any* trailer< '.' 'join' >) )
++    trailer< '(' node=any ')' >
++    any*
++>
++"""
++p2 = """
++power<
++    'sorted'
++    trailer< '(' arglist<node=any any*> ')' >
++    any*
++>
++"""
++pats_built = False
++def in_special_context(node):
++    """ Returns true if node is in an environment where all that is required
++        of it is being iterable (i.e., it doesn't matter if it returns a list
++        or an iterator).
++        See test_map_nochange in test_fixers.py for some examples and tests.
++        """
++    global p0, p1, p2, pats_built
++    if not pats_built:
++        p1 = patcomp.compile_pattern(p1)
++        p0 = patcomp.compile_pattern(p0)
++        p2 = patcomp.compile_pattern(p2)
++        pats_built = True
++    patterns = [p0, p1, p2]
++    for pattern, parent in zip(patterns, attr_chain(node, "parent")):
++        results = {}
++        if pattern.match(parent, results) and results["node"] is node:
++            return True
++    return False
++
++def is_probably_builtin(node):
++    """
++    Check that something isn't an attribute or function name etc.
++    """
++    prev = node.prev_sibling
++    if prev is not None and prev.type == token.DOT:
++        # Attribute lookup.
++        return False
++    parent = node.parent
++    if parent.type in (syms.funcdef, syms.classdef):
++        return False
++    if parent.type == syms.expr_stmt and parent.children[0] is node:
++        # Assignment.
++        return False
++    if parent.type == syms.parameters or \
++            (parent.type == syms.typedargslist and (
++            (prev is not None and prev.type == token.COMMA) or
++            parent.children[0] is node
++            )):
++        # The name of an argument.
++        return False
++    return True
++
++###########################################################
++### The following functions are to find bindings in a suite
++###########################################################
++
++def make_suite(node):
++    if node.type == syms.suite:
++        return node
++    node = node.clone()
++    parent, node.parent = node.parent, None
++    suite = Node(syms.suite, [node])
++    suite.parent = parent
++    return suite
++
++def find_root(node):
++    """Find the top level namespace."""
++    # Scamper up to the top level namespace
++    while node.type != syms.file_input:
++        assert node.parent, "Tree is insane! root found before "\
++                           "file_input node was found."
++        node = node.parent
++    return node
++
++def does_tree_import(package, name, node):
++    """ Returns true if name is imported from package at the
++        top level of the tree which node belongs to.
++        To cover the case of an import like 'import foo', use
++        None for the package and 'foo' for the name. """
++    binding = find_binding(name, find_root(node), package)
++    return bool(binding)
++
++def is_import(node):
++    """Returns true if the node is an import statement."""
++    return node.type in (syms.import_name, syms.import_from)
++
++def touch_import(package, name, node):
++    """ Works like `does_tree_import` but adds an import statement
++        if it was not imported. """
++    def is_import_stmt(node):
++        return node.type == syms.simple_stmt and node.children and \
++               is_import(node.children[0])
++
++    root = find_root(node)
++
++    if does_tree_import(package, name, root):
++        return
++
++    add_newline_before = False
++
++    # figure out where to insert the new import.  First try to find
++    # the first import and then skip to the last one.
++    insert_pos = offset = 0
++    for idx, node in enumerate(root.children):
++        if not is_import_stmt(node):
++            continue
++        for offset, node2 in enumerate(root.children[idx:]):
++            if not is_import_stmt(node2):
++                break
++        insert_pos = idx + offset
++        break
++
++    # if there are no imports where we can insert, find the docstring.
++    # if that also fails, we stick to the beginning of the file
++    if insert_pos == 0:
++        for idx, node in enumerate(root.children):
++            if node.type == syms.simple_stmt and node.children and \
++               node.children[0].type == token.STRING:
++                insert_pos = idx + 1
++                add_newline_before = True
++                break
++
++    if package is None:
++        import_ = Node(syms.import_name, [
++            Leaf(token.NAME, 'import'),
++            Leaf(token.NAME, name, prefix=' ')
++        ])
++    else:
++        import_ = FromImport(package, [Leaf(token.NAME, name, prefix=' ')])
++
++    children = [import_, Newline()]
++    if add_newline_before:
++        children.insert(0, Newline())
++    root.insert_child(insert_pos, Node(syms.simple_stmt, children))
++
++
++_def_syms = set([syms.classdef, syms.funcdef])
++def find_binding(name, node, package=None):
++    """ Returns the node which binds variable name, otherwise None.
++        If optional argument package is supplied, only imports will
++        be returned.
++        See test cases for examples."""
++    for child in node.children:
++        ret = None
++        if child.type == syms.for_stmt:
++            if _find(name, child.children[1]):
++                return child
++            n = find_binding(name, make_suite(child.children[-1]), package)
++            if n: ret = n
++        elif child.type in (syms.if_stmt, syms.while_stmt):
++            n = find_binding(name, make_suite(child.children[-1]), package)
++            if n: ret = n
++        elif child.type == syms.try_stmt:
++            n = find_binding(name, make_suite(child.children[2]), package)
++            if n:
++                ret = n
++            else:
++                for i, kid in enumerate(child.children[3:]):
++                    if kid.type == token.COLON and kid.value == ":":
++                        # i+3 is the colon, i+4 is the suite
++                        n = find_binding(name, make_suite(child.children[i+4]), package)
++                        if n: ret = n
++        elif child.type in _def_syms and child.children[1].value == name:
++            ret = child
++        elif _is_import_binding(child, name, package):
++            ret = child
++        elif child.type == syms.simple_stmt:
++            ret = find_binding(name, child, package)
++        elif child.type == syms.expr_stmt:
++            if _find(name, child.children[0]):
++                ret = child
++
++        if ret:
++            if not package:
++                return ret
++            if is_import(ret):
++                return ret
++    return None
++
++_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
++def _find(name, node):
++    nodes = [node]
++    while nodes:
++        node = nodes.pop()
++        if node.type > 256 and node.type not in _block_syms:
++            nodes.extend(node.children)
++        elif node.type == token.NAME and node.value == name:
++            return node
++    return None
++
++def _is_import_binding(node, name, package=None):
++    """ Will reuturn node if node will import name, or node
++        will import * from package.  None is returned otherwise.
++        See test cases for examples. """
++
++    if node.type == syms.import_name and not package:
++        imp = node.children[1]
++        if imp.type == syms.dotted_as_names:
++            for child in imp.children:
++                if child.type == syms.dotted_as_name:
++                    if child.children[2].value == name:
++                        return node
++                elif child.type == token.NAME and child.value == name:
++                    return node
++        elif imp.type == syms.dotted_as_name:
++            last = imp.children[-1]
++            if last.type == token.NAME and last.value == name:
++                return node
++        elif imp.type == token.NAME and imp.value == name:
++            return node
++    elif node.type == syms.import_from:
++        # unicode(...) is used to make life easier here, because
++        # from a.b import parses to ['import', ['a', '.', 'b'], ...]
++        if package and unicode(node.children[1]).strip() != package:
++            return None
++        n = node.children[3]
++        if package and _find('as', n):
++            # See test_from_import_as for explanation
++            return None
++        elif n.type == syms.import_as_names and _find(name, n):
++            return node
++        elif n.type == syms.import_as_name:
++            child = n.children[2]
++            if child.type == token.NAME and child.value == name:
++                return node
++        elif n.type == token.NAME and n.value == name:
++            return node
++        elif package and n.type == token.STAR:
++            return node
++    return None
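The construction macros at the top of this module compose naturally; a short
sketch (illustrative, not part of the commit) building the tree for
"result = sorted(data)":

    from refactor.fixer_util import Assign, Call, Name

    stmt = Assign(Name("result"), Call(Name("sorted"), [Name("data")]))
    print str(stmt)   # should print: result = sorted(data)
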
+diff -r 531f2e948299 refactor/.svn/text-base/main.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/main.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,133 @@
++"""
++Main program for 2to3.
++"""
++
++import sys
++import os
++import logging
++import shutil
++import optparse
++
++from . import refactor
++
++
++class StdoutRefactoringTool(refactor.RefactoringTool):
++    """
++    Prints output to stdout.
++    """
++
++    def __init__(self, fixers, options, explicit, nobackups):
++        self.nobackups = nobackups
++        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
++
++    def log_error(self, msg, *args, **kwargs):
++        self.errors.append((msg, args, kwargs))
++        self.logger.error(msg, *args, **kwargs)
++
++    def write_file(self, new_text, filename, old_text):
++        if not self.nobackups:
++            # Make backup
++            backup = filename + ".bak"
++            if os.path.lexists(backup):
++                try:
++                    os.remove(backup)
++                except os.error, err:
++                    self.log_message("Can't remove backup %s", backup)
++            try:
++                os.rename(filename, backup)
++            except os.error, err:
++                self.log_message("Can't rename %s to %s", filename, backup)
++        # Actually write the new file
++        super(StdoutRefactoringTool, self).write_file(new_text,
++                                                      filename, old_text)
++        if not self.nobackups:
++            shutil.copymode(backup, filename)
++
++    def print_output(self, lines):
++        for line in lines:
++            print line
++
++
++def main(fixer_pkg, args=None):
++    """Main program.
++
++    Args:
++        fixer_pkg: the name of a package where the fixers are located.
++        args: optional; a list of command line arguments. If omitted,
++              sys.argv[1:] is used.
++
++    Returns a suggested exit status (0, 1, 2).
++    """
++    # Set up option parser
++    parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
++    parser.add_option("-d", "--doctests_only", action="store_true",
++                      help="Fix up doctests only")
++    parser.add_option("-f", "--fix", action="append", default=[],
++                      help="Each FIX specifies a transformation; default: all")
++    parser.add_option("-x", "--nofix", action="append", default=[],
++                      help="Prevent a fixer from being run.")
++    parser.add_option("-l", "--list-fixes", action="store_true",
++                      help="List available transformations (fixes/fix_*.py)")
++    parser.add_option("-p", "--print-function", action="store_true",
++                      help="Modify the grammar so that print() is a function")
++    parser.add_option("-v", "--verbose", action="store_true",
++                      help="More verbose logging")
++    parser.add_option("-w", "--write", action="store_true",
++                      help="Write back modified files")
++    parser.add_option("-n", "--nobackups", action="store_true", default=False,
++                      help="Don't write backups for modified files.")
++
++    # Parse command line arguments
++    refactor_stdin = False
++    options, args = parser.parse_args(args)
++    if not options.write and options.nobackups:
++        parser.error("Can't use -n without -w")
++    if options.list_fixes:
++        print "Available transformations for the -f/--fix option:"
++        for fixname in refactor.get_all_fix_names(fixer_pkg):
++            print fixname
++        if not args:
++            return 0
++    if not args:
++        print >>sys.stderr, "At least one file or directory argument required."
++        print >>sys.stderr, "Use --help to show usage."
++        return 2
++    if "-" in args:
++        refactor_stdin = True
++        if options.write:
++            print >>sys.stderr, "Can't write to stdin."
++            return 2
++
++    # Set up logging handler
++    level = logging.DEBUG if options.verbose else logging.INFO
++    logging.basicConfig(format='%(name)s: %(message)s', level=level)
++
++    # Initialize the refactoring tool
++    rt_opts = {"print_function" : options.print_function}
++    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
++    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
++    explicit = set()
++    if options.fix:
++        all_present = False
++        for fix in options.fix:
++            if fix == "all":
++                all_present = True
++            else:
++                explicit.add(fixer_pkg + ".fix_" + fix)
++        requested = avail_fixes.union(explicit) if all_present else explicit
++    else:
++        requested = avail_fixes.union(explicit)
++    fixer_names = requested.difference(unwanted_fixes)
++    rt = StdoutRefactoringTool(sorted(fixer_names), rt_opts, sorted(explicit),
++                               options.nobackups)
++
++    # Refactor all files and directories passed as arguments
++    if not rt.errors:
++        if refactor_stdin:
++            rt.refactor_stdin()
++        else:
++            rt.refactor(args, options.write, options.doctests_only)
++        rt.summarize()
++
++    # Return error status (0 if rt.errors is zero)
++    return int(bool(rt.errors))
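Typical invocations of the main() driver above, given the options it
registers (file names are illustrative):

    2to3 -l                        # list available fixers and exit
    2to3 -f has_key -w module.py   # run one explicit fixer, write changes
    2to3 -w -n package/            # refactor a tree without .bak backups
    cat module.py | 2to3 -         # read from stdin (incompatible with -w)
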
+diff -r 531f2e948299 refactor/.svn/text-base/patcomp.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/patcomp.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,186 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Pattern compiler.
++
++The grammar is taken from PatternGrammar.txt.
++
++The compiler compiles a pattern to a pytree.*Pattern instance.
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++# Python imports
++import os
++
++# Fairly local imports
++from .pgen2 import driver
++from .pgen2 import literals
++from .pgen2 import token
++from .pgen2 import tokenize
++
++# Really local imports
++from . import pytree
++from . import pygram
++
++# The pattern grammar file
++_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
++                                     "PatternGrammar.txt")
++
++
++def tokenize_wrapper(input):
++    """Tokenizes a string suppressing significant whitespace."""
++    skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
++    tokens = tokenize.generate_tokens(driver.generate_lines(input).next)
++    for quintuple in tokens:
++        type, value, start, end, line_text = quintuple
++        if type not in skip:
++            yield quintuple
++
++
++class PatternCompiler(object):
++
++    def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
++        """Initializer.
++
++        Takes an optional alternative filename for the pattern grammar.
++        """
++        self.grammar = driver.load_grammar(grammar_file)
++        self.syms = pygram.Symbols(self.grammar)
++        self.pygrammar = pygram.python_grammar
++        self.pysyms = pygram.python_symbols
++        self.driver = driver.Driver(self.grammar, convert=pattern_convert)
++
++    def compile_pattern(self, input, debug=False):
++        """Compiles a pattern string to a nested pytree.*Pattern object."""
++        tokens = tokenize_wrapper(input)
++        root = self.driver.parse_tokens(tokens, debug=debug)
++        return self.compile_node(root)
++
++    def compile_node(self, node):
++        """Compiles a node, recursively.
++
++        This is one big switch on the node type.
++        """
++        # XXX Optimize certain Wildcard-containing-Wildcard patterns
++        # that can be merged
++        if node.type == self.syms.Matcher:
++            node = node.children[0] # Avoid unneeded recursion
++
++        if node.type == self.syms.Alternatives:
++            # Skip the odd children since they are just '|' tokens
++            alts = [self.compile_node(ch) for ch in node.children[::2]]
++            if len(alts) == 1:
++                return alts[0]
++            p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
++            return p.optimize()
++
++        if node.type == self.syms.Alternative:
++            units = [self.compile_node(ch) for ch in node.children]
++            if len(units) == 1:
++                return units[0]
++            p = pytree.WildcardPattern([units], min=1, max=1)
++            return p.optimize()
++
++        if node.type == self.syms.NegatedUnit:
++            pattern = self.compile_basic(node.children[1:])
++            p = pytree.NegatedPattern(pattern)
++            return p.optimize()
++
++        assert node.type == self.syms.Unit
++
++        name = None
++        nodes = node.children
++        if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
++            name = nodes[0].value
++            nodes = nodes[2:]
++        repeat = None
++        if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
++            repeat = nodes[-1]
++            nodes = nodes[:-1]
++
++        # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
++        pattern = self.compile_basic(nodes, repeat)
++
++        if repeat is not None:
++            assert repeat.type == self.syms.Repeater
++            children = repeat.children
++            child = children[0]
++            if child.type == token.STAR:
++                min = 0
++                max = pytree.HUGE
++            elif child.type == token.PLUS:
++                min = 1
++                max = pytree.HUGE
++            elif child.type == token.LBRACE:
++                assert children[-1].type == token.RBRACE
++                assert  len(children) in (3, 5)
++                min = max = self.get_int(children[1])
++                if len(children) == 5:
++                    max = self.get_int(children[3])
++            else:
++                assert False
++            if min != 1 or max != 1:
++                pattern = pattern.optimize()
++                pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
++
++        if name is not None:
++            pattern.name = name
++        return pattern.optimize()
++
++    def compile_basic(self, nodes, repeat=None):
++        # Compile STRING | NAME [Details] | (...) | [...]
++        assert len(nodes) >= 1
++        node = nodes[0]
++        if node.type == token.STRING:
++            value = literals.evalString(node.value)
++            return pytree.LeafPattern(content=value)
++        elif node.type == token.NAME:
++            value = node.value
++            if value.isupper():
++                if value not in TOKEN_MAP:
++                    raise SyntaxError("Invalid token: %r" % value)
++                return pytree.LeafPattern(TOKEN_MAP[value])
++            else:
++                if value == "any":
++                    type = None
++                elif not value.startswith("_"):
++                    type = getattr(self.pysyms, value, None)
++                    if type is None:
++                        raise SyntaxError("Invalid symbol: %r" % value)
++                if nodes[1:]: # Details present
++                    content = [self.compile_node(nodes[1].children[1])]
++                else:
++                    content = None
++                return pytree.NodePattern(type, content)
++        elif node.value == "(":
++            return self.compile_node(nodes[1])
++        elif node.value == "[":
++            assert repeat is None
++            subpattern = self.compile_node(nodes[1])
++            return pytree.WildcardPattern([[subpattern]], min=0, max=1)
++        assert False, node
++
++    def get_int(self, node):
++        assert node.type == token.NUMBER
++        return int(node.value)
++
++
++# Map named tokens to the type value for a LeafPattern
++TOKEN_MAP = {"NAME": token.NAME,
++             "STRING": token.STRING,
++             "NUMBER": token.NUMBER,
++             "TOKEN": None}
++
++
++def pattern_convert(grammar, raw_node_info):
++    """Converts raw node information to a Node or Leaf instance."""
++    type, value, context, children = raw_node_info
++    if children or type in grammar.number2symbol:
++        return pytree.Node(type, children, context=context)
++    else:
++        return pytree.Leaf(type, value, context=context)
++
++
++def compile_pattern(pattern):
++    return PatternCompiler().compile_pattern(pattern)
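A short sketch of the compiler above in use (the pattern string and the node
are illustrative); match() fills the results dict with any named bindings:

    from refactor.patcomp import compile_pattern

    pat = compile_pattern("power< 'eval' trailer< '(' args=any ')' > >")
    results = {}
    if pat.match(node, results):   # node: some parse tree node
        print results["args"]      # the bound argument sub-tree
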
+diff -r 531f2e948299 refactor/.svn/text-base/pygram.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/pygram.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,31 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Export the Python grammar and symbols."""
++
++# Python imports
++import os
++
++# Local imports
++from .pgen2 import token
++from .pgen2 import driver
++from . import pytree
++
++# The grammar file
++_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
++
++
++class Symbols(object):
++
++    def __init__(self, grammar):
++        """Initializer.
++
++        Creates an attribute for each grammar symbol (nonterminal),
++        whose value is the symbol's type (an int >= 256).
++        """
++        for name, symbol in grammar.symbol2number.iteritems():
++            setattr(self, name, symbol)
++
++
++python_grammar = driver.load_grammar(_GRAMMAR_FILE)
++python_symbols = Symbols(python_grammar)
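With the Symbols class above, every nonterminal of the grammar becomes a
plain attribute holding its type number; a quick sketch (Python 2, to match
the code):

    from refactor import pytree
    from refactor.pygram import python_symbols as syms

    print syms.funcdef                    # an int >= 256; value varies
    print pytree.type_repr(syms.funcdef)  # -> 'funcdef' (see next hunk)
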
+diff -r 531f2e948299 refactor/.svn/text-base/pytree.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/pytree.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,846 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""
++Python parse tree definitions.
++
++This is a very concrete parse tree; we need to keep every token and
++even the comments and whitespace between tokens.
++
++There's also a pattern matching implementation here.
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++import sys
++from StringIO import StringIO
++
++
++HUGE = 0x7FFFFFFF  # maximum repeat count, default max
++
++_type_reprs = {}
++def type_repr(type_num):
++    global _type_reprs
++    if not _type_reprs:
++        from .pygram import python_symbols
++        # printing tokens is possible but not as useful
++        # from .pgen2 import token // token.__dict__.items():
++        for name, val in python_symbols.__dict__.items():
++            if type(val) == int: _type_reprs[val] = name
++    return _type_reprs.setdefault(type_num, type_num)
++
++
++class Base(object):
++
++    """
++    Abstract base class for Node and Leaf.
++
++    This provides some default functionality and boilerplate using the
++    template pattern.
++
++    A node may be a subnode of at most one parent.
++    """
++
++    # Default values for instance variables
++    type = None    # int: token number (< 256) or symbol number (>= 256)
++    parent = None  # Parent node pointer, or None
++    children = ()  # Tuple of subnodes
++    was_changed = False
++
++    def __new__(cls, *args, **kwds):
++        """Constructor that prevents Base from being instantiated."""
++        assert cls is not Base, "Cannot instantiate Base"
++        return object.__new__(cls)
++
++    def __eq__(self, other):
++        """
++        Compare two nodes for equality.
++
++        This calls the method _eq().
++        """
++        if self.__class__ is not other.__class__:
++            return NotImplemented
++        return self._eq(other)
++
++    def __ne__(self, other):
++        """
++        Compare two nodes for inequality.
++
++        This calls the method _eq().
++        """
++        if self.__class__ is not other.__class__:
++            return NotImplemented
++        return not self._eq(other)
++
++    def _eq(self, other):
++        """
++        Compare two nodes for equality.
++
++        This is called by __eq__ and __ne__.  It is only called if the two nodes
++        have the same type.  This must be implemented by the concrete subclass.
++        Nodes should be considered equal if they have the same structure,
++        ignoring the prefix string and other context information.
++        """
++        raise NotImplementedError
++
++    def clone(self):
++        """
++        Return a cloned (deep) copy of self.
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def post_order(self):
++        """
++        Return a post-order iterator for the tree.
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def pre_order(self):
++        """
++        Return a pre-order iterator for the tree.
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def set_prefix(self, prefix):
++        """
++        Set the prefix for the node (see Leaf class).
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def get_prefix(self):
++        """
++        Return the prefix for the node (see Leaf class).
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def replace(self, new):
++        """Replace this node with a new one in the parent."""
++        assert self.parent is not None, str(self)
++        assert new is not None
++        if not isinstance(new, list):
++            new = [new]
++        l_children = []
++        found = False
++        for ch in self.parent.children:
++            if ch is self:
++                assert not found, (self.parent.children, self, new)
++                if new is not None:
++                    l_children.extend(new)
++                found = True
++            else:
++                l_children.append(ch)
++        assert found, (self.children, self, new)
++        self.parent.changed()
++        self.parent.children = l_children
++        for x in new:
++            x.parent = self.parent
++        self.parent = None
++
++    def get_lineno(self):
++        """Return the line number which generated the invocant node."""
++        node = self
++        while not isinstance(node, Leaf):
++            if not node.children:
++                return
++            node = node.children[0]
++        return node.lineno
++
++    def changed(self):
++        if self.parent:
++            self.parent.changed()
++        self.was_changed = True
++
++    def remove(self):
++        """
++        Remove the node from the tree. Returns the position of the node in its
++        parent's children before it was removed.
++        """
++        if self.parent:
++            for i, node in enumerate(self.parent.children):
++                if node is self:
++                    self.parent.changed()
++                    del self.parent.children[i]
++                    self.parent = None
++                    return i
++
++    @property
++    def next_sibling(self):
++        """
++        The node immediately following the invocant in its parent's children
++        list, or None if the invocant has no next sibling.
++        """
++        if self.parent is None:
++            return None
++
++        # Can't use index(); we need to test by identity
++        for i, child in enumerate(self.parent.children):
++            if child is self:
++                try:
++                    return self.parent.children[i+1]
++                except IndexError:
++                    return None
++
++    @property
++    def prev_sibling(self):
++        """
++        The node immediately preceding the invocant in its parent's children
++        list, or None if the invocant has no previous sibling.
++        """
++        if self.parent is None:
++            return None
++
++        # Can't use index(); we need to test by identity
++        for i, child in enumerate(self.parent.children):
++            if child is self:
++                if i == 0:
++                    return None
++                return self.parent.children[i-1]
++
++    def get_suffix(self):
++        """
++        Return the string immediately following the invocant node. This is
++        effectively equivalent to node.next_sibling.get_prefix().
++        """
++        next_sib = self.next_sibling
++        if next_sib is None:
++            return ""
++        return next_sib.get_prefix()
++
++
++class Node(Base):
++
++    """Concrete implementation for interior nodes."""
++
++    def __init__(self, type, children, context=None, prefix=None):
++        """
++        Initializer.
++
++        Takes a type constant (a symbol number >= 256), a sequence of
++        child nodes, and an optional context keyword argument.
++
++        As a side effect, the parent pointers of the children are updated.
++        """
++        assert type >= 256, type
++        self.type = type
++        self.children = list(children)
++        for ch in self.children:
++            assert ch.parent is None, repr(ch)
++            ch.parent = self
++        if prefix is not None:
++            self.set_prefix(prefix)
++
++    def __repr__(self):
++        """Return a canonical string representation."""
++        return "%s(%s, %r)" % (self.__class__.__name__,
++                               type_repr(self.type),
++                               self.children)
++
++    def __str__(self):
++        """
++        Return a pretty string representation.
++
++        This reproduces the input source exactly.
++        """
++        return "".join(map(str, self.children))
++
++    def _eq(self, other):
++        """Compare two nodes for equality."""
++        return (self.type, self.children) == (other.type, other.children)
++
++    def clone(self):
++        """Return a cloned (deep) copy of self."""
++        return Node(self.type, [ch.clone() for ch in self.children])
++
++    def post_order(self):
++        """Return a post-order iterator for the tree."""
++        for child in self.children:
++            for node in child.post_order():
++                yield node
++        yield self
++
++    def pre_order(self):
++        """Return a pre-order iterator for the tree."""
++        yield self
++        for child in self.children:
++            for node in child.pre_order():
++                yield node
++
++    def set_prefix(self, prefix):
++        """
++        Set the prefix for the node.
++
++        This passes the responsibility on to the first child.
++        """
++        if self.children:
++            self.children[0].set_prefix(prefix)
++
++    def get_prefix(self):
++        """
++        Return the prefix for the node.
++
++        This passes the call on to the first child.
++        """
++        if not self.children:
++            return ""
++        return self.children[0].get_prefix()
++
++    def set_child(self, i, child):
++        """
++        Equivalent to 'node.children[i] = child'. This method also sets the
++        child's parent attribute appropriately.
++        """
++        child.parent = self
++        self.children[i].parent = None
++        self.children[i] = child
++        self.changed()
++
++    def insert_child(self, i, child):
++        """
++        Equivalent to 'node.children.insert(i, child)'. This method also sets
++        the child's parent attribute appropriately.
++        """
++        child.parent = self
++        self.children.insert(i, child)
++        self.changed()
++
++    def append_child(self, child):
++        """
++        Equivalent to 'node.children.append(child)'. This method also sets the
++        child's parent attribute appropriately.
++        """
++        child.parent = self
++        self.children.append(child)
++        self.changed()
++
++
++class Leaf(Base):
++
++    """Concrete implementation for leaf nodes."""
++
++    # Default values for instance variables
++    prefix = ""  # Whitespace and comments preceding this token in the input
++    lineno = 0   # Line where this token starts in the input
++    column = 0   # Column where this token starts in the input
++
++    def __init__(self, type, value, context=None, prefix=None):
++        """
++        Initializer.
++
++        Takes a type constant (a token number < 256), a string value, and an
++        optional context keyword argument.
++        """
++        assert 0 <= type < 256, type
++        if context is not None:
++            self.prefix, (self.lineno, self.column) = context
++        self.type = type
++        self.value = value
++        if prefix is not None:
++            self.prefix = prefix
++
++    def __repr__(self):
++        """Return a canonical string representation."""
++        return "%s(%r, %r)" % (self.__class__.__name__,
++                               self.type,
++                               self.value)
++
++    def __str__(self):
++        """
++        Return a pretty string representation.
++
++        This reproduces the input source exactly.
++        """
++        return self.prefix + str(self.value)
++
++    def _eq(self, other):
++        """Compare two nodes for equality."""
++        return (self.type, self.value) == (other.type, other.value)
++
++    def clone(self):
++        """Return a cloned (deep) copy of self."""
++        return Leaf(self.type, self.value,
++                    (self.prefix, (self.lineno, self.column)))
++
++    def post_order(self):
++        """Return a post-order iterator for the tree."""
++        yield self
++
++    def pre_order(self):
++        """Return a pre-order iterator for the tree."""
++        yield self
++
++    def set_prefix(self, prefix):
++        """Set the prefix for the node."""
++        self.changed()
++        self.prefix = prefix
++
++    def get_prefix(self):
++        """Return the prefix for the node."""
++        return self.prefix
++
++
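
To make the prefix machinery above concrete, here is a small sketch of
building a tree by hand (token names come from pgen2's token module;
expr_stmt is defined in Grammar.txt):

    from refactor import pytree, pygram
    from refactor.pgen2 import token

    syms = pygram.python_symbols
    lhs = pytree.Leaf(token.NAME, "x")
    eq = pytree.Leaf(token.EQUAL, "=", prefix=" ")
    rhs = pytree.Leaf(token.NUMBER, "1", prefix=" ")
    node = pytree.Node(syms.expr_stmt, [lhs, eq, rhs])
    assert str(node) == "x = 1"    # the prefixes carry the whitespace
    assert rhs.prev_sibling is eq and eq.parent is node
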
++def convert(gr, raw_node):
++    """
++    Convert raw node information to a Node or Leaf instance.
++
++    This is passed to the parser driver which calls it whenever a reduction of a
++    grammar rule produces a new complete node, so that the tree is built
++    strictly bottom-up.
++    """
++    type, value, context, children = raw_node
++    if children or type in gr.number2symbol:
++        # If there's exactly one child, return that child instead of
++        # creating a new node.
++        if len(children) == 1:
++            return children[0]
++        return Node(type, children, context=context)
++    else:
++        return Leaf(type, value, context=context)
++
++
++class BasePattern(object):
++
++    """
++    A pattern is a tree matching pattern.
++
++    It looks for a specific node type (token or symbol), and
++    optionally for a specific content.
++
++    This is an abstract base class.  There are three concrete
++    subclasses:
++
++    - LeafPattern matches a single leaf node;
++    - NodePattern matches a single node (usually non-leaf);
++    - WildcardPattern matches a sequence of nodes of variable length.
++    """
++
++    # Defaults for instance variables
++    type = None     # Node type (token if < 256, symbol if >= 256)
++    content = None  # Optional content matching pattern
++    name = None     # Optional name used to store match in results dict
++
++    def __new__(cls, *args, **kwds):
++        """Constructor that prevents BasePattern from being instantiated."""
++        assert cls is not BasePattern, "Cannot instantiate BasePattern"
++        return object.__new__(cls)
++
++    def __repr__(self):
++        args = [type_repr(self.type), self.content, self.name]
++        while args and args[-1] is None:
++            del args[-1]
++        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
++
++    def optimize(self):
++        """
++        A subclass can define this as a hook for optimizations.
++
++        Returns either self or another node with the same effect.
++        """
++        return self
++
++    def match(self, node, results=None):
++        """
++        Does this pattern exactly match a node?
++
++        Returns True if it matches, False if not.
++
++        If results is not None, it must be a dict which will be
++        updated with the nodes matching named subpatterns.
++
++        Default implementation for non-wildcard patterns.
++        """
++        if self.type is not None and node.type != self.type:
++            return False
++        if self.content is not None:
++            r = None
++            if results is not None:
++                r = {}
++            if not self._submatch(node, r):
++                return False
++            if r:
++                results.update(r)
++        if results is not None and self.name:
++            results[self.name] = node
++        return True
++
++    def match_seq(self, nodes, results=None):
++        """
++        Does this pattern exactly match a sequence of nodes?
++
++        Default implementation for non-wildcard patterns.
++        """
++        if len(nodes) != 1:
++            return False
++        return self.match(nodes[0], results)
++
++    def generate_matches(self, nodes):
++        """
++        Generator yielding all matches for this pattern.
++
++        Default implementation for non-wildcard patterns.
++        """
++        r = {}
++        if nodes and self.match(nodes[0], r):
++            yield 1, r
++
++
++class LeafPattern(BasePattern):
++
++    def __init__(self, type=None, content=None, name=None):
++        """
++        Initializer.  Takes optional type, content, and name.
++
++        The type, if given, must be a token type (< 256).  If not given,
++        this matches any *leaf* node; the content may still be required.
++
++        The content, if given, must be a string.
++
++        If a name is given, the matching node is stored in the results
++        dict under that key.
++        """
++        if type is not None:
++            assert 0 <= type < 256, type
++        if content is not None:
++            assert isinstance(content, basestring), repr(content)
++        self.type = type
++        self.content = content
++        self.name = name
++
++    def match(self, node, results=None):
++        """Override match() to insist on a leaf node."""
++        if not isinstance(node, Leaf):
++            return False
++        return BasePattern.match(self, node, results)
++
++    def _submatch(self, node, results=None):
++        """
++        Match the pattern's content to the node's children.
++
++        This assumes the node type matches and self.content is not None.
++
++        Returns True if it matches, False if not.
++
++        If results is not None, it must be a dict which will be
++        updated with the nodes matching named subpatterns.
++
++        When returning False, the results dict may still be updated.
++        """
++        return self.content == node.value
++
++
++class NodePattern(BasePattern):
++
++    wildcards = False
++
++    def __init__(self, type=None, content=None, name=None):
++        """
++        Initializer.  Takes optional type, content, and name.
++
++        The type, if given, must be a symbol type (>= 256).  If the
++        type is None this matches *any* single node (leaf or not),
++        except if content is not None, in which case it only matches
++        non-leaf nodes that also match the content pattern.
++
++        The content, if not None, must be a sequence of Patterns that
++        must match the node's children exactly.  If the content is
++        given, the type must not be None.
++
++        If a name is given, the matching node is stored in the results
++        dict under that key.
++        """
++        if type is not None:
++            assert type >= 256, type
++        if content is not None:
++            assert not isinstance(content, basestring), repr(content)
++            content = list(content)
++            for i, item in enumerate(content):
++                assert isinstance(item, BasePattern), (i, item)
++                if isinstance(item, WildcardPattern):
++                    self.wildcards = True
++        self.type = type
++        self.content = content
++        self.name = name
++
++    def _submatch(self, node, results=None):
++        """
++        Match the pattern's content to the node's children.
++
++        This assumes the node type matches and self.content is not None.
++
++        Returns True if it matches, False if not.
++
++        If results is not None, it must be a dict which will be
++        updated with the nodes matching named subpatterns.
++
++        When returning False, the results dict may still be updated.
++        """
++        if self.wildcards:
++            for c, r in generate_matches(self.content, node.children):
++                if c == len(node.children):
++                    if results is not None:
++                        results.update(r)
++                    return True
++            return False
++        if len(self.content) != len(node.children):
++            return False
++        for subpattern, child in zip(self.content, node.children):
++            if not subpattern.match(child, results):
++                return False
++        return True
++
++
++class WildcardPattern(BasePattern):
++
++    """
++    A wildcard pattern can match zero or more nodes.
++
++    This has all the flexibility needed to implement patterns like:
++
++    .*      .+      .?      .{m,n}
++    (a b c | d e | f)
++    (...)*  (...)+  (...)?  (...){m,n}
++
++    except it always uses non-greedy matching.
++    """
++
++    def __init__(self, content=None, min=0, max=HUGE, name=None):
++        """
++        Initializer.
++
++        Args:
++            content: optional sequence of subsequences of patterns;
++                     if absent, matches one node;
++                     if present, each subsequence is an alternative [*]
++            min: optional minimum number of times to match, default 0
++            max: optional maximum number of times to match, default HUGE
++            name: optional name assigned to this match
++
++        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
++            equivalent to (a b c | d e | f g h); if content is None,
++            this is equivalent to '.' in regular expression terms.
++            The min and max parameters work as follows:
++                min=0, max=maxint: .*
++                min=1, max=maxint: .+
++                min=0, max=1: .?
++                min=1, max=1: .
++            If content is not None, replace the dot with the parenthesized
++            list of alternatives, e.g. (a b c | d e | f g h)*
++        """
++        assert 0 <= min <= max <= HUGE, (min, max)
++        if content is not None:
++            content = tuple(map(tuple, content))  # Protect against alterations
++            # Check sanity of alternatives
++            assert len(content), repr(content)  # Can't have zero alternatives
++            for alt in content:
++                assert len(alt), repr(alt)  # Can't have empty alternatives
++        self.content = content
++        self.min = min
++        self.max = max
++        self.name = name
++
++    def optimize(self):
++        """Optimize certain stacked wildcard patterns."""
++        subpattern = None
++        if (self.content is not None and
++            len(self.content) == 1 and len(self.content[0]) == 1):
++            subpattern = self.content[0][0]
++        if self.min == 1 and self.max == 1:
++            if self.content is None:
++                return NodePattern(name=self.name)
++            if subpattern is not None and self.name == subpattern.name:
++                return subpattern.optimize()
++        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
++            subpattern.min <= 1 and self.name == subpattern.name):
++            return WildcardPattern(subpattern.content,
++                                   self.min*subpattern.min,
++                                   self.max*subpattern.max,
++                                   subpattern.name)
++        return self
++
++    def match(self, node, results=None):
++        """Does this pattern exactly match a node?"""
++        return self.match_seq([node], results)
++
++    def match_seq(self, nodes, results=None):
++        """Does this pattern exactly match a sequence of nodes?"""
++        for c, r in self.generate_matches(nodes):
++            if c == len(nodes):
++                if results is not None:
++                    results.update(r)
++                    if self.name:
++                        results[self.name] = list(nodes)
++                return True
++        return False
++
++    def generate_matches(self, nodes):
++        """
++        Generator yielding matches for a sequence of nodes.
++
++        Args:
++            nodes: sequence of nodes
++
++        Yields:
++            (count, results) tuples where:
++            count: the match comprises nodes[:count];
++            results: dict containing named submatches.
++        """
++        if self.content is None:
++            # Shortcut for special case (see __init__.__doc__)
++            for count in xrange(self.min, 1 + min(len(nodes), self.max)):
++                r = {}
++                if self.name:
++                    r[self.name] = nodes[:count]
++                yield count, r
++        elif self.name == "bare_name":
++            yield self._bare_name_matches(nodes)
++        else:
++            # The reason for this is that hitting the recursion limit usually
++            # results in some ugly messages about how RuntimeErrors are being
++            # ignored.
++            save_stderr = sys.stderr
++            sys.stderr = StringIO()
++            try:
++                for count, r in self._recursive_matches(nodes, 0):
++                    if self.name:
++                        r[self.name] = nodes[:count]
++                    yield count, r
++            except RuntimeError:
++                # We fall back to the iterative pattern matching scheme if the recursive
++                # scheme hits the recursion limit.
++                for count, r in self._iterative_matches(nodes):
++                    if self.name:
++                        r[self.name] = nodes[:count]
++                    yield count, r
++            finally:
++                sys.stderr = save_stderr
++
++    def _iterative_matches(self, nodes):
++        """Helper to iteratively yield the matches."""
++        nodelen = len(nodes)
++        if self.min <= 0:
++            yield 0, {}
++
++        results = []
++        # generate matches that use just one alt from self.content
++        for alt in self.content:
++            for c, r in generate_matches(alt, nodes):
++                yield c, r
++                results.append((c, r))
++
++        # for each match, iterate down the nodes
++        while results:
++            new_results = []
++            for c0, r0 in results:
++                # stop if the entire set of nodes has been matched
++                if c0 < nodelen and c0 <= self.max:
++                    for alt in self.content:
++                        for c1, r1 in generate_matches(alt, nodes[c0:]):
++                            if c1 > 0:
++                                r = {}
++                                r.update(r0)
++                                r.update(r1)
++                                yield c0 + c1, r
++                                new_results.append((c0 + c1, r))
++            results = new_results
++
++    def _bare_name_matches(self, nodes):
++        """Special optimized matcher for bare_name."""
++        count = 0
++        r = {}
++        done = False
++        max_count = len(nodes)
++        while not done and count < max_count:
++            done = True
++            for leaf in self.content:
++                if leaf[0].match(nodes[count], r):
++                    count += 1
++                    done = False
++                    break
++        r[self.name] = nodes[:count]
++        return count, r
++
++    def _recursive_matches(self, nodes, count):
++        """Helper to recursively yield the matches."""
++        assert self.content is not None
++        if count >= self.min:
++            yield 0, {}
++        if count < self.max:
++            for alt in self.content:
++                for c0, r0 in generate_matches(alt, nodes):
++                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
++                        r = {}
++                        r.update(r0)
++                        r.update(r1)
++                        yield c0 + c1, r
++
++
++class NegatedPattern(BasePattern):
++
++    def __init__(self, content=None):
++        """
++        Initializer.
++
++        The argument is either a pattern or None.  If it is None, this
++        only matches an empty sequence (effectively '$' in regex
++        lingo).  If it is not None, this matches whenever the argument
++        pattern doesn't have any matches.
++        """
++        if content is not None:
++            assert isinstance(content, BasePattern), repr(content)
++        self.content = content
++
++    def match(self, node):
++        # We never match a node in its entirety
++        return False
++
++    def match_seq(self, nodes):
++        # We only match an empty sequence of nodes in its entirety
++        return len(nodes) == 0
++
++    def generate_matches(self, nodes):
++        if self.content is None:
++            # Return a match if there is an empty sequence
++            if len(nodes) == 0:
++                yield 0, {}
++        else:
++            # Return a match if the argument pattern has no matches
++            for c, r in self.content.generate_matches(nodes):
++                return
++            yield 0, {}
++
++
++def generate_matches(patterns, nodes):
++    """
++    Generator yielding matches for a sequence of patterns and nodes.
++
++    Args:
++        patterns: a sequence of patterns
++        nodes: a sequence of nodes
++
++    Yields:
++        (count, results) tuples where:
++        count: the entire sequence of patterns matches nodes[:count];
++        results: dict containing named submatches.
++        """
++    if not patterns:
++        yield 0, {}
++    else:
++        p, rest = patterns[0], patterns[1:]
++        for c0, r0 in p.generate_matches(nodes):
++            if not rest:
++                yield c0, r0
++            else:
++                for c1, r1 in generate_matches(rest, nodes[c0:]):
++                    r = {}
++                    r.update(r0)
++                    r.update(r1)
++                    yield c0 + c1, r
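
As a sketch of the (count, results) protocol used throughout the pattern
classes above:

    from refactor import pytree
    from refactor.pgen2 import token

    nodes = [pytree.Leaf(token.NAME, n) for n in "abc"]
    # content=None means "any single node"; min/max make this behave
    # like a non-greedy ".{0,2}" over the sequence.
    wild = pytree.WildcardPattern(min=0, max=2, name="w")
    for count, results in wild.generate_matches(nodes):
        print count, [str(leaf) for leaf in results["w"]]
    # prints: 0 [] / 1 ['a'] / 2 ['a', 'b']
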
+diff -r 531f2e948299 refactor/.svn/text-base/refactor.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/.svn/text-base/refactor.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,515 @@
++#!/usr/bin/env python2.5
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Refactoring framework.
++
++Used as a main program, this can refactor any number of files and/or
++recursively descend into directories.  Imported as a module, this
++provides infrastructure to write your own refactoring tool.
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++
++# Python imports
++import os
++import sys
++import difflib
++import logging
++import operator
++from collections import defaultdict
++from itertools import chain
++
++# Local imports
++from .pgen2 import driver
++from .pgen2 import tokenize
++
++from . import pytree
++from . import patcomp
++from . import fixes
++from . import pygram
++
++
++def get_all_fix_names(fixer_pkg, remove_prefix=True):
++    """Return a sorted list of all available fix names in the given package."""
++    pkg = __import__(fixer_pkg, [], [], ["*"])
++    fixer_dir = os.path.dirname(pkg.__file__)
++    fix_names = []
++    for name in sorted(os.listdir(fixer_dir)):
++        if name.startswith("fix_") and name.endswith(".py"):
++            if remove_prefix:
++                name = name[4:]
++            fix_names.append(name[:-3])
++    return fix_names
++
++def get_head_types(pat):
++    """ Accepts a pytree Pattern Node and returns a set
++        of the pattern types which will match first. """
++
++    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
++        # NodePatterns must either have no type and no content,
++        #   or both a type and content -- so there is nothing to
++        #   descend into.  Leaf types are always returned.
++        return set([pat.type])
++
++    if isinstance(pat, pytree.NegatedPattern):
++        if pat.content:
++            return get_head_types(pat.content)
++        return set([None]) # Negated Patterns don't have a type
++
++    if isinstance(pat, pytree.WildcardPattern):
++        # Recurse on each node in content
++        r = set()
++        for p in pat.content:
++            for x in p:
++                r.update(get_head_types(x))
++        return r
++
++    raise Exception("Oh no! I don't understand pattern %s" %(pat))
++
++def get_headnode_dict(fixer_list):
++    """ Accepts a list of fixers and returns a dictionary
++        of head node type --> fixer list.  """
++    head_nodes = defaultdict(list)
++    for fixer in fixer_list:
++        if not fixer.pattern:
++            head_nodes[None].append(fixer)
++            continue
++        for t in get_head_types(fixer.pattern):
++            head_nodes[t].append(fixer)
++    return head_nodes
++
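
For example, head-type extraction on a simple leaf pattern (a sketch,
assuming the module layout in this commit):

    from refactor.refactor import get_head_types
    from refactor import pytree
    from refactor.pgen2 import token

    pat = pytree.LeafPattern(token.NAME, "has_key")
    assert get_head_types(pat) == set([token.NAME])
    # get_headnode_dict() then buckets each fixer under these types, so
    # traverse_by() only consults fixers that can match a node's type.
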
++def get_fixers_from_package(pkg_name):
++    """
++    Return the fully qualified names for fixers in the package pkg_name.
++    """
++    return [pkg_name + "." + fix_name
++            for fix_name in get_all_fix_names(pkg_name, False)]
++
++
++class FixerError(Exception):
++    """A fixer could not be loaded."""
++
++
++class RefactoringTool(object):
++
++    _default_options = {"print_function": False}
++
++    CLASS_PREFIX = "Fix" # The prefix for fixer classes
++    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
++
++    def __init__(self, fixer_names, options=None, explicit=None):
++        """Initializer.
++
++        Args:
++            fixer_names: a list of fixers to import
++            options: a dict with configuration.
++            explicit: a list of fixers to run even if they are marked explicit.
++        """
++        self.fixers = fixer_names
++        self.explicit = explicit or []
++        self.options = self._default_options.copy()
++        if options is not None:
++            self.options.update(options)
++        self.errors = []
++        self.logger = logging.getLogger("RefactoringTool")
++        self.fixer_log = []
++        self.wrote = False
++        if self.options["print_function"]:
++            del pygram.python_grammar.keywords["print"]
++        self.driver = driver.Driver(pygram.python_grammar,
++                                    convert=pytree.convert,
++                                    logger=self.logger)
++        self.pre_order, self.post_order = self.get_fixers()
++
++        self.pre_order_heads = get_headnode_dict(self.pre_order)
++        self.post_order_heads = get_headnode_dict(self.post_order)
++
++        self.files = []  # List of files that were or should be modified
++
++    def get_fixers(self):
++        """Inspects the options to load the requested patterns and handlers.
++
++        Returns:
++          (pre_order, post_order), where pre_order is the list of fixers that
++          want a pre-order AST traversal, and post_order is the list that want
++          post-order traversal.
++        """
++        pre_order_fixers = []
++        post_order_fixers = []
++        for fix_mod_path in self.fixers:
++            mod = __import__(fix_mod_path, {}, {}, ["*"])
++            fix_name = fix_mod_path.rsplit(".", 1)[-1]
++            if fix_name.startswith(self.FILE_PREFIX):
++                fix_name = fix_name[len(self.FILE_PREFIX):]
++            parts = fix_name.split("_")
++            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
++            try:
++                fix_class = getattr(mod, class_name)
++            except AttributeError:
++                raise FixerError("Can't find %s.%s" % (fix_name, class_name))
++            fixer = fix_class(self.options, self.fixer_log)
++            if fixer.explicit and self.explicit is not True and \
++                    fix_mod_path not in self.explicit:
++                self.log_message("Skipping implicit fixer: %s", fix_name)
++                continue
++
++            self.log_debug("Adding transformation: %s", fix_name)
++            if fixer.order == "pre":
++                pre_order_fixers.append(fixer)
++            elif fixer.order == "post":
++                post_order_fixers.append(fixer)
++            else:
++                raise FixerError("Illegal fixer order: %r" % fixer.order)
++
++        key_func = operator.attrgetter("run_order")
++        pre_order_fixers.sort(key=key_func)
++        post_order_fixers.sort(key=key_func)
++        return (pre_order_fixers, post_order_fixers)
++
++    def log_error(self, msg, *args, **kwds):
++        """Called when an error occurs."""
++        raise
++
++    def log_message(self, msg, *args):
++        """Hook to log a message."""
++        if args:
++            msg = msg % args
++        self.logger.info(msg)
++
++    def log_debug(self, msg, *args):
++        if args:
++            msg = msg % args
++        self.logger.debug(msg)
++
++    def print_output(self, lines):
++        """Called with lines of output to give to the user."""
++        pass
++
++    def refactor(self, items, write=False, doctests_only=False):
++        """Refactor a list of files and directories."""
++        for dir_or_file in items:
++            if os.path.isdir(dir_or_file):
++                self.refactor_dir(dir_or_file, write, doctests_only)
++            else:
++                self.refactor_file(dir_or_file, write, doctests_only)
++
++    def refactor_dir(self, dir_name, write=False, doctests_only=False):
++        """Descends down a directory and refactor every Python file found.
++
++        Python files are assumed to have a .py extension.
++
++        Files and subdirectories starting with '.' are skipped.
++        """
++        for dirpath, dirnames, filenames in os.walk(dir_name):
++            self.log_debug("Descending into %s", dirpath)
++            dirnames.sort()
++            filenames.sort()
++            for name in filenames:
++                if not name.startswith(".") and name.endswith("py"):
++                    fullname = os.path.join(dirpath, name)
++                    self.refactor_file(fullname, write, doctests_only)
++            # Modify dirnames in-place to remove subdirs with leading dots
++            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
++
++    def refactor_file(self, filename, write=False, doctests_only=False):
++        """Refactors a file."""
++        try:
++            f = open(filename)
++        except IOError, err:
++            self.log_error("Can't open %s: %s", filename, err)
++            return
++        try:
++            input = f.read() + "\n" # Silence certain parse errors
++        finally:
++            f.close()
++        if doctests_only:
++            self.log_debug("Refactoring doctests in %s", filename)
++            output = self.refactor_docstring(input, filename)
++            if output != input:
++                self.processed_file(output, filename, input, write=write)
++            else:
++                self.log_debug("No doctest changes in %s", filename)
++        else:
++            tree = self.refactor_string(input, filename)
++            if tree and tree.was_changed:
++                # The [:-1] is to take off the \n we added earlier
++                self.processed_file(str(tree)[:-1], filename, write=write)
++            else:
++                self.log_debug("No changes in %s", filename)
++
++    def refactor_string(self, data, name):
++        """Refactor a given input string.
++
++        Args:
++            data: a string holding the code to be refactored.
++            name: a human-readable name for use in error/log messages.
++
++        Returns:
++            An AST corresponding to the refactored input stream; None if
++            there were errors during the parse.
++        """
++        try:
++            tree = self.driver.parse_string(data)
++        except Exception, err:
++            self.log_error("Can't parse %s: %s: %s",
++                           name, err.__class__.__name__, err)
++            return
++        self.log_debug("Refactoring %s", name)
++        self.refactor_tree(tree, name)
++        return tree
++
++    def refactor_stdin(self, doctests_only=False):
++        input = sys.stdin.read()
++        if doctests_only:
++            self.log_debug("Refactoring doctests in stdin")
++            output = self.refactor_docstring(input, "<stdin>")
++            if output != input:
++                self.processed_file(output, "<stdin>", input)
++            else:
++                self.log_debug("No doctest changes in stdin")
++        else:
++            tree = self.refactor_string(input, "<stdin>")
++            if tree and tree.was_changed:
++                self.processed_file(str(tree), "<stdin>", input)
++            else:
++                self.log_debug("No changes in stdin")
++
++    def refactor_tree(self, tree, name):
++        """Refactors a parse tree (modifying the tree in place).
++
++        Args:
++            tree: a pytree.Node instance representing the root of the tree
++                  to be refactored.
++            name: a human-readable name for this tree.
++
++        Returns:
++            True if the tree was modified, False otherwise.
++        """
++        for fixer in chain(self.pre_order, self.post_order):
++            fixer.start_tree(tree, name)
++
++        self.traverse_by(self.pre_order_heads, tree.pre_order())
++        self.traverse_by(self.post_order_heads, tree.post_order())
++
++        for fixer in chain(self.pre_order, self.post_order):
++            fixer.finish_tree(tree, name)
++        return tree.was_changed
++
++    def traverse_by(self, fixers, traversal):
++        """Traverse an AST, applying a set of fixers to each node.
++
++        This is a helper method for refactor_tree().
++
++        Args:
++            fixers: a list of fixer instances.
++            traversal: a generator that yields AST nodes.
++
++        Returns:
++            None
++        """
++        if not fixers:
++            return
++        for node in traversal:
++            for fixer in fixers[node.type] + fixers[None]:
++                results = fixer.match(node)
++                if results:
++                    new = fixer.transform(node, results)
++                    if new is not None and (new != node or
++                                            str(new) != str(node)):
++                        node.replace(new)
++                        node = new
++
++    def processed_file(self, new_text, filename, old_text=None, write=False):
++        """
++        Called when a file has been refactored, and there are changes.
++        """
++        self.files.append(filename)
++        if old_text is None:
++            try:
++                f = open(filename, "r")
++            except IOError, err:
++                self.log_error("Can't read %s: %s", filename, err)
++                return
++            try:
++                old_text = f.read()
++            finally:
++                f.close()
++        if old_text == new_text:
++            self.log_debug("No changes to %s", filename)
++            return
++        self.print_output(diff_texts(old_text, new_text, filename))
++        if write:
++            self.write_file(new_text, filename, old_text)
++        else:
++            self.log_debug("Not writing changes to %s", filename)
++
++    def write_file(self, new_text, filename, old_text):
++        """Writes a string to a file.
++
++        Note that the diff between the old and new text is shown by
++        processed_file(); this method only replaces the file's contents
++        and logs an error if the file cannot be created or written.
++        """
++        try:
++            f = open(filename, "w")
++        except os.error, err:
++            self.log_error("Can't create %s: %s", filename, err)
++            return
++        try:
++            f.write(new_text)
++        except os.error, err:
++            self.log_error("Can't write %s: %s", filename, err)
++        finally:
++            f.close()
++        self.log_debug("Wrote changes to %s", filename)
++        self.wrote = True
++
++    PS1 = ">>> "
++    PS2 = "... "
++
++    def refactor_docstring(self, input, filename):
++        """Refactors a docstring, looking for doctests.
++
++        This returns a modified version of the input string.  It looks
++        for doctests, which start with a ">>>" prompt, and may be
++        continued with "..." prompts, as long as the "..." is indented
++        the same as the ">>>".
++
++        (Unfortunately we can't use the doctest module's parser,
++        since, like most parsers, it is not geared towards preserving
++        the original source.)
++        """
++        result = []
++        block = None
++        block_lineno = None
++        indent = None
++        lineno = 0
++        for line in input.splitlines(True):
++            lineno += 1
++            if line.lstrip().startswith(self.PS1):
++                if block is not None:
++                    result.extend(self.refactor_doctest(block, block_lineno,
++                                                        indent, filename))
++                block_lineno = lineno
++                block = [line]
++                i = line.find(self.PS1)
++                indent = line[:i]
++            elif (indent is not None and
++                  (line.startswith(indent + self.PS2) or
++                   line == indent + self.PS2.rstrip() + "\n")):
++                block.append(line)
++            else:
++                if block is not None:
++                    result.extend(self.refactor_doctest(block, block_lineno,
++                                                        indent, filename))
++                block = None
++                indent = None
++                result.append(line)
++        if block is not None:
++            result.extend(self.refactor_doctest(block, block_lineno,
++                                                indent, filename))
++        return "".join(result)
++
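
For instance, given a docstring like the following, the PS1/PS2 scanning
above groups the two prompt lines into one block and hands it to
refactor_doctest(); the unprompted "2" ends the block (an illustration of
the grouping only, not of any fixer's output):

    Frobnicate things.

    >>> for k in d.keys():
    ...     print k
    2

A line starting with ">>> " opens a block, an identically indented "... "
line continues it, and any other line flushes the block and is passed
through unchanged.
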
++    def refactor_doctest(self, block, lineno, indent, filename):
++        """Refactors one doctest.
++
++        A doctest is given as a block of lines, the first of which starts
++        with ">>>" (possibly indented), while the remaining lines start
++        with "..." (identically indented).
++
++        """
++        try:
++            tree = self.parse_block(block, lineno, indent)
++        except Exception, err:
++            if self.logger.isEnabledFor(logging.DEBUG):
++                for line in block:
++                    self.log_debug("Source: %s", line.rstrip("\n"))
++            self.log_error("Can't parse docstring in %s line %s: %s: %s",
++                           filename, lineno, err.__class__.__name__, err)
++            return block
++        if self.refactor_tree(tree, filename):
++            new = str(tree).splitlines(True)
++            # Undo the adjustment of the line numbers in wrap_toks() below.
++            clipped, new = new[:lineno-1], new[lineno-1:]
++            assert clipped == ["\n"] * (lineno-1), clipped
++            if not new[-1].endswith("\n"):
++                new[-1] += "\n"
++            block = [indent + self.PS1 + new.pop(0)]
++            if new:
++                block += [indent + self.PS2 + line for line in new]
++        return block
++
++    def summarize(self):
++        if self.wrote:
++            were = "were"
++        else:
++            were = "need to be"
++        if not self.files:
++            self.log_message("No files %s modified.", were)
++        else:
++            self.log_message("Files that %s modified:", were)
++            for file in self.files:
++                self.log_message(file)
++        if self.fixer_log:
++            self.log_message("Warnings/messages while refactoring:")
++            for message in self.fixer_log:
++                self.log_message(message)
++        if self.errors:
++            if len(self.errors) == 1:
++                self.log_message("There was 1 error:")
++            else:
++                self.log_message("There were %d errors:", len(self.errors))
++            for msg, args, kwds in self.errors:
++                self.log_message(msg, *args, **kwds)
++
++    def parse_block(self, block, lineno, indent):
++        """Parses a block into a tree.
++
++        This is necessary to get correct line number / offset information
++        in the parser diagnostics and embedded into the parse tree.
++        """
++        return self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
++
++    def wrap_toks(self, block, lineno, indent):
++        """Wraps a tokenize stream to systematically modify start/end."""
++        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
++        for type, value, (line0, col0), (line1, col1), line_text in tokens:
++            line0 += lineno - 1
++            line1 += lineno - 1
++            # Don't bother updating the columns; this is too complicated
++            # since line_text would also have to be updated and it would
++            # still break for tokens spanning lines.  Let the user guess
++            # that the column numbers for doctests are relative to the
++            # end of the prompt string (PS1 or PS2).
++            yield type, value, (line0, col0), (line1, col1), line_text
++
++
++    def gen_lines(self, block, indent):
++        """Generates lines as expected by tokenize from a list of lines.
++
++        This strips the first len(indent + self.PS1) characters off each line.
++        """
++        prefix1 = indent + self.PS1
++        prefix2 = indent + self.PS2
++        prefix = prefix1
++        for line in block:
++            if line.startswith(prefix):
++                yield line[len(prefix):]
++            elif line == prefix.rstrip() + "\n":
++                yield "\n"
++            else:
++                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
++            prefix = prefix2
++        while True:
++            yield ""
++
++
++def diff_texts(a, b, filename):
++    """Return a unified diff of two strings."""
++    a = a.splitlines()
++    b = b.splitlines()
++    return difflib.unified_diff(a, b, filename, filename,
++                                "(original)", "(refactored)",
++                                lineterm="")
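
Putting it together, a minimal front end over this class might look like
the following sketch (the fixer package name is taken from this commit's
layout; print_output() is a no-op unless overridden):

    from refactor.refactor import RefactoringTool, get_fixers_from_package

    fixers = get_fixers_from_package("refactor.fixes.from2")
    rt = RefactoringTool(fixers)
    rt.refactor(["example.py"], write=False)  # dry run; diffs go to print_output()
    rt.summarize()
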
+diff -r 531f2e948299 refactor/Grammar.txt
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/Grammar.txt	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,155 @@
++# Grammar for Python
++
++# Note:  Changing the grammar specified in this file will most likely
++#        require corresponding changes in the parser module
++#        (../Modules/parsermodule.c).  If you can't make the changes to
++#        that module yourself, please co-ordinate the required changes
++#        with someone who can; ask around on python-dev for help.  Fred
++#        Drake <fdrake at acm.org> will probably be listening there.
++
++# NOTE WELL: You should also follow all the steps listed in PEP 306,
++# "How to Change Python's Grammar"
++
++# Commands for Kees Blom's railroad program
++#diagram:token NAME
++#diagram:token NUMBER
++#diagram:token STRING
++#diagram:token NEWLINE
++#diagram:token ENDMARKER
++#diagram:token INDENT
++#diagram:output\input python.bla
++#diagram:token DEDENT
++#diagram:output\textwidth 20.04cm\oddsidemargin  0.0cm\evensidemargin 0.0cm
++#diagram:rules
++
++# Start symbols for the grammar:
++#	file_input is a module or sequence of commands read from an input file;
++#	single_input is a single interactive statement;
++#	eval_input is the input for the eval() and input() functions.
++# NB: compound_stmt in single_input is followed by extra NEWLINE!
++file_input: (NEWLINE | stmt)* ENDMARKER
++single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
++eval_input: testlist NEWLINE* ENDMARKER
++
++decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++decorators: decorator+
++decorated: decorators (classdef | funcdef)
++funcdef: 'def' NAME parameters ['->' test] ':' suite
++parameters: '(' [typedargslist] ')'
++typedargslist: ((tfpdef ['=' test] ',')*
++                ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname)
++                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
++tname: NAME [':' test]
++tfpdef: tname | '(' tfplist ')'
++tfplist: tfpdef (',' tfpdef)* [',']
++varargslist: ((vfpdef ['=' test] ',')*
++              ('*' [vname] (',' vname ['=' test])*  [',' '**' vname] | '**' vname)
++              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
++vname: NAME
++vfpdef: vname | '(' vfplist ')'
++vfplist: vfpdef (',' vfpdef)* [',']
++
++stmt: simple_stmt | compound_stmt
++simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
++small_stmt: (expr_stmt | print_stmt  | del_stmt | pass_stmt | flow_stmt |
++             import_stmt | global_stmt | exec_stmt | assert_stmt)
++expr_stmt: testlist (augassign (yield_expr|testlist) |
++                     ('=' (yield_expr|testlist))*)
++augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
++            '<<=' | '>>=' | '**=' | '//=')
++# For normal assignments, additional restrictions enforced by the interpreter
++print_stmt: 'print' ( [ test (',' test)* [','] ] |
++                      '>>' test [ (',' test)+ [','] ] )
++del_stmt: 'del' exprlist
++pass_stmt: 'pass'
++flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
++break_stmt: 'break'
++continue_stmt: 'continue'
++return_stmt: 'return' [testlist]
++yield_stmt: yield_expr
++raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
++import_stmt: import_name | import_from
++import_name: 'import' dotted_as_names
++import_from: ('from' ('.'* dotted_name | '.'+)
++              'import' ('*' | '(' import_as_names ')' | import_as_names))
++import_as_name: NAME ['as' NAME]
++dotted_as_name: dotted_name ['as' NAME]
++import_as_names: import_as_name (',' import_as_name)* [',']
++dotted_as_names: dotted_as_name (',' dotted_as_name)*
++dotted_name: NAME ('.' NAME)*
++global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
++exec_stmt: 'exec' expr ['in' test [',' test]]
++assert_stmt: 'assert' test [',' test]
++
++compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
++if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
++while_stmt: 'while' test ':' suite ['else' ':' suite]
++for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
++try_stmt: ('try' ':' suite
++           ((except_clause ':' suite)+
++	    ['else' ':' suite]
++	    ['finally' ':' suite] |
++	   'finally' ':' suite))
++with_stmt: 'with' test [ with_var ] ':' suite
++with_var: 'as' expr
++# NB compile.c makes sure that the default except clause is last
++except_clause: 'except' [test [(',' | 'as') test]]
++suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
++
++# Backward compatibility cruft to support:
++# [ x for x in lambda: True, lambda: False if x() ]
++# even while also allowing:
++# lambda x: 5 if x else 2
++# (But not a mix of the two)
++testlist_safe: old_test [(',' old_test)+ [',']]
++old_test: or_test | old_lambdef
++old_lambdef: 'lambda' [varargslist] ':' old_test
++
++test: or_test ['if' or_test 'else' test] | lambdef
++or_test: and_test ('or' and_test)*
++and_test: not_test ('and' not_test)*
++not_test: 'not' not_test | comparison
++comparison: expr (comp_op expr)*
++comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
++expr: xor_expr ('|' xor_expr)*
++xor_expr: and_expr ('^' and_expr)*
++and_expr: shift_expr ('&' shift_expr)*
++shift_expr: arith_expr (('<<'|'>>') arith_expr)*
++arith_expr: term (('+'|'-') term)*
++term: factor (('*'|'/'|'%'|'//') factor)*
++factor: ('+'|'-'|'~') factor | power
++power: atom trailer* ['**' factor]
++atom: ('(' [yield_expr|testlist_gexp] ')' |
++       '[' [listmaker] ']' |
++       '{' [dictsetmaker] '}' |
++       '`' testlist1 '`' |
++       NAME | NUMBER | STRING+ | '.' '.' '.')
++listmaker: test ( comp_for | (',' test)* [','] )
++testlist_gexp: test ( comp_for | (',' test)* [','] )
++lambdef: 'lambda' [varargslist] ':' test
++trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
++subscriptlist: subscript (',' subscript)* [',']
++subscript: test | [test] ':' [test] [sliceop]
++sliceop: ':' [test]
++exprlist: expr (',' expr)* [',']
++testlist: test (',' test)* [',']
++dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
++                (test (comp_for | (',' test)* [','])) )
++
++classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
++
++arglist: (argument ',')* (argument [',']
++                         |'*' test (',' argument)* [',' '**' test] 
++                         |'**' test)
++argument: test [comp_for] | test '=' test  # Really [keyword '='] test
++
++comp_iter: comp_for | comp_if
++comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
++comp_if: 'if' old_test [comp_iter]
++
++testlist1: test (',' test)*
++
++# not used in grammar, but may appear in "node" passed from Parser to Compiler
++encoding_decl: NAME
++
++yield_expr: 'yield' [testlist]
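
Note that this grammar deliberately accepts both dialects -- the 2.x
print_stmt alongside the 3.x '->' annotation syntax -- so one parser can
read either; a sketch:

    from refactor import pygram, pytree
    from refactor.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    d.parse_string("print >> sys.stderr, 'hi'\n")   # 2.x print_stmt
    d.parse_string("def f(x) -> int: return x\n")   # 3.x annotation
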
+diff -r 531f2e948299 refactor/PatternGrammar.txt
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/PatternGrammar.txt	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,28 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++# A grammar to describe tree matching patterns.
++# Not shown here:
++# - 'TOKEN' stands for any token (leaf node)
++# - 'any' stands for any node (leaf or interior)
++# With 'any' we can still specify the sub-structure.
++
++# The start symbol is 'Matcher'.
++
++Matcher: Alternatives ENDMARKER
++
++Alternatives: Alternative ('|' Alternative)*
++
++Alternative: (Unit | NegatedUnit)+
++
++Unit: [NAME '='] ( STRING [Repeater]
++                 | NAME [Details] [Repeater]
++                 | '(' Alternatives ')' [Repeater]
++                 | '[' Alternatives ']'
++		 )
++
++NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
++
++Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
++
++Details: '<' Alternatives '>'
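
A few illustrative strings this grammar accepts, mapped to its productions:

    any                        # a bare NAME Unit
    'raise' [name=any]         # a STRING, then an optional bracketed Unit
    (not ',' any)*             # a NegatedUnit inside a repeated group
    simple_stmt< f=any any* >  # a NAME with Details and a named binding
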
+diff -r 531f2e948299 refactor/__init__.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,8 @@
++from . import fixer_base
++from . import fixer_util
++from . import main
++from . import patcomp
++from . import pgen2
++from . import pygram
++from . import pytree
++from . import refactor
+diff -r 531f2e948299 refactor/fixer_base.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixer_base.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,178 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Base class for fixers (optional, but recommended)."""
++
++# Python imports
++import logging
++import itertools
++
++# Local imports
++from .patcomp import PatternCompiler
++from . import pygram
++from .fixer_util import does_tree_import
++
++class BaseFix(object):
++
++    """Optional base class for fixers.
++
++    The subclass name must be FixFooBar where FooBar is the result of
++    removing underscores and capitalizing the words of the fix name.
++    For example, the class name for a fixer named 'has_key' should be
++    FixHasKey.
++    """
++
++    PATTERN = None  # Most subclasses should override with a string literal
++    pattern = None  # Compiled pattern, set by compile_pattern()
++    options = None  # Options object passed to initializer
++    filename = None # The filename (set by set_filename)
++    logger = None   # A logger (set by set_filename)
++    numbers = itertools.count(1) # For new_name()
++    used_names = set() # A set of all used NAMEs
++    order = "post" # Does the fixer prefer pre- or post-order traversal
++    explicit = False # Is this ignored by refactor.py -f all?
++    run_order = 5   # Fixers will be sorted by run order before execution
++                    # Lower numbers will be run first.
++
++    # Shortcut for access to Python grammar symbols
++    syms = pygram.python_symbols
++
++    def __init__(self, options, log):
++        """Initializer.  Subclass may override.
++
++        Args:
++            options: a dict containing the options passed to RefactoringTool
++            that could be used to customize the fixer through the command line.
++            log: a list to append warnings and other messages to.
++        """
++        self.options = options
++        self.log = log
++        self.compile_pattern()
++
++    def compile_pattern(self):
++        """Compiles self.PATTERN into self.pattern.
++
++        Subclass may override if it doesn't want to use
++        self.{pattern,PATTERN} in .match().
++        """
++        if self.PATTERN is not None:
++            self.pattern = PatternCompiler().compile_pattern(self.PATTERN)
++
++    def set_filename(self, filename):
++        """Set the filename, and a logger derived from it.
++
++        The main refactoring tool should call this.
++        """
++        self.filename = filename
++        self.logger = logging.getLogger(filename)
++
++    def match(self, node):
++        """Returns match for a given parse tree node.
++
++        Should return a true or false object (not necessarily a bool).
++        It may return a non-empty dict of matching sub-nodes as
++        returned by a matching pattern.
++
++        Subclass may override.
++        """
++        results = {"node": node}
++        return self.pattern.match(node, results) and results
++
++    def transform(self, node, results):
++        """Returns the transformation for a given parse tree node.
++
++        Args:
++          node: the root of the parse tree that matched the fixer.
++          results: a dict mapping symbolic names to part of the match.
++
++        Returns:
++          None, or a node that is a modified copy of the
++          argument node.  The node argument may also be modified in-place to
++          effect the same change.
++
++        Subclass *must* override.
++        """
++        raise NotImplementedError()
++
++    def new_name(self, template="xxx_todo_changeme"):
++        """Return a string suitable for use as an identifier
++
++        The new name is guaranteed not to conflict with other identifiers.
++        """
++        name = template
++        while name in self.used_names:
++            name = template + str(self.numbers.next())
++        self.used_names.add(name)
++        return name
++
++    def log_message(self, message):
++        if self.first_log:
++            self.first_log = False
++            self.log.append("### In file %s ###" % self.filename)
++        self.log.append(message)
++
++    def cannot_convert(self, node, reason=None):
++        """Warn the user that a given chunk of code is not valid Python 3,
++        but that it cannot be converted automatically.
++
++        First argument is the top-level node for the code in question.
++        Optional second argument is why it can't be converted.
++        """
++        lineno = node.get_lineno()
++        for_output = node.clone()
++        for_output.set_prefix("")
++        msg = "Line %d: could not convert: %s"
++        self.log_message(msg % (lineno, for_output))
++        if reason:
++            self.log_message(reason)
++
++    def warning(self, node, reason):
++        """Used for warning the user about possible uncertainty in the
++        translation.
++
++        First argument is the top-level node for the code in question.
++        Second argument is the reason for the warning.
++        """
++        lineno = node.get_lineno()
++        self.log_message("Line %d: %s" % (lineno, reason))
++
++    def start_tree(self, tree, filename):
++        """Some fixers need to maintain tree-wide state.
++        This method is called once, at the start of tree fix-up.
++
++        tree - the root node of the tree to be processed.
++        filename - the name of the file the tree came from.
++        """
++        self.used_names = tree.used_names
++        self.set_filename(filename)
++        self.numbers = itertools.count(1)
++        self.first_log = True
++
++    def finish_tree(self, tree, filename):
++        """Some fixers need to maintain tree-wide state.
++        This method is called once, at the conclusion of tree fix-up.
++
++        tree - the root node of the tree to be processed.
++        filename - the name of the file the tree came from.
++        """
++        pass
++
++
++class ConditionalFix(BaseFix):
++    """ Base class for fixers which not execute if an import is found. """
++
++    # The name of the import which, if found, will cause the fixer to be skipped.
++    skip_on = None
++
++    def start_tree(self, *args):
++        super(ConditionalFix, self).start_tree(*args)
++        self._should_skip = None
++
++    def should_skip(self, node):
++        if self._should_skip is not None:
++            return self._should_skip
++        pkg = self.skip_on.split(".")
++        name = pkg[-1]
++        pkg = ".".join(pkg[:-1])
++        self._should_skip = does_tree_import(pkg, name, node)
++        return self._should_skip
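
In practice a fixer only needs to supply PATTERN and transform();
BaseFix provides the rest. A minimal sketch of a complete fixer -- the
FixSpam class and the spam -> eggs rename are invented for illustration:

    from refactor import fixer_base
    from refactor.fixer_util import Name

    class FixSpam(fixer_base.BaseFix):
        # Match any NAME leaf whose value is 'spam'.
        PATTERN = "'spam'"

        def transform(self, node, results):
            # Return a replacement leaf, preserving the whitespace
            # prefix so surrounding formatting is untouched.
            return Name("eggs", prefix=node.get_prefix())
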
+diff -r 531f2e948299 refactor/fixer_util.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixer_util.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,425 @@
++"""Utility functions, node construction macros, etc."""
++# Author: Collin Winter
++
++# Local imports
++from .pgen2 import token
++from .pytree import Leaf, Node
++from .pygram import python_symbols as syms
++from . import patcomp
++
++
++###########################################################
++### Common node-construction "macros"
++###########################################################
++
++def KeywordArg(keyword, value):
++    return Node(syms.argument,
++                [keyword, Leaf(token.EQUAL, '='), value])
++
++def LParen():
++    return Leaf(token.LPAR, "(")
++
++def RParen():
++    return Leaf(token.RPAR, ")")
++
++def Assign(target, source):
++    """Build an assignment statement"""
++    if not isinstance(target, list):
++        target = [target]
++    if not isinstance(source, list):
++        source.set_prefix(" ")
++        source = [source]
++
++    return Node(syms.atom,
++                target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
++
++def Name(name, prefix=None):
++    """Return a NAME leaf"""
++    return Leaf(token.NAME, name, prefix=prefix)
++
++def Attr(obj, attr):
++    """A node tuple for obj.attr"""
++    return [obj, Node(syms.trailer, [Dot(), attr])]
++
++def Comma():
++    """A comma leaf"""
++    return Leaf(token.COMMA, ",")
++
++def Dot():
++    """A period (.) leaf"""
++    return Leaf(token.DOT, ".")
++
++def ArgList(args, lparen=LParen(), rparen=RParen()):
++    """A parenthesised argument list, used by Call()"""
++    node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
++    if args:
++        node.insert_child(1, Node(syms.arglist, args))
++    return node
++
++def Call(func_name, args=None, prefix=None):
++    """A function call"""
++    node = Node(syms.power, [func_name, ArgList(args)])
++    if prefix is not None:
++        node.set_prefix(prefix)
++    return node
++
++def Newline():
++    """A newline literal"""
++    return Leaf(token.NEWLINE, "\n")
++
++def BlankLine():
++    """A blank line"""
++    return Leaf(token.NEWLINE, "")
++
++def Number(n, prefix=None):
++    return Leaf(token.NUMBER, n, prefix=prefix)
++
++def Subscript(index_node):
++    """A numeric or string subscript"""
++    return Node(syms.trailer, [Leaf(token.LBRACE, '['),
++                               index_node,
++                               Leaf(token.RBRACE, ']')])
++
++def String(string, prefix=None):
++    """A string leaf"""
++    return Leaf(token.STRING, string, prefix=prefix)
++
++def ListComp(xp, fp, it, test=None):
++    """A list comprehension of the form [xp for fp in it if test].
++
++    If test is None, the "if test" part is omitted.
++    """
++    xp.set_prefix("")
++    fp.set_prefix(" ")
++    it.set_prefix(" ")
++    for_leaf = Leaf(token.NAME, "for")
++    for_leaf.set_prefix(" ")
++    in_leaf = Leaf(token.NAME, "in")
++    in_leaf.set_prefix(" ")
++    inner_args = [for_leaf, fp, in_leaf, it]
++    if test:
++        test.set_prefix(" ")
++        if_leaf = Leaf(token.NAME, "if")
++        if_leaf.set_prefix(" ")
++        inner_args.append(Node(syms.comp_if, [if_leaf, test]))
++    inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
++    return Node(syms.atom,
++                       [Leaf(token.LBRACE, "["),
++                        inner,
++                        Leaf(token.RBRACE, "]")])
++
++def FromImport(package_name, name_leafs):
++    """ Return an import statement in the form:
++        from package import name_leafs"""
++    # XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
++    #assert package_name == '.' or '.' not in package_name, "FromImport has "\
++    #       "not been tested with dotted package names -- use at your own "\
++    #       "peril!"
++
++    for leaf in name_leafs:
++        # Pull the leaves out of their old tree
++        leaf.remove()
++
++    children = [Leaf(token.NAME, 'from'),
++                Leaf(token.NAME, package_name, prefix=" "),
++                Leaf(token.NAME, 'import', prefix=" "),
++                Node(syms.import_as_names, name_leafs)]
++    imp = Node(syms.import_from, children)
++    return imp
++
++
++###########################################################
++### Determine whether a node represents a given literal
++###########################################################
++
++def is_tuple(node):
++    """Does the node represent a tuple literal?"""
++    if isinstance(node, Node) and node.children == [LParen(), RParen()]:
++        return True
++    return (isinstance(node, Node)
++            and len(node.children) == 3
++            and isinstance(node.children[0], Leaf)
++            and isinstance(node.children[1], Node)
++            and isinstance(node.children[2], Leaf)
++            and node.children[0].value == "("
++            and node.children[2].value == ")")
++
++def is_list(node):
++    """Does the node represent a list literal?"""
++    return (isinstance(node, Node)
++            and len(node.children) > 1
++            and isinstance(node.children[0], Leaf)
++            and isinstance(node.children[-1], Leaf)
++            and node.children[0].value == "["
++            and node.children[-1].value == "]")
++
++
++###########################################################
++### Misc
++###########################################################
++
++def parenthesize(node):
++    return Node(syms.atom, [LParen(), node, RParen()])
++
++
++consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
++                       "min", "max"])
++
++def attr_chain(obj, attr):
++    """Follow an attribute chain.
++
++    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
++    use this to iterate over all objects in the chain. Iteration is
++    terminated when getattr(x, attr) is None.
++
++    Args:
++        obj: the starting object
++        attr: the name of the chaining attribute
++
++    Yields:
++        Each successive object in the chain.
++    """
++    next = getattr(obj, attr)
++    while next:
++        yield next
++        next = getattr(next, attr)
++
++p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
++        | comp_for< 'for' any 'in' node=any any* >
++     """
++p1 = """
++power<
++    ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
++      'any' | 'all' | (any* trailer< '.' 'join' >) )
++    trailer< '(' node=any ')' >
++    any*
++>
++"""
++p2 = """
++power<
++    'sorted'
++    trailer< '(' arglist<node=any any*> ')' >
++    any*
++>
++"""
++pats_built = False
++def in_special_context(node):
++    """ Returns true if node is in an environment where all that is required
++        of it is being iterable (i.e., it doesn't matter if it returns a list
++        or an iterator).
++        See test_map_nochange in test_fixers.py for some examples and tests.
++        """
++    global p0, p1, p2, pats_built
++    if not pats_built:
++        p1 = patcomp.compile_pattern(p1)
++        p0 = patcomp.compile_pattern(p0)
++        p2 = patcomp.compile_pattern(p2)
++        pats_built = True
++    patterns = [p0, p1, p2]
++    for pattern, parent in zip(patterns, attr_chain(node, "parent")):
++        results = {}
++        if pattern.match(parent, results) and results["node"] is node:
++            return True
++    return False
++
++def is_probably_builtin(node):
++    """
++    Check that something isn't an attribute or function name etc.
++    """
++    prev = node.prev_sibling
++    if prev is not None and prev.type == token.DOT:
++        # Attribute lookup.
++        return False
++    parent = node.parent
++    if parent.type in (syms.funcdef, syms.classdef):
++        return False
++    if parent.type == syms.expr_stmt and parent.children[0] is node:
++        # Assignment.
++        return False
++    if parent.type == syms.parameters or \
++            (parent.type == syms.typedargslist and (
++            (prev is not None and prev.type == token.COMMA) or
++            parent.children[0] is node
++            )):
++        # The name of an argument.
++        return False
++    return True
++
++###########################################################
++### The following functions are used to find bindings in a suite
++###########################################################
++
++def make_suite(node):
++    if node.type == syms.suite:
++        return node
++    node = node.clone()
++    parent, node.parent = node.parent, None
++    suite = Node(syms.suite, [node])
++    suite.parent = parent
++    return suite
++
++def find_root(node):
++    """Find the top level namespace."""
++    # Scamper up to the top level namespace
++    while node.type != syms.file_input:
++        assert node.parent, "Tree is insane! root found before "\
++                           "file_input node was found."
++        node = node.parent
++    return node
++
++def does_tree_import(package, name, node):
++    """ Returns true if name is imported from package at the
++        top level of the tree which node belongs to.
++        To cover the case of an import like 'import foo', use
++        None for the package and 'foo' for the name. """
++    binding = find_binding(name, find_root(node), package)
++    return bool(binding)
++
++def is_import(node):
++    """Returns true if the node is an import statement."""
++    return node.type in (syms.import_name, syms.import_from)
++
++def touch_import(package, name, node):
++    """ Works like `does_tree_import` but adds an import statement
++        if it was not imported. """
++    def is_import_stmt(node):
++        return node.type == syms.simple_stmt and node.children and \
++               is_import(node.children[0])
++
++    root = find_root(node)
++
++    if does_tree_import(package, name, root):
++        return
++
++    add_newline_before = False
++
++    # figure out where to insert the new import.  First try to find
++    # the first import and then skip to the last one.
++    insert_pos = offset = 0
++    for idx, node in enumerate(root.children):
++        if not is_import_stmt(node):
++            continue
++        for offset, node2 in enumerate(root.children[idx:]):
++            if not is_import_stmt(node2):
++                break
++        insert_pos = idx + offset
++        break
++
++    # if there are no imports where we can insert, find the docstring.
++    # if that also fails, we stick to the beginning of the file
++    if insert_pos == 0:
++        for idx, node in enumerate(root.children):
++            if node.type == syms.simple_stmt and node.children and \
++               node.children[0].type == token.STRING:
++                insert_pos = idx + 1
++                add_newline_before = True
++                break
++
++    if package is None:
++        import_ = Node(syms.import_name, [
++            Leaf(token.NAME, 'import'),
++            Leaf(token.NAME, name, prefix=' ')
++        ])
++    else:
++        import_ = FromImport(package, [Leaf(token.NAME, name, prefix=' ')])
++
++    children = [import_, Newline()]
++    if add_newline_before:
++        children.insert(0, Newline())
++    root.insert_child(insert_pos, Node(syms.simple_stmt, children))
++
++
++_def_syms = set([syms.classdef, syms.funcdef])
++def find_binding(name, node, package=None):
++    """ Returns the node which binds variable name, otherwise None.
++        If optional argument package is supplied, only imports will
++        be returned.
++        See test cases for examples."""
++    for child in node.children:
++        ret = None
++        if child.type == syms.for_stmt:
++            if _find(name, child.children[1]):
++                return child
++            n = find_binding(name, make_suite(child.children[-1]), package)
++            if n: ret = n
++        elif child.type in (syms.if_stmt, syms.while_stmt):
++            n = find_binding(name, make_suite(child.children[-1]), package)
++            if n: ret = n
++        elif child.type == syms.try_stmt:
++            n = find_binding(name, make_suite(child.children[2]), package)
++            if n:
++                ret = n
++            else:
++                for i, kid in enumerate(child.children[3:]):
++                    if kid.type == token.COLON and kid.value == ":":
++                        # i+3 is the colon, i+4 is the suite
++                        n = find_binding(name, make_suite(child.children[i+4]), package)
++                        if n: ret = n
++        elif child.type in _def_syms and child.children[1].value == name:
++            ret = child
++        elif _is_import_binding(child, name, package):
++            ret = child
++        elif child.type == syms.simple_stmt:
++            ret = find_binding(name, child, package)
++        elif child.type == syms.expr_stmt:
++            if _find(name, child.children[0]):
++                ret = child
++
++        if ret:
++            if not package:
++                return ret
++            if is_import(ret):
++                return ret
++    return None
++
++_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
++def _find(name, node):
++    nodes = [node]
++    while nodes:
++        node = nodes.pop()
++        if node.type > 256 and node.type not in _block_syms:
++            nodes.extend(node.children)
++        elif node.type == token.NAME and node.value == name:
++            return node
++    return None
++
++def _is_import_binding(node, name, package=None):
++    """ Will reuturn node if node will import name, or node
++        will import * from package.  None is returned otherwise.
++        See test cases for examples. """
++
++    if node.type == syms.import_name and not package:
++        imp = node.children[1]
++        if imp.type == syms.dotted_as_names:
++            for child in imp.children:
++                if child.type == syms.dotted_as_name:
++                    if child.children[2].value == name:
++                        return node
++                elif child.type == token.NAME and child.value == name:
++                    return node
++        elif imp.type == syms.dotted_as_name:
++            last = imp.children[-1]
++            if last.type == token.NAME and last.value == name:
++                return node
++        elif imp.type == token.NAME and imp.value == name:
++            return node
++    elif node.type == syms.import_from:
++        # unicode(...) is used to make life easier here, because
++        # from a.b import parses to ['import', ['a', '.', 'b'], ...]
++        if package and unicode(node.children[1]).strip() != package:
++            return None
++        n = node.children[3]
++        if package and _find('as', n):
++            # See test_from_import_as for explanation
++            return None
++        elif n.type == syms.import_as_names and _find(name, n):
++            return node
++        elif n.type == syms.import_as_name:
++            child = n.children[2]
++            if child.type == token.NAME and child.value == name:
++                return node
++        elif n.type == token.NAME and n.value == name:
++            return node
++        elif package and n.type == token.STAR:
++            return node
++    return None
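
The construction macros above compose directly into subtrees whose
str() is valid source. A short sketch (the hasattr() call is an
arbitrary example):

    from refactor.fixer_util import Call, Comma, Name, String

    # Build the subtree for: hasattr(obj, '__call__')
    args = [Name("obj"), Comma(), String("'__call__'", prefix=" ")]
    node = Call(Name("hasattr"), args)
    print node  # -> hasattr(obj, '__call__')
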
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/__init__.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/__init__.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,1 @@
++# Dummy file to make this directory a package.
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_apply.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_apply.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,58 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for apply().
++
++This converts apply(func, v, k) into (func)(*v, **k)."""
++
++# Local imports
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Call, Comma, parenthesize
++
++class FixApply(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'apply'
++        trailer<
++            '('
++            arglist<
++                (not argument<NAME '=' any>) func=any ','
++                (not argument<NAME '=' any>) args=any [','
++                (not argument<NAME '=' any>) kwds=any] [',']
++            >
++            ')'
++        >
++    >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++        assert results
++        func = results["func"]
++        args = results["args"]
++        kwds = results.get("kwds")
++        prefix = node.get_prefix()
++        func = func.clone()
++        if (func.type not in (token.NAME, syms.atom) and
++            (func.type != syms.power or
++             func.children[-2].type == token.DOUBLESTAR)):
++            # Need to parenthesize
++            func = parenthesize(func)
++        func.set_prefix("")
++        args = args.clone()
++        args.set_prefix("")
++        if kwds is not None:
++            kwds = kwds.clone()
++            kwds.set_prefix("")
++        l_newargs = [pytree.Leaf(token.STAR, "*"), args]
++        if kwds is not None:
++            l_newargs.extend([Comma(),
++                              pytree.Leaf(token.DOUBLESTAR, "**"),
++                              kwds])
++            l_newargs[-2].set_prefix(" ") # that's the ** token
++        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
++        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
++        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
++        return Call(func, l_newargs, prefix=prefix)
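
For reference, the transformation above can be exercised end to end with the
stock lib2to3 driver. A minimal sketch, assuming this refactor package keeps
the same RefactoringTool API and fixer-module layout as lib2to3:

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_apply"])
    # refactor_string() parses the source, runs the fixers, returns the tree
    tree = rt.refactor_string("apply(f, (1, 2), {'x': 3})\n", "<example>")
    print(str(tree))  # -> f(*(1, 2), **{'x': 3})

The same driver works for every fixer below by substituting the module name.
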
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_basestring.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_basestring.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,13 @@
++"""Fixer for basestring -> str."""
++# Author: Christian Heimes
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++class FixBasestring(fixer_base.BaseFix):
++
++    PATTERN = "'basestring'"
++
++    def transform(self, node, results):
++        return Name("str", prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_buffer.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_buffer.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,21 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes buffer(...) into memoryview(...)."""
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++
++class FixBuffer(fixer_base.BaseFix):
++
++    explicit = True # The user must ask for this fixer
++
++    PATTERN = """
++              power< name='buffer' trailer< '(' [any] ')' > >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("memoryview", prefix=name.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_callable.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_callable.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,31 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for callable().
++
++This converts callable(obj) into hasattr(obj, '__call__')."""
++
++# Local imports
++from .. import pytree
++from .. import fixer_base
++from ..fixer_util import Call, Name, String
++
++class FixCallable(fixer_base.BaseFix):
++
++    # Ignore callable(*args) or use of keywords.
++    # Either could be a hint that the builtin callable() is not being used.
++    PATTERN = """
++    power< 'callable'
++           trailer< lpar='('
++                    ( not(arglist | argument<any '=' any>) func=any
++                      | func=arglist<(not argument<any '=' any>) any ','> )
++                    rpar=')' >
++           after=any*
++    >
++    """
++
++    def transform(self, node, results):
++        func = results["func"]
++
++        args = [func.clone(), String(', '), String("'__call__'")]
++        return Call(Name("hasattr"), args, prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_dict.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_dict.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,99 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for dict methods.
++
++d.keys() -> list(d.keys())
++d.items() -> list(d.items())
++d.values() -> list(d.values())
++
++d.iterkeys() -> iter(d.keys())
++d.iteritems() -> iter(d.items())
++d.itervalues() -> iter(d.values())
++
++Except in certain very specific contexts: the iter() can be dropped
++when the context is list(), sorted(), iter() or for...in; the list()
++can be dropped when the context is list() or sorted() (but not iter()
++or for...in!). Special contexts that apply to both: list(), sorted(), tuple(),
++set(), any(), all(), sum().
++
++Note: iter(d.keys()) could be written as iter(d) but since the
++original d.iterkeys() was also redundant we don't fix this.  And there
++are (rare) contexts where it makes a difference (e.g. when passing it
++as an argument to a function that introspects the argument).
++"""
++
++# Local imports
++from .. import pytree
++from .. import patcomp
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
++from .. import fixer_util
++
++
++iter_exempt = fixer_util.consuming_calls | set(["iter"])
++
++
++class FixDict(fixer_base.BaseFix):
++    PATTERN = """
++    power< head=any+
++         trailer< '.' method=('keys'|'items'|'values'|
++                              'iterkeys'|'iteritems'|'itervalues') >
++         parens=trailer< '(' ')' >
++         tail=any*
++    >
++    """
++
++    def transform(self, node, results):
++        head = results["head"]
++        method = results["method"][0] # Extract node for method name
++        tail = results["tail"]
++        syms = self.syms
++        method_name = method.value
++        isiter = method_name.startswith("iter")
++        if isiter:
++            method_name = method_name[4:]
++        assert method_name in ("keys", "items", "values"), repr(method)
++        head = [n.clone() for n in head]
++        tail = [n.clone() for n in tail]
++        special = not tail and self.in_special_context(node, isiter)
++        args = head + [pytree.Node(syms.trailer,
++                                   [Dot(),
++                                    Name(method_name,
++                                         prefix=method.get_prefix())]),
++                       results["parens"].clone()]
++        new = pytree.Node(syms.power, args)
++        if not special:
++            new.set_prefix("")
++            new = Call(Name(isiter and "iter" or "list"), [new])
++        if tail:
++            new = pytree.Node(syms.power, [new] + tail)
++        new.set_prefix(node.get_prefix())
++        return new
++
++    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
++    p1 = patcomp.compile_pattern(P1)
++
++    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
++            | comp_for< 'for' any 'in' node=any any* >
++         """
++    p2 = patcomp.compile_pattern(P2)
++
++    def in_special_context(self, node, isiter):
++        if node.parent is None:
++            return False
++        results = {}
++        if (node.parent.parent is not None and
++               self.p1.match(node.parent.parent, results) and
++               results["node"] is node):
++            if isiter:
++                # iter(d.iterkeys()) -> iter(d.keys()), etc.
++                return results["func"].value in iter_exempt
++            else:
++                # list(d.keys()) -> list(d.keys()), etc.
++                return results["func"].value in fixer_util.consuming_calls
++        if not isiter:
++            return False
++        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
++        return self.p2.match(node.parent, results) and results["node"] is node
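
To illustrate the context rules described in the docstring above (a sketch
based only on the documented behavior): a bare call gains a list() wrapper,
while a call already consumed by a for loop or a consuming call stays bare:

    # Before (Python 2):
    ks = d.keys()
    for k in d.iterkeys():
        pass

    # After the fixer runs:
    ks = list(d.keys())
    for k in d.keys():
        pass
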
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_except.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_except.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,92 @@
++"""Fixer for except statements with named exceptions.
++
++The following cases will be converted:
++
++- "except E, T:" where T is a name:
++
++    except E as T:
++
++- "except E, T:" where T is not a name, tuple or list:
++
++        except E as t:
++            T = t
++
++    This is done because the target of an "except" clause must be a
++    name.
++
++- "except E, T:" where T is a tuple or list literal:
++
++        except E as t:
++            T = t.args
++"""
++# Author: Collin Winter
++
++# Local imports
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
++
++def find_excepts(nodes):
++    for i, n in enumerate(nodes):
++        if n.type == syms.except_clause:
++            if n.children[0].value == 'except':
++                yield (n, nodes[i+2])
++
++class FixExcept(fixer_base.BaseFix):
++
++    PATTERN = """
++    try_stmt< 'try' ':' suite
++                  cleanup=(except_clause ':' suite)+
++                  tail=(['except' ':' suite]
++                        ['else' ':' suite]
++                        ['finally' ':' suite]) >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++
++        tail = [n.clone() for n in results["tail"]]
++
++        try_cleanup = [ch.clone() for ch in results["cleanup"]]
++        for except_clause, e_suite in find_excepts(try_cleanup):
++            if len(except_clause.children) == 4:
++                (E, comma, N) = except_clause.children[1:4]
++                comma.replace(Name("as", prefix=" "))
++
++                if N.type != token.NAME:
++                    # Generate a new N for the except clause
++                    new_N = Name(self.new_name(), prefix=" ")
++                    target = N.clone()
++                    target.set_prefix("")
++                    N.replace(new_N)
++                    new_N = new_N.clone()
++
++                    # Insert "old_N = new_N" as the first statement in
++                    #  the except body. This loop skips leading whitespace
++                    #  and indents
++                    #TODO(cwinter) suite-cleanup
++                    suite_stmts = e_suite.children
++                    for i, stmt in enumerate(suite_stmts):
++                        if isinstance(stmt, pytree.Node):
++                            break
++
++                    # The assignment is different if old_N is a tuple or list
++                    # In that case, the assignment is old_N = new_N.args
++                    if is_tuple(N) or is_list(N):
++                        assign = Assign(target, Attr(new_N, Name('args')))
++                    else:
++                        assign = Assign(target, new_N)
++
++                    #TODO(cwinter) stopgap until children becomes a smart list
++                    for child in reversed(suite_stmts[:i]):
++                        e_suite.insert_child(0, child)
++                    e_suite.insert_child(i, assign)
++                elif N.get_prefix() == "":
++                    # No space after a comma is legal; no space after "as",
++                    # not so much.
++                    N.set_prefix(" ")
++
++        #TODO(cwinter) fix this when children becomes a smart list
++        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
++        return pytree.Node(node.type, children)
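
A short worked example of the docstring's tuple case; the temporary name is
generated by new_name() at runtime, so 't' here is only illustrative:

    # Before (Python 2):
    try:
        f()
    except OSError, (errno, msg):
        handle(errno, msg)

    # After the fixer runs:
    try:
        f()
    except OSError as t:
        (errno, msg) = t.args
        handle(errno, msg)
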
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_exec.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_exec.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,39 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for exec.
++
++This converts usages of the exec statement into calls to a built-in
++exec() function.
++
++exec code in ns1, ns2 -> exec(code, ns1, ns2)
++"""
++
++# Local imports
++from .. import pytree
++from .. import fixer_base
++from ..fixer_util import Comma, Name, Call
++
++
++class FixExec(fixer_base.BaseFix):
++
++    PATTERN = """
++    exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
++    |
++    exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
++    """
++
++    def transform(self, node, results):
++        assert results
++        syms = self.syms
++        a = results["a"]
++        b = results.get("b")
++        c = results.get("c")
++        args = [a.clone()]
++        args[0].set_prefix("")
++        if b is not None:
++            args.extend([Comma(), b.clone()])
++        if c is not None:
++            args.extend([Comma(), c.clone()])
++
++        return Call(Name("exec"), args, prefix=node.get_prefix())
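
Both pattern alternatives in practice, straight from the docstring's mapping:

    # Before (Python 2):
    exec code in ns1, ns2
    exec "x = 1"

    # After the fixer runs:
    exec(code, ns1, ns2)
    exec("x = 1")
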
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_execfile.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_execfile.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,51 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for execfile.
++
++This converts usages of the execfile function into calls to the built-in
++exec() function.
++"""
++
++from .. import fixer_base
++from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
++                          ArgList, String, syms)
++
++
++class FixExecfile(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
++    |
++    power< 'execfile' trailer< '(' filename=any ')' > >
++    """
++
++    def transform(self, node, results):
++        assert results
++        filename = results["filename"]
++        globals = results.get("globals")
++        locals = results.get("locals")
++
++        # Copy over the prefix from the right parenthesis of the execfile
++        # call.
++        execfile_paren = node.children[-1].children[-1].clone()
++        # Construct open().read().
++        open_args = ArgList([filename.clone()], rparen=execfile_paren)
++        open_call = Node(syms.power, [Name("open"), open_args])
++        read = [Node(syms.trailer, [Dot(), Name('read')]),
++                Node(syms.trailer, [LParen(), RParen()])]
++        open_expr = [open_call] + read
++        # Wrap the open call in a compile call. This is so the filename will be
++        # preserved in the execed code.
++        filename_arg = filename.clone()
++        filename_arg.set_prefix(" ")
++        exec_str = String("'exec'", " ")
++        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
++        compile_call = Call(Name("compile"), compile_args, "")
++        # Finally, replace the execfile call with an exec call.
++        args = [compile_call]
++        if globals is not None:
++            args.extend([Comma(), globals.clone()])
++        if locals is not None:
++            args.extend([Comma(), locals.clone()])
++        return Call(Name("exec"), args, prefix=node.get_prefix())
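
Putting the pieces of the transform together, the generated replacement looks
like this (a sketch derived from the code above):

    # Before (Python 2):
    execfile('setup.py', g, l)

    # After the fixer runs:
    exec(compile(open('setup.py').read(), 'setup.py', 'exec'), g, l)
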
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_filter.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_filter.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,75 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes filter(F, X) into list(filter(F, X)).
++
++We avoid the transformation if the filter() call is directly contained
++in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
++for V in <>:.
++
++NOTE: This is still not correct if the original code was depending on
++filter(F, X) to return a string if X is a string and a tuple if X is a
++tuple.  That would require type inference, which we don't do.  Let
++Python 2.6 figure it out.
++"""
++
++# Local imports
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name, Call, ListComp, in_special_context
++
++class FixFilter(fixer_base.ConditionalFix):
++
++    PATTERN = """
++    filter_lambda=power<
++        'filter'
++        trailer<
++            '('
++            arglist<
++                lambdef< 'lambda'
++                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
++                >
++                ','
++                it=any
++            >
++            ')'
++        >
++    >
++    |
++    power<
++        'filter'
++        trailer< '(' arglist< none='None' ',' seq=any > ')' >
++    >
++    |
++    power<
++        'filter'
++        args=trailer< '(' [any] ')' >
++    >
++    """
++
++    skip_on = "future_builtins.filter"
++
++    def transform(self, node, results):
++        if self.should_skip(node):
++            return
++
++        if "filter_lambda" in results:
++            new = ListComp(results.get("fp").clone(),
++                           results.get("fp").clone(),
++                           results.get("it").clone(),
++                           results.get("xp").clone())
++
++        elif "none" in results:
++            new = ListComp(Name("_f"),
++                           Name("_f"),
++                           results["seq"].clone(),
++                           Name("_f"))
++
++        else:
++            if in_special_context(node):
++                return None
++            new = node.clone()
++            new.set_prefix("")
++            new = Call(Name("list"), [new])
++        new.set_prefix(node.get_prefix())
++        return new
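
The three pattern alternatives produce, respectively (a sketch of the
documented behavior):

    # Before (Python 2):
    filter(lambda x: x > 0, seq)
    filter(None, seq)
    filter(f, seq)

    # After the fixer runs:
    [x for x in seq if x > 0]
    [_f for _f in seq if _f]
    list(filter(f, seq))
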
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_funcattrs.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_funcattrs.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,19 @@
++"""Fix function attribute names (f.func_x -> f.__x__)."""
++# Author: Collin Winter
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++
++class FixFuncattrs(fixer_base.BaseFix):
++    PATTERN = """
++    power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
++                                  | 'func_name' | 'func_defaults' | 'func_code'
++                                  | 'func_dict') > any* >
++    """
++
++    def transform(self, node, results):
++        attr = results["attr"][0]
++        attr.replace(Name(("__%s__" % attr.value[5:]),
++                          prefix=attr.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_future.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_future.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,20 @@
++"""Remove __future__ imports
++
++from __future__ import foo is replaced with an empty line.
++"""
++# Author: Christian Heimes
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import BlankLine
++
++class FixFuture(fixer_base.BaseFix):
++    PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
++
++    # This should be run last -- some things check for the import
++    run_order = 10
++
++    def transform(self, node, results):
++        new = BlankLine()
++        new.prefix = node.get_prefix()
++        return new
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_getcwdu.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_getcwdu.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,18 @@
++"""
++Fixer that changes os.getcwdu() to os.getcwd().
++"""
++# Author: Victor Stinner
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++class FixGetcwdu(fixer_base.BaseFix):
++
++    PATTERN = """
++              power< 'os' trailer< dot='.' name='getcwdu' > any* >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("getcwd", prefix=name.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_has_key.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_has_key.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,109 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for has_key().
++
++Calls to .has_key() methods are expressed in terms of the 'in'
++operator:
++
++    d.has_key(k) -> k in d
++
++CAVEATS:
++1) While the primary target of this fixer is dict.has_key(), the
++   fixer will change any has_key() method call, regardless of its
++   class.
++
++2) Cases like this will not be converted:
++
++    m = d.has_key
++    if m(k):
++        ...
++
++   Only *calls* to has_key() are converted. While it is possible to
++   convert the above to something like
++
++    m = d.__contains__
++    if m(k):
++        ...
++
++   this is currently not done.
++"""
++
++# Local imports
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name, parenthesize
++
++
++class FixHasKey(fixer_base.BaseFix):
++
++    PATTERN = """
++    anchor=power<
++        before=any+
++        trailer< '.' 'has_key' >
++        trailer<
++            '('
++            ( not(arglist | argument<any '=' any>) arg=any
++            | arglist<(not argument<any '=' any>) arg=any ','>
++            )
++            ')'
++        >
++        after=any*
++    >
++    |
++    negation=not_test<
++        'not'
++        anchor=power<
++            before=any+
++            trailer< '.' 'has_key' >
++            trailer<
++                '('
++                ( not(arglist | argument<any '=' any>) arg=any
++                | arglist<(not argument<any '=' any>) arg=any ','>
++                )
++                ')'
++            >
++        >
++    >
++    """
++
++    def transform(self, node, results):
++        assert results
++        syms = self.syms
++        if (node.parent.type == syms.not_test and
++            self.pattern.match(node.parent)):
++            # Don't transform a node matching the first alternative of the
++            # pattern when its parent matches the second alternative
++            return None
++        negation = results.get("negation")
++        anchor = results["anchor"]
++        prefix = node.get_prefix()
++        before = [n.clone() for n in results["before"]]
++        arg = results["arg"].clone()
++        after = results.get("after")
++        if after:
++            after = [n.clone() for n in after]
++        if arg.type in (syms.comparison, syms.not_test, syms.and_test,
++                        syms.or_test, syms.test, syms.lambdef, syms.argument):
++            arg = parenthesize(arg)
++        if len(before) == 1:
++            before = before[0]
++        else:
++            before = pytree.Node(syms.power, before)
++        before.set_prefix(" ")
++        n_op = Name("in", prefix=" ")
++        if negation:
++            n_not = Name("not", prefix=" ")
++            n_op = pytree.Node(syms.comp_op, (n_not, n_op))
++        new = pytree.Node(syms.comparison, (arg, n_op, before))
++        if after:
++            new = parenthesize(new)
++            new = pytree.Node(syms.power, (new,) + tuple(after))
++        if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
++                                syms.and_expr, syms.shift_expr,
++                                syms.arith_expr, syms.term,
++                                syms.factor, syms.power):
++            new = parenthesize(new)
++        new.set_prefix(prefix)
++        return new
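
The plain and negated forms, matching the pattern's two alternatives:

    # Before (Python 2):
    d.has_key(k)
    not d.has_key(k)

    # After the fixer runs:
    k in d
    k not in d
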
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_idioms.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_idioms.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,134 @@
++"""Adjust some old Python 2 idioms to their modern counterparts.
++
++* Change some type comparisons to isinstance() calls:
++    type(x) == T -> isinstance(x, T)
++    type(x) is T -> isinstance(x, T)
++    type(x) != T -> not isinstance(x, T)
++    type(x) is not T -> not isinstance(x, T)
++
++* Change "while 1:" into "while True:".
++
++* Change both
++
++    v = list(EXPR)
++    v.sort()
++    foo(v)
++
++and the more general
++
++    v = EXPR
++    v.sort()
++    foo(v)
++
++into
++
++    v = sorted(EXPR)
++    foo(v)
++"""
++# Author: Jacques Frechet, Collin Winter
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Call, Comma, Name, Node, syms
++
++CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
++TYPE = "power< 'type' trailer< '(' x=any ')' > >"
++
++class FixIdioms(fixer_base.BaseFix):
++
++    explicit = True # The user must ask for this fixer
++
++    PATTERN = r"""
++        isinstance=comparison< %s %s T=any >
++        |
++        isinstance=comparison< T=any %s %s >
++        |
++        while_stmt< 'while' while='1' ':' any+ >
++        |
++        sorted=any<
++            any*
++            simple_stmt<
++              expr_stmt< id1=any '='
++                         power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
++              >
++              '\n'
++            >
++            sort=
++            simple_stmt<
++              power< id2=any
++                     trailer< '.' 'sort' > trailer< '(' ')' >
++              >
++              '\n'
++            >
++            next=any*
++        >
++        |
++        sorted=any<
++            any*
++            simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
++            sort=
++            simple_stmt<
++              power< id2=any
++                     trailer< '.' 'sort' > trailer< '(' ')' >
++              >
++              '\n'
++            >
++            next=any*
++        >
++    """ % (TYPE, CMP, CMP, TYPE)
++
++    def match(self, node):
++        r = super(FixIdioms, self).match(node)
++        # If we've matched one of the sort/sorted subpatterns above, we
++        # want to reject matches where the initial assignment and the
++        # subsequent .sort() call involve different identifiers.
++        if r and "sorted" in r:
++            if r["id1"] == r["id2"]:
++                return r
++            return None
++        return r
++
++    def transform(self, node, results):
++        if "isinstance" in results:
++            return self.transform_isinstance(node, results)
++        elif "while" in results:
++            return self.transform_while(node, results)
++        elif "sorted" in results:
++            return self.transform_sort(node, results)
++        else:
++            raise RuntimeError("Invalid match")
++
++    def transform_isinstance(self, node, results):
++        x = results["x"].clone() # The thing inside of type()
++        T = results["T"].clone() # The type being compared against
++        x.set_prefix("")
++        T.set_prefix(" ")
++        test = Call(Name("isinstance"), [x, Comma(), T])
++        if "n" in results:
++            test.set_prefix(" ")
++            test = Node(syms.not_test, [Name("not"), test])
++        test.set_prefix(node.get_prefix())
++        return test
++
++    def transform_while(self, node, results):
++        one = results["while"]
++        one.replace(Name("True", prefix=one.get_prefix()))
++
++    def transform_sort(self, node, results):
++        sort_stmt = results["sort"]
++        next_stmt = results["next"]
++        list_call = results.get("list")
++        simple_expr = results.get("expr")
++
++        if list_call:
++            list_call.replace(Name("sorted", prefix=list_call.get_prefix()))
++        elif simple_expr:
++            new = simple_expr.clone()
++            new.set_prefix("")
++            simple_expr.replace(Call(Name("sorted"), [new],
++                                     prefix=simple_expr.get_prefix()))
++        else:
++            raise RuntimeError("should not have reached here")
++        sort_stmt.remove()
++        if next_stmt:
++            next_stmt[0].set_prefix(sort_stmt.get_prefix())
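
All three idiom rewrites at a glance (a sketch; recall that this fixer is
explicit, so the user must request it by name):

    # Before (Python 2):
    if type(x) == int:
        pass
    while 1:
        pass
    v = list(seq)
    v.sort()

    # After the fixer runs:
    if isinstance(x, int):
        pass
    while True:
        pass
    v = sorted(seq)
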
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_import.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_import.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,90 @@
++"""Fixer for import statements.
++If spam is being imported from the local directory, this import:
++    from spam import eggs
++Becomes:
++    from .spam import eggs
++
++And this import:
++    import spam
++Becomes:
++    from . import spam
++"""
++
++# Local imports
++from .. import fixer_base
++from os.path import dirname, join, exists, pathsep
++from ..fixer_util import FromImport, syms, token
++
++
++def traverse_imports(names):
++    """
++    Walks over all the names imported in a dotted_as_names node.
++    """
++    pending = [names]
++    while pending:
++        node = pending.pop()
++        if node.type == token.NAME:
++            yield node.value
++        elif node.type == syms.dotted_name:
++            yield "".join([ch.value for ch in node.children])
++        elif node.type == syms.dotted_as_name:
++            pending.append(node.children[0])
++        elif node.type == syms.dotted_as_names:
++            pending.extend(node.children[::-2])
++        else:
++            raise AssertionError("unknown node type")
++
++
++class FixImport(fixer_base.BaseFix):
++
++    PATTERN = """
++    import_from< 'from' imp=any 'import' ['('] any [')'] >
++    |
++    import_name< 'import' imp=any >
++    """
++
++    def transform(self, node, results):
++        imp = results['imp']
++
++        if node.type == syms.import_from:
++            # Some imps are top-level (eg: 'import ham')
++            # some are first level (eg: 'import ham.eggs')
++            # some are third level (eg: 'import ham.eggs as spam')
++            # Hence, the loop
++            while not hasattr(imp, 'value'):
++                imp = imp.children[0]
++            if self.probably_a_local_import(imp.value):
++                imp.value = "." + imp.value
++                imp.changed()
++                return node
++        else:
++            have_local = False
++            have_absolute = False
++            for mod_name in traverse_imports(imp):
++                if self.probably_a_local_import(mod_name):
++                    have_local = True
++                else:
++                    have_absolute = True
++            if have_absolute:
++                if have_local:
++                    # We won't handle both sibling and absolute imports in the
++                    # same statement at the moment.
++                    self.warning(node, "absolute and local imports together")
++                return
++
++            new = FromImport('.', [imp])
++            new.set_prefix(node.get_prefix())
++            return new
++
++    def probably_a_local_import(self, imp_name):
++        imp_name = imp_name.split('.', 1)[0]
++        base_path = dirname(self.filename)
++        base_path = join(base_path, imp_name)
++        # If there is no __init__.py next to the file, it's not in a package
++        # so it can't be a relative import.
++        if not exists(join(dirname(base_path), '__init__.py')):
++            return False
++        for ext in ['.py', pathsep, '.pyc', '.so', '.sl', '.pyd']:
++            if exists(base_path + ext):
++                return True
++        return False
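
Assuming the file being fixed sits in a package directory that also contains
__init__.py and spam.py, the docstring's two cases look like this:

    # Before (Python 2):
    import spam
    from spam import eggs

    # After the fixer runs:
    from . import spam
    from .spam import eggs
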
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_imports.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_imports.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,145 @@
++"""Fix incompatible imports and module references."""
++# Authors: Collin Winter, Nick Edds
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name, attr_chain
++
++MAPPING = {'StringIO':  'io',
++           'cStringIO': 'io',
++           'cPickle': 'pickle',
++           '__builtin__' : 'builtins',
++           'copy_reg': 'copyreg',
++           'Queue': 'queue',
++           'SocketServer': 'socketserver',
++           'ConfigParser': 'configparser',
++           'repr': 'reprlib',
++           'FileDialog': 'tkinter.filedialog',
++           'tkFileDialog': 'tkinter.filedialog',
++           'SimpleDialog': 'tkinter.simpledialog',
++           'tkSimpleDialog': 'tkinter.simpledialog',
++           'tkColorChooser': 'tkinter.colorchooser',
++           'tkCommonDialog': 'tkinter.commondialog',
++           'Dialog': 'tkinter.dialog',
++           'Tkdnd': 'tkinter.dnd',
++           'tkFont': 'tkinter.font',
++           'tkMessageBox': 'tkinter.messagebox',
++           'ScrolledText': 'tkinter.scrolledtext',
++           'Tkconstants': 'tkinter.constants',
++           'Tix': 'tkinter.tix',
++           'ttk': 'tkinter.ttk',
++           'Tkinter': 'tkinter',
++           'markupbase': '_markupbase',
++           '_winreg': 'winreg',
++           'thread': '_thread',
++           'dummy_thread': '_dummy_thread',
++           # anydbm and whichdb are handled by fix_imports2
++           'dbhash': 'dbm.bsd',
++           'dumbdbm': 'dbm.dumb',
++           'dbm': 'dbm.ndbm',
++           'gdbm': 'dbm.gnu',
++           'xmlrpclib': 'xmlrpc.client',
++           'DocXMLRPCServer': 'xmlrpc.server',
++           'SimpleXMLRPCServer': 'xmlrpc.server',
++           'httplib': 'http.client',
++           'htmlentitydefs' : 'html.entities',
++           'HTMLParser' : 'html.parser',
++           'Cookie': 'http.cookies',
++           'cookielib': 'http.cookiejar',
++           'BaseHTTPServer': 'http.server',
++           'SimpleHTTPServer': 'http.server',
++           'CGIHTTPServer': 'http.server',
++           #'test.test_support': 'test.support',
++           'commands': 'subprocess',
++           'UserString' : 'collections',
++           'UserList' : 'collections',
++           'urlparse' : 'urllib.parse',
++           'robotparser' : 'urllib.robotparser',
++}
++
++
++def alternates(members):
++    return "(" + "|".join(map(repr, members)) + ")"
++
++
++def build_pattern(mapping=MAPPING):
++    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
++    bare_names = alternates(mapping.keys())
++
++    yield """name_import=import_name< 'import' ((%s) |
++               multiple_imports=dotted_as_names< any* (%s) any* >) >
++          """ % (mod_list, mod_list)
++    yield """import_from< 'from' (%s) 'import' ['(']
++              ( any | import_as_name< any 'as' any > |
++                import_as_names< any* >)  [')'] >
++          """ % mod_list
++    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
++               multiple_imports=dotted_as_names<
++                 any* dotted_as_name< (%s) 'as' any > any* >) >
++          """ % (mod_list, mod_list)
++
++    # Find usages of module members in code e.g. thread.foo(bar)
++    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
++
++
++class FixImports(fixer_base.BaseFix):
++
++    order = "pre" # Pre-order tree traversal
++
++    # This is overridden in fix_imports2.
++    mapping = MAPPING
++
++    # We want to run this fixer late, so fix_import doesn't try to make stdlib
++    # renames into relative imports.
++    run_order = 6
++
++    def build_pattern(self):
++        return "|".join(build_pattern(self.mapping))
++
++    def compile_pattern(self):
++        # We override this, so MAPPING can be programmatically altered and the
++        # changes will be reflected in PATTERN.
++        self.PATTERN = self.build_pattern()
++        super(FixImports, self).compile_pattern()
++
++    # Don't match the node if it's within another match.
++    def match(self, node):
++        match = super(FixImports, self).match
++        results = match(node)
++        if results:
++            # Module usage could be in the trailer of an attribute lookup, so we
++            # might have nested matches when "bare_with_attr" is present.
++            if "bare_with_attr" not in results and \
++                    any([match(obj) for obj in attr_chain(node, "parent")]):
++                return False
++            return results
++        return False
++
++    def start_tree(self, tree, filename):
++        super(FixImports, self).start_tree(tree, filename)
++        self.replace = {}
++
++    def transform(self, node, results):
++        import_mod = results.get("module_name")
++        if import_mod:
++            mod_name = import_mod.value
++            new_name = self.mapping[mod_name]
++            import_mod.replace(Name(new_name, prefix=import_mod.get_prefix()))
++            if "name_import" in results:
++                # If it's not a "from x import x, y" or "import x as y" import,
++                # mark its usage to be replaced.
++                self.replace[mod_name] = new_name
++            if "multiple_imports" in results:
++                # This is a nasty hack to fix multiple imports on a line (e.g.,
++                # "import StringIO, urlparse"). The problem is that I can't
++                # figure out an easy way to make a pattern recognize the keys of
++                # MAPPING randomly sprinkled in an import statement.
++                results = self.match(node)
++                if results:
++                    self.transform(node, results)
++        else:
++            # Replace usage of the module.
++            bare_name = results["bare_with_attr"][0]
++            new_name = self.replace.get(bare_name.value)
++            if new_name:
++                bare_name.replace(Name(new_name, prefix=bare_name.get_prefix()))
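
When a module is imported by its plain name, both the import and later bare
references are rewritten (a sketch based on the MAPPING table above):

    # Before (Python 2):
    import StringIO
    buf = StringIO.StringIO()

    # After the fixer runs:
    import io
    buf = io.StringIO()
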
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_imports2.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_imports2.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,16 @@
++"""Fix incompatible imports and module references that must be fixed after
++fix_imports."""
++from . import fix_imports
++
++
++MAPPING = {
++            'whichdb': 'dbm',
++            'anydbm': 'dbm',
++          }
++
++
++class FixImports2(fix_imports.FixImports):
++
++    run_order = 7
++
++    mapping = MAPPING
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_input.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_input.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,26 @@
++"""Fixer that changes input(...) into eval(input(...))."""
++# Author: Andre Roberge
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Call, Name
++from .. import patcomp
++
++
++context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
++
++
++class FixInput(fixer_base.BaseFix):
++
++    PATTERN = """
++              power< 'input' args=trailer< '(' [any] ')' > >
++              """
++
++    def transform(self, node, results):
++        # If we're already wrapped in an eval() call, we're done.
++        if context.match(node.parent.parent):
++            return
++
++        new = node.clone()
++        new.set_prefix("")
++        return Call(Name("eval"), [new], prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_intern.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_intern.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,44 @@
++# Copyright 2006 Georg Brandl.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for intern().
++
++intern(s) -> sys.intern(s)"""
++
++# Local imports
++from .. import pytree
++from .. import fixer_base
++from ..fixer_util import Name, Attr, touch_import
++
++
++class FixIntern(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'intern'
++           trailer< lpar='('
++                    ( not(arglist | argument<any '=' any>) obj=any
++                      | obj=arglist<(not argument<any '=' any>) any ','> )
++                    rpar=')' >
++           after=any*
++    >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++        obj = results["obj"].clone()
++        if obj.type == syms.arglist:
++            newarglist = obj.clone()
++        else:
++            newarglist = pytree.Node(syms.arglist, [obj.clone()])
++        after = results["after"]
++        if after:
++            after = [n.clone() for n in after]
++        new = pytree.Node(syms.power,
++                          Attr(Name("sys"), Name("intern")) +
++                          [pytree.Node(syms.trailer,
++                                       [results["lpar"].clone(),
++                                        newarglist,
++                                        results["rpar"].clone()])] + after)
++        new.set_prefix(node.get_prefix())
++        touch_import(None, 'sys', node)
++        return new
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_isinstance.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_isinstance.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,52 @@
++# Copyright 2008 Armin Ronacher.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that cleans up a tuple argument to isinstance after the tokens
++in it were fixed.  This is mainly used to remove double occurrences of
++tokens as a leftover of the long -> int / unicode -> str conversion.
++
++e.g.  isinstance(x, (int, long)) -> isinstance(x, (int, int))
++       -> isinstance(x, int)
++"""
++
++from .. import fixer_base
++from ..fixer_util import token
++
++
++class FixIsinstance(fixer_base.BaseFix):
++
++    PATTERN = """
++    power<
++        'isinstance'
++        trailer< '(' arglist< any ',' atom< '('
++            args=testlist_gexp< any+ >
++        ')' > > ')' >
++    >
++    """
++
++    run_order = 6
++
++    def transform(self, node, results):
++        names_inserted = set()
++        testlist = results["args"]
++        args = testlist.children
++        new_args = []
++        iterator = enumerate(args)
++        for idx, arg in iterator:
++            if arg.type == token.NAME and arg.value in names_inserted:
++                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
++                    iterator.next()
++                    continue
++            else:
++                new_args.append(arg)
++                if arg.type == token.NAME:
++                    names_inserted.add(arg.value)
++        if new_args and new_args[-1].type == token.COMMA:
++            del new_args[-1]
++        if len(new_args) == 1:
++            atom = testlist.parent
++            new_args[0].set_prefix(atom.get_prefix())
++            atom.replace(new_args[0])
++        else:
++            args[:] = new_args
++            node.changed()
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_itertools.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_itertools.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,41 @@
++""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
++    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
++
++    imports from itertools are fixed in fix_itertools_imports.py
++
++    If itertools is imported as something else (e.g. import itertools as it;
++    it.izip(spam, eggs)), method calls will not get fixed.
++    """
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++class FixItertools(fixer_base.BaseFix):
++    it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
++    PATTERN = """
++              power< it='itertools'
++                  trailer<
++                     dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
++              |
++              power< func=%(it_funcs)s trailer< '(' [any] ')' > >
++              """ %(locals())
++
++    # Needs to be run after fix_(map|zip|filter)
++    run_order = 6
++
++    def transform(self, node, results):
++        prefix = None
++        func = results['func'][0]
++        if 'it' in results and func.value != 'ifilterfalse':
++            dot, it = (results['dot'], results['it'])
++            # Remove the 'itertools'
++            prefix = it.get_prefix()
++            it.remove()
++            # Replace the node which contains ('.', 'function') with the
++            # function (to be consistent with the second part of the pattern)
++            dot.remove()
++            func.parent.replace(func)
++
++        prefix = prefix or func.get_prefix()
++        func.replace(Name(func.value[1:], prefix=prefix))
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_itertools_imports.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_itertools_imports.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,52 @@
++""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
++
++# Local imports
++from lib2to3 import fixer_base
++from lib2to3.fixer_util import BlankLine, syms, token
++
++
++class FixItertoolsImports(fixer_base.BaseFix):
++    PATTERN = """
++              import_from< 'from' 'itertools' 'import' imports=any >
++              """ %(locals())
++
++    def transform(self, node, results):
++        imports = results['imports']
++        if imports.type == syms.import_as_name or not imports.children:
++            children = [imports]
++        else:
++            children = imports.children
++        for child in children[::2]:
++            if child.type == token.NAME:
++                member = child.value
++                name_node = child
++            else:
++                assert child.type == syms.import_as_name
++                name_node = child.children[0]
++            member_name = name_node.value
++            if member_name in ('imap', 'izip', 'ifilter'):
++                child.value = None
++                child.remove()
++            elif member_name == 'ifilterfalse':
++                node.changed()
++                name_node.value = 'filterfalse'
++
++        # Make sure the import statement is still sane
++        children = imports.children[:] or [imports]
++        remove_comma = True
++        for child in children:
++            if remove_comma and child.type == token.COMMA:
++                child.remove()
++            else:
++                remove_comma ^= True
++
++        if children[-1].type == token.COMMA:
++            children[-1].remove()
++
++        # If there are no imports left, just get rid of the entire statement
++        if not (imports.children or getattr(imports, 'value', None)) or \
++                imports.parent is None:
++            p = node.get_prefix()
++            node = BlankLine()
++            node.prefix = p
++        return node
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_long.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_long.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,22 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that turns 'long' into 'int' everywhere.
++"""
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name, Number, is_probably_builtin
++
++
++class FixLong(fixer_base.BaseFix):
++
++    PATTERN = "'long'"
++
++    static_int = Name("int")
++
++    def transform(self, node, results):
++        if is_probably_builtin(node):
++            new = self.static_int.clone()
++            new.set_prefix(node.get_prefix())
++            return new
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_map.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_map.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,82 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
++exists a 'from future_builtins import map' statement in the top-level
++namespace.
++
++As a special case, map(None, X) is changed into list(X).  (This is
++necessary because the semantics are changed in this case -- the new
++map(None, X) is equivalent to [(x,) for x in X].)
++
++We avoid the transformation (except for the special case mentioned
++above) if the map() call is directly contained in iter(<>), list(<>),
++tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
++
++NOTE: This is still not correct if the original code was depending on
++map(F, X, Y, ...) to go on until the longest argument is exhausted,
++substituting None for missing values -- like zip(), it now stops as
++soon as the shortest argument is exhausted.
++"""
++
++# Local imports
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name, Call, ListComp, in_special_context
++from ..pygram import python_symbols as syms
++
++class FixMap(fixer_base.ConditionalFix):
++
++    PATTERN = """
++    map_none=power<
++        'map'
++        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
++    >
++    |
++    map_lambda=power<
++        'map'
++        trailer<
++            '('
++            arglist<
++                lambdef< 'lambda'
++                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
++                >
++                ','
++                it=any
++            >
++            ')'
++        >
++    >
++    |
++    power<
++        'map'
++        args=trailer< '(' [any] ')' >
++    >
++    """
++
++    skip_on = 'future_builtins.map'
++
++    def transform(self, node, results):
++        if self.should_skip(node):
++            return
++
++        if node.parent.type == syms.simple_stmt:
++            self.warning(node, "You should use a for loop here")
++            new = node.clone()
++            new.set_prefix("")
++            new = Call(Name("list"), [new])
++        elif "map_lambda" in results:
++            new = ListComp(results.get("xp").clone(),
++                           results.get("fp").clone(),
++                           results.get("it").clone())
++        else:
++            if "map_none" in results:
++                new = results["arg"].clone()
++            else:
++                if in_special_context(node):
++                    return None
++                new = node.clone()
++            new.set_prefix("")
++            new = Call(Name("list"), [new])
++        new.set_prefix(node.get_prefix())
++        return new
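
The special case and the two general cases from the docstring:

    # Before (Python 2):
    map(None, seq)
    map(lambda x: x + 1, seq)
    map(f, seq)

    # After the fixer runs:
    list(seq)
    [x + 1 for x in seq]
    list(map(f, seq))
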
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_metaclass.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_metaclass.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,227 @@
++"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
++
++   The various forms of classdef (inherits nothing, inherits once, inherits
++   many) don't parse the same in the CST so we look at ALL classes for
++   a __metaclass__ and if we find one normalize the inherits to all be
++   an arglist.
++
++   For one-liner classes ('class X: pass') there is no indent/dedent so
++   we normalize those into having a suite.
++
++   Moving the __metaclass__ into the classdef can also cause the class
++   body to be empty so there is some special casing for that as well.
++
++   This fixer also tries very hard to keep original indenting and spacing
++   in all those corner cases.
++
++"""
++# Author: Jack Diederich
++
++# Local imports
++from .. import fixer_base
++from ..pygram import token
++from ..fixer_util import Name, syms, Node, Leaf
++
++
++def has_metaclass(parent):
++    """ we have to check the cls_node without changing it.
++        There are two possibilities:
++          1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
++          2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
++    """
++    for node in parent.children:
++        if node.type == syms.suite:
++            return has_metaclass(node)
++        elif node.type == syms.simple_stmt and node.children:
++            expr_node = node.children[0]
++            if expr_node.type == syms.expr_stmt and expr_node.children:
++                left_side = expr_node.children[0]
++                if isinstance(left_side, Leaf) and \
++                        left_side.value == '__metaclass__':
++                    return True
++    return False
++
++
++def fixup_parse_tree(cls_node):
++    """ one-line classes don't get a suite in the parse tree so we add
++        one to normalize the tree
++    """
++    for node in cls_node.children:
++        if node.type == syms.suite:
++            # already in the preferred format, do nothing
++            return
++
++    # !%@#! oneliners have no suite node, we have to fake one up
++    for i, node in enumerate(cls_node.children):
++        if node.type == token.COLON:
++            break
++    else:
++        raise ValueError("No class suite and no ':'!")
++
++    # move everything into a suite node
++    suite = Node(syms.suite, [])
++    while cls_node.children[i+1:]:
++        move_node = cls_node.children[i+1]
++        suite.append_child(move_node.clone())
++        move_node.remove()
++    cls_node.append_child(suite)
++    node = suite
++
++
++def fixup_simple_stmt(parent, i, stmt_node):
++    """ if there is a semi-colon all the parts count as part of the same
++        simple_stmt.  We just want the __metaclass__ part, so we move
++        everything after the semi-colon into its own simple_stmt node
++    """
++    for semi_ind, node in enumerate(stmt_node.children):
++        if node.type == token.SEMI: # *sigh*
++            break
++    else:
++        return
++
++    node.remove() # kill the semicolon
++    new_expr = Node(syms.expr_stmt, [])
++    new_stmt = Node(syms.simple_stmt, [new_expr])
++    while stmt_node.children[semi_ind:]:
++        move_node = stmt_node.children[semi_ind]
++        new_expr.append_child(move_node.clone())
++        move_node.remove()
++    parent.insert_child(i, new_stmt)
++    new_leaf1 = new_stmt.children[0].children[0]
++    old_leaf1 = stmt_node.children[0].children[0]
++    new_leaf1.set_prefix(old_leaf1.get_prefix())
++
++
++def remove_trailing_newline(node):
++    if node.children and node.children[-1].type == token.NEWLINE:
++        node.children[-1].remove()
++
++
++def find_metas(cls_node):
++    # find the suite node (Mmm, sweet nodes)
++    for node in cls_node.children:
++        if node.type == syms.suite:
++            break
++    else:
++        raise ValueError("No class suite!")
++
++    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
++    for i, simple_node in list(enumerate(node.children)):
++        if simple_node.type == syms.simple_stmt and simple_node.children:
++            expr_node = simple_node.children[0]
++            if expr_node.type == syms.expr_stmt and expr_node.children:
++                # Check if the expr_node is a simple assignment.
++                left_node = expr_node.children[0]
++                if isinstance(left_node, Leaf) and \
++                        left_node.value == '__metaclass__':
++                    # We found an assignment to __metaclass__.
++                    fixup_simple_stmt(node, i, simple_node)
++                    remove_trailing_newline(simple_node)
++                    yield (node, i, simple_node)
++
++
++def fixup_indent(suite):
++    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
++        Otherwise we get in trouble when removing __metaclass__ at suite start
++    """
++    kids = suite.children[::-1]
++    # find the first indent
++    while kids:
++        node = kids.pop()
++        if node.type == token.INDENT:
++            break
++
++    # find the first Leaf
++    while kids:
++        node = kids.pop()
++        if isinstance(node, Leaf) and node.type != token.DEDENT:
++            if node.prefix:
++                node.set_prefix('')
++            return
++        else:
++            kids.extend(node.children[::-1])
++
++
++class FixMetaclass(fixer_base.BaseFix):
++
++    PATTERN = """
++    classdef<any*>
++    """
++
++    def transform(self, node, results):
++        if not has_metaclass(node):
++            return node
++
++        fixup_parse_tree(node)
++
++        # find metaclasses, keep the last one
++        last_metaclass = None
++        for suite, i, stmt in find_metas(node):
++            last_metaclass = stmt
++            stmt.remove()
++
++        text_type = node.children[0].type # always Leaf(nnn, 'class')
++
++        # figure out what kind of classdef we have
++        if len(node.children) == 7:
++            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
++            #                 0        1       2    3        4    5    6
++            if node.children[3].type == syms.arglist:
++                arglist = node.children[3]
++            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
++            else:
++                parent = node.children[3].clone()
++                arglist = Node(syms.arglist, [parent])
++                node.set_child(3, arglist)
++        elif len(node.children) == 6:
++            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
++            #                 0        1       2     3    4    5
++            arglist = Node(syms.arglist, [])
++            node.insert_child(3, arglist)
++        elif len(node.children) == 4:
++            # Node(classdef, ['class', 'name', ':', suite])
++            #                 0        1       2    3
++            arglist = Node(syms.arglist, [])
++            node.insert_child(2, Leaf(token.RPAR, ')'))
++            node.insert_child(2, arglist)
++            node.insert_child(2, Leaf(token.LPAR, '('))
++        else:
++            raise ValueError("Unexpected class definition")
++
++        # now stick the metaclass in the arglist
++        meta_txt = last_metaclass.children[0].children[0]
++        meta_txt.value = 'metaclass'
++        orig_meta_prefix = meta_txt.get_prefix()
++
++        if arglist.children:
++            arglist.append_child(Leaf(token.COMMA, ','))
++            meta_txt.set_prefix(' ')
++        else:
++            meta_txt.set_prefix('')
++
++        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
++        expr_stmt = last_metaclass.children[0]
++        assert expr_stmt.type == syms.expr_stmt
++        expr_stmt.children[1].set_prefix('')
++        expr_stmt.children[2].set_prefix('')
++
++        arglist.append_child(last_metaclass)
++
++        fixup_indent(suite)
++
++        # check for empty suite
++        if not suite.children:
++            # one-liner that was just __metaclass__
++            suite.remove()
++            pass_leaf = Leaf(text_type, 'pass')
++            pass_leaf.set_prefix(orig_meta_prefix)
++            node.append_child(pass_leaf)
++            node.append_child(Leaf(token.NEWLINE, '\n'))
++
++        elif len(suite.children) > 1 and \
++                 (suite.children[-2].type == token.INDENT and
++                  suite.children[-1].type == token.DEDENT):
++            # there was only one line in the class body and it was __metaclass__
++            pass_leaf = Leaf(text_type, 'pass')
++            suite.insert_child(-1, pass_leaf)
++            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
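
For quick reference, an illustrative before/after pair for this fixer
(class and metaclass names are hypothetical):

    # Before
    class C:
        __metaclass__ = Meta
        x = 1

    class D: __metaclass__ = Meta

    # After
    class C(metaclass=Meta):
        x = 1

    class D(metaclass=Meta): pass
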
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_methodattrs.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_methodattrs.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,23 @@
++"""Fix bound method attributes (method.im_? -> method.__?__).
++"""
++# Author: Christian Heimes
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++MAP = {
++    "im_func" : "__func__",
++    "im_self" : "__self__",
++    "im_class" : "__self__.__class__"
++    }
++
++class FixMethodattrs(fixer_base.BaseFix):
++    PATTERN = """
++    power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
++    """
++
++    def transform(self, node, results):
++        attr = results["attr"][0]
++        new = MAP[attr.value]
++        attr.replace(Name(new, prefix=attr.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_ne.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_ne.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,22 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that turns <> into !=."""
++
++# Local imports
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++
++
++class FixNe(fixer_base.BaseFix):
++    # This is so simple that we don't need the pattern compiler.
++
++    def match(self, node):
++        # Override
++        return node.type == token.NOTEQUAL and node.value == "<>"
++
++    def transform(self, node, results):
++        new = pytree.Leaf(token.NOTEQUAL, "!=")
++        new.set_prefix(node.get_prefix())
++        return new
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_next.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_next.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,103 @@
++"""Fixer for it.next() -> next(it), per PEP 3114."""
++# Author: Collin Winter
++
++# Things that currently aren't covered:
++#   - listcomp "next" names aren't warned
++#   - "with" statement targets aren't checked
++
++# Local imports
++from ..pgen2 import token
++from ..pygram import python_symbols as syms
++from .. import fixer_base
++from ..fixer_util import Name, Call, find_binding
++
++bind_warning = "Calls to builtin next() possibly shadowed by global binding"
++
++
++class FixNext(fixer_base.BaseFix):
++    PATTERN = """
++    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
++    |
++    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
++    |
++    classdef< 'class' any+ ':'
++              suite< any*
++                     funcdef< 'def'
++                              name='next'
++                              parameters< '(' NAME ')' > any+ >
++                     any* > >
++    |
++    global=global_stmt< 'global' any* 'next' any* >
++    """
++
++    order = "pre" # Pre-order tree traversal
++
++    def start_tree(self, tree, filename):
++        super(FixNext, self).start_tree(tree, filename)
++
++        n = find_binding('next', tree)
++        if n:
++            self.warning(n, bind_warning)
++            self.shadowed_next = True
++        else:
++            self.shadowed_next = False
++
++    def transform(self, node, results):
++        assert results
++
++        base = results.get("base")
++        attr = results.get("attr")
++        name = results.get("name")
++        mod = results.get("mod")
++
++        if base:
++            if self.shadowed_next:
++                attr.replace(Name("__next__", prefix=attr.get_prefix()))
++            else:
++                base = [n.clone() for n in base]
++                base[0].set_prefix("")
++                node.replace(Call(Name("next", prefix=node.get_prefix()), base))
++        elif name:
++            n = Name("__next__", prefix=name.get_prefix())
++            name.replace(n)
++        elif attr:
++            # We don't do this transformation if we're assigning to "x.next".
++            # Unfortunately, it doesn't seem possible to do this in PATTERN,
++            #  so it's being done here.
++            if is_assign_target(node):
++                head = results["head"]
++                if "".join([str(n) for n in head]).strip() == '__builtin__':
++                    self.warning(node, bind_warning)
++                return
++            attr.replace(Name("__next__"))
++        elif "global" in results:
++            self.warning(node, bind_warning)
++            self.shadowed_next = True
++
++
++### The following functions help test if node is part of an assignment
++###  target.
++
++def is_assign_target(node):
++    assign = find_assign(node)
++    if assign is None:
++        return False
++
++    for child in assign.children:
++        if child.type == token.EQUAL:
++            return False
++        elif is_subtree(child, node):
++            return True
++    return False
++
++def find_assign(node):
++    if node.type == syms.expr_stmt:
++        return node
++    if node.type == syms.simple_stmt or node.parent is None:
++        return None
++    return find_assign(node.parent)
++
++def is_subtree(root, node):
++    if root == node:
++        return True
++    return any([is_subtree(c, node) for c in root.children])
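
A minimal sketch of driving this fixer through the refactoring engine;
this assumes the stock lib2to3 module paths (in this sandbox the package
is named refactor instead):

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["lib2to3.fixes.fix_next"])
    tree = rt.refactor_string("x = it.next()\n", "<example>")
    print(tree)  # prints: x = next(it)
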
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_nonzero.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_nonzero.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,20 @@
++"""Fixer for __nonzero__ -> __bool__ methods."""
++# Author: Collin Winter
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name, syms
++
++class FixNonzero(fixer_base.BaseFix):
++    PATTERN = """
++    classdef< 'class' any+ ':'
++              suite< any*
++                     funcdef< 'def' name='__nonzero__'
++                              parameters< '(' NAME ')' > any+ >
++                     any* > >
++    """
++
++    def transform(self, node, results):
++        name = results["name"]
++        new = Name("__bool__", prefix=name.get_prefix())
++        name.replace(new)
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_numliterals.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_numliterals.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,27 @@
++"""Fixer that turns 1L into 1, 0755 into 0o755.
++"""
++# Copyright 2007 Georg Brandl.
++# Licensed to PSF under a Contributor Agreement.
++
++# Local imports
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Number
++
++
++class FixNumliterals(fixer_base.BaseFix):
++    # This is so simple that we don't need the pattern compiler.
++
++    def match(self, node):
++        # Override
++        return (node.type == token.NUMBER and
++                (node.value.startswith("0") or node.value[-1] in "Ll"))
++
++    def transform(self, node, results):
++        val = node.value
++        if val[-1] in 'Ll':
++            val = val[:-1]
++        elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
++            val = "0o" + val[1:]
++
++        return Number(val, prefix=node.get_prefix())
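
Concretely, the match()/transform() pair above rewrites literals like so;
the last case shows the effect of the len(set(val)) > 1 guard:

    1L    ->  1
    0755  ->  0o755
    0     ->  0       # matched, but left unchanged by the guard
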
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_paren.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_paren.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,42 @@
++"""Fixer that addes parentheses where they are required
++
++This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
++
++# By Taek Joo Kim and Benjamin Peterson
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import LParen, RParen
++
++# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
++class FixParen(fixer_base.BaseFix):
++    PATTERN = """
++        atom< ('[' | '(')
++            (listmaker< any
++                comp_for<
++                    'for' NAME 'in'
++                    target=testlist_safe< any (',' any)+ [',']
++                     >
++                    [any]
++                >
++            >
++            |
++            testlist_gexp< any
++                comp_for<
++                    'for' NAME 'in'
++                    target=testlist_safe< any (',' any)+ [',']
++                     >
++                    [any]
++                >
++            >)
++        (']' | ')') >
++    """
++
++    def transform(self, node, results):
++        target = results["target"]
++
++        lparen = LParen()
++        lparen.set_prefix(target.get_prefix())
++        target.set_prefix("") # Make it hug the parentheses
++        target.insert_child(0, lparen)
++        target.append_child(RParen())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_print.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_print.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,90 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for print.
++
++Change:
++    'print'          into 'print()'
++    'print ...'      into 'print(...)'
++    'print ... ,'    into 'print(..., end=" ")'
++    'print >>x, ...' into 'print(..., file=x)'
++
++No changes are applied if print_function is imported from __future__
++
++"""
++
++# Local imports
++from .. import patcomp
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name, Call, Comma, String, is_tuple
++
++
++parend_expr = patcomp.compile_pattern(
++              """atom< '(' [atom|STRING|NAME] ')' >"""
++              )
++
++
++class FixPrint(fixer_base.ConditionalFix):
++
++    PATTERN = """
++              simple_stmt< any* bare='print' any* > | print_stmt
++              """
++
++    skip_on = '__future__.print_function'
++
++    def transform(self, node, results):
++        assert results
++
++        if self.should_skip(node):
++            return
++
++        bare_print = results.get("bare")
++
++        if bare_print:
++            # Special-case print all by itself
++            bare_print.replace(Call(Name("print"), [],
++                               prefix=bare_print.get_prefix()))
++            return
++        assert node.children[0] == Name("print")
++        args = node.children[1:]
++        if len(args) == 1 and parend_expr.match(args[0]):
++            # We don't want to keep sticking parens around an
++            # already-parenthesised expression.
++            return
++
++        sep = end = file = None
++        if args and args[-1] == Comma():
++            args = args[:-1]
++            end = " "
++        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
++            assert len(args) >= 2
++            file = args[1].clone()
++            args = args[3:] # Strip a possible comma after the file expression
++        # Now synthesize a print(args, sep=..., end=..., file=...) node.
++        l_args = [arg.clone() for arg in args]
++        if l_args:
++            l_args[0].set_prefix("")
++        if sep is not None or end is not None or file is not None:
++            if sep is not None:
++                self.add_kwarg(l_args, "sep", String(repr(sep)))
++            if end is not None:
++                self.add_kwarg(l_args, "end", String(repr(end)))
++            if file is not None:
++                self.add_kwarg(l_args, "file", file)
++        n_stmt = Call(Name("print"), l_args)
++        n_stmt.set_prefix(node.get_prefix())
++        return n_stmt
++
++    def add_kwarg(self, l_nodes, s_kwd, n_expr):
++        # XXX All this prefix-setting may lose comments (though rarely)
++        n_expr.set_prefix("")
++        n_argument = pytree.Node(self.syms.argument,
++                                 (Name(s_kwd),
++                                  pytree.Leaf(token.EQUAL, "="),
++                                  n_expr))
++        if l_nodes:
++            l_nodes.append(Comma())
++            n_argument.set_prefix(" ")
++        l_nodes.append(n_argument)
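
For illustration, the combined '>>' and trailing-comma case produces output
of this shape (sys.stderr and msg are example names):

    # Before
    print >>sys.stderr, "error:", msg,

    # After
    print("error:", msg, end=' ', file=sys.stderr)
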
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_raise.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_raise.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,82 @@
++"""Fixer for 'raise E, V, T'
++
++raise         -> raise
++raise E       -> raise E
++raise E, V    -> raise E(V)
++raise E, V, T -> raise E(V).with_traceback(T)
++
++raise (((E, E'), E''), E'''), V -> raise E(V)
++raise "foo", V, T               -> warns about string exceptions
++
++
++CAVEATS:
++1) "raise E, V" will be incorrectly translated if V is an exception
++   instance. The correct Python 3 idiom is
++
++        raise E from V
++
++   but since we can't detect instance-hood by syntax alone and since
++   any client code would have to be changed as well, we don't automate
++   this.
++"""
++# Author: Collin Winter
++
++# Local imports
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name, Call, Attr, ArgList, is_tuple
++
++class FixRaise(fixer_base.BaseFix):
++
++    PATTERN = """
++    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++
++        exc = results["exc"].clone()
++        if exc.type is token.STRING:
++            self.cannot_convert(node, "Python 3 does not support string exceptions")
++            return
++
++        # Python 2 supports
++        #  raise ((((E1, E2), E3), E4), E5), V
++        # as a synonym for
++        #  raise E1, V
++        # Since Python 3 will not support this, we recurse down any tuple
++        # literals, always taking the first element.
++        if is_tuple(exc):
++            while is_tuple(exc):
++                # exc.children[1:-1] is the unparenthesized tuple
++                # exc.children[1].children[0] is the first element of the tuple
++                exc = exc.children[1].children[0].clone()
++            exc.set_prefix(" ")
++
++        if "val" not in results:
++            # One-argument raise
++            new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
++            new.set_prefix(node.get_prefix())
++            return new
++
++        val = results["val"].clone()
++        if is_tuple(val):
++            args = [c.clone() for c in val.children[1:-1]]
++        else:
++            val.set_prefix("")
++            args = [val]
++
++        if "tb" in results:
++            tb = results["tb"].clone()
++            tb.set_prefix("")
++
++            e = Call(exc, args)
++            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
++            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
++            new.set_prefix(node.get_prefix())
++            return new
++        else:
++            return pytree.Node(syms.raise_stmt,
++                               [Name("raise"), Call(exc, args)],
++                               prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_raw_input.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_raw_input.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,16 @@
++"""Fixer that changes raw_input(...) into input(...)."""
++# Author: Andre Roberge
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++class FixRawInput(fixer_base.BaseFix):
++
++    PATTERN = """
++              power< name='raw_input' trailer< '(' [any] ')' > any* >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("input", prefix=name.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_reduce.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_reduce.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,33 @@
++# Copyright 2008 Armin Ronacher.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for reduce().
++
++Makes sure reduce() is imported from the functools module whenever
++reduce is used in the module being fixed.
++"""
++
++from .. import pytree
++from .. import fixer_base
++from ..fixer_util import Name, Attr, touch_import
++
++
++
++class FixReduce(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'reduce'
++        trailer< '('
++            arglist< (
++                (not(argument<any '=' any>) any ','
++                 not(argument<any '=' any>) any) |
++                (not(argument<any '=' any>) any ','
++                 not(argument<any '=' any>) any ','
++                 not(argument<any '=' any>) any)
++            ) >
++        ')' >
++    >
++    """
++
++    def transform(self, node, results):
++        touch_import('functools', 'reduce', node)
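
The transform body only calls touch_import(); the visible effect on fixed
source is an added import, e.g. (items is a hypothetical name):

    # Before
    total = reduce(lambda a, b: a + b, items)

    # After -- touch_import() inserts the import near the top of the module
    from functools import reduce
    total = reduce(lambda a, b: a + b, items)
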
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_renames.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_renames.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,69 @@
++"""Fix incompatible renames
++
++Fixes:
++  * sys.maxint -> sys.maxsize
++"""
++# Author: Christian Heimes
++# based on Collin Winter's fix_import
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name, attr_chain
++
++MAPPING = {"sys":  {"maxint" : "maxsize"},
++          }
++LOOKUP = {}
++
++def alternates(members):
++    return "(" + "|".join(map(repr, members)) + ")"
++
++
++def build_pattern():
++    #bare = set()
++    for module, replace in MAPPING.items():
++        for old_attr, new_attr in replace.items():
++            LOOKUP[(module, old_attr)] = new_attr
++            #bare.add(module)
++            #bare.add(old_attr)
++            #yield """
++            #      import_name< 'import' (module=%r
++            #          | dotted_as_names< any* module=%r any* >) >
++            #      """ % (module, module)
++            yield """
++                  import_from< 'from' module_name=%r 'import'
++                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
++                  """ % (module, old_attr, old_attr)
++            yield """
++                  power< module_name=%r trailer< '.' attr_name=%r > any* >
++                  """ % (module, old_attr)
++    #yield """bare_name=%s""" % alternates(bare)
++
++
++class FixRenames(fixer_base.BaseFix):
++    PATTERN = "|".join(build_pattern())
++
++    order = "pre" # Pre-order tree traversal
++
++    # Don't match the node if it's within another match
++    def match(self, node):
++        match = super(FixRenames, self).match
++        results = match(node)
++        if results:
++            if any([match(obj) for obj in attr_chain(node, "parent")]):
++                return False
++            return results
++        return False
++
++    #def start_tree(self, tree, filename):
++    #    super(FixRenames, self).start_tree(tree, filename)
++    #    self.replace = {}
++
++    def transform(self, node, results):
++        mod_name = results.get("module_name")
++        attr_name = results.get("attr_name")
++        #bare_name = results.get("bare_name")
++        #import_mod = results.get("module")
++
++        if mod_name and attr_name:
++            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
++            attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))
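
In practice, the two generated patterns above yield rewrites like:

    sys.maxint              ->  sys.maxsize
    from sys import maxint  ->  from sys import maxsize
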
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_repr.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_repr.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,22 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Call, Name, parenthesize
++
++
++class FixRepr(fixer_base.BaseFix):
++
++    PATTERN = """
++              atom < '`' expr=any '`' >
++              """
++
++    def transform(self, node, results):
++        expr = results["expr"].clone()
++
++        if expr.type == self.syms.testlist1:
++            expr = parenthesize(expr)
++        return Call(Name("repr"), [expr], prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_set_literal.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_set_literal.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,52 @@
++"""
++Optional fixer to transform set() calls to set literals.
++"""
++
++# Author: Benjamin Peterson
++
++from lib2to3 import fixer_base, pytree
++from lib2to3.fixer_util import token, syms
++
++
++
++class FixSetLiteral(fixer_base.BaseFix):
++
++    explicit = True
++
++    PATTERN = """power< 'set' trailer< '('
++                     (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
++                                |
++                                single=any) ']' >
++                     |
++                     atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
++                     )
++                     ')' > >
++              """
++
++    def transform(self, node, results):
++        single = results.get("single")
++        if single:
++            # Make a fake listmaker
++            fake = pytree.Node(syms.listmaker, [single.clone()])
++            single.replace(fake)
++            items = fake
++        else:
++            items = results["items"]
++
++        # Build the contents of the literal
++        literal = [pytree.Leaf(token.LBRACE, "{")]
++        literal.extend(n.clone() for n in items.children)
++        literal.append(pytree.Leaf(token.RBRACE, "}"))
++        # Set the prefix of the right brace to that of the ')' or ']'
++        literal[-1].set_prefix(items.next_sibling.get_prefix())
++        maker = pytree.Node(syms.dictsetmaker, literal)
++        maker.set_prefix(node.get_prefix())
++
++        # If the original was a one-tuple, we need to remove the extra comma.
++        if len(maker.children) == 4:
++            n = maker.children[2]
++            n.remove()
++            maker.children[-1].set_prefix(n.get_prefix())
++
++        # Finally, replace the set call with our shiny new literal.
++        return maker
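
Because this fixer is explicit, it only runs when requested by name
(e.g. 2to3 -f set_literal).  Illustrative rewrites (x is hypothetical):

    set([1, 2, 3])  ->  {1, 2, 3}
    set((x,))       ->  {x}      # the one-tuple comma is removed
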
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_standarderror.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_standarderror.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,18 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for StandardError -> Exception."""
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++
++class FixStandarderror(fixer_base.BaseFix):
++
++    PATTERN = """
++              'StandardError'
++              """
++
++    def transform(self, node, results):
++        return Name("Exception", prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_sys_exc.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_sys_exc.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,29 @@
++"""Fixer for sys.exc_{type, value, traceback}
++
++sys.exc_type -> sys.exc_info()[0]
++sys.exc_value -> sys.exc_info()[1]
++sys.exc_traceback -> sys.exc_info()[2]
++"""
++
++# By Jeff Balogh and Benjamin Peterson
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
++
++class FixSysExc(fixer_base.BaseFix):
++    # This order matches the ordering of sys.exc_info().
++    exc_info = ["exc_type", "exc_value", "exc_traceback"]
++    PATTERN = """
++              power< 'sys' trailer< dot='.' attribute=(%s) > >
++              """ % '|'.join("'%s'" % e for e in exc_info)
++
++    def transform(self, node, results):
++        sys_attr = results["attribute"][0]
++        index = Number(self.exc_info.index(sys_attr.value))
++
++        call = Call(Name("exc_info"), prefix=sys_attr.get_prefix())
++        attr = Attr(Name("sys"), call)
++        attr[1].children[0].set_prefix(results["dot"].get_prefix())
++        attr.append(Subscript(index))
++        return Node(syms.power, attr, prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_throw.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_throw.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,56 @@
++"""Fixer for generator.throw(E, V, T).
++
++g.throw(E)       -> g.throw(E)
++g.throw(E, V)    -> g.throw(E(V))
++g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
++
++g.throw("foo"[, V[, T]]) will warn about string exceptions."""
++# Author: Collin Winter
++
++# Local imports
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name, Call, ArgList, Attr, is_tuple
++
++class FixThrow(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< any trailer< '.' 'throw' >
++           trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
++    >
++    |
++    power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++
++        exc = results["exc"].clone()
++        if exc.type is token.STRING:
++            self.cannot_convert(node, "Python 3 does not support string exceptions")
++            return
++
++        # Leave "g.throw(E)" alone
++        val = results.get("val")
++        if val is None:
++            return
++
++        val = val.clone()
++        if is_tuple(val):
++            args = [c.clone() for c in val.children[1:-1]]
++        else:
++            val.set_prefix("")
++            args = [val]
++
++        throw_args = results["args"]
++
++        if "tb" in results:
++            tb = results["tb"].clone()
++            tb.set_prefix("")
++
++            e = Call(exc, args)
++            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
++            throw_args.replace(pytree.Node(syms.power, with_tb))
++        else:
++            throw_args.replace(Call(exc, args))
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_tuple_params.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_tuple_params.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,169 @@
++"""Fixer for function definitions with tuple parameters.
++
++def func(((a, b), c), d):
++    ...
++
++    ->
++
++def func(x, d):
++    ((a, b), c) = x
++    ...
++
++It will also support lambdas:
++
++    lambda (x, y): x + y -> lambda t: t[0] + t[1]
++
++    # The parens are a syntax error in Python 3
++    lambda (x): x + y -> lambda x: x + y
++"""
++# Author: Collin Winter
++
++# Local imports
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms
++
++def is_docstring(stmt):
++    return isinstance(stmt, pytree.Node) and \
++           stmt.children[0].type == token.STRING
++
++class FixTupleParams(fixer_base.BaseFix):
++    PATTERN = """
++              funcdef< 'def' any parameters< '(' args=any ')' >
++                       ['->' any] ':' suite=any+ >
++              |
++              lambda=
++              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
++                       ':' body=any
++              >
++              """
++
++    def transform(self, node, results):
++        if "lambda" in results:
++            return self.transform_lambda(node, results)
++
++        new_lines = []
++        suite = results["suite"]
++        args = results["args"]
++        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
++        # TODO(cwinter): suite-cleanup
++        if suite[0].children[1].type == token.INDENT:
++            start = 2
++            indent = suite[0].children[1].value
++            end = Newline()
++        else:
++            start = 0
++            indent = "; "
++            end = pytree.Leaf(token.INDENT, "")
++
++        # We need access to self for new_name(), and making this a method
++        #  doesn't feel right. Closing over self and new_lines makes the
++        #  code below cleaner.
++        def handle_tuple(tuple_arg, add_prefix=False):
++            n = Name(self.new_name())
++            arg = tuple_arg.clone()
++            arg.set_prefix("")
++            stmt = Assign(arg, n.clone())
++            if add_prefix:
++                n.set_prefix(" ")
++            tuple_arg.replace(n)
++            new_lines.append(pytree.Node(syms.simple_stmt,
++                                         [stmt, end.clone()]))
++
++        if args.type == syms.tfpdef:
++            handle_tuple(args)
++        elif args.type == syms.typedargslist:
++            for i, arg in enumerate(args.children):
++                if arg.type == syms.tfpdef:
++                    # Without add_prefix, the emitted code is correct,
++                    #  just ugly.
++                    handle_tuple(arg, add_prefix=(i > 0))
++
++        if not new_lines:
++            return node
++
++        # This isn't strictly necessary, but it plays nicely with other fixers.
++        # TODO(cwinter) get rid of this when children becomes a smart list
++        for line in new_lines:
++            line.parent = suite[0]
++
++        # TODO(cwinter) suite-cleanup
++        after = start
++        if start == 0:
++            new_lines[0].set_prefix(" ")
++        elif is_docstring(suite[0].children[start]):
++            new_lines[0].set_prefix(indent)
++            after = start + 1
++
++        suite[0].children[after:after] = new_lines
++        for i in range(after+1, after+len(new_lines)+1):
++            suite[0].children[i].set_prefix(indent)
++        suite[0].changed()
++
++    def transform_lambda(self, node, results):
++        args = results["args"]
++        body = results["body"]
++        inner = simplify_args(results["inner"])
++
++        # Replace lambda ((((x)))): x  with lambda x: x
++        if inner.type == token.NAME:
++            inner = inner.clone()
++            inner.set_prefix(" ")
++            args.replace(inner)
++            return
++
++        params = find_params(args)
++        to_index = map_to_index(params)
++        tup_name = self.new_name(tuple_name(params))
++
++        new_param = Name(tup_name, prefix=" ")
++        args.replace(new_param.clone())
++        for n in body.post_order():
++            if n.type == token.NAME and n.value in to_index:
++                subscripts = [c.clone() for c in to_index[n.value]]
++                new = pytree.Node(syms.power,
++                                  [new_param.clone()] + subscripts)
++                new.set_prefix(n.get_prefix())
++                n.replace(new)
++
++
++### Helper functions for transform_lambda()
++
++def simplify_args(node):
++    if node.type in (syms.vfplist, token.NAME):
++        return node
++    elif node.type == syms.vfpdef:
++        # These look like vfpdef< '(' x ')' > where x is NAME
++        # or another vfpdef instance (leading to recursion).
++        while node.type == syms.vfpdef:
++            node = node.children[1]
++        return node
++    raise RuntimeError("Received unexpected node %s" % node)
++
++def find_params(node):
++    if node.type == syms.vfpdef:
++        return find_params(node.children[1])
++    elif node.type == token.NAME:
++        return node.value
++    return [find_params(c) for c in node.children if c.type != token.COMMA]
++
++def map_to_index(param_list, prefix=[], d=None):
++    if d is None:
++        d = {}
++    for i, obj in enumerate(param_list):
++        trailer = [Subscript(Number(i))]
++        if isinstance(obj, list):
++            map_to_index(obj, trailer, d=d)
++        else:
++            d[obj] = prefix + trailer
++    return d
++
++def tuple_name(param_list):
++    l = []
++    for obj in param_list:
++        if isinstance(obj, list):
++            l.append(tuple_name(obj))
++        else:
++            l.append(obj)
++    return "_".join(l)
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_types.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_types.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,62 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for removing uses of the types module.
++
++These work only for the known names in the types module.  The matched
++forms can be written with a "types." prefix or without one; i.e., it is
++assumed the module is imported either as:
++
++    import types
++    from types import ... # either * or specific types
++
++The import statements are not modified.
++
++There should be another fixer that handles at least the following constants:
++
++   type([]) -> list
++   type(()) -> tuple
++   type('') -> str
++
++"""
++
++# Local imports
++from ..pgen2 import token
++from .. import fixer_base
++from ..fixer_util import Name
++
++_TYPE_MAPPING = {
++        'BooleanType' : 'bool',
++        'BufferType' : 'memoryview',
++        'ClassType' : 'type',
++        'ComplexType' : 'complex',
++        'DictType': 'dict',
++        'DictionaryType' : 'dict',
++        'EllipsisType' : 'type(Ellipsis)',
++        #'FileType' : 'io.IOBase',
++        'FloatType': 'float',
++        'IntType': 'int',
++        'ListType': 'list',
++        'LongType': 'int',
++        'ObjectType' : 'object',
++        'NoneType': 'type(None)',
++        'NotImplementedType' : 'type(NotImplemented)',
++        'SliceType' : 'slice',
++        'StringType': 'bytes', # XXX ?
++        'StringTypes' : 'str', # XXX ?
++        'TupleType': 'tuple',
++        'TypeType' : 'type',
++        'UnicodeType': 'str',
++        'XRangeType' : 'range',
++    }
++
++_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
++
++class FixTypes(fixer_base.BaseFix):
++
++    PATTERN = '|'.join(_pats)
++
++    def transform(self, node, results):
++        new_value = _TYPE_MAPPING.get(results["name"].value)
++        if new_value:
++            return Name(new_value, prefix=node.get_prefix())
++        return None
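
A few illustrative rewrites from the mapping above:

    types.IntType     ->  int
    types.NoneType    ->  type(None)
    types.StringType  ->  bytes    # per the XXX note in the mapping
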
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_unicode.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_unicode.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,28 @@
++"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
++
++"""
++
++import re
++from ..pgen2 import token
++from .. import fixer_base
++
++class FixUnicode(fixer_base.BaseFix):
++
++    PATTERN = "STRING | NAME<'unicode' | 'unichr'>"
++
++    def transform(self, node, results):
++        if node.type == token.NAME:
++            if node.value == "unicode":
++                new = node.clone()
++                new.value = "str"
++                return new
++            if node.value == "unichr":
++                new = node.clone()
++                new.value = "chr"
++                return new
++            # XXX Warn when __unicode__ found?
++        elif node.type == token.STRING:
++            if re.match(r"[uU][rR]?[\'\"]", node.value):
++                new = node.clone()
++                new.value = new.value[1:]
++                return new
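
Illustrative rewrites; note that transform() strips only the leading
character of a string literal, so a ur-prefix keeps its r:

    unicode(x)  ->  str(x)
    unichr(65)  ->  chr(65)
    u"text"     ->  "text"
    ur"\d+"     ->  r"\d+"
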
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_urllib.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_urllib.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,180 @@
++"""Fix changes imports of urllib which are now incompatible.
++   This is rather similar to fix_imports, but because of the more
++   complex nature of the fixing for urllib, it has its own fixer.
++"""
++# Author: Nick Edds
++
++# Local imports
++from .fix_imports import alternates, FixImports
++from .. import fixer_base
++from ..fixer_util import Name, Comma, FromImport, Newline, attr_chain
++
++MAPPING = {'urllib':  [
++                ('urllib.request',
++                    ['URLOpener', 'FancyURLOpener', 'urlretrieve',
++                     '_urlopener', 'urlcleanup']),
++                ('urllib.parse',
++                    ['quote', 'quote_plus', 'unquote', 'unquote_plus',
++                     'urlencode', 'pathname2url', 'url2pathname', 'splitattr',
++                     'splithost', 'splitnport', 'splitpasswd', 'splitport',
++                     'splitquery', 'splittag', 'splittype', 'splituser',
++                     'splitvalue', ]),
++                ('urllib.error',
++                    ['ContentTooShortError'])],
++           'urllib2' : [
++                ('urllib.request',
++                    ['urlopen', 'install_opener', 'build_opener',
++                     'Request', 'OpenerDirector', 'BaseHandler',
++                     'HTTPDefaultErrorHandler', 'HTTPRedirectHandler',
++                     'HTTPCookieProcessor', 'ProxyHandler',
++                     'HTTPPasswordMgr',
++                     'HTTPPasswordMgrWithDefaultRealm',
++                     'AbstractBasicAuthHandler',
++                     'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler',
++                     'AbstractDigestAuthHandler',
++                     'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler',
++                     'HTTPHandler', 'HTTPSHandler', 'FileHandler',
++                     'FTPHandler', 'CacheFTPHandler',
++                     'UnknownHandler']),
++                ('urllib.error',
++                    ['URLError', 'HTTPError']),
++           ]
++}
++
++# Duplicate the url parsing functions for urllib2.
++MAPPING["urllib2"].append(MAPPING["urllib"][1])
++
++
++def build_pattern():
++    bare = set()
++    for old_module, changes in MAPPING.items():
++        for change in changes:
++            new_module, members = change
++            members = alternates(members)
++            yield """import_name< 'import' (module=%r
++                                  | dotted_as_names< any* module=%r any* >) >
++                  """ % (old_module, old_module)
++            yield """import_from< 'from' mod_member=%r 'import'
++                       ( member=%s | import_as_name< member=%s 'as' any > |
++                         import_as_names< members=any*  >) >
++                  """ % (old_module, members, members)
++            yield """import_from< 'from' module_star=%r 'import' star='*' >
++                  """ % old_module
++            yield """import_name< 'import'
++                                  dotted_as_name< module_as=%r 'as' any > >
++                  """ % old_module
++            yield """power< module_dot=%r trailer< '.' member=%s > any* >
++                  """ % (old_module, members)
++
++
++class FixUrllib(FixImports):
++
++    def build_pattern(self):
++        return "|".join(build_pattern())
++
++    def transform_import(self, node, results):
++        """Transform for the basic import case. Replaces the old
++           import name with a comma separated list of its
++           replacements.
++        """
++        import_mod = results.get('module')
++        pref = import_mod.get_prefix()
++
++        names = []
++
++        # create a Node list of the replacement modules
++        for name in MAPPING[import_mod.value][:-1]:
++            names.extend([Name(name[0], prefix=pref), Comma()])
++        names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
++        import_mod.replace(names)
++
++    def transform_member(self, node, results):
++        """Transform for imports of specific module elements. Replaces
++           the module to be imported from with the appropriate new
++           module.
++        """
++        mod_member = results.get('mod_member')
++        pref = mod_member.get_prefix()
++        member = results.get('member')
++
++        # Simple case with only a single member being imported
++        if member:
++            # this may be a list of length one, or just a node
++            if isinstance(member, list):
++                member = member[0]
++            new_name = None
++            for change in MAPPING[mod_member.value]:
++                if member.value in change[1]:
++                    new_name = change[0]
++                    break
++            if new_name:
++                mod_member.replace(Name(new_name, prefix=pref))
++            else:
++                self.cannot_convert(node,
++                                    'This is an invalid module element')
++
++        # Multiple members being imported
++        else:
++            # a dictionary for replacements, order matters
++            modules = []
++            mod_dict = {}
++            members = results.get('members')
++            for member in members:
++                member = member.value
++                # we only care about the actual members
++                if member != ',':
++                    for change in MAPPING[mod_member.value]:
++                        if member in change[1]:
++                            if change[0] in mod_dict:
++                                mod_dict[change[0]].append(member)
++                            else:
++                                mod_dict[change[0]] = [member]
++                                modules.append(change[0])
++
++            new_nodes = []
++            for module in modules:
++                elts = mod_dict[module]
++                names = []
++                for elt in elts[:-1]:
++                    names.extend([Name(elt, prefix=pref), Comma()])
++                names.append(Name(elts[-1], prefix=pref))
++                new_nodes.append(FromImport(module, names))
++            if new_nodes:
++                nodes = []
++                for new_node in new_nodes[:-1]:
++                    nodes.extend([new_node, Newline()])
++                nodes.append(new_nodes[-1])
++                node.replace(nodes)
++            else:
++                self.cannot_convert(node, 'All module elements are invalid')
++
++    def transform_dot(self, node, results):
++        """Transform for calls to module members in code."""
++        module_dot = results.get('module_dot')
++        member = results.get('member')
++        # this may be a list of length one, or just a node
++        if isinstance(member, list):
++            member = member[0]
++        new_name = None
++        for change in MAPPING[module_dot.value]:
++            if member.value in change[1]:
++                new_name = change[0]
++                break
++        if new_name:
++            module_dot.replace(Name(new_name,
++                                    prefix=module_dot.get_prefix()))
++        else:
++            self.cannot_convert(node, 'This is an invalid module element')
++
++    def transform(self, node, results):
++        if results.get('module'):
++            self.transform_import(node, results)
++        elif results.get('mod_member'):
++            self.transform_member(node, results)
++        elif results.get('module_dot'):
++            self.transform_dot(node, results)
++        # Renaming and star imports are not supported for these modules.
++        elif results.get('module_star'):
++            self.cannot_convert(node, 'Cannot handle star imports.')
++        elif results.get('module_as'):
++            self.cannot_convert(node, 'This module is now multiple modules')
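
Illustrative rewrites for the import, member-import, and dotted-use paths
above (d is a hypothetical name):

    import urllib        ->  import urllib.request, urllib.parse, urllib.error
    urllib.urlencode(d)  ->  urllib.parse.urlencode(d)
    from urllib2 import urlopen, HTTPError
        ->  from urllib.request import urlopen
            from urllib.error import HTTPError
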
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_ws_comma.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_ws_comma.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,39 @@
++"""Fixer that changes 'a ,b' into 'a, b'.
++
++This also changes '{a :b}' into '{a: b}', but does not touch other
++uses of colons.  It does not touch other uses of whitespace.
++
++"""
++
++from .. import pytree
++from ..pgen2 import token
++from .. import fixer_base
++
++class FixWsComma(fixer_base.BaseFix):
++
++    explicit = True # The user must ask for this fixer
++
++    PATTERN = """
++    any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
++    """
++
++    COMMA = pytree.Leaf(token.COMMA, ",")
++    COLON = pytree.Leaf(token.COLON, ":")
++    SEPS = (COMMA, COLON)
++
++    def transform(self, node, results):
++        new = node.clone()
++        comma = False
++        for child in new.children:
++            if child in self.SEPS:
++                prefix = child.get_prefix()
++                if prefix.isspace() and "\n" not in prefix:
++                    child.set_prefix("")
++                comma = True
++            else:
++                if comma:
++                    prefix = child.get_prefix()
++                    if not prefix:
++                        child.set_prefix(" ")
++                comma = False
++        return new
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_xrange.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_xrange.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,64 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes xrange(...) into range(...)."""
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name, Call, consuming_calls
++from .. import patcomp
++
++
++class FixXrange(fixer_base.BaseFix):
++
++    PATTERN = """
++              power<
++                 (name='range'|name='xrange') trailer< '(' args=any ')' >
++              rest=any* >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        if name.value == "xrange":
++            return self.transform_xrange(node, results)
++        elif name.value == "range":
++            return self.transform_range(node, results)
++        else:
++            raise ValueError(repr(name))
++
++    def transform_xrange(self, node, results):
++        name = results["name"]
++        name.replace(Name("range", prefix=name.get_prefix()))
++
++    def transform_range(self, node, results):
++        if not self.in_special_context(node):
++            range_call = Call(Name("range"), [results["args"].clone()])
++            # Encase the range call in list().
++            list_call = Call(Name("list"), [range_call],
++                             prefix=node.get_prefix())
++            # Put things that were after the range() call after the list call.
++            for n in results["rest"]:
++                list_call.append_child(n)
++            return list_call
++        return node
++
++    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
++    p1 = patcomp.compile_pattern(P1)
++
++    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
++            | comp_for< 'for' any 'in' node=any any* >
++            | comparison< any 'in' node=any any*>
++         """
++    p2 = patcomp.compile_pattern(P2)
++
++    def in_special_context(self, node):
++        if node.parent is None:
++            return False
++        results = {}
++        if (node.parent.parent is not None and
++               self.p1.match(node.parent.parent, results) and
++               results["node"] is node):
++            # list(d.keys()) -> list(d.keys()), etc.
++            return results["func"].value in consuming_calls
++        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
++        return self.p2.match(node.parent, results) and results["node"] is node
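
Illustrative rewrites showing the special-context checks above:

    xrange(10)          ->  range(10)
    x = range(5)        ->  x = list(range(5))
    sorted(range(5))    ->  sorted(range(5))    # consuming call, unchanged
    for i in range(5):  ->  for i in range(5):  # loop context, unchanged
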
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_xreadlines.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_xreadlines.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,24 @@
++"""Fix "for x in f.xreadlines()" -> "for x in f".
++
++This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
++# Author: Collin Winter
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name
++
++
++class FixXreadlines(fixer_base.BaseFix):
++    PATTERN = """
++    power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
++    |
++    power< any+ trailer< '.' no_call='xreadlines' > >
++    """
++
++    def transform(self, node, results):
++        no_call = results.get("no_call")
++
++        if no_call:
++            no_call.replace(Name("__iter__", prefix=no_call.get_prefix()))
++        else:
++            node.replace([x.clone() for x in results["call"]])
+diff -r 531f2e948299 refactor/fixes/.svn/text-base/fix_zip.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/.svn/text-base/fix_zip.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,34 @@
++"""
++Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
++unless there exists a 'from future_builtins import zip' statement in the
++top-level namespace.
++
++We avoid the transformation if the zip() call is directly contained in
++iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
++"""
++
++# Local imports
++from .. import fixer_base
++from ..fixer_util import Name, Call, in_special_context
++
++class FixZip(fixer_base.ConditionalFix):
++
++    PATTERN = """
++    power< 'zip' args=trailer< '(' [any] ')' >
++    >
++    """
++
++    skip_on = "future_builtins.zip"
++
++    def transform(self, node, results):
++        if self.should_skip(node):
++            return
++
++        if in_special_context(node):
++            return None
++
++        new = node.clone()
++        new.set_prefix("")
++        new = Call(Name("list"), [new])
++        new.set_prefix(node.get_prefix())
++        return new
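
Illustrative rewrites (a and b are hypothetical); in_special_context()
suppresses the wrapping inside consuming calls and for-loops:

    zip(a, b)            ->  list(zip(a, b))
    for p in zip(a, b):  ->  for p in zip(a, b):   # unchanged
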
+diff -r 531f2e948299 refactor/fixes/__init__.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,3 @@
++from . import from2
++from . import from3
++from .from2 import *
+diff -r 531f2e948299 refactor/fixes/fixer_common.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/fixer_common.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,4 @@
++# Common fixer imports
++from .. import fixer_base
++from ..fixer_util import Name, Call, consuming_calls, attr_chain
++from .. import patcomp
+diff -r 531f2e948299 refactor/fixes/from2/__init__.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,49 @@
++from . import fix_apply
++from . import fix_basestring
++from . import fix_buffer
++from . import fix_callable
++from . import fix_dict
++from . import fix_except
++from . import fix_exec
++from . import fix_execfile
++from . import fix_filter
++from . import fix_funcattrs
++from . import fix_future
++from . import fix_getcwdu
++from . import fix_has_key
++from . import fix_idioms
++from . import fix_import
++from . import fix_imports
++from . import fix_imports2
++from . import fix_input
++from . import fix_intern
++from . import fix_isinstance
++from . import fix_itertools
++from . import fix_itertools_imports
++from . import fix_long
++from . import fix_map
++from . import fix_metaclass
++from . import fix_methodattrs
++from . import fix_ne
++from . import fix_next
++from . import fix_nonzero
++from . import fix_numliterals
++from . import fix_paren
++from . import fix_print
++from . import fix_raise
++from . import fix_raw_input
++from . import fix_reduce
++from . import fix_renames
++from . import fix_repr
++from . import fix_set_literal
++from . import fix_standarderror
++from . import fix_sys_exc
++from . import fix_throw
++from . import fix_tuple_params
++from . import fix_types
++from . import fix_unicode
++from . import fix_urllib
++from . import fix_ws_comma
++from . import fix_xrange
++from . import fix_xreadlines
++from . import fix_zip
+diff -r 531f2e948299 refactor/fixes/from2/fix_apply.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_apply.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,58 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for apply().
++
++This converts apply(func, v, k) into (func)(*v, **k)."""
++
++# Local imports
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Call, Comma, parenthesize
++
++class FixApply(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'apply'
++        trailer<
++            '('
++            arglist<
++                (not argument<NAME '=' any>) func=any ','
++                (not argument<NAME '=' any>) args=any [','
++                (not argument<NAME '=' any>) kwds=any] [',']
++            >
++            ')'
++        >
++    >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++        assert results
++        func = results["func"]
++        args = results["args"]
++        kwds = results.get("kwds")
++        prefix = node.get_prefix()
++        func = func.clone()
++        if (func.type not in (token.NAME, syms.atom) and
++            (func.type != syms.power or
++             func.children[-2].type == token.DOUBLESTAR)):
++            # Need to parenthesize
++            func = parenthesize(func)
++        func.set_prefix("")
++        args = args.clone()
++        args.set_prefix("")
++        if kwds is not None:
++            kwds = kwds.clone()
++            kwds.set_prefix("")
++        l_newargs = [pytree.Leaf(token.STAR, "*"), args]
++        if kwds is not None:
++            l_newargs.extend([Comma(),
++                              pytree.Leaf(token.DOUBLESTAR, "**"),
++                              kwds])
++            l_newargs[-2].set_prefix(" ") # that's the ** token
++        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
++        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
++        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
++        return Call(func, l_newargs, prefix=prefix)
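
As a sketch of the rewrite this fixer performs (f, args and kwds are
placeholder names):

    result = apply(f, args, kwds)   # before (Python 2)
    result = f(*args, **kwds)       # after fix_apply

And a minimal driver sketch, assuming the bundled lib2to3 copy of this fixer
is importable (the package added in this diff would use refactor.fixes.from2
fixer names instead):

    from lib2to3.refactor import RefactoringTool

    tool = RefactoringTool(["lib2to3.fixes.fix_apply"])
    tree = tool.refactor_string("apply(f, args, kwds)\n", "<example>")
    print str(tree)  # expected: f(*args, **kwds)
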
+diff -r 531f2e948299 refactor/fixes/from2/fix_basestring.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_basestring.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,13 @@
++"""Fixer for basestring -> str."""
++# Author: Christian Heimes
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++class FixBasestring(fixer_base.BaseFix):
++
++    PATTERN = "'basestring'"
++
++    def transform(self, node, results):
++        return Name("str", prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_buffer.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_buffer.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,21 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes buffer(...) into memoryview(...)."""
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++
++class FixBuffer(fixer_base.BaseFix):
++
++    explicit = True # The user must ask for this fixer
++
++    PATTERN = """
++              power< name='buffer' trailer< '(' [any] ')' > >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("memoryview", prefix=name.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/from2/fix_callable.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_callable.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,31 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for callable().
++
++This converts callable(obj) into hasattr(obj, '__call__')."""
++
++# Local imports
++from ... import pytree
++from ... import fixer_base
++from ...fixer_util import Call, Name, String
++
++class FixCallable(fixer_base.BaseFix):
++
++    # Ignore callable(*args) or use of keywords.
++    # Either could be a hint that the builtin callable() is not being used.
++    PATTERN = """
++    power< 'callable'
++           trailer< lpar='('
++                    ( not(arglist | argument<any '=' any>) func=any
++                      | func=arglist<(not argument<any '=' any>) any ','> )
++                    rpar=')' >
++           after=any*
++    >
++    """
++
++    def transform(self, node, results):
++        func = results["func"]
++
++        args = [func.clone(), String(', '), String("'__call__'")]
++        return Call(Name("hasattr"), args, prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_dict.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_dict.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,99 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for dict methods.
++
++d.keys() -> list(d.keys())
++d.items() -> list(d.items())
++d.values() -> list(d.values())
++
++d.iterkeys() -> iter(d.keys())
++d.iteritems() -> iter(d.items())
++d.itervalues() -> iter(d.values())
++
++Except in certain very specific contexts: the iter() can be dropped
++when the context is list(), sorted(), iter() or for...in; the list()
++can be dropped when the context is list() or sorted() (but not iter()
++or for...in!). Special contexts that apply to both: list(), sorted(), tuple(),
++set(), any(), all(), sum().
++
++Note: iter(d.keys()) could be written as iter(d) but since the
++original d.iterkeys() was also redundant we don't fix this.  And there
++are (rare) contexts where it makes a difference (e.g. when passing it
++as an argument to a function that introspects the argument).
++"""
++
++# Local imports
++from ... import pytree
++from ... import patcomp
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name, Call, LParen, RParen, ArgList, Dot
++from ... import fixer_util
++
++
++iter_exempt = fixer_util.consuming_calls | set(["iter"])
++
++
++class FixDict(fixer_base.BaseFix):
++    PATTERN = """
++    power< head=any+
++         trailer< '.' method=('keys'|'items'|'values'|
++                              'iterkeys'|'iteritems'|'itervalues') >
++         parens=trailer< '(' ')' >
++         tail=any*
++    >
++    """
++
++    def transform(self, node, results):
++        head = results["head"]
++        method = results["method"][0] # Extract node for method name
++        tail = results["tail"]
++        syms = self.syms
++        method_name = method.value
++        isiter = method_name.startswith("iter")
++        if isiter:
++            method_name = method_name[4:]
++        assert method_name in ("keys", "items", "values"), repr(method)
++        head = [n.clone() for n in head]
++        tail = [n.clone() for n in tail]
++        special = not tail and self.in_special_context(node, isiter)
++        args = head + [pytree.Node(syms.trailer,
++                                   [Dot(),
++                                    Name(method_name,
++                                         prefix=method.get_prefix())]),
++                       results["parens"].clone()]
++        new = pytree.Node(syms.power, args)
++        if not special:
++            new.set_prefix("")
++            new = Call(Name(isiter and "iter" or "list"), [new])
++        if tail:
++            new = pytree.Node(syms.power, [new] + tail)
++        new.set_prefix(node.get_prefix())
++        return new
++
++    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
++    p1 = patcomp.compile_pattern(P1)
++
++    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
++            | comp_for< 'for' any 'in' node=any any* >
++         """
++    p2 = patcomp.compile_pattern(P2)
++
++    def in_special_context(self, node, isiter):
++        if node.parent is None:
++            return False
++        results = {}
++        if (node.parent.parent is not None and
++               self.p1.match(node.parent.parent, results) and
++               results["node"] is node):
++            if isiter:
++                # iter(d.iterkeys()) -> iter(d.keys()), etc.
++                return results["func"].value in iter_exempt
++            else:
++                # list(d.keys()) -> list(d.keys()), etc.
++                return results["func"].value in fixer_util.consuming_calls
++        if not isiter:
++            return False
++        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
++        return self.p2.match(node.parent, results) and results["node"] is node
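
A sketch of the rewrites, including the special contexts described in the
docstring (d is a placeholder dict):

    ks = d.keys()           # -> ks = list(d.keys())
    it = d.iteritems()      # -> it = iter(d.items())
    for k in d.iterkeys():  # -> for k in d.keys():   (iter() dropped)
        pass
    sorted(d.values())      # unchanged: list() is dropped inside sorted()
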
+diff -r 531f2e948299 refactor/fixes/from2/fix_except.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_except.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,92 @@
++"""Fixer for except statements with named exceptions.
++
++The following cases will be converted:
++
++- "except E, T:" where T is a name:
++
++    except E as T:
++
++- "except E, T:" where T is not a name, tuple or list:
++
++        except E as t:
++            T = t
++
++    This is done because the target of an "except" clause must be a
++    name.
++
++- "except E, T:" where T is a tuple or list literal:
++
++        except E as t:
++            T = t.args
++"""
++# Author: Collin Winter
++
++# Local imports
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
++
++def find_excepts(nodes):
++    for i, n in enumerate(nodes):
++        if n.type == syms.except_clause:
++            if n.children[0].value == 'except':
++                yield (n, nodes[i+2])
++
++class FixExcept(fixer_base.BaseFix):
++
++    PATTERN = """
++    try_stmt< 'try' ':' suite
++                  cleanup=(except_clause ':' suite)+
++                  tail=(['except' ':' suite]
++                        ['else' ':' suite]
++                        ['finally' ':' suite]) >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++
++        tail = [n.clone() for n in results["tail"]]
++
++        try_cleanup = [ch.clone() for ch in results["cleanup"]]
++        for except_clause, e_suite in find_excepts(try_cleanup):
++            if len(except_clause.children) == 4:
++                (E, comma, N) = except_clause.children[1:4]
++                comma.replace(Name("as", prefix=" "))
++
++                if N.type != token.NAME:
++                    # Generate a new N for the except clause
++                    new_N = Name(self.new_name(), prefix=" ")
++                    target = N.clone()
++                    target.set_prefix("")
++                    N.replace(new_N)
++                    new_N = new_N.clone()
++
++                    # Insert "old_N = new_N" as the first statement in
++                    #  the except body. This loop skips leading whitespace
++                    #  and indents
++                    #TODO(cwinter) suite-cleanup
++                    suite_stmts = e_suite.children
++                    for i, stmt in enumerate(suite_stmts):
++                        if isinstance(stmt, pytree.Node):
++                            break
++
++                    # The assignment is different if old_N is a tuple or list
++                    # In that case, the assignment is old_N = new_N.args
++                    if is_tuple(N) or is_list(N):
++                        assign = Assign(target, Attr(new_N, Name('args')))
++                    else:
++                        assign = Assign(target, new_N)
++
++                    #TODO(cwinter) stopgap until children becomes a smart list
++                    for child in reversed(suite_stmts[:i]):
++                        e_suite.insert_child(0, child)
++                    e_suite.insert_child(i, assign)
++                elif N.get_prefix() == "":
++                    # No space after a comma is legal; no space after "as",
++                    # not so much.
++                    N.set_prefix(" ")
++
++        #TODO(cwinter) fix this when children becomes a smart list
++        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
++        return pytree.Node(node.type, children)
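
A sketch of the three cases from the docstring (E, a, b are placeholders;
the generated temporary name is shown as t):

    except E, T:       # -> except E as T:       (T is a simple name)
    except E, x.attr:  # -> except E as t:
                       #        x.attr = t
    except E, (a, b):  # -> except E as t:
                       #        (a, b) = t.args
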
+diff -r 531f2e948299 refactor/fixes/from2/fix_exec.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_exec.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,39 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for exec.
++
++This converts usages of the exec statement into calls to a built-in
++exec() function.
++
++exec code in ns1, ns2 -> exec(code, ns1, ns2)
++"""
++
++# Local imports
++from ... import pytree
++from ... import fixer_base
++from ...fixer_util import Comma, Name, Call
++
++
++class FixExec(fixer_base.BaseFix):
++
++    PATTERN = """
++    exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
++    |
++    exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
++    """
++
++    def transform(self, node, results):
++        assert results
++        syms = self.syms
++        a = results["a"]
++        b = results.get("b")
++        c = results.get("c")
++        args = [a.clone()]
++        args[0].set_prefix("")
++        if b is not None:
++            args.extend([Comma(), b.clone()])
++        if c is not None:
++            args.extend([Comma(), c.clone()])
++
++        return Call(Name("exec"), args, prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_execfile.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_execfile.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,51 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for execfile.
++
++This converts usages of the execfile function into calls to the built-in
++exec() function.
++"""
++
++from ... import fixer_base
++from ...fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
++                          ArgList, String, syms)
++
++
++class FixExecfile(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
++    |
++    power< 'execfile' trailer< '(' filename=any ')' > >
++    """
++
++    def transform(self, node, results):
++        assert results
++        filename = results["filename"]
++        globals = results.get("globals")
++        locals = results.get("locals")
++
++        # Copy over the prefix from the closing parenthesis of the execfile
++        # call.
++        execfile_paren = node.children[-1].children[-1].clone()
++        # Construct open().read().
++        open_args = ArgList([filename.clone()], rparen=execfile_paren)
++        open_call = Node(syms.power, [Name("open"), open_args])
++        read = [Node(syms.trailer, [Dot(), Name('read')]),
++                Node(syms.trailer, [LParen(), RParen()])]
++        open_expr = [open_call] + read
++        # Wrap the open call in a compile call. This is so the filename will be
++        # preserved in the execed code.
++        filename_arg = filename.clone()
++        filename_arg.set_prefix(" ")
++        exec_str = String("'exec'", " ")
++        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
++        compile_call = Call(Name("compile"), compile_args, "")
++        # Finally, replace the execfile call with an exec call.
++        args = [compile_call]
++        if globals is not None:
++            args.extend([Comma(), globals.clone()])
++        if locals is not None:
++            args.extend([Comma(), locals.clone()])
++        return Call(Name("exec"), args, prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_filter.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_filter.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,75 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes filter(F, X) into list(filter(F, X)).
++
++We avoid the transformation if the filter() call is directly contained
++in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
++for V in <>:.
++
++NOTE: This is still not correct if the original code was depending on
++filter(F, X) to return a string if X is a string and a tuple if X is a
++tuple.  That would require type inference, which we don't do.  Let
++Python 2.6 figure it out.
++"""
++
++# Local imports
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name, Call, ListComp, in_special_context
++
++class FixFilter(fixer_base.ConditionalFix):
++
++    PATTERN = """
++    filter_lambda=power<
++        'filter'
++        trailer<
++            '('
++            arglist<
++                lambdef< 'lambda'
++                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
++                >
++                ','
++                it=any
++            >
++            ')'
++        >
++    >
++    |
++    power<
++        'filter'
++        trailer< '(' arglist< none='None' ',' seq=any > ')' >
++    >
++    |
++    power<
++        'filter'
++        args=trailer< '(' [any] ')' >
++    >
++    """
++
++    skip_on = "future_builtins.filter"
++
++    def transform(self, node, results):
++        if self.should_skip(node):
++            return
++
++        if "filter_lambda" in results:
++            new = ListComp(results.get("fp").clone(),
++                           results.get("fp").clone(),
++                           results.get("it").clone(),
++                           results.get("xp").clone())
++
++        elif "none" in results:
++            new = ListComp(Name("_f"),
++                           Name("_f"),
++                           results["seq"].clone(),
++                           Name("_f"))
++
++        else:
++            if in_special_context(node):
++                return None
++            new = node.clone()
++            new.set_prefix("")
++            new = Call(Name("list"), [new])
++        new.set_prefix(node.get_prefix())
++        return new
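
A sketch of the three pattern alternatives (seq and f are placeholders):

    filter(lambda x: x > 0, seq)  # -> [x for x in seq if x > 0]
    filter(None, seq)             # -> [_f for _f in seq if _f]
    filter(f, seq)                # -> list(filter(f, seq))
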
+diff -r 531f2e948299 refactor/fixes/from2/fix_funcattrs.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_funcattrs.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,19 @@
++"""Fix function attribute names (f.func_x -> f.__x__)."""
++# Author: Collin Winter
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++
++class FixFuncattrs(fixer_base.BaseFix):
++    PATTERN = """
++    power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
++                                  | 'func_name' | 'func_defaults' | 'func_code'
++                                  | 'func_dict') > any* >
++    """
++
++    def transform(self, node, results):
++        attr = results["attr"][0]
++        attr.replace(Name(("__%s__" % attr.value[5:]),
++                          prefix=attr.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/from2/fix_future.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_future.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,20 @@
++"""Remove __future__ imports
++
++from __future__ import foo is replaced with an empty line.
++"""
++# Author: Christian Heimes
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import BlankLine
++
++class FixFuture(fixer_base.BaseFix):
++    PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
++
++    # This should be run last -- some things check for the import
++    run_order = 10
++
++    def transform(self, node, results):
++        new = BlankLine()
++        new.prefix = node.get_prefix()
++        return new
+diff -r 531f2e948299 refactor/fixes/from2/fix_getcwdu.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_getcwdu.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,18 @@
++"""
++Fixer that changes os.getcwdu() to os.getcwd().
++"""
++# Author: Victor Stinner
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++class FixGetcwdu(fixer_base.BaseFix):
++
++    PATTERN = """
++              power< 'os' trailer< dot='.' name='getcwdu' > any* >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("getcwd", prefix=name.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/from2/fix_has_key.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_has_key.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,109 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for has_key().
++
++Calls to .has_key() methods are expressed in terms of the 'in'
++operator:
++
++    d.has_key(k) -> k in d
++
++CAVEATS:
++1) While the primary target of this fixer is dict.has_key(), the
++   fixer will change any has_key() method call, regardless of its
++   class.
++
++2) Cases like this will not be converted:
++
++    m = d.has_key
++    if m(k):
++        ...
++
++   Only *calls* to has_key() are converted. While it is possible to
++   convert the above to something like
++
++    m = d.__contains__
++    if m(k):
++        ...
++
++   this is currently not done.
++"""
++
++# Local imports
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name, parenthesize
++
++
++class FixHasKey(fixer_base.BaseFix):
++
++    PATTERN = """
++    anchor=power<
++        before=any+
++        trailer< '.' 'has_key' >
++        trailer<
++            '('
++            ( not(arglist | argument<any '=' any>) arg=any
++            | arglist<(not argument<any '=' any>) arg=any ','>
++            )
++            ')'
++        >
++        after=any*
++    >
++    |
++    negation=not_test<
++        'not'
++        anchor=power<
++            before=any+
++            trailer< '.' 'has_key' >
++            trailer<
++                '('
++                ( not(arglist | argument<any '=' any>) arg=any
++                | arglist<(not argument<any '=' any>) arg=any ','>
++                )
++                ')'
++            >
++        >
++    >
++    """
++
++    def transform(self, node, results):
++        assert results
++        syms = self.syms
++        if (node.parent.type == syms.not_test and
++            self.pattern.match(node.parent)):
++            # Don't transform a node matching the first alternative of the
++            # pattern when its parent matches the second alternative
++            return None
++        negation = results.get("negation")
++        anchor = results["anchor"]
++        prefix = node.get_prefix()
++        before = [n.clone() for n in results["before"]]
++        arg = results["arg"].clone()
++        after = results.get("after")
++        if after:
++            after = [n.clone() for n in after]
++        if arg.type in (syms.comparison, syms.not_test, syms.and_test,
++                        syms.or_test, syms.test, syms.lambdef, syms.argument):
++            arg = parenthesize(arg)
++        if len(before) == 1:
++            before = before[0]
++        else:
++            before = pytree.Node(syms.power, before)
++        before.set_prefix(" ")
++        n_op = Name("in", prefix=" ")
++        if negation:
++            n_not = Name("not", prefix=" ")
++            n_op = pytree.Node(syms.comp_op, (n_not, n_op))
++        new = pytree.Node(syms.comparison, (arg, n_op, before))
++        if after:
++            new = parenthesize(new)
++            new = pytree.Node(syms.power, (new,) + tuple(after))
++        if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
++                                syms.and_expr, syms.shift_expr,
++                                syms.arith_expr, syms.term,
++                                syms.factor, syms.power):
++            new = parenthesize(new)
++        new.set_prefix(prefix)
++        return new
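
A sketch of the rewrite, including the negated form (d and k are
placeholders):

    d.has_key(k)      # -> k in d
    not d.has_key(k)  # -> k not in d
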
+diff -r 531f2e948299 refactor/fixes/from2/fix_idioms.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_idioms.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,134 @@
++"""Adjust some old Python 2 idioms to their modern counterparts.
++
++* Change some type comparisons to isinstance() calls:
++    type(x) == T -> isinstance(x, T)
++    type(x) is T -> isinstance(x, T)
++    type(x) != T -> not isinstance(x, T)
++    type(x) is not T -> not isinstance(x, T)
++
++* Change "while 1:" into "while True:".
++
++* Change both
++
++    v = list(EXPR)
++    v.sort()
++    foo(v)
++
++and the more general
++
++    v = EXPR
++    v.sort()
++    foo(v)
++
++into
++
++    v = sorted(EXPR)
++    foo(v)
++"""
++# Author: Jacques Frechet, Collin Winter
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Call, Comma, Name, Node, syms
++
++CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
++TYPE = "power< 'type' trailer< '(' x=any ')' > >"
++
++class FixIdioms(fixer_base.BaseFix):
++
++    explicit = True # The user must ask for this fixer
++
++    PATTERN = r"""
++        isinstance=comparison< %s %s T=any >
++        |
++        isinstance=comparison< T=any %s %s >
++        |
++        while_stmt< 'while' while='1' ':' any+ >
++        |
++        sorted=any<
++            any*
++            simple_stmt<
++              expr_stmt< id1=any '='
++                         power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
++              >
++              '\n'
++            >
++            sort=
++            simple_stmt<
++              power< id2=any
++                     trailer< '.' 'sort' > trailer< '(' ')' >
++              >
++              '\n'
++            >
++            next=any*
++        >
++        |
++        sorted=any<
++            any*
++            simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
++            sort=
++            simple_stmt<
++              power< id2=any
++                     trailer< '.' 'sort' > trailer< '(' ')' >
++              >
++              '\n'
++            >
++            next=any*
++        >
++    """ % (TYPE, CMP, CMP, TYPE)
++
++    def match(self, node):
++        r = super(FixIdioms, self).match(node)
++        # If we've matched one of the sort/sorted subpatterns above, we
++        # want to reject matches where the initial assignment and the
++        # subsequent .sort() call involve different identifiers.
++        if r and "sorted" in r:
++            if r["id1"] == r["id2"]:
++                return r
++            return None
++        return r
++
++    def transform(self, node, results):
++        if "isinstance" in results:
++            return self.transform_isinstance(node, results)
++        elif "while" in results:
++            return self.transform_while(node, results)
++        elif "sorted" in results:
++            return self.transform_sort(node, results)
++        else:
++            raise RuntimeError("Invalid match")
++
++    def transform_isinstance(self, node, results):
++        x = results["x"].clone() # The thing inside of type()
++        T = results["T"].clone() # The type being compared against
++        x.set_prefix("")
++        T.set_prefix(" ")
++        test = Call(Name("isinstance"), [x, Comma(), T])
++        if "n" in results:
++            test.set_prefix(" ")
++            test = Node(syms.not_test, [Name("not"), test])
++        test.set_prefix(node.get_prefix())
++        return test
++
++    def transform_while(self, node, results):
++        one = results["while"]
++        one.replace(Name("True", prefix=one.get_prefix()))
++
++    def transform_sort(self, node, results):
++        sort_stmt = results["sort"]
++        next_stmt = results["next"]
++        list_call = results.get("list")
++        simple_expr = results.get("expr")
++
++        if list_call:
++            list_call.replace(Name("sorted", prefix=list_call.get_prefix()))
++        elif simple_expr:
++            new = simple_expr.clone()
++            new.set_prefix("")
++            simple_expr.replace(Call(Name("sorted"), [new],
++                                     prefix=simple_expr.get_prefix()))
++        else:
++            raise RuntimeError("should not have reached here")
++        sort_stmt.remove()
++        if next_stmt:
++            next_stmt[0].set_prefix(sort_stmt.get_prefix())
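
A sketch of the three idioms this (explicit) fixer modernizes (x and items
are placeholders):

    type(x) == int   # -> isinstance(x, int)
    while 1:         # -> while True:
        pass
    v = list(items)  # -> v = sorted(items)
    v.sort()         #    (the .sort() line is removed)
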
+diff -r 531f2e948299 refactor/fixes/from2/fix_import.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_import.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,90 @@
++"""Fixer for import statements.
++If spam is being imported from the local directory, this import:
++    from spam import eggs
++Becomes:
++    from .spam import eggs
++
++And this import:
++    import spam
++Becomes:
++    from . import spam
++"""
++
++# Local imports
++from ... import fixer_base
++from os.path import dirname, join, exists, pathsep
++from ...fixer_util import FromImport, syms, token
++
++
++def traverse_imports(names):
++    """
++    Walks over all the names imported in a dotted_as_names node.
++    """
++    pending = [names]
++    while pending:
++        node = pending.pop()
++        if node.type == token.NAME:
++            yield node.value
++        elif node.type == syms.dotted_name:
++            yield "".join([ch.value for ch in node.children])
++        elif node.type == syms.dotted_as_name:
++            pending.append(node.children[0])
++        elif node.type == syms.dotted_as_names:
++            pending.extend(node.children[::-2])
++        else:
++            raise AssertionError("unknown node type")
++
++
++class FixImport(fixer_base.BaseFix):
++
++    PATTERN = """
++    import_from< 'from' imp=any 'import' ['('] any [')'] >
++    |
++    import_name< 'import' imp=any >
++    """
++
++    def transform(self, node, results):
++        imp = results['imp']
++
++        if node.type == syms.import_from:
++            # Some imps are top-level (e.g. 'import ham')
++            # some are first level (e.g. 'import ham.eggs')
++            # some are third level (e.g. 'import ham.eggs as spam')
++            # Hence, the loop.
++            while not hasattr(imp, 'value'):
++                imp = imp.children[0]
++            if self.probably_a_local_import(imp.value):
++                imp.value = "." + imp.value
++                imp.changed()
++                return node
++        else:
++            have_local = False
++            have_absolute = False
++            for mod_name in traverse_imports(imp):
++                if self.probably_a_local_import(mod_name):
++                    have_local = True
++                else:
++                    have_absolute = True
++            if have_absolute:
++                if have_local:
++                    # We won't handle both sibling and absolute imports in the
++                    # same statement at the moment.
++                    self.warning(node, "absolute and local imports together")
++                return
++
++            new = FromImport('.', [imp])
++            new.set_prefix(node.get_prefix())
++            return new
++
++    def probably_a_local_import(self, imp_name):
++        imp_name = imp_name.split('.', 1)[0]
++        base_path = dirname(self.filename)
++        base_path = join(base_path, imp_name)
++        # If there is no __init__.py next to the file, it's not in a package,
++        # so it can't be a relative import.
++        if not exists(join(dirname(base_path), '__init__.py')):
++            return False
++        for ext in ['.py', pathsep, '.pyc', '.so', '.sl', '.pyd']:
++            if exists(base_path + ext):
++                return True
++        return False
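
A sketch of the rewrite when spam lives next to the file being fixed (i.e.
probably_a_local_import() finds spam.py or spam/ beside it):

    from spam import eggs  # -> from .spam import eggs
    import spam            # -> from . import spam
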
+diff -r 531f2e948299 refactor/fixes/from2/fix_imports.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_imports.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,145 @@
++"""Fix incompatible imports and module references."""
++# Authors: Collin Winter, Nick Edds
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name, attr_chain
++
++MAPPING = {'StringIO':  'io',
++           'cStringIO': 'io',
++           'cPickle': 'pickle',
++           '__builtin__' : 'builtins',
++           'copy_reg': 'copyreg',
++           'Queue': 'queue',
++           'SocketServer': 'socketserver',
++           'ConfigParser': 'configparser',
++           'repr': 'reprlib',
++           'FileDialog': 'tkinter.filedialog',
++           'tkFileDialog': 'tkinter.filedialog',
++           'SimpleDialog': 'tkinter.simpledialog',
++           'tkSimpleDialog': 'tkinter.simpledialog',
++           'tkColorChooser': 'tkinter.colorchooser',
++           'tkCommonDialog': 'tkinter.commondialog',
++           'Dialog': 'tkinter.dialog',
++           'Tkdnd': 'tkinter.dnd',
++           'tkFont': 'tkinter.font',
++           'tkMessageBox': 'tkinter.messagebox',
++           'ScrolledText': 'tkinter.scrolledtext',
++           'Tkconstants': 'tkinter.constants',
++           'Tix': 'tkinter.tix',
++           'ttk': 'tkinter.ttk',
++           'Tkinter': 'tkinter',
++           'markupbase': '_markupbase',
++           '_winreg': 'winreg',
++           'thread': '_thread',
++           'dummy_thread': '_dummy_thread',
++           # anydbm and whichdb are handled by fix_imports2
++           'dbhash': 'dbm.bsd',
++           'dumbdbm': 'dbm.dumb',
++           'dbm': 'dbm.ndbm',
++           'gdbm': 'dbm.gnu',
++           'xmlrpclib': 'xmlrpc.client',
++           'DocXMLRPCServer': 'xmlrpc.server',
++           'SimpleXMLRPCServer': 'xmlrpc.server',
++           'httplib': 'http.client',
++           'htmlentitydefs' : 'html.entities',
++           'HTMLParser' : 'html.parser',
++           'Cookie': 'http.cookies',
++           'cookielib': 'http.cookiejar',
++           'BaseHTTPServer': 'http.server',
++           'SimpleHTTPServer': 'http.server',
++           'CGIHTTPServer': 'http.server',
++           #'test.test_support': 'test.support',
++           'commands': 'subprocess',
++           'UserString' : 'collections',
++           'UserList' : 'collections',
++           'urlparse' : 'urllib.parse',
++           'robotparser' : 'urllib.robotparser',
++}
++
++
++def alternates(members):
++    return "(" + "|".join(map(repr, members)) + ")"
++
++
++def build_pattern(mapping=MAPPING):
++    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
++    bare_names = alternates(mapping.keys())
++
++    yield """name_import=import_name< 'import' ((%s) |
++               multiple_imports=dotted_as_names< any* (%s) any* >) >
++          """ % (mod_list, mod_list)
++    yield """import_from< 'from' (%s) 'import' ['(']
++              ( any | import_as_name< any 'as' any > |
++                import_as_names< any* >)  [')'] >
++          """ % mod_list
++    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
++               multiple_imports=dotted_as_names<
++                 any* dotted_as_name< (%s) 'as' any > any* >) >
++          """ % (mod_list, mod_list)
++
++    # Find usages of module members in code e.g. thread.foo(bar)
++    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
++
++
++class FixImports(fixer_base.BaseFix):
++
++    order = "pre" # Pre-order tree traversal
++
++    # This is overridden in fix_imports2.
++    mapping = MAPPING
++
++    # We want to run this fixer late, so fix_import doesn't try to make stdlib
++    # renames into relative imports.
++    run_order = 6
++
++    def build_pattern(self):
++        return "|".join(build_pattern(self.mapping))
++
++    def compile_pattern(self):
++        # We override this, so MAPPING can be programmatically altered and the
++        # changes will be reflected in PATTERN.
++        self.PATTERN = self.build_pattern()
++        super(FixImports, self).compile_pattern()
++
++    # Don't match the node if it's within another match.
++    def match(self, node):
++        match = super(FixImports, self).match
++        results = match(node)
++        if results:
++            # Module usage could be in the trailer of an attribute lookup, so we
++            # might have nested matches when "bare_with_attr" is present.
++            if "bare_with_attr" not in results and \
++                    any([match(obj) for obj in attr_chain(node, "parent")]):
++                return False
++            return results
++        return False
++
++    def start_tree(self, tree, filename):
++        super(FixImports, self).start_tree(tree, filename)
++        self.replace = {}
++
++    def transform(self, node, results):
++        import_mod = results.get("module_name")
++        if import_mod:
++            mod_name = import_mod.value
++            new_name = self.mapping[mod_name]
++            import_mod.replace(Name(new_name, prefix=import_mod.get_prefix()))
++            if "name_import" in results:
++                # If it's not a "from x import x, y" or "import x as y" import,
++                # mark its usage to be replaced.
++                self.replace[mod_name] = new_name
++            if "multiple_imports" in results:
++                # This is a nasty hack to fix multiple imports on a line (e.g.,
++                # "import StringIO, urlparse"). The problem is that I can't
++                # figure out an easy way to make a pattern recognize the keys of
++                # MAPPING randomly sprinkled in an import statement.
++                results = self.match(node)
++                if results:
++                    self.transform(node, results)
++        else:
++            # Replace usage of the module.
++            bare_name = results["bare_with_attr"][0]
++            new_name = self.replace.get(bare_name.value)
++            if new_name:
++                bare_name.replace(Name(new_name, prefix=bare_name.get_prefix()))
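
A sketch of the module renames, including the follow-up replacement of bare
module references recorded by a plain import:

    import StringIO            # -> import io
    s = StringIO.StringIO()    # -> s = io.StringIO()
    from cPickle import loads  # -> from pickle import loads
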
+diff -r 531f2e948299 refactor/fixes/from2/fix_imports2.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_imports2.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,16 @@
++"""Fix incompatible imports and module references that must be fixed after
++fix_imports."""
++from . import fix_imports
++
++
++MAPPING = {
++            'whichdb': 'dbm',
++            'anydbm': 'dbm',
++          }
++
++
++class FixImports2(fix_imports.FixImports):
++
++    run_order = 7
++
++    mapping = MAPPING
+diff -r 531f2e948299 refactor/fixes/from2/fix_input.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_input.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,26 @@
++"""Fixer that changes input(...) into eval(input(...))."""
++# Author: Andre Roberge
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Call, Name
++from ... import patcomp
++
++
++context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
++
++
++class FixInput(fixer_base.BaseFix):
++
++    PATTERN = """
++              power< 'input' args=trailer< '(' [any] ')' > >
++              """
++
++    def transform(self, node, results):
++        # If we're already wrapped in an eval() call, we're done.
++        if context.match(node.parent.parent):
++            return
++
++        new = node.clone()
++        new.set_prefix("")
++        return Call(Name("eval"), [new], prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_intern.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_intern.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,44 @@
++# Copyright 2006 Georg Brandl.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for intern().
++
++intern(s) -> sys.intern(s)"""
++
++# Local imports
++from ... import pytree
++from ... import fixer_base
++from ...fixer_util import Name, Attr, touch_import
++
++
++class FixIntern(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'intern'
++           trailer< lpar='('
++                    ( not(arglist | argument<any '=' any>) obj=any
++                      | obj=arglist<(not argument<any '=' any>) any ','> )
++                    rpar=')' >
++           after=any*
++    >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++        obj = results["obj"].clone()
++        if obj.type == syms.arglist:
++            newarglist = obj.clone()
++        else:
++            newarglist = pytree.Node(syms.arglist, [obj.clone()])
++        after = results["after"]
++        if after:
++            after = [n.clone() for n in after]
++        new = pytree.Node(syms.power,
++                          Attr(Name("sys"), Name("intern")) +
++                          [pytree.Node(syms.trailer,
++                                       [results["lpar"].clone(),
++                                        newarglist,
++                                        results["rpar"].clone()])] + after)
++        new.set_prefix(node.get_prefix())
++        touch_import(None, 'sys', node)
++        return new
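
A sketch of the rewrite (s is a placeholder); touch_import() also adds
"import sys" to the module if it is missing:

    intern(s)  # -> sys.intern(s)
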
+diff -r 531f2e948299 refactor/fixes/from2/fix_isinstance.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_isinstance.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,52 @@
++# Copyright 2008 Armin Ronacher.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that cleans up a tuple argument to isinstance after the tokens
++in it were fixed.  This is mainly used to remove double occurrences of
++tokens as a leftover of the long -> int / unicode -> str conversion.
++
++e.g. isinstance(x, (int, long)) -> isinstance(x, (int, int))
++       -> isinstance(x, int)
++"""
++
++from ... import fixer_base
++from ...fixer_util import token
++
++
++class FixIsinstance(fixer_base.BaseFix):
++
++    PATTERN = """
++    power<
++        'isinstance'
++        trailer< '(' arglist< any ',' atom< '('
++            args=testlist_gexp< any+ >
++        ')' > > ')' >
++    >
++    """
++
++    run_order = 6
++
++    def transform(self, node, results):
++        names_inserted = set()
++        testlist = results["args"]
++        args = testlist.children
++        new_args = []
++        iterator = enumerate(args)
++        for idx, arg in iterator:
++            if arg.type == token.NAME and arg.value in names_inserted:
++                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
++                    iterator.next()
++                    continue
++            else:
++                new_args.append(arg)
++                if arg.type == token.NAME:
++                    names_inserted.add(arg.value)
++        if new_args and new_args[-1].type == token.COMMA:
++            del new_args[-1]
++        if len(new_args) == 1:
++            atom = testlist.parent
++            new_args[0].set_prefix(atom.get_prefix())
++            atom.replace(new_args[0])
++        else:
++            args[:] = new_args
++            node.changed()
+diff -r 531f2e948299 refactor/fixes/from2/fix_itertools.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_itertools.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,41 @@
++""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
++    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
++
++    Imports from itertools are fixed in fix_itertools_imports.py.
++
++    If itertools is imported as something else (e.g. import itertools as it;
++    it.izip(spam, eggs)), method calls will not get fixed.
++    """
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++class FixItertools(fixer_base.BaseFix):
++    it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
++    PATTERN = """
++              power< it='itertools'
++                  trailer<
++                     dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
++              |
++              power< func=%(it_funcs)s trailer< '(' [any] ')' > >
++              """ %(locals())
++
++    # Needs to be run after fix_(map|zip|filter)
++    run_order = 6
++
++    def transform(self, node, results):
++        prefix = None
++        func = results['func'][0]
++        if 'it' in results and func.value != 'ifilterfalse':
++            dot, it = (results['dot'], results['it'])
++            # Remove the 'itertools'
++            prefix = it.get_prefix()
++            it.remove()
++            # Replace the node which contains ('.', 'function') with the
++            # function (to be consistent with the second part of the pattern)
++            dot.remove()
++            func.parent.replace(func)
++
++        prefix = prefix or func.get_prefix()
++        func.replace(Name(func.value[1:], prefix=prefix))
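
A sketch of the renames (a, b, pred are placeholders):

    itertools.imap(f, a)             # -> map(f, a)
    itertools.izip(a, b)             # -> zip(a, b)
    itertools.ifilterfalse(pred, a)  # -> itertools.filterfalse(pred, a)
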
+diff -r 531f2e948299 refactor/fixes/from2/fix_itertools_imports.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_itertools_imports.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,52 @@
++""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import BlankLine, syms, token
++
++
++class FixItertoolsImports(fixer_base.BaseFix):
++    PATTERN = """
++              import_from< 'from' 'itertools' 'import' imports=any >
++              """ %(locals())
++
++    def transform(self, node, results):
++        imports = results['imports']
++        if imports.type == syms.import_as_name or not imports.children:
++            children = [imports]
++        else:
++            children = imports.children
++        for child in children[::2]:
++            if child.type == token.NAME:
++                member = child.value
++                name_node = child
++            else:
++                assert child.type == syms.import_as_name
++                name_node = child.children[0]
++            member_name = name_node.value
++            if member_name in ('imap', 'izip', 'ifilter'):
++                child.value = None
++                child.remove()
++            elif member_name == 'ifilterfalse':
++                node.changed()
++                name_node.value = 'filterfalse'
++
++        # Make sure the import statement is still sane
++        children = imports.children[:] or [imports]
++        remove_comma = True
++        for child in children:
++            if remove_comma and child.type == token.COMMA:
++                child.remove()
++            else:
++                remove_comma ^= True
++
++        if children[-1].type == token.COMMA:
++            children[-1].remove()
++
++        # If there are no imports left, just get rid of the entire statement
++        if not (imports.children or getattr(imports, 'value', None)) or \
++                imports.parent is None:
++            p = node.get_prefix()
++            node = BlankLine()
++            node.prefix = p
++        return node
+diff -r 531f2e948299 refactor/fixes/from2/fix_long.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_long.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,22 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that turns 'long' into 'int' everywhere.
++"""
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name, Number, is_probably_builtin
++
++
++class FixLong(fixer_base.BaseFix):
++
++    PATTERN = "'long'"
++
++    static_int = Name("int")
++
++    def transform(self, node, results):
++        if is_probably_builtin(node):
++            new = self.static_int.clone()
++            new.set_prefix(node.get_prefix())
++            return new
+diff -r 531f2e948299 refactor/fixes/from2/fix_map.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_map.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,82 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
++exists a 'from future_builtins import map' statement in the top-level
++namespace.
++
++As a special case, map(None, X) is changed into list(X).  (This is
++necessary because the semantics are changed in this case -- the new
++map(None, X) is equivalent to [(x,) for x in X].)
++
++We avoid the transformation (except for the special case mentioned
++above) if the map() call is directly contained in iter(<>), list(<>),
++tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
++
++NOTE: This is still not correct if the original code was depending on
++map(F, X, Y, ...) to go on until the longest argument is exhausted,
++substituting None for missing values -- like zip(), it now stops as
++soon as the shortest argument is exhausted.
++"""
++
++# Local imports
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name, Call, ListComp, in_special_context
++from ...pygram import python_symbols as syms
++
++class FixMap(fixer_base.ConditionalFix):
++
++    PATTERN = """
++    map_none=power<
++        'map'
++        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
++    >
++    |
++    map_lambda=power<
++        'map'
++        trailer<
++            '('
++            arglist<
++                lambdef< 'lambda'
++                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
++                >
++                ','
++                it=any
++            >
++            ')'
++        >
++    >
++    |
++    power<
++        'map'
++        args=trailer< '(' [any] ')' >
++    >
++    """
++
++    skip_on = 'future_builtins.map'
++
++    def transform(self, node, results):
++        if self.should_skip(node):
++            return
++
++        if node.parent.type == syms.simple_stmt:
++            self.warning(node, "You should use a for loop here")
++            new = node.clone()
++            new.set_prefix("")
++            new = Call(Name("list"), [new])
++        elif "map_lambda" in results:
++            new = ListComp(results.get("xp").clone(),
++                           results.get("fp").clone(),
++                           results.get("it").clone())
++        else:
++            if "map_none" in results:
++                new = results["arg"].clone()
++            else:
++                if in_special_context(node):
++                    return None
++                new = node.clone()
++            new.set_prefix("")
++            new = Call(Name("list"), [new])
++        new.set_prefix(node.get_prefix())
++        return new
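
A sketch of the pattern alternatives (f and xs are placeholders):

    map(f, xs)                # -> list(map(f, xs))
    map(None, xs)             # -> list(xs)
    map(lambda x: x + 1, xs)  # -> [x + 1 for x in xs]
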
+diff -r 531f2e948299 refactor/fixes/from2/fix_metaclass.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_metaclass.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,227 @@
++"""Fixer for __metaclass__ = X -> (metaclass=X) methods.
++
++   The various forms of classdef (inherits nothing, inherits once, inherits
++   many) don't parse the same in the CST, so we look at ALL classes for
++   a __metaclass__ and if we find one normalize the inherits to all be
++   an arglist.
++
++   For one-liner classes ('class X: pass') there is no indent/dedent so
++   we normalize those into having a suite.
++
++   Moving the __metaclass__ into the classdef can also cause the class
++   body to be empty so there is some special casing for that as well.
++
++   This fixer also tries very hard to keep original indenting and spacing
++   in all those corner cases.
++
++"""
++# Author: Jack Diederich
++
++# Local imports
++from ... import fixer_base
++from ...pygram import token
++from ...fixer_util import Name, syms, Node, Leaf
++
++
++def has_metaclass(parent):
++    """ we have to check the cls_node without changing it.
++        There are two possiblities:
++          1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
++          2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
++    """
++    for node in parent.children:
++        if node.type == syms.suite:
++            return has_metaclass(node)
++        elif node.type == syms.simple_stmt and node.children:
++            expr_node = node.children[0]
++            if expr_node.type == syms.expr_stmt and expr_node.children:
++                left_side = expr_node.children[0]
++                if isinstance(left_side, Leaf) and \
++                        left_side.value == '__metaclass__':
++                    return True
++    return False
++
++
++def fixup_parse_tree(cls_node):
++    """ one-line classes don't get a suite in the parse tree so we add
++        one to normalize the tree
++    """
++    for node in cls_node.children:
++        if node.type == syms.suite:
++            # already in the preferred format, do nothing
++            return
++
++    # !%@#! oneliners have no suite node, we have to fake one up
++    for i, node in enumerate(cls_node.children):
++        if node.type == token.COLON:
++            break
++    else:
++        raise ValueError("No class suite and no ':'!")
++
++    # move everything into a suite node
++    suite = Node(syms.suite, [])
++    while cls_node.children[i+1:]:
++        move_node = cls_node.children[i+1]
++        suite.append_child(move_node.clone())
++        move_node.remove()
++    cls_node.append_child(suite)
++    node = suite
++
++
++def fixup_simple_stmt(parent, i, stmt_node):
++    """ if there is a semi-colon all the parts count as part of the same
++        simple_stmt.  We just want the __metaclass__ part so we move
++        everything efter the semi-colon into its own simple_stmt node
++    """
++    for semi_ind, node in enumerate(stmt_node.children):
++        if node.type == token.SEMI: # *sigh*
++            break
++    else:
++        return
++
++    node.remove() # kill the semicolon
++    new_expr = Node(syms.expr_stmt, [])
++    new_stmt = Node(syms.simple_stmt, [new_expr])
++    while stmt_node.children[semi_ind:]:
++        move_node = stmt_node.children[semi_ind]
++        new_expr.append_child(move_node.clone())
++        move_node.remove()
++    parent.insert_child(i, new_stmt)
++    new_leaf1 = new_stmt.children[0].children[0]
++    old_leaf1 = stmt_node.children[0].children[0]
++    new_leaf1.set_prefix(old_leaf1.get_prefix())
++
++
++def remove_trailing_newline(node):
++    if node.children and node.children[-1].type == token.NEWLINE:
++        node.children[-1].remove()
++
++
++def find_metas(cls_node):
++    # find the suite node (Mmm, sweet nodes)
++    for node in cls_node.children:
++        if node.type == syms.suite:
++            break
++    else:
++        raise ValueError("No class suite!")
++
++    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
++    for i, simple_node in list(enumerate(node.children)):
++        if simple_node.type == syms.simple_stmt and simple_node.children:
++            expr_node = simple_node.children[0]
++            if expr_node.type == syms.expr_stmt and expr_node.children:
++                # Check if the expr_node is a simple assignment.
++                left_node = expr_node.children[0]
++                if isinstance(left_node, Leaf) and \
++                        left_node.value == '__metaclass__':
++                    # We found an assignment to __metaclass__.
++                    fixup_simple_stmt(node, i, simple_node)
++                    remove_trailing_newline(simple_node)
++                    yield (node, i, simple_node)
++
++
++def fixup_indent(suite):
++    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
++        Otherwise we get in trouble when removing __metaclass__ at suite start
++    """
++    kids = suite.children[::-1]
++    # find the first indent
++    while kids:
++        node = kids.pop()
++        if node.type == token.INDENT:
++            break
++
++    # find the first Leaf
++    while kids:
++        node = kids.pop()
++        if isinstance(node, Leaf) and node.type != token.DEDENT:
++            if node.prefix:
++                node.set_prefix('')
++            return
++        else:
++            kids.extend(node.children[::-1])
++
++
++class FixMetaclass(fixer_base.BaseFix):
++
++    PATTERN = """
++    classdef<any*>
++    """
++
++    def transform(self, node, results):
++        if not has_metaclass(node):
++            return node
++
++        fixup_parse_tree(node)
++
++        # find metaclasses, keep the last one
++        last_metaclass = None
++        for suite, i, stmt in find_metas(node):
++            last_metaclass = stmt
++            stmt.remove()
++
++        text_type = node.children[0].type # always Leaf(nnn, 'class')
++
++        # figure out what kind of classdef we have
++        if len(node.children) == 7:
++            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
++            #                 0        1       2    3        4    5    6
++            if node.children[3].type == syms.arglist:
++                arglist = node.children[3]
++            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
++            else:
++                parent = node.children[3].clone()
++                arglist = Node(syms.arglist, [parent])
++                node.set_child(3, arglist)
++        elif len(node.children) == 6:
++            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
++            #                 0        1       2     3    4    5
++            arglist = Node(syms.arglist, [])
++            node.insert_child(3, arglist)
++        elif len(node.children) == 4:
++            # Node(classdef, ['class', 'name', ':', suite])
++            #                 0        1       2    3
++            arglist = Node(syms.arglist, [])
++            node.insert_child(2, Leaf(token.RPAR, ')'))
++            node.insert_child(2, arglist)
++            node.insert_child(2, Leaf(token.LPAR, '('))
++        else:
++            raise ValueError("Unexpected class definition")
++
++        # now stick the metaclass in the arglist
++        meta_txt = last_metaclass.children[0].children[0]
++        meta_txt.value = 'metaclass'
++        orig_meta_prefix = meta_txt.get_prefix()
++
++        if arglist.children:
++            arglist.append_child(Leaf(token.COMMA, ','))
++            meta_txt.set_prefix(' ')
++        else:
++            meta_txt.set_prefix('')
++
++        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
++        expr_stmt = last_metaclass.children[0]
++        assert expr_stmt.type == syms.expr_stmt
++        expr_stmt.children[1].set_prefix('')
++        expr_stmt.children[2].set_prefix('')
++
++        arglist.append_child(last_metaclass)
++
++        fixup_indent(suite)
++
++        # check for empty suite
++        if not suite.children:
++            # one-liner that was just __metaclass__
++            suite.remove()
++            pass_leaf = Leaf(text_type, 'pass')
++            pass_leaf.set_prefix(orig_meta_prefix)
++            node.append_child(pass_leaf)
++            node.append_child(Leaf(token.NEWLINE, '\n'))
++
++        elif len(suite.children) > 1 and \
++                 (suite.children[-2].type == token.INDENT and
++                  suite.children[-1].type == token.DEDENT):
++            # there was only one line in the class body and it was __metaclass__
++            pass_leaf = Leaf(text_type, 'pass')
++            suite.insert_child(-1, pass_leaf)
++            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
+diff -r 531f2e948299 refactor/fixes/from2/fix_methodattrs.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_methodattrs.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,23 @@
++"""Fix bound method attributes (method.im_? -> method.__?__).
++"""
++# Author: Christian Heimes
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++MAP = {
++    "im_func" : "__func__",
++    "im_self" : "__self__",
++    "im_class" : "__self__.__class__"
++    }
++
++class FixMethodattrs(fixer_base.BaseFix):
++    PATTERN = """
++    power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
++    """
++
++    def transform(self, node, results):
++        attr = results["attr"][0]
++        new = MAP[attr.value]
++        attr.replace(Name(new, prefix=attr.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/from2/fix_ne.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_ne.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,22 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that turns <> into !=."""
++
++# Local imports
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++
++
++class FixNe(fixer_base.BaseFix):
++    # This is so simple that we don't need the pattern compiler.
++
++    def match(self, node):
++        # Override
++        return node.type == token.NOTEQUAL and node.value == "<>"
++
++    def transform(self, node, results):
++        new = pytree.Leaf(token.NOTEQUAL, "!=")
++        new.set_prefix(node.get_prefix())
++        return new
+diff -r 531f2e948299 refactor/fixes/from2/fix_next.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_next.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,103 @@
++"""Fixer for it.next() -> next(it), per PEP 3114."""
++# Author: Collin Winter
++
++# Things that currently aren't covered:
++#   - listcomp "next" names aren't warned
++#   - "with" statement targets aren't checked
++
++# Local imports
++from ...pgen2 import token
++from ...pygram import python_symbols as syms
++from ... import fixer_base
++from ...fixer_util import Name, Call, find_binding
++
++bind_warning = "Calls to builtin next() possibly shadowed by global binding"
++
++
++class FixNext(fixer_base.BaseFix):
++    PATTERN = """
++    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
++    |
++    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
++    |
++    classdef< 'class' any+ ':'
++              suite< any*
++                     funcdef< 'def'
++                              name='next'
++                              parameters< '(' NAME ')' > any+ >
++                     any* > >
++    |
++    global=global_stmt< 'global' any* 'next' any* >
++    """
++
++    order = "pre" # Pre-order tree traversal
++
++    def start_tree(self, tree, filename):
++        super(FixNext, self).start_tree(tree, filename)
++
++        n = find_binding('next', tree)
++        if n:
++            self.warning(n, bind_warning)
++            self.shadowed_next = True
++        else:
++            self.shadowed_next = False
++
++    def transform(self, node, results):
++        assert results
++
++        base = results.get("base")
++        attr = results.get("attr")
++        name = results.get("name")
++        mod = results.get("mod")
++
++        if base:
++            if self.shadowed_next:
++                attr.replace(Name("__next__", prefix=attr.get_prefix()))
++            else:
++                base = [n.clone() for n in base]
++                base[0].set_prefix("")
++                node.replace(Call(Name("next", prefix=node.get_prefix()), base))
++        elif name:
++            n = Name("__next__", prefix=name.get_prefix())
++            name.replace(n)
++        elif attr:
++            # We don't do this transformation if we're assigning to "x.next".
++            # Unfortunately, it doesn't seem possible to do this in PATTERN,
++            #  so it's being done here.
++            if is_assign_target(node):
++                head = results["head"]
++                if "".join([str(n) for n in head]).strip() == '__builtin__':
++                    self.warning(node, bind_warning)
++                return
++            attr.replace(Name("__next__"))
++        elif "global" in results:
++            self.warning(node, bind_warning)
++            self.shadowed_next = True
++
++
++### The following functions help test if node is part of an assignment
++###  target.
++
++def is_assign_target(node):
++    assign = find_assign(node)
++    if assign is None:
++        return False
++
++    for child in assign.children:
++        if child.type == token.EQUAL:
++            return False
++        elif is_subtree(child, node):
++            return True
++    return False
++
++def find_assign(node):
++    if node.type == syms.expr_stmt:
++        return node
++    if node.type == syms.simple_stmt or node.parent is None:
++        return None
++    return find_assign(node.parent)
++
++def is_subtree(root, node):
++    if root == node:
++        return True
++    return any([is_subtree(c, node) for c in root.children])
+diff -r 531f2e948299 refactor/fixes/from2/fix_nonzero.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_nonzero.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,20 @@
++"""Fixer for __nonzero__ -> __bool__ methods."""
++# Author: Collin Winter
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name, syms
++
++class FixNonzero(fixer_base.BaseFix):
++    PATTERN = """
++    classdef< 'class' any+ ':'
++              suite< any*
++                     funcdef< 'def' name='__nonzero__'
++                              parameters< '(' NAME ')' > any+ >
++                     any* > >
++    """
++
++    def transform(self, node, results):
++        name = results["name"]
++        new = Name("__bool__", prefix=name.get_prefix())
++        name.replace(new)
+diff -r 531f2e948299 refactor/fixes/from2/fix_numliterals.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_numliterals.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,27 @@
++"""Fixer that turns 1L into 1, 0755 into 0o755.
++"""
++# Copyright 2007 Georg Brandl.
++# Licensed to PSF under a Contributor Agreement.
++
++# Local imports
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Number
++
++
++class FixNumliterals(fixer_base.BaseFix):
++    # This is so simple that we don't need the pattern compiler.
++
++    def match(self, node):
++        # Override
++        return (node.type == token.NUMBER and
++                (node.value.startswith("0") or node.value[-1] in "Ll"))
++
++    def transform(self, node, results):
++        val = node.value
++        if val[-1] in 'Ll':
++            val = val[:-1]
++        elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
++            val = "0o" + val[1:]
++
++        return Number(val, prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_paren.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_paren.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,42 @@
++"""Fixer that adds parentheses where they are required.
++
++This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
++
++# By Taek Joo Kim and Benjamin Peterson
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import LParen, RParen
++
++# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
++class FixParen(fixer_base.BaseFix):
++    PATTERN = """
++        atom< ('[' | '(')
++            (listmaker< any
++                comp_for<
++                    'for' NAME 'in'
++                    target=testlist_safe< any (',' any)+ [',']
++                     >
++                    [any]
++                >
++            >
++            |
++            testlist_gexp< any
++                comp_for<
++                    'for' NAME 'in'
++                    target=testlist_safe< any (',' any)+ [',']
++                     >
++                    [any]
++                >
++            >)
++        (']' | ')') >
++    """
++
++    def transform(self, node, results):
++        target = results["target"]
++
++        lparen = LParen()
++        lparen.set_prefix(target.get_prefix())
++        target.set_prefix("") # Make it hug the parentheses
++        target.insert_child(0, lparen)
++        target.append_child(RParen())
+diff -r 531f2e948299 refactor/fixes/from2/fix_print.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_print.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,90 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for print.
++
++Change:
++    'print'          into 'print()'
++    'print ...'      into 'print(...)'
++    'print ... ,'    into 'print(..., end=" ")'
++    'print >>x, ...' into 'print(..., file=x)'
++
++No changes are applied if print_function is imported from __future__
++
++"""
++
++# Local imports
++from ... import patcomp
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name, Call, Comma, String, is_tuple
++
++
++parend_expr = patcomp.compile_pattern(
++              """atom< '(' [atom|STRING|NAME] ')' >"""
++              )
++
++
++class FixPrint(fixer_base.ConditionalFix):
++
++    PATTERN = """
++              simple_stmt< any* bare='print' any* > | print_stmt
++              """
++
++    skip_on = '__future__.print_function'
++
++    def transform(self, node, results):
++        assert results
++
++        if self.should_skip(node):
++            return
++
++        bare_print = results.get("bare")
++
++        if bare_print:
++            # Special-case print all by itself
++            bare_print.replace(Call(Name("print"), [],
++                               prefix=bare_print.get_prefix()))
++            return
++        assert node.children[0] == Name("print")
++        args = node.children[1:]
++        if len(args) == 1 and parend_expr.match(args[0]):
++            # We don't want to keep sticking parens around an
++            # already-parenthesised expression.
++            return
++
++        sep = end = file = None
++        if args and args[-1] == Comma():
++            args = args[:-1]
++            end = " "
++        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
++            assert len(args) >= 2
++            file = args[1].clone()
++            args = args[3:] # Strip a possible comma after the file expression
++        # Now synthesize a print(args, sep=..., end=..., file=...) node.
++        l_args = [arg.clone() for arg in args]
++        if l_args:
++            l_args[0].set_prefix("")
++        if sep is not None or end is not None or file is not None:
++            if sep is not None:
++                self.add_kwarg(l_args, "sep", String(repr(sep)))
++            if end is not None:
++                self.add_kwarg(l_args, "end", String(repr(end)))
++            if file is not None:
++                self.add_kwarg(l_args, "file", file)
++        n_stmt = Call(Name("print"), l_args)
++        n_stmt.set_prefix(node.get_prefix())
++        return n_stmt
++
++    def add_kwarg(self, l_nodes, s_kwd, n_expr):
++        # XXX All this prefix-setting may lose comments (though rarely)
++        n_expr.set_prefix("")
++        n_argument = pytree.Node(self.syms.argument,
++                                 (Name(s_kwd),
++                                  pytree.Leaf(token.EQUAL, "="),
++                                  n_expr))
++        if l_nodes:
++            l_nodes.append(Comma())
++            n_argument.set_prefix(" ")
++        l_nodes.append(n_argument)
+diff -r 531f2e948299 refactor/fixes/from2/fix_raise.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_raise.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,82 @@
++"""Fixer for 'raise E, V, T'
++
++raise         -> raise
++raise E       -> raise E
++raise E, V    -> raise E(V)
++raise E, V, T -> raise E(V).with_traceback(T)
++
++raise (((E, E'), E''), E'''), V -> raise E(V)
++raise "foo", V, T               -> warns about string exceptions
++
++
++CAVEATS:
++1) "raise E, V" will be incorrectly translated if V is an exception
++   instance. The correct Python 3 idiom is
++
++        raise E from V
++
++   but since we can't detect instance-hood by syntax alone and since
++   any client code would have to be changed as well, we don't automate
++   this.
++"""
++# Author: Collin Winter
++
++# Local imports
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name, Call, Attr, ArgList, is_tuple
++
++class FixRaise(fixer_base.BaseFix):
++
++    PATTERN = """
++    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++
++        exc = results["exc"].clone()
++        if exc.type is token.STRING:
++            self.cannot_convert(node, "Python 3 does not support string exceptions")
++            return
++
++        # Python 2 supports
++        #  raise ((((E1, E2), E3), E4), E5), V
++        # as a synonym for
++        #  raise E1, V
++        # Since Python 3 will not support this, we recurse down any tuple
++        # literals, always taking the first element.
++        if is_tuple(exc):
++            while is_tuple(exc):
++                # exc.children[1:-1] is the unparenthesized tuple
++                # exc.children[1].children[0] is the first element of the tuple
++                exc = exc.children[1].children[0].clone()
++            exc.set_prefix(" ")
++
++        if "val" not in results:
++            # One-argument raise
++            new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
++            new.set_prefix(node.get_prefix())
++            return new
++
++        val = results["val"].clone()
++        if is_tuple(val):
++            args = [c.clone() for c in val.children[1:-1]]
++        else:
++            val.set_prefix("")
++            args = [val]
++
++        if "tb" in results:
++            tb = results["tb"].clone()
++            tb.set_prefix("")
++
++            e = Call(exc, args)
++            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
++            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
++            new.set_prefix(node.get_prefix())
++            return new
++        else:
++            return pytree.Node(syms.raise_stmt,
++                               [Name("raise"), Call(exc, args)],
++                               prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_raw_input.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_raw_input.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,16 @@
++"""Fixer that changes raw_input(...) into input(...)."""
++# Author: Andre Roberge
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++class FixRawInput(fixer_base.BaseFix):
++
++    PATTERN = """
++              power< name='raw_input' trailer< '(' [any] ')' > any* >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("input", prefix=name.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/from2/fix_reduce.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_reduce.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,33 @@
++# Copyright 2008 Armin Ronacher.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for reduce().
++
++Makes sure reduce() is imported from the functools module if reduce is
++used in that module.
++"""
++
++from ... import pytree
++from ... import fixer_base
++from ...fixer_util import Name, Attr, touch_import
++
++
++
++class FixReduce(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< 'reduce'
++        trailer< '('
++            arglist< (
++                (not(argument<any '=' any>) any ','
++                 not(argument<any '=' any>) any) |
++                (not(argument<any '=' any>) any ','
++                 not(argument<any '=' any>) any ','
++                 not(argument<any '=' any>) any)
++            ) >
++        ')' >
++    >
++    """
++
++    def transform(self, node, results):
++        touch_import('functools', 'reduce', node)
+diff -r 531f2e948299 refactor/fixes/from2/fix_renames.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_renames.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,69 @@
++"""Fix incompatible renames
++
++Fixes:
++  * sys.maxint -> sys.maxsize
++"""
++# Author: Christian Heimes
++# based on Collin Winter's fix_import
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name, attr_chain
++
++MAPPING = {"sys":  {"maxint" : "maxsize"},
++          }
++LOOKUP = {}
++
++def alternates(members):
++    return "(" + "|".join(map(repr, members)) + ")"
++
++
++def build_pattern():
++    #bare = set()
++    for module, replace in MAPPING.items():
++        for old_attr, new_attr in replace.items():
++            LOOKUP[(module, old_attr)] = new_attr
++            #bare.add(module)
++            #bare.add(old_attr)
++            #yield """
++            #      import_name< 'import' (module=%r
++            #          | dotted_as_names< any* module=%r any* >) >
++            #      """ % (module, module)
++            yield """
++                  import_from< 'from' module_name=%r 'import'
++                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
++                  """ % (module, old_attr, old_attr)
++            yield """
++                  power< module_name=%r trailer< '.' attr_name=%r > any* >
++                  """ % (module, old_attr)
++    #yield """bare_name=%s""" % alternates(bare)
++
++
++class FixRenames(fixer_base.BaseFix):
++    PATTERN = "|".join(build_pattern())
++
++    order = "pre" # Pre-order tree traversal
++
++    # Don't match the node if it's within another match
++    def match(self, node):
++        match = super(FixRenames, self).match
++        results = match(node)
++        if results:
++            if any([match(obj) for obj in attr_chain(node, "parent")]):
++                return False
++            return results
++        return False
++
++    #def start_tree(self, tree, filename):
++    #    super(FixRenames, self).start_tree(tree, filename)
++    #    self.replace = {}
++
++    def transform(self, node, results):
++        mod_name = results.get("module_name")
++        attr_name = results.get("attr_name")
++        #bare_name = results.get("bare_name")
++        #import_mod = results.get("module")
++
++        if mod_name and attr_name:
++            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
++            attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))
+diff -r 531f2e948299 refactor/fixes/from2/fix_repr.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_repr.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,22 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Call, Name, parenthesize
++
++
++class FixRepr(fixer_base.BaseFix):
++
++    PATTERN = """
++              atom < '`' expr=any '`' >
++              """
++
++    def transform(self, node, results):
++        expr = results["expr"].clone()
++
++        if expr.type == self.syms.testlist1:
++            expr = parenthesize(expr)
++        return Call(Name("repr"), [expr], prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_set_literal.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_set_literal.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,50 @@
++"""
++Optional fixer to transform set() calls to set literals.
++"""
++
++# Author: Benjamin Peterson
++
++from ... import fixer_base, pytree
++from ...fixer_util import token, syms
++
++class FixSetLiteral(fixer_base.BaseFix):
++
++    explicit = True
++
++    PATTERN = """power< 'set' trailer< '('
++                     (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
++                                |
++                                single=any) ']' >
++                     |
++                     atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
++                     )
++                     ')' > >
++              """
++
++    def transform(self, node, results):
++        single = results.get("single")
++        if single:
++            # Make a fake listmaker
++            fake = pytree.Node(syms.listmaker, [single.clone()])
++            single.replace(fake)
++            items = fake
++        else:
++            items = results["items"]
++
++        # Build the contents of the literal
++        literal = [pytree.Leaf(token.LBRACE, "{")]
++        literal.extend(n.clone() for n in items.children)
++        literal.append(pytree.Leaf(token.RBRACE, "}"))
++        # Set the prefix of the right brace to that of the ')' or ']'
++        literal[-1].set_prefix(items.next_sibling.get_prefix())
++        maker = pytree.Node(syms.dictsetmaker, literal)
++        maker.set_prefix(node.get_prefix())
++
++        # If the original was a one tuple, we need to remove the extra comma.
++        if len(maker.children) == 4:
++            n = maker.children[2]
++            n.remove()
++            maker.children[-1].set_prefix(n.get_prefix())
++
++        # Finally, replace the set call with our shiny new literal.
++        return maker
+diff -r 531f2e948299 refactor/fixes/from2/fix_standarderror.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_standarderror.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,18 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for StandardError -> Exception."""
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++
++class FixStandarderror(fixer_base.BaseFix):
++
++    PATTERN = """
++              'StandardError'
++              """
++
++    def transform(self, node, results):
++        return Name("Exception", prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_sys_exc.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_sys_exc.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,29 @@
++"""Fixer for sys.exc_{type, value, traceback}
++
++sys.exc_type -> sys.exc_info()[0]
++sys.exc_value -> sys.exc_info()[1]
++sys.exc_traceback -> sys.exc_info()[2]
++"""
++
++# By Jeff Balogh and Benjamin Peterson
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
++
++class FixSysExc(fixer_base.BaseFix):
++    # This order matches the ordering of sys.exc_info().
++    exc_info = ["exc_type", "exc_value", "exc_traceback"]
++    PATTERN = """
++              power< 'sys' trailer< dot='.' attribute=(%s) > >
++              """ % '|'.join("'%s'" % e for e in exc_info)
++
++    def transform(self, node, results):
++        sys_attr = results["attribute"][0]
++        index = Number(self.exc_info.index(sys_attr.value))
++
++        call = Call(Name("exc_info"), prefix=sys_attr.get_prefix())
++        attr = Attr(Name("sys"), call)
++        attr[1].children[0].set_prefix(results["dot"].get_prefix())
++        attr.append(Subscript(index))
++        return Node(syms.power, attr, prefix=node.get_prefix())
+diff -r 531f2e948299 refactor/fixes/from2/fix_throw.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_throw.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,56 @@
++"""Fixer for generator.throw(E, V, T).
++
++g.throw(E)       -> g.throw(E)
++g.throw(E, V)    -> g.throw(E(V))
++g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
++
++g.throw("foo"[, V[, T]]) will warn about string exceptions."""
++# Author: Collin Winter
++
++# Local imports
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name, Call, ArgList, Attr, is_tuple
++
++class FixThrow(fixer_base.BaseFix):
++
++    PATTERN = """
++    power< any trailer< '.' 'throw' >
++           trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
++    >
++    |
++    power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
++    """
++
++    def transform(self, node, results):
++        syms = self.syms
++
++        exc = results["exc"].clone()
++        if exc.type is token.STRING:
++            self.cannot_convert(node, "Python 3 does not support string exceptions")
++            return
++
++        # Leave "g.throw(E)" alone
++        val = results.get("val")
++        if val is None:
++            return
++
++        val = val.clone()
++        if is_tuple(val):
++            args = [c.clone() for c in val.children[1:-1]]
++        else:
++            val.set_prefix("")
++            args = [val]
++
++        throw_args = results["args"]
++
++        if "tb" in results:
++            tb = results["tb"].clone()
++            tb.set_prefix("")
++
++            e = Call(exc, args)
++            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
++            throw_args.replace(pytree.Node(syms.power, with_tb))
++        else:
++            throw_args.replace(Call(exc, args))
+diff -r 531f2e948299 refactor/fixes/from2/fix_tuple_params.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_tuple_params.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,169 @@
++"""Fixer for function definitions with tuple parameters.
++
++def func(((a, b), c), d):
++    ...
++
++    ->
++
++def func(x, d):
++    ((a, b), c) = x
++    ...
++
++It also supports lambdas:
++
++    lambda (x, y): x + y -> lambda t: t[0] + t[1]
++
++    # The parens are a syntax error in Python 3
++    lambda (x): x + y -> lambda x: x + y
++"""
++# Author: Collin Winter
++
++# Local imports
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Assign, Name, Newline, Number, Subscript, syms
++
++def is_docstring(stmt):
++    return isinstance(stmt, pytree.Node) and \
++           stmt.children[0].type == token.STRING
++
++class FixTupleParams(fixer_base.BaseFix):
++    PATTERN = """
++              funcdef< 'def' any parameters< '(' args=any ')' >
++                       ['->' any] ':' suite=any+ >
++              |
++              lambda=
++              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
++                       ':' body=any
++              >
++              """
++
++    def transform(self, node, results):
++        if "lambda" in results:
++            return self.transform_lambda(node, results)
++
++        new_lines = []
++        suite = results["suite"]
++        args = results["args"]
++        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
++        # TODO(cwinter): suite-cleanup
++        if suite[0].children[1].type == token.INDENT:
++            start = 2
++            indent = suite[0].children[1].value
++            end = Newline()
++        else:
++            start = 0
++            indent = "; "
++            end = pytree.Leaf(token.INDENT, "")
++
++        # We need access to self for new_name(), and making this a method
++        #  doesn't feel right. Closing over self and new_lines makes the
++        #  code below cleaner.
++        def handle_tuple(tuple_arg, add_prefix=False):
++            n = Name(self.new_name())
++            arg = tuple_arg.clone()
++            arg.set_prefix("")
++            stmt = Assign(arg, n.clone())
++            if add_prefix:
++                n.set_prefix(" ")
++            tuple_arg.replace(n)
++            new_lines.append(pytree.Node(syms.simple_stmt,
++                                         [stmt, end.clone()]))
++
++        if args.type == syms.tfpdef:
++            handle_tuple(args)
++        elif args.type == syms.typedargslist:
++            for i, arg in enumerate(args.children):
++                if arg.type == syms.tfpdef:
++                    # Without add_prefix, the emitted code is correct,
++                    #  just ugly.
++                    handle_tuple(arg, add_prefix=(i > 0))
++
++        if not new_lines:
++            return node
++
++        # This isn't strictly necessary, but it plays nicely with other fixers.
++        # TODO(cwinter) get rid of this when children becomes a smart list
++        for line in new_lines:
++            line.parent = suite[0]
++
++        # TODO(cwinter) suite-cleanup
++        after = start
++        if start == 0:
++            new_lines[0].set_prefix(" ")
++        elif is_docstring(suite[0].children[start]):
++            new_lines[0].set_prefix(indent)
++            after = start + 1
++
++        suite[0].children[after:after] = new_lines
++        for i in range(after+1, after+len(new_lines)+1):
++            suite[0].children[i].set_prefix(indent)
++        suite[0].changed()
++
++    def transform_lambda(self, node, results):
++        args = results["args"]
++        body = results["body"]
++        inner = simplify_args(results["inner"])
++
++        # Replace lambda ((((x)))): x  with lambda x: x
++        if inner.type == token.NAME:
++            inner = inner.clone()
++            inner.set_prefix(" ")
++            args.replace(inner)
++            return
++
++        params = find_params(args)
++        to_index = map_to_index(params)
++        tup_name = self.new_name(tuple_name(params))
++
++        new_param = Name(tup_name, prefix=" ")
++        args.replace(new_param.clone())
++        for n in body.post_order():
++            if n.type == token.NAME and n.value in to_index:
++                subscripts = [c.clone() for c in to_index[n.value]]
++                new = pytree.Node(syms.power,
++                                  [new_param.clone()] + subscripts)
++                new.set_prefix(n.get_prefix())
++                n.replace(new)
++
++
++### Helper functions for transform_lambda()
++
++def simplify_args(node):
++    if node.type in (syms.vfplist, token.NAME):
++        return node
++    elif node.type == syms.vfpdef:
++        # These look like vfpdef< '(' x ')' > where x is NAME
++        # or another vfpdef instance (leading to recursion).
++        while node.type == syms.vfpdef:
++            node = node.children[1]
++        return node
++    raise RuntimeError("Received unexpected node %s" % node)
++
++def find_params(node):
++    if node.type == syms.vfpdef:
++        return find_params(node.children[1])
++    elif node.type == token.NAME:
++        return node.value
++    return [find_params(c) for c in node.children if c.type != token.COMMA]
++
++def map_to_index(param_list, prefix=[], d=None):
++    if d is None:
++        d = {}
++    for i, obj in enumerate(param_list):
++        trailer = [Subscript(Number(i))]
++        if isinstance(obj, list):
++            map_to_index(obj, trailer, d=d)
++        else:
++            d[obj] = prefix + trailer
++    return d
++
++def tuple_name(param_list):
++    l = []
++    for obj in param_list:
++        if isinstance(obj, list):
++            l.append(tuple_name(obj))
++        else:
++            l.append(obj)
++    return "_".join(l)
+diff -r 531f2e948299 refactor/fixes/from2/fix_types.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_types.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,62 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer for removing uses of the types module.
++
++This works only for the known names in the types module.  The names can be
++written with or without the 'types.' prefix; i.e., it is assumed the module
++is imported either as:
++
++    import types
++    from types import ... # either * or specific types
++
++The import statements are not modified.
++
++There should be another fixer that handles at least the following constants:
++
++   type([]) -> list
++   type(()) -> tuple
++   type('') -> str
++
++"""
++
++# Local imports
++from ...pgen2 import token
++from ... import fixer_base
++from ...fixer_util import Name
++
++_TYPE_MAPPING = {
++        'BooleanType' : 'bool',
++        'BufferType' : 'memoryview',
++        'ClassType' : 'type',
++        'ComplexType' : 'complex',
++        'DictType': 'dict',
++        'DictionaryType' : 'dict',
++        'EllipsisType' : 'type(Ellipsis)',
++        #'FileType' : 'io.IOBase',
++        'FloatType': 'float',
++        'IntType': 'int',
++        'ListType': 'list',
++        'LongType': 'int',
++        'ObjectType' : 'object',
++        'NoneType': 'type(None)',
++        'NotImplementedType' : 'type(NotImplemented)',
++        'SliceType' : 'slice',
++        'StringType': 'bytes', # XXX ?
++        'StringTypes' : 'str', # XXX ?
++        'TupleType': 'tuple',
++        'TypeType' : 'type',
++        'UnicodeType': 'str',
++        'XRangeType' : 'range',
++    }
++
++_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
++
++class FixTypes(fixer_base.BaseFix):
++
++    PATTERN = '|'.join(_pats)
++
++    def transform(self, node, results):
++        new_value = _TYPE_MAPPING.get(results["name"].value)
++        if new_value:
++            return Name(new_value, prefix=node.get_prefix())
++        return None
+diff -r 531f2e948299 refactor/fixes/from2/fix_unicode.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_unicode.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,28 @@
++"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
++
++"""
++
++import re
++from ...pgen2 import token
++from ... import fixer_base
++
++class FixUnicode(fixer_base.BaseFix):
++
++    PATTERN = "STRING | NAME<'unicode' | 'unichr'>"
++
++    def transform(self, node, results):
++        if node.type == token.NAME:
++            if node.value == "unicode":
++                new = node.clone()
++                new.value = "str"
++                return new
++            if node.value == "unichr":
++                new = node.clone()
++                new.value = "chr"
++                return new
++            # XXX Warn when __unicode__ found?
++        elif node.type == token.STRING:
++            if re.match(r"[uU][rR]?[\'\"]", node.value):
++                new = node.clone()
++                new.value = new.value[1:]
++                return new
+diff -r 531f2e948299 refactor/fixes/from2/fix_urllib.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_urllib.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,180 @@
++"""Fixer that changes imports of urllib, which are now incompatible.
++   This is rather similar to fix_imports, but because of the more
++   complex nature of the fixing for urllib, it has its own fixer.
++"""
++# Author: Nick Edds
++
++# Local imports
++from .fix_imports import alternates, FixImports
++from ... import fixer_base
++from ...fixer_util import Name, Comma, FromImport, Newline, attr_chain
++
++MAPPING = {'urllib':  [
++                ('urllib.request',
++                    ['URLOpener', 'FancyURLOpener', 'urlretrieve',
++                     '_urlopener', 'urlcleanup']),
++                ('urllib.parse',
++                    ['quote', 'quote_plus', 'unquote', 'unquote_plus',
++                     'urlencode', 'pathname2url', 'url2pathname', 'splitattr',
++                     'splithost', 'splitnport', 'splitpasswd', 'splitport',
++                     'splitquery', 'splittag', 'splittype', 'splituser',
++                     'splitvalue', ]),
++                ('urllib.error',
++                    ['ContentTooShortError'])],
++           'urllib2' : [
++                ('urllib.request',
++                    ['urlopen', 'install_opener', 'build_opener',
++                     'Request', 'OpenerDirector', 'BaseHandler',
++                     'HTTPDefaultErrorHandler', 'HTTPRedirectHandler',
++                     'HTTPCookieProcessor', 'ProxyHandler',
++                     'HTTPPasswordMgr',
++                     'HTTPPasswordMgrWithDefaultRealm',
++                     'AbstractBasicAuthHandler',
++                     'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler',
++                     'AbstractDigestAuthHandler',
++                     'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler',
++                     'HTTPHandler', 'HTTPSHandler', 'FileHandler',
++                     'FTPHandler', 'CacheFTPHandler',
++                     'UnknownHandler']),
++                ('urllib.error',
++                    ['URLError', 'HTTPError']),
++           ]
++}
++
++# Duplicate the url parsing functions for urllib2.
++MAPPING["urllib2"].append(MAPPING["urllib"][1])
++
++
++def build_pattern():
++    bare = set()
++    for old_module, changes in MAPPING.items():
++        for change in changes:
++            new_module, members = change
++            members = alternates(members)
++            yield """import_name< 'import' (module=%r
++                                  | dotted_as_names< any* module=%r any* >) >
++                  """ % (old_module, old_module)
++            yield """import_from< 'from' mod_member=%r 'import'
++                       ( member=%s | import_as_name< member=%s 'as' any > |
++                         import_as_names< members=any*  >) >
++                  """ % (old_module, members, members)
++            yield """import_from< 'from' module_star=%r 'import' star='*' >
++                  """ % old_module
++            yield """import_name< 'import'
++                                  dotted_as_name< module_as=%r 'as' any > >
++                  """ % old_module
++            yield """power< module_dot=%r trailer< '.' member=%s > any* >
++                  """ % (old_module, members)
++
++
++class FixUrllib(FixImports):
++
++    def build_pattern(self):
++        return "|".join(build_pattern())
++
++    def transform_import(self, node, results):
++        """Transform for the basic import case. Replaces the old
++           import name with a comma-separated list of its
++           replacements.
++        """
++        import_mod = results.get('module')
++        pref = import_mod.get_prefix()
++
++        names = []
++
++        # create a Node list of the replacement modules
++        for name in MAPPING[import_mod.value][:-1]:
++            names.extend([Name(name[0], prefix=pref), Comma()])
++        names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
++        import_mod.replace(names)
++
++    def transform_member(self, node, results):
++        """Transform for imports of specific module elements. Replaces
++           the module to be imported from with the appropriate new
++           module.
++        """
++        mod_member = results.get('mod_member')
++        pref = mod_member.get_prefix()
++        member = results.get('member')
++
++        # Simple case with only a single member being imported
++        if member:
++            # this may be a list of length one, or just a node
++            if isinstance(member, list):
++                member = member[0]
++            new_name = None
++            for change in MAPPING[mod_member.value]:
++                if member.value in change[1]:
++                    new_name = change[0]
++                    break
++            if new_name:
++                mod_member.replace(Name(new_name, prefix=pref))
++            else:
++                self.cannot_convert(node,
++                                    'This is an invalid module element')
++
++        # Multiple members being imported
++        else:
++            # a dictionary for replacements, order matters
++            modules = []
++            mod_dict = {}
++            members = results.get('members')
++            for member in members:
++                member = member.value
++                # we only care about the actual members
++                if member != ',':
++                    for change in MAPPING[mod_member.value]:
++                        if member in change[1]:
++                            if change[0] in mod_dict:
++                                mod_dict[change[0]].append(member)
++                            else:
++                                mod_dict[change[0]] = [member]
++                                modules.append(change[0])
++
++            new_nodes = []
++            for module in modules:
++                elts = mod_dict[module]
++                names = []
++                for elt in elts[:-1]:
++                    names.extend([Name(elt, prefix=pref), Comma()])
++                names.append(Name(elts[-1], prefix=pref))
++                new_nodes.append(FromImport(module, names))
++            if new_nodes:
++                nodes = []
++                for new_node in new_nodes[:-1]:
++                    nodes.extend([new_node, Newline()])
++                nodes.append(new_nodes[-1])
++                node.replace(nodes)
++            else:
++                self.cannot_convert(node, 'All module elements are invalid')
++
++    def transform_dot(self, node, results):
++        """Transform for calls to module members in code."""
++        module_dot = results.get('module_dot')
++        member = results.get('member')
++        # this may be a list of length one, or just a node
++        if isinstance(member, list):
++            member = member[0]
++        new_name = None
++        for change in MAPPING[module_dot.value]:
++            if member.value in change[1]:
++                new_name = change[0]
++                break
++        if new_name:
++            module_dot.replace(Name(new_name,
++                                    prefix=module_dot.get_prefix()))
++        else:
++            self.cannot_convert(node, 'This is an invalid module element')
++
++    def transform(self, node, results):
++        if results.get('module'):
++            self.transform_import(node, results)
++        elif results.get('mod_member'):
++            self.transform_member(node, results)
++        elif results.get('module_dot'):
++            self.transform_dot(node, results)
++        # Renaming and star imports are not supported for these modules.
++        elif results.get('module_star'):
++            self.cannot_convert(node, 'Cannot handle star imports.')
++        elif results.get('module_as'):
++            self.cannot_convert(node, 'This module is now multiple modules')
+diff -r 531f2e948299 refactor/fixes/from2/fix_ws_comma.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_ws_comma.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,39 @@
++"""Fixer that changes 'a ,b' into 'a, b'.
++
++This also changes '{a :b}' into '{a: b}', but does not touch other
++uses of colons.  It does not touch other uses of whitespace.
++
++"""
++
++from ... import pytree
++from ...pgen2 import token
++from ... import fixer_base
++
++class FixWsComma(fixer_base.BaseFix):
++
++    explicit = True # The user must ask for this fixer
++
++    PATTERN = """
++    any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
++    """
++
++    COMMA = pytree.Leaf(token.COMMA, ",")
++    COLON = pytree.Leaf(token.COLON, ":")
++    SEPS = (COMMA, COLON)
++
++    def transform(self, node, results):
++        new = node.clone()
++        comma = False
++        for child in new.children:
++            if child in self.SEPS:
++                prefix = child.get_prefix()
++                if prefix.isspace() and "\n" not in prefix:
++                    child.set_prefix("")
++                comma = True
++            else:
++                if comma:
++                    prefix = child.get_prefix()
++                    if not prefix:
++                        child.set_prefix(" ")
++                comma = False
++        return new
+diff -r 531f2e948299 refactor/fixes/from2/fix_xrange.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_xrange.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,64 @@
++# Copyright 2007 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Fixer that changes xrange(...) into range(...)."""
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name, Call, consuming_calls
++from ... import patcomp
++
++
++class FixXrange(fixer_base.BaseFix):
++
++    PATTERN = """
++              power<
++                 (name='range'|name='xrange') trailer< '(' args=any ')' >
++              rest=any* >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        if name.value == "xrange":
++            return self.transform_xrange(node, results)
++        elif name.value == "range":
++            return self.transform_range(node, results)
++        else:
++            raise ValueError(repr(name))
++
++    def transform_xrange(self, node, results):
++        name = results["name"]
++        name.replace(Name("range", prefix=name.get_prefix()))
++
++    def transform_range(self, node, results):
++        if not self.in_special_context(node):
++            range_call = Call(Name("range"), [results["args"].clone()])
++            # Encase the range call in list().
++            list_call = Call(Name("list"), [range_call],
++                             prefix=node.get_prefix())
++            # Put things that were after the range() call after the list call.
++            for n in results["rest"]:
++                list_call.append_child(n)
++            return list_call
++        return node
++
++    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
++    p1 = patcomp.compile_pattern(P1)
++
++    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
++            | comp_for< 'for' any 'in' node=any any* >
++            | comparison< any 'in' node=any any*>
++         """
++    p2 = patcomp.compile_pattern(P2)
++
++    def in_special_context(self, node):
++        if node.parent is None:
++            return False
++        results = {}
++        if (node.parent.parent is not None and
++               self.p1.match(node.parent.parent, results) and
++               results["node"] is node):
++            # list(d.keys()) -> list(d.keys()), etc.
++            return results["func"].value in consuming_calls
++        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
++        return self.p2.match(node.parent, results) and results["node"] is node
+diff -r 531f2e948299 refactor/fixes/from2/fix_xreadlines.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_xreadlines.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,24 @@
++"""Fix "for x in f.xreadlines()" -> "for x in f".
++
++This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
++# Author: Collin Winter
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name
++
++
++class FixXreadlines(fixer_base.BaseFix):
++    PATTERN = """
++    power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
++    |
++    power< any+ trailer< '.' no_call='xreadlines' > >
++    """
++
++    def transform(self, node, results):
++        no_call = results.get("no_call")
++
++        if no_call:
++            no_call.replace(Name("__iter__", prefix=no_call.get_prefix()))
++        else:
++            node.replace([x.clone() for x in results["call"]])
+diff -r 531f2e948299 refactor/fixes/from2/fix_zip.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from2/fix_zip.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,34 @@
++"""
++Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
++unless there exists a 'from future_builtins import zip' statement in the
++top-level namespace.
++
++We avoid the transformation if the zip() call is directly contained in
++iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
++"""
++
++# Local imports
++from ... import fixer_base
++from ...fixer_util import Name, Call, in_special_context
++
++class FixZip(fixer_base.ConditionalFix):
++
++    PATTERN = """
++    power< 'zip' args=trailer< '(' [any] ')' >
++    >
++    """
++
++    skip_on = "future_builtins.zip"
++
++    def transform(self, node, results):
++        if self.should_skip(node):
++            return
++
++        if in_special_context(node):
++            return None
++
++        new = node.clone()
++        new.set_prefix("")
++        new = Call(Name("list"), [new])
++        new.set_prefix(node.get_prefix())
++        return new
+diff -r 531f2e948299 refactor/fixes/from3/__init__.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from3/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,2 @@
++from . import fix_range
++from . import fix_renames
+diff -r 531f2e948299 refactor/fixes/from3/fix_range.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from3/fix_range.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,21 @@
++# Based on fix_xrange.py
++# 3to2 modification by Paul Kippes
++
++"""
++range(...) -> xrange(...)
++"""
++
++from ..fixer_common import *
++
++class FixRange(fixer_base.BaseFix):
++
++    PATTERN = """
++              power<
++                 (name='range') trailer< '(' args=any ')' >
++              rest=any* >
++              """
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("xrange", prefix=name.get_prefix()))
++        return node
+diff -r 531f2e948299 refactor/fixes/from3/fix_renames.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/fixes/from3/fix_renames.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,69 @@
++"""Fix incompatible renames
++
++Incorporates simple complementary 3-to-2 transforms:
++
++Fixes:
++  * sys.maxsize -> sys.maxint
++"""
++
++# Local imports
++from ..fixer_common import *
++
++MAPPING = {'sys':  {'maxsize' : 'maxint',
++                    },
++           }
++LOOKUP = {}
++
++def alternates(members):
++    return "(" + "|".join(map(repr, members)) + ")"
++
++
++def build_pattern():
++    #bare = set()
++    for module, replace in MAPPING.items():
++        for old_attr, new_attr in replace.items():
++            LOOKUP[(module, old_attr)] = new_attr
++            #bare.add(module)
++            #bare.add(old_attr)
++            #yield """
++            #      import_name< 'import' (module=%r
++            #          | dotted_as_names< any* module=%r any* >) >
++            #      """ % (module, module)
++            yield """
++                  import_from< 'from' module_name=%r 'import'
++                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
++                  """ % (module, old_attr, old_attr)
++            yield """
++                  power< module_name=%r trailer< '.' attr_name=%r > any* >
++                  """ % (module, old_attr)
++    #yield """bare_name=%s""" % alternates(bare)
++
++
++class FixRenames(fixer_base.BaseFix):
++    PATTERN = "|".join(build_pattern())
++
++    order = "pre" # Pre-order tree traversal
++
++    # Don't match the node if it's within another match
++    def match(self, node):
++        match = super(FixRenames, self).match
++        results = match(node)
++        if results:
++            if any([match(obj) for obj in attr_chain(node, "parent")]):
++                return False
++            return results
++        return False
++
++    #def start_tree(self, tree, filename):
++    #    super(FixRenames, self).start_tree(tree, filename)
++    #    self.replace = {}
++
++    def transform(self, node, results):
++        mod_name = results.get("module_name")
++        attr_name = results.get("attr_name")
++        #bare_name = results.get("bare_name")
++        #import_mod = results.get("module")
++
++        if mod_name and attr_name:
++            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
++            attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))
+diff -r 531f2e948299 refactor/main.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/main.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,134 @@
++"""
++Main program for refactor.
++"""
++
++import sys
++import os
++import logging
++import shutil
++import optparse
++
++from . import refactor
++
++
++class StdoutRefactoringTool(refactor.RefactoringTool):
++    """
++    Prints output to stdout.
++    """
++
++    def __init__(self, fixers, options, explicit, nobackups):
++        self.nobackups = nobackups
++        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
++
++    def log_error(self, msg, *args, **kwargs):
++        self.errors.append((msg, args, kwargs))
++        self.logger.error(msg, *args, **kwargs)
++
++    def write_file(self, new_text, filename, old_text):
++        if not self.nobackups:
++            # Make backup
++            backup = filename + ".bak"
++            if os.path.lexists(backup):
++                try:
++                    os.remove(backup)
++                except os.error, err:
++                    self.log_message("Can't remove backup %s", backup)
++            try:
++                os.rename(filename, backup)
++            except os.error, err:
++                self.log_message("Can't rename %s to %s", filename, backup)
++        # Actually write the new file
++        super(StdoutRefactoringTool, self).write_file(new_text,
++                                                      filename, old_text)
++        if not self.nobackups:
++            shutil.copymode(backup, filename)
++
++    def print_output(self, lines):
++        for line in lines:
++            print line
++
++
++def main(fixer_pkg, args=None):
++    """Main program.
++
++    Args:
++        fixer_pkg: the name of a package where the fixers are located.
++        args: optional; a list of command line arguments. If omitted,
++              sys.argv[1:] is used.
++
++    Returns a suggested exit status (0, 1, 2).
++    """
++    # Set up option parser
++    parser = optparse.OptionParser(usage="%s [options] file|dir ..." %
++                                   sys.argv[0])
++    parser.add_option("-d", "--doctests_only", action="store_true",
++                      help="Fix up doctests only")
++    parser.add_option("-f", "--fix", action="append", default=[],
++                      help="Each FIX specifies a transformation; default: all")
++    parser.add_option("-x", "--nofix", action="append", default=[],
++                      help="Prevent a fixer from being run.")
++    parser.add_option("-l", "--list-fixes", action="store_true",
++                      help="List available transformations (fixes/fix_*.py)")
++    parser.add_option("-p", "--print-function", action="store_true",
++                      help="Modify the grammar so that print() is a function")
++    parser.add_option("-v", "--verbose", action="store_true",
++                      help="More verbose logging")
++    parser.add_option("-w", "--write", action="store_true",
++                      help="Write back modified files")
++    parser.add_option("-n", "--nobackups", action="store_true", default=False,
++                      help="Don't write backups for modified files.")
++
++    # Parse command line arguments
++    refactor_stdin = False
++    options, args = parser.parse_args(args)
++    if not options.write and options.nobackups:
++        parser.error("Can't use -n without -w")
++    if options.list_fixes:
++        print "Available transformations for the -f/--fix option:"
++        for fixname in refactor.get_all_fix_names(fixer_pkg):
++            print fixname
++        if not args:
++            return 0
++    if not args:
++        print >>sys.stderr, "At least one file or directory argument required."
++        print >>sys.stderr, "Use --help to show usage."
++        return 2
++    if "-" in args:
++        refactor_stdin = True
++        if options.write:
++            print >>sys.stderr, "Can't write to stdin."
++            return 2
++
++    # Set up logging handler
++    level = logging.DEBUG if options.verbose else logging.INFO
++    logging.basicConfig(format='%(name)s: %(message)s', level=level)
++
++    # Initialize the refactoring tool
++    rt_opts = {"print_function" : options.print_function}
++    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
++    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
++    explicit = set()
++    if options.fix:
++        all_present = False
++        for fix in options.fix:
++            if fix == "all":
++                all_present = True
++            else:
++                explicit.add(fixer_pkg + ".fix_" + fix)
++        requested = avail_fixes.union(explicit) if all_present else explicit
++    else:
++        requested = avail_fixes.union(explicit)
++    fixer_names = requested.difference(unwanted_fixes)
++    rt = StdoutRefactoringTool(sorted(fixer_names), rt_opts, sorted(explicit),
++                               options.nobackups)
++
++    # Refactor all files and directories passed as arguments
++    if not rt.errors:
++        if refactor_stdin:
++            rt.refactor_stdin()
++        else:
++            rt.refactor(args, options.write, options.doctests_only)
++        rt.summarize()
++
++    # Return error status (0 if rt.errors is empty)
++    return int(bool(rt.errors))
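
A front-end script calls main() with the dotted name of a fixer package and
exits with the suggested status. A minimal sketch, assuming the from2 fixer
package seen elsewhere in this tree:

    #!/usr/bin/env python
    # Sketch of a console entry point for refactor.main.main(); the fixer
    # package name "refactor.fixes.from2" is assumed from the tree layout.
    import sys
    from refactor import main

    if __name__ == "__main__":
        sys.exit(main.main("refactor.fixes.from2"))
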
+diff -r 531f2e948299 refactor/patcomp.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/patcomp.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,186 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Pattern compiler.
++
++The grammar is taken from PatternGrammar.txt.
++
++The compiler compiles a pattern to a pytree.*Pattern instance.
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++# Python imports
++import os
++
++# Fairly local imports
++from .pgen2 import driver
++from .pgen2 import literals
++from .pgen2 import token
++from .pgen2 import tokenize
++
++# Really local imports
++from . import pytree
++from . import pygram
++
++# The pattern grammar file
++_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
++                                     "PatternGrammar.txt")
++
++
++def tokenize_wrapper(input):
++    """Tokenizes a string suppressing significant whitespace."""
++    skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
++    tokens = tokenize.generate_tokens(driver.generate_lines(input).next)
++    for quintuple in tokens:
++        type, value, start, end, line_text = quintuple
++        if type not in skip:
++            yield quintuple
++
++
++class PatternCompiler(object):
++
++    def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
++        """Initializer.
++
++        Takes an optional alternative filename for the pattern grammar.
++        """
++        self.grammar = driver.load_grammar(grammar_file)
++        self.syms = pygram.Symbols(self.grammar)
++        self.pygrammar = pygram.python_grammar
++        self.pysyms = pygram.python_symbols
++        self.driver = driver.Driver(self.grammar, convert=pattern_convert)
++
++    def compile_pattern(self, input, debug=False):
++        """Compiles a pattern string to a nested pytree.*Pattern object."""
++        tokens = tokenize_wrapper(input)
++        root = self.driver.parse_tokens(tokens, debug=debug)
++        return self.compile_node(root)
++
++    def compile_node(self, node):
++        """Compiles a node, recursively.
++
++        This is one big switch on the node type.
++        """
++        # XXX Optimize certain Wildcard-containing-Wildcard patterns
++        # that can be merged
++        if node.type == self.syms.Matcher:
++            node = node.children[0] # Avoid unneeded recursion
++
++        if node.type == self.syms.Alternatives:
++            # Skip the odd children since they are just '|' tokens
++            alts = [self.compile_node(ch) for ch in node.children[::2]]
++            if len(alts) == 1:
++                return alts[0]
++            p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
++            return p.optimize()
++
++        if node.type == self.syms.Alternative:
++            units = [self.compile_node(ch) for ch in node.children]
++            if len(units) == 1:
++                return units[0]
++            p = pytree.WildcardPattern([units], min=1, max=1)
++            return p.optimize()
++
++        if node.type == self.syms.NegatedUnit:
++            pattern = self.compile_basic(node.children[1:])
++            p = pytree.NegatedPattern(pattern)
++            return p.optimize()
++
++        assert node.type == self.syms.Unit
++
++        name = None
++        nodes = node.children
++        if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
++            name = nodes[0].value
++            nodes = nodes[2:]
++        repeat = None
++        if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
++            repeat = nodes[-1]
++            nodes = nodes[:-1]
++
++        # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
++        pattern = self.compile_basic(nodes, repeat)
++
++        if repeat is not None:
++            assert repeat.type == self.syms.Repeater
++            children = repeat.children
++            child = children[0]
++            if child.type == token.STAR:
++                min = 0
++                max = pytree.HUGE
++            elif child.type == token.PLUS:
++                min = 1
++                max = pytree.HUGE
++            elif child.type == token.LBRACE:
++                assert children[-1].type == token.RBRACE
++                assert len(children) in (3, 5)
++                min = max = self.get_int(children[1])
++                if len(children) == 5:
++                    max = self.get_int(children[3])
++            else:
++                assert False
++            if min != 1 or max != 1:
++                pattern = pattern.optimize()
++                pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
++
++        if name is not None:
++            pattern.name = name
++        return pattern.optimize()
++
++    def compile_basic(self, nodes, repeat=None):
++        # Compile STRING | NAME [Details] | (...) | [...]
++        assert len(nodes) >= 1
++        node = nodes[0]
++        if node.type == token.STRING:
++            value = literals.evalString(node.value)
++            return pytree.LeafPattern(content=value)
++        elif node.type == token.NAME:
++            value = node.value
++            if value.isupper():
++                if value not in TOKEN_MAP:
++                    raise SyntaxError("Invalid token: %r" % value)
++                return pytree.LeafPattern(TOKEN_MAP[value])
++            else:
++                if value == "any":
++                    type = None
++                elif not value.startswith("_"):
++                    type = getattr(self.pysyms, value, None)
++                    if type is None:
++                        raise SyntaxError("Invalid symbol: %r" % value)
++                if nodes[1:]: # Details present
++                    content = [self.compile_node(nodes[1].children[1])]
++                else:
++                    content = None
++                return pytree.NodePattern(type, content)
++        elif node.value == "(":
++            return self.compile_node(nodes[1])
++        elif node.value == "[":
++            assert repeat is None
++            subpattern = self.compile_node(nodes[1])
++            return pytree.WildcardPattern([[subpattern]], min=0, max=1)
++        assert False, node
++
++    def get_int(self, node):
++        assert node.type == token.NUMBER
++        return int(node.value)
++
++
++# Map named tokens to the type value for a LeafPattern
++TOKEN_MAP = {"NAME": token.NAME,
++             "STRING": token.STRING,
++             "NUMBER": token.NUMBER,
++             "TOKEN": None}
++
++
++def pattern_convert(grammar, raw_node_info):
++    """Converts raw node information to a Node or Leaf instance."""
++    type, value, context, children = raw_node_info
++    if children or type in grammar.number2symbol:
++        return pytree.Node(type, children, context=context)
++    else:
++        return pytree.Leaf(type, value, context=context)
++
++
++def compile_pattern(pattern):
++    return PatternCompiler().compile_pattern(pattern)
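
A sketch of the compiler in use, assuming the package keeps lib2to3's
companion APIs (pytree.convert for the driver and Node.pre_order() for
walking the tree):

    # Compile a pattern, then match it against every node of a parse tree.
    from refactor import patcomp, pygram, pytree
    from refactor.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("print len(x)\n")
    pat = patcomp.compile_pattern("power< 'len' trailer< '(' args=any ')' > >")
    for node in tree.pre_order():
        results = {}
        if pat.match(node, results):       # match() fills in named captures
            print "args capture:", results["args"]
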
+diff -r 531f2e948299 refactor/pgen2/.svn/all-wcprops
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/all-wcprops	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,59 @@
++K 25
++svn:wc:ra_dav:version-url
++V 57
++/projects/!svn/ver/68340/sandbox/trunk/2to3/lib2to3/pgen2
++END
++tokenize.py
++K 25
++svn:wc:ra_dav:version-url
++V 69
++/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/tokenize.py
++END
++pgen.py
++K 25
++svn:wc:ra_dav:version-url
++V 65
++/projects/!svn/ver/61629/sandbox/trunk/2to3/lib2to3/pgen2/pgen.py
++END
++parse.py
++K 25
++svn:wc:ra_dav:version-url
++V 66
++/projects/!svn/ver/67389/sandbox/trunk/2to3/lib2to3/pgen2/parse.py
++END
++driver.py
++K 25
++svn:wc:ra_dav:version-url
++V 67
++/projects/!svn/ver/68340/sandbox/trunk/2to3/lib2to3/pgen2/driver.py
++END
++__init__.py
++K 25
++svn:wc:ra_dav:version-url
++V 69
++/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/__init__.py
++END
++literals.py
++K 25
++svn:wc:ra_dav:version-url
++V 69
++/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/literals.py
++END
++token.py
++K 25
++svn:wc:ra_dav:version-url
++V 66
++/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/token.py
++END
++conv.py
++K 25
++svn:wc:ra_dav:version-url
++V 65
++/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/conv.py
++END
++grammar.py
++K 25
++svn:wc:ra_dav:version-url
++V 68
++/projects/!svn/ver/61441/sandbox/trunk/2to3/lib2to3/pgen2/grammar.py
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/dir-prop-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/dir-prop-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,8 @@
++K 10
++svn:ignore
++V 13
++*.pyc
++*.pyo
++
++
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/entries
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,334 @@
++9
++
++dir
++70822
++http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/pgen2
++http://svn.python.org/projects
++
++
++
++2009-01-05T08:11:39.704315Z
++68340
++georg.brandl
++has-props
++
++svn:special svn:externals svn:needs-lock
++
++
++
++
++
++
++
++
++
++
++
++6015fed2-1504-0410-9fe1-9d1591cc4771
++
++tokenize.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++06aea8121aa7b0fc71345d011813d4b4
++2008-03-17T16:59:51.273602Z
++61441
++martin.v.loewis
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++16184
++
++pgen.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++40f1eec8af5247a511bf6acc34eac994
++2008-03-19T16:58:19.069158Z
++61629
++collin.winter
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++13740
++
++parse.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++80c0ee069eab8de116e1c13572d6cd4b
++2008-11-25T23:13:17.968453Z
++67389
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++8053
++
++driver.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++e2c063aca0163f8f47fefeab1a5cdff7
++2009-01-05T08:11:39.704315Z
++68340
++georg.brandl
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++4809
++
++__init__.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++5cb6bc9b6c96e165df87b615f2df9f1a
++2006-11-29T17:38:40.278528Z
++52858
++guido.van.rossum
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++143
++
++literals.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++e3b1d03cade5fa0c3a1a5324e0b1e539
++2006-11-29T17:38:40.278528Z
++52858
++guido.van.rossum
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++1614
++
++token.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++8fd1f5c3fc2ad1b2afa7e17064b0ba04
++2007-02-12T23:59:44.048119Z
++53758
++collin.winter
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++1244
++
++conv.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++942a8910f37b9e5d202806ea05f7b2f1
++2007-02-12T23:59:44.048119Z
++53758
++collin.winter
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++9625
++
++grammar.py
++file
++
++
++
++
++2009-03-31T00:29:32.000000Z
++612ee8e1a84660a7c44f7d5af3e7db69
++2008-03-17T16:59:51.273602Z
++61441
++martin.v.loewis
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++4947
++
+diff -r 531f2e948299 refactor/pgen2/.svn/format
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/format	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,1 @@
++9
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/__init__.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/__init__.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/conv.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/conv.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/driver.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/driver.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/grammar.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/grammar.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/literals.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/literals.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/parse.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/parse.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/pgen.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/pgen.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/token.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/token.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,13 @@
++K 13
++svn:eol-style
++V 6
++native
++K 14
++svn:executable
++V 1
++*
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/prop-base/tokenize.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/prop-base/tokenize.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 23
++Author Date Id Revision
++END
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/__init__.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/__init__.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,4 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""The pgen2 package."""
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/conv.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/conv.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,257 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Convert graminit.[ch] spit out by pgen to Python code.
++
++Pgen is the Python parser generator.  It is useful to quickly create a
++parser from a grammar file in Python's grammar notation.  But I don't
++want my parsers to be written in C (yet), so I'm translating the
++parsing tables to Python data structures and writing a Python parse
++engine.
++
++Note that the token numbers are constants determined by the standard
++Python tokenizer.  The standard token module defines these numbers and
++their names (the names are not used much).  The token numbers are
++hardcoded into the Python tokenizer and into pgen.  A Python
++implementation of the Python tokenizer is also available, in the
++standard tokenize module.
++
++On the other hand, symbol numbers (representing the grammar's
++non-terminals) are assigned by pgen based on the actual grammar
++input.
++
++Note: this module is pretty much obsolete; the pgen module generates
++equivalent grammar tables directly from the Grammar.txt input file
++without having to invoke the Python pgen C program.
++
++"""
++
++# Python imports
++import re
++
++# Local imports
++from pgen2 import grammar, token
++
++
++class Converter(grammar.Grammar):
++    """Grammar subclass that reads classic pgen output files.
++
++    The run() method reads the tables as produced by the pgen parser
++    generator, typically contained in two C files, graminit.h and
++    graminit.c.  The other methods are for internal use only.
++
++    See the base class for more documentation.
++
++    """
++
++    def run(self, graminit_h, graminit_c):
++        """Load the grammar tables from the text files written by pgen."""
++        self.parse_graminit_h(graminit_h)
++        self.parse_graminit_c(graminit_c)
++        self.finish_off()
++
++    def parse_graminit_h(self, filename):
++        """Parse the .h file writen by pgen.  (Internal)
++
++        This file is a sequence of #define statements defining the
++        nonterminals of the grammar as numbers.  We build two tables
++        mapping the numbers to names and back.
++
++        """
++        try:
++            f = open(filename)
++        except IOError, err:
++            print "Can't open %s: %s" % (filename, err)
++            return False
++        self.symbol2number = {}
++        self.number2symbol = {}
++        lineno = 0
++        for line in f:
++            lineno += 1
++            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
++            if not mo and line.strip():
++                print "%s(%s): can't parse %s" % (filename, lineno,
++                                                  line.strip())
++            else:
++                symbol, number = mo.groups()
++                number = int(number)
++                assert symbol not in self.symbol2number
++                assert number not in self.number2symbol
++                self.symbol2number[symbol] = number
++                self.number2symbol[number] = symbol
++        return True
++
++    def parse_graminit_c(self, filename):
++        """Parse the .c file writen by pgen.  (Internal)
++
++        The file looks as follows.  The first two lines are always this:
++
++        #include "pgenheaders.h"
++        #include "grammar.h"
++
++        After that come four blocks:
++
++        1) one or more state definitions
++        2) a table defining dfas
++        3) a table defining labels
++        4) a struct defining the grammar
++
++        A state definition has the following form:
++        - one or more arc arrays, each of the form:
++          static arc arcs_<n>_<m>[<k>] = {
++                  {<i>, <j>},
++                  ...
++          };
++        - followed by a state array, of the form:
++          static state states_<s>[<t>] = {
++                  {<k>, arcs_<n>_<m>},
++                  ...
++          };
++
++        """
++        try:
++            f = open(filename)
++        except IOError, err:
++            print "Can't open %s: %s" % (filename, err)
++            return False
++        # The code below essentially uses f's iterator-ness!
++        lineno = 0
++
++        # Expect the two #include lines
++        lineno, line = lineno+1, f.next()
++        assert line == '#include "pgenheaders.h"\n', (lineno, line)
++        lineno, line = lineno+1, f.next()
++        assert line == '#include "grammar.h"\n', (lineno, line)
++
++        # Parse the state definitions
++        lineno, line = lineno+1, f.next()
++        allarcs = {}
++        states = []
++        while line.startswith("static arc "):
++            while line.startswith("static arc "):
++                mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
++                              line)
++                assert mo, (lineno, line)
++                n, m, k = map(int, mo.groups())
++                arcs = []
++                for _ in range(k):
++                    lineno, line = lineno+1, f.next()
++                    mo = re.match(r"\s+{(\d+), (\d+)},$", line)
++                    assert mo, (lineno, line)
++                    i, j = map(int, mo.groups())
++                    arcs.append((i, j))
++                lineno, line = lineno+1, f.next()
++                assert line == "};\n", (lineno, line)
++                allarcs[(n, m)] = arcs
++                lineno, line = lineno+1, f.next()
++            mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
++            assert mo, (lineno, line)
++            s, t = map(int, mo.groups())
++            assert s == len(states), (lineno, line)
++            state = []
++            for _ in range(t):
++                lineno, line = lineno+1, f.next()
++                mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
++                assert mo, (lineno, line)
++                k, n, m = map(int, mo.groups())
++                arcs = allarcs[n, m]
++                assert k == len(arcs), (lineno, line)
++                state.append(arcs)
++            states.append(state)
++            lineno, line = lineno+1, f.next()
++            assert line == "};\n", (lineno, line)
++            lineno, line = lineno+1, f.next()
++        self.states = states
++
++        # Parse the dfas
++        dfas = {}
++        mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
++        assert mo, (lineno, line)
++        ndfas = int(mo.group(1))
++        for i in range(ndfas):
++            lineno, line = lineno+1, f.next()
++            mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
++                          line)
++            assert mo, (lineno, line)
++            symbol = mo.group(2)
++            number, x, y, z = map(int, mo.group(1, 3, 4, 5))
++            assert self.symbol2number[symbol] == number, (lineno, line)
++            assert self.number2symbol[number] == symbol, (lineno, line)
++            assert x == 0, (lineno, line)
++            state = states[z]
++            assert y == len(state), (lineno, line)
++            lineno, line = lineno+1, f.next()
++            mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
++            assert mo, (lineno, line)
++            first = {}
++            rawbitset = eval(mo.group(1))
++            for i, c in enumerate(rawbitset):
++                byte = ord(c)
++                for j in range(8):
++                    if byte & (1<<j):
++                        first[i*8 + j] = 1
++            dfas[number] = (state, first)
++        lineno, line = lineno+1, f.next()
++        assert line == "};\n", (lineno, line)
++        self.dfas = dfas
++
++        # Parse the labels
++        labels = []
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"static label labels\[(\d+)\] = {$", line)
++        assert mo, (lineno, line)
++        nlabels = int(mo.group(1))
++        for i in range(nlabels):
++            lineno, line = lineno+1, f.next()
++            mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
++            assert mo, (lineno, line)
++            x, y = mo.groups()
++            x = int(x)
++            if y == "0":
++                y = None
++            else:
++                y = eval(y)
++            labels.append((x, y))
++        lineno, line = lineno+1, f.next()
++        assert line == "};\n", (lineno, line)
++        self.labels = labels
++
++        # Parse the grammar struct
++        lineno, line = lineno+1, f.next()
++        assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"\s+(\d+),$", line)
++        assert mo, (lineno, line)
++        ndfas = int(mo.group(1))
++        assert ndfas == len(self.dfas)
++        lineno, line = lineno+1, f.next()
++        assert line == "\tdfas,\n", (lineno, line)
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"\s+{(\d+), labels},$", line)
++        assert mo, (lineno, line)
++        nlabels = int(mo.group(1))
++        assert nlabels == len(self.labels), (lineno, line)
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"\s+(\d+)$", line)
++        assert mo, (lineno, line)
++        start = int(mo.group(1))
++        assert start in self.number2symbol, (lineno, line)
++        self.start = start
++        lineno, line = lineno+1, f.next()
++        assert line == "};\n", (lineno, line)
++        try:
++            lineno, line = lineno+1, f.next()
++        except StopIteration:
++            pass
++        else:
++            assert 0, (lineno, line)
++
++    def finish_off(self):
++        """Create additional useful structures.  (Internal)."""
++        self.keywords = {} # map from keyword strings to arc labels
++        self.tokens = {}   # map from numeric token values to arc labels
++        for ilabel, (type, value) in enumerate(self.labels):
++            if type == token.NAME and value is not None:
++                self.keywords[value] = ilabel
++            elif value is None:
++                self.tokens[type] = ilabel
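
The module calls itself obsolete, but its intended use is simple. A sketch,
with the pgen2 directory placed on sys.path (conv.py keeps its standalone
"from pgen2 import ..." form) and the graminit file names assumed:

    # Rebuild grammar tables from classic pgen C output files.
    import sys
    sys.path.insert(0, "refactor")      # assumed checkout layout

    from pgen2 import conv

    c = conv.Converter()
    c.run("graminit.h", "graminit.c")   # parse_graminit_h/_c, then finish_off()
    c.report()                          # readable dump of the tables, for debugging
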
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/driver.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/driver.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,146 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++# Modifications:
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Parser driver.
++
++This provides a high-level interface to parse a file into a syntax tree.
++
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++__all__ = ["Driver", "load_grammar"]
++
++# Python imports
++import os
++import logging
++import sys
++
++# Pgen imports
++from . import grammar, parse, token, tokenize, pgen
++
++
++class Driver(object):
++
++    def __init__(self, grammar, convert=None, logger=None):
++        self.grammar = grammar
++        if logger is None:
++            logger = logging.getLogger()
++        self.logger = logger
++        self.convert = convert
++
++    def parse_tokens(self, tokens, debug=False):
++        """Parse a series of tokens and return the syntax tree."""
++        # XXX Move the prefix computation into a wrapper around tokenize.
++        p = parse.Parser(self.grammar, self.convert)
++        p.setup()
++        lineno = 1
++        column = 0
++        type = value = start = end = line_text = None
++        prefix = ""
++        for quintuple in tokens:
++            type, value, start, end, line_text = quintuple
++            if start != (lineno, column):
++                assert (lineno, column) <= start, ((lineno, column), start)
++                s_lineno, s_column = start
++                if lineno < s_lineno:
++                    prefix += "\n" * (s_lineno - lineno)
++                    lineno = s_lineno
++                    column = 0
++                if column < s_column:
++                    prefix += line_text[column:s_column]
++                    column = s_column
++            if type in (tokenize.COMMENT, tokenize.NL):
++                prefix += value
++                lineno, column = end
++                if value.endswith("\n"):
++                    lineno += 1
++                    column = 0
++                continue
++            if type == token.OP:
++                type = grammar.opmap[value]
++            if debug:
++                self.logger.debug("%s %r (prefix=%r)",
++                                  token.tok_name[type], value, prefix)
++            if p.addtoken(type, value, (prefix, start)):
++                if debug:
++                    self.logger.debug("Stop.")
++                break
++            prefix = ""
++            lineno, column = end
++            if value.endswith("\n"):
++                lineno += 1
++                column = 0
++        else:
++            # We never broke out -- EOF is too soon (how can this happen???)
++            raise parse.ParseError("incomplete input",
++                                   type, value, (prefix, start))
++        return p.rootnode
++
++    def parse_stream_raw(self, stream, debug=False):
++        """Parse a stream and return the syntax tree."""
++        tokens = tokenize.generate_tokens(stream.readline)
++        return self.parse_tokens(tokens, debug)
++
++    def parse_stream(self, stream, debug=False):
++        """Parse a stream and return the syntax tree."""
++        return self.parse_stream_raw(stream, debug)
++
++    def parse_file(self, filename, debug=False):
++        """Parse a file and return the syntax tree."""
++        stream = open(filename)
++        try:
++            return self.parse_stream(stream, debug)
++        finally:
++            stream.close()
++
++    def parse_string(self, text, debug=False):
++        """Parse a string and return the syntax tree."""
++        tokens = tokenize.generate_tokens(generate_lines(text).next)
++        return self.parse_tokens(tokens, debug)
++
++
++def generate_lines(text):
++    """Generator that behaves like readline without using StringIO."""
++    for line in text.splitlines(True):
++        yield line
++    while True:
++        yield ""
++
++
++def load_grammar(gt="Grammar.txt", gp=None,
++                 save=True, force=False, logger=None):
++    """Load the grammar (maybe from a pickle)."""
++    if logger is None:
++        logger = logging.getLogger()
++    if gp is None:
++        head, tail = os.path.splitext(gt)
++        if tail == ".txt":
++            tail = ""
++        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
++    if force or not _newer(gp, gt):
++        logger.info("Generating grammar tables from %s", gt)
++        g = pgen.generate_grammar(gt)
++        if save:
++            logger.info("Writing grammar tables to %s", gp)
++            try:
++                g.dump(gp)
++            except IOError, e:
++                logger.info("Writing failed: " + str(e))
++    else:
++        g = grammar.Grammar()
++        g.load(gp)
++    return g
++
++
++def _newer(a, b):
++    """Inquire whether file a was written since file b."""
++    if not os.path.exists(a):
++        return False
++    if not os.path.exists(b):
++        return True
++    return os.path.getmtime(a) >= os.path.getmtime(b)
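
End to end, the driver boils down to three calls. A sketch assuming the
Grammar.txt at the package root and a lib2to3-style pytree.convert:

    # Load (or regenerate and cache) the grammar tables, then parse a string.
    from refactor import pytree
    from refactor.pgen2 import driver

    gr = driver.load_grammar("refactor/Grammar.txt")   # may write a .pickle cache
    d = driver.Driver(gr, convert=pytree.convert)
    tree = d.parse_string("x = 1\n")
    print repr(str(tree))    # the tree stringifies back to the original source
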
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/grammar.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/grammar.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,171 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""This module defines the data structures used to represent a grammar.
++
++These are a bit arcane because they are derived from the data
++structures used by Python's 'pgen' parser generator.
++
++There's also a table here mapping operators to their names in the
++token module; the Python tokenize module reports all operators as the
++fallback token code OP, but the parser needs the actual token code.
++
++"""
++
++# Python imports
++import pickle
++
++# Local imports
++from . import token, tokenize
++
++
++class Grammar(object):
++    """Pgen parsing tables tables conversion class.
++
++    Once initialized, this class supplies the grammar tables for the
++    parsing engine implemented by parse.py.  The parsing engine
++    accesses the instance variables directly.  The class here does not
++    provide initialization of the tables; several subclasses exist to
++    do this (see the conv and pgen modules).
++
++    The load() method reads the tables from a pickle file, which is
++    much faster than the other ways offered by subclasses.  The pickle
++    file is written by calling dump() (after loading the grammar
++    tables using a subclass).  The report() method prints a readable
++    representation of the tables to stdout, for debugging.
++
++    The instance variables are as follows:
++
++    symbol2number -- a dict mapping symbol names to numbers.  Symbol
++                     numbers are always 256 or higher, to distinguish
++                     them from token numbers, which are between 0 and
++                     255 (inclusive).
++
++    number2symbol -- a dict mapping numbers to symbol names;
++                     these two are each other's inverse.
++
++    states        -- a list of DFAs, where each DFA is a list of
++                     states, each state is a list of arcs, and each
++                     arc is a (i, j) pair where i is a label and j is
++                     a state number.  The DFA number is the index into
++                     this list.  (This name is slightly confusing.)
++                     Final states are represented by a special arc of
++                     the form (0, j) where j is its own state number.
++
++    dfas          -- a dict mapping symbol numbers to (DFA, first)
++                     pairs, where DFA is an item from the states list
++                     above, and first is a set of tokens that can
++                     begin this grammar rule (represented by a dict
++                     whose values are always 1).
++
++    labels        -- a list of (x, y) pairs where x is either a token
++                     number or a symbol number, and y is either None
++                     or a string; the strings are keywords.  The label
++                     number is the index in this list; label numbers
++                     are used to mark state transitions (arcs) in the
++                     DFAs.
++
++    start         -- the number of the grammar's start symbol.
++
++    keywords      -- a dict mapping keyword strings to arc labels.
++
++    tokens        -- a dict mapping token numbers to arc labels.
++
++    """
++
++    def __init__(self):
++        self.symbol2number = {}
++        self.number2symbol = {}
++        self.states = []
++        self.dfas = {}
++        self.labels = [(0, "EMPTY")]
++        self.keywords = {}
++        self.tokens = {}
++        self.symbol2label = {}
++        self.start = 256
++
++    def dump(self, filename):
++        """Dump the grammar tables to a pickle file."""
++        f = open(filename, "wb")
++        pickle.dump(self.__dict__, f, 2)
++        f.close()
++
++    def load(self, filename):
++        """Load the grammar tables from a pickle file."""
++        f = open(filename, "rb")
++        d = pickle.load(f)
++        f.close()
++        self.__dict__.update(d)
++
++    def report(self):
++        """Dump the grammar tables to standard output, for debugging."""
++        from pprint import pprint
++        print "s2n"
++        pprint(self.symbol2number)
++        print "n2s"
++        pprint(self.number2symbol)
++        print "states"
++        pprint(self.states)
++        print "dfas"
++        pprint(self.dfas)
++        print "labels"
++        pprint(self.labels)
++        print "start", self.start
++
++
++# Map from operator to number (since tokenize doesn't do this)
++
++opmap_raw = """
++( LPAR
++) RPAR
++[ LSQB
++] RSQB
++: COLON
++, COMMA
++; SEMI
+++ PLUS
++- MINUS
++* STAR
++/ SLASH
++| VBAR
++& AMPER
++< LESS
++> GREATER
++= EQUAL
++. DOT
++% PERCENT
++` BACKQUOTE
++{ LBRACE
++} RBRACE
++@ AT
++== EQEQUAL
++!= NOTEQUAL
++<> NOTEQUAL
++<= LESSEQUAL
++>= GREATEREQUAL
++~ TILDE
++^ CIRCUMFLEX
++<< LEFTSHIFT
++>> RIGHTSHIFT
++** DOUBLESTAR
+++= PLUSEQUAL
++-= MINEQUAL
++*= STAREQUAL
++/= SLASHEQUAL
++%= PERCENTEQUAL
++&= AMPEREQUAL
++|= VBAREQUAL
++^= CIRCUMFLEXEQUAL
++<<= LEFTSHIFTEQUAL
++>>= RIGHTSHIFTEQUAL
++**= DOUBLESTAREQUAL
++// DOUBLESLASH
++//= DOUBLESLASHEQUAL
++-> RARROW
++"""
++
++opmap = {}
++for line in opmap_raw.splitlines():
++    if line:
++        op, name = line.split()
++        opmap[op] = getattr(token, name)
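
The pickle written by dump() is the fast path back into these tables. A
round-trip sketch (the pickle file name is illustrative):

    # Dump the tables once, then reload them without re-running pgen.
    from refactor.pgen2 import driver, grammar

    g = driver.load_grammar("refactor/Grammar.txt", save=False)
    g.dump("Grammar.pickle")                 # protocol-2 pickle of g.__dict__
    g2 = grammar.Grammar()
    g2.load("Grammar.pickle")
    assert g2.symbol2number == g.symbol2number
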
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/literals.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/literals.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,60 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Safely evaluate Python string literals without using eval()."""
++
++import re
++
++simple_escapes = {"a": "\a",
++                  "b": "\b",
++                  "f": "\f",
++                  "n": "\n",
++                  "r": "\r",
++                  "t": "\t",
++                  "v": "\v",
++                  "'": "'",
++                  '"': '"',
++                  "\\": "\\"}
++
++def escape(m):
++    all, tail = m.group(0, 1)
++    assert all.startswith("\\")
++    esc = simple_escapes.get(tail)
++    if esc is not None:
++        return esc
++    if tail.startswith("x"):
++        hexes = tail[1:]
++        if len(hexes) < 2:
++            raise ValueError("invalid hex string escape ('\\%s')" % tail)
++        try:
++            i = int(hexes, 16)
++        except ValueError:
++            raise ValueError("invalid hex string escape ('\\%s')" % tail)
++    else:
++        try:
++            i = int(tail, 8)
++        except ValueError:
++            raise ValueError("invalid octal string escape ('\\%s')" % tail)
++    return chr(i)
++
++def evalString(s):
++    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
++    q = s[0]
++    if s[:3] == q*3:
++        q = q*3
++    assert s.endswith(q), repr(s[-len(q):])
++    assert len(s) >= 2*len(q)
++    s = s[len(q):-len(q)]
++    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
++
++def test():
++    for i in range(256):
++        c = chr(i)
++        s = repr(c)
++        e = evalString(s)
++        if e != c:
++            print i, c, s, e
++
++
++if __name__ == "__main__":
++    test()
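
A few literals the escape regex above is meant to cover, with the expected
results shown as repr() output:

    # evalString() decodes string literals without calling eval().
    from refactor.pgen2 import literals

    print repr(literals.evalString("'a\\nb'"))   # 'a\nb'  (simple escape)
    print repr(literals.evalString('"\\x41"'))   # 'A'     (hex escape)
    print repr(literals.evalString("'\\101'"))   # 'A'     (octal escape)
    print repr(literals.evalString("'''q'''"))   # 'q'     (triple quotes)
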
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/parse.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/parse.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,201 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Parser engine for the grammar tables generated by pgen.
++
++The grammar table must be loaded first.
++
++See Parser/parser.c in the Python distribution for additional info on
++how this parsing engine works.
++
++"""
++
++# Local imports
++from . import token
++
++class ParseError(Exception):
++    """Exception to signal the parser is stuck."""
++
++    def __init__(self, msg, type, value, context):
++        Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
++                           (msg, type, value, context))
++        self.msg = msg
++        self.type = type
++        self.value = value
++        self.context = context
++
++class Parser(object):
++    """Parser engine.
++
++    The proper usage sequence is:
++
++    p = Parser(grammar, [converter])  # create instance
++    p.setup([start])                  # prepare for parsing
++    <for each input token>:
++        if p.addtoken(...):           # parse a token; may raise ParseError
++            break
++    root = p.rootnode                 # root of abstract syntax tree
++
++    A Parser instance may be reused by calling setup() repeatedly.
++
++    A Parser instance contains state pertaining to the current token
++    sequence, and should not be used concurrently by different threads
++    to parse separate token sequences.
++
++    See driver.py for how to get input tokens by tokenizing a file or
++    string.
++
++    Parsing is complete when addtoken() returns True; the root of the
++    abstract syntax tree can then be retrieved from the rootnode
++    instance variable.  When a syntax error occurs, addtoken() raises
++    the ParseError exception.  There is no error recovery; the parser
++    cannot be used after a syntax error was reported (but it can be
++    reinitialized by calling setup()).
++
++    """
++
++    def __init__(self, grammar, convert=None):
++        """Constructor.
++
++        The grammar argument is a grammar.Grammar instance; see the
++        grammar module for more information.
++
++        The parser is not ready yet for parsing; you must call the
++        setup() method to get it started.
++
++        The optional convert argument is a function mapping concrete
++        syntax tree nodes to abstract syntax tree nodes.  If not
++        given, no conversion is done and the syntax tree produced is
++        the concrete syntax tree.  If given, it must be a function of
++        two arguments, the first being the grammar (a grammar.Grammar
++        instance), and the second being the concrete syntax tree node
++        to be converted.  The syntax tree is converted from the bottom
++        up.
++
++        A concrete syntax tree node is a (type, value, context, nodes)
++        tuple, where type is the node type (a token or symbol number),
++        value is None for symbols and a string for tokens, context is
++        None or an opaque value used for error reporting (typically a
++        (lineno, offset) pair), and nodes is a list of children for
++        symbols, and None for tokens.
++
++        An abstract syntax tree node may be anything; this is entirely
++        up to the converter function.
++
++        """
++        self.grammar = grammar
++        self.convert = convert or (lambda grammar, node: node)
++
++    def setup(self, start=None):
++        """Prepare for parsing.
++
++        This *must* be called before starting to parse.
++
++        The optional argument is an alternative start symbol; it
++        defaults to the grammar's start symbol.
++
++        You can use a Parser instance to parse any number of programs;
++        each time you call setup() the parser is reset to an initial
++        state determined by the (implicit or explicit) start symbol.
++
++        """
++        if start is None:
++            start = self.grammar.start
++        # Each stack entry is a tuple: (dfa, state, node).
++        # A node is a tuple: (type, value, context, children),
++        # where children is a list of nodes or None, and context may be None.
++        newnode = (start, None, None, [])
++        stackentry = (self.grammar.dfas[start], 0, newnode)
++        self.stack = [stackentry]
++        self.rootnode = None
++        self.used_names = set() # Aliased to self.rootnode.used_names in pop()
++
++    def addtoken(self, type, value, context):
++        """Add a token; return True iff this is the end of the program."""
++        # Map from token to label
++        ilabel = self.classify(type, value, context)
++        # Loop until the token is shifted; may raise exceptions
++        while True:
++            dfa, state, node = self.stack[-1]
++            states, first = dfa
++            arcs = states[state]
++            # Look for a state with this label
++            for i, newstate in arcs:
++                t, v = self.grammar.labels[i]
++                if ilabel == i:
++                    # Look it up in the list of labels
++                    assert t < 256
++                    # Shift a token; we're done with it
++                    self.shift(type, value, newstate, context)
++                    # Pop while we are in an accept-only state
++                    state = newstate
++                    while states[state] == [(0, state)]:
++                        self.pop()
++                        if not self.stack:
++                            # Done parsing!
++                            return True
++                        dfa, state, node = self.stack[-1]
++                        states, first = dfa
++                    # Done with this token
++                    return False
++                elif t >= 256:
++                    # See if it's a symbol and if we're in its first set
++                    itsdfa = self.grammar.dfas[t]
++                    itsstates, itsfirst = itsdfa
++                    if ilabel in itsfirst:
++                        # Push a symbol
++                        self.push(t, self.grammar.dfas[t], newstate, context)
++                        break # To continue the outer while loop
++            else:
++                if (0, state) in arcs:
++                    # An accepting state, pop it and try something else
++                    self.pop()
++                    if not self.stack:
++                        # Done parsing, but another token is input
++                        raise ParseError("too much input",
++                                         type, value, context)
++                else:
++                    # No success finding a transition
++                    raise ParseError("bad input", type, value, context)
++
++    def classify(self, type, value, context):
++        """Turn a token into a label.  (Internal)"""
++        if type == token.NAME:
++            # Keep a listing of all used names
++            self.used_names.add(value)
++            # Check for reserved words
++            ilabel = self.grammar.keywords.get(value)
++            if ilabel is not None:
++                return ilabel
++        ilabel = self.grammar.tokens.get(type)
++        if ilabel is None:
++            raise ParseError("bad token", type, value, context)
++        return ilabel
++
++    def shift(self, type, value, newstate, context):
++        """Shift a token.  (Internal)"""
++        dfa, state, node = self.stack[-1]
++        newnode = (type, value, context, None)
++        newnode = self.convert(self.grammar, newnode)
++        if newnode is not None:
++            node[-1].append(newnode)
++        self.stack[-1] = (dfa, newstate, node)
++
++    def push(self, type, newdfa, newstate, context):
++        """Push a nonterminal.  (Internal)"""
++        dfa, state, node = self.stack[-1]
++        newnode = (type, None, context, [])
++        self.stack[-1] = (dfa, newstate, node)
++        self.stack.append((newdfa, 0, newnode))
++
++    def pop(self):
++        """Pop a nonterminal.  (Internal)"""
++        popdfa, popstate, popnode = self.stack.pop()
++        newnode = self.convert(self.grammar, popnode)
++        if newnode is not None:
++            if self.stack:
++                dfa, state, node = self.stack[-1]
++                node[-1].append(newnode)
++            else:
++                self.rootnode = newnode
++                self.rootnode.used_names = self.used_names
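
The docstring's usage sequence, fleshed out with the token-to-label plumbing
that driver.parse_tokens() normally supplies (prefix bookkeeping omitted). A
sketch assuming the package's Grammar.txt path and pytree.convert:

    # Drive the parser by hand, loosely mirroring driver.parse_tokens().
    from refactor import pytree
    from refactor.pgen2 import driver, grammar, parse, token, tokenize

    gr = driver.load_grammar("refactor/Grammar.txt")
    p = parse.Parser(gr, convert=pytree.convert)
    p.setup()
    tokens = tokenize.generate_tokens(driver.generate_lines("x = 1\n").next)
    for type, value, start, end, line_text in tokens:
        if type in (tokenize.COMMENT, tokenize.NL):
            continue                       # comments/blank lines carry no syntax
        if type == token.OP:
            type = grammar.opmap[value]    # tokenize reports all operators as OP
        if p.addtoken(type, value, ("", start)):
            break                          # ENDMARKER accepted; parsing complete
    root = p.rootnode                      # pytree Node for the whole program
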
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/pgen.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/pgen.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,384 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++# Pgen imports
++from . import grammar, token, tokenize
++
++class PgenGrammar(grammar.Grammar):
++    pass
++
++class ParserGenerator(object):
++
++    def __init__(self, filename, stream=None):
++        close_stream = None
++        if stream is None:
++            stream = open(filename)
++            close_stream = stream.close
++        self.filename = filename
++        self.stream = stream
++        self.generator = tokenize.generate_tokens(stream.readline)
++        self.gettoken() # Initialize lookahead
++        self.dfas, self.startsymbol = self.parse()
++        if close_stream is not None:
++            close_stream()
++        self.first = {} # map from symbol name to set of tokens
++        self.addfirstsets()
++
++    def make_grammar(self):
++        c = PgenGrammar()
++        names = self.dfas.keys()
++        names.sort()
++        names.remove(self.startsymbol)
++        names.insert(0, self.startsymbol)
++        for name in names:
++            i = 256 + len(c.symbol2number)
++            c.symbol2number[name] = i
++            c.number2symbol[i] = name
++        for name in names:
++            dfa = self.dfas[name]
++            states = []
++            for state in dfa:
++                arcs = []
++                for label, next in state.arcs.iteritems():
++                    arcs.append((self.make_label(c, label), dfa.index(next)))
++                if state.isfinal:
++                    arcs.append((0, dfa.index(state)))
++                states.append(arcs)
++            c.states.append(states)
++            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
++        c.start = c.symbol2number[self.startsymbol]
++        return c
++
++    def make_first(self, c, name):
++        rawfirst = self.first[name]
++        first = {}
++        for label in rawfirst:
++            ilabel = self.make_label(c, label)
++            ##assert ilabel not in first # XXX failed on <> ... !=
++            first[ilabel] = 1
++        return first
++
++    def make_label(self, c, label):
++        # XXX Maybe this should be a method on a subclass of converter?
++        ilabel = len(c.labels)
++        if label[0].isalpha():
++            # Either a symbol name or a named token
++            if label in c.symbol2number:
++                # A symbol name (a non-terminal)
++                if label in c.symbol2label:
++                    return c.symbol2label[label]
++                else:
++                    c.labels.append((c.symbol2number[label], None))
++                    c.symbol2label[label] = ilabel
++                    return ilabel
++            else:
++                # A named token (NAME, NUMBER, STRING)
++                itoken = getattr(token, label, None)
++                assert isinstance(itoken, int), label
++                assert itoken in token.tok_name, label
++                if itoken in c.tokens:
++                    return c.tokens[itoken]
++                else:
++                    c.labels.append((itoken, None))
++                    c.tokens[itoken] = ilabel
++                    return ilabel
++        else:
++            # Either a keyword or an operator
++            assert label[0] in ('"', "'"), label
++            value = eval(label)
++            if value[0].isalpha():
++                # A keyword
++                if value in c.keywords:
++                    return c.keywords[value]
++                else:
++                    c.labels.append((token.NAME, value))
++                    c.keywords[value] = ilabel
++                    return ilabel
++            else:
++                # An operator (any non-numeric token)
++                itoken = grammar.opmap[value] # Fails if unknown token
++                if itoken in c.tokens:
++                    return c.tokens[itoken]
++                else:
++                    c.labels.append((itoken, None))
++                    c.tokens[itoken] = ilabel
++                    return ilabel
++
++    def addfirstsets(self):
++        names = self.dfas.keys()
++        names.sort()
++        for name in names:
++            if name not in self.first:
++                self.calcfirst(name)
++            #print name, self.first[name].keys()
++
++    def calcfirst(self, name):
++        dfa = self.dfas[name]
++        self.first[name] = None # dummy to detect left recursion
++        state = dfa[0]
++        totalset = {}
++        overlapcheck = {}
++        for label, next in state.arcs.iteritems():
++            if label in self.dfas:
++                if label in self.first:
++                    fset = self.first[label]
++                    if fset is None:
++                        raise ValueError("recursion for rule %r" % name)
++                else:
++                    self.calcfirst(label)
++                    fset = self.first[label]
++                totalset.update(fset)
++                overlapcheck[label] = fset
++            else:
++                totalset[label] = 1
++                overlapcheck[label] = {label: 1}
++        inverse = {}
++        for label, itsfirst in overlapcheck.iteritems():
++            for symbol in itsfirst:
++                if symbol in inverse:
++                    raise ValueError("rule %s is ambiguous; %s is in the"
++                                     " first sets of %s as well as %s" %
++                                     (name, symbol, label, inverse[symbol]))
++                inverse[symbol] = label
++        self.first[name] = totalset
++
++    def parse(self):
++        dfas = {}
++        startsymbol = None
++        # MSTART: (NEWLINE | RULE)* ENDMARKER
++        while self.type != token.ENDMARKER:
++            while self.type == token.NEWLINE:
++                self.gettoken()
++            # RULE: NAME ':' RHS NEWLINE
++            name = self.expect(token.NAME)
++            self.expect(token.OP, ":")
++            a, z = self.parse_rhs()
++            self.expect(token.NEWLINE)
++            #self.dump_nfa(name, a, z)
++            dfa = self.make_dfa(a, z)
++            #self.dump_dfa(name, dfa)
++            oldlen = len(dfa)
++            self.simplify_dfa(dfa)
++            newlen = len(dfa)
++            dfas[name] = dfa
++            #print name, oldlen, newlen
++            if startsymbol is None:
++                startsymbol = name
++        return dfas, startsymbol
++
++    def make_dfa(self, start, finish):
++        # To turn an NFA into a DFA, we define the states of the DFA
++        # to correspond to *sets* of states of the NFA.  Then do some
++        # state reduction.  Let's represent sets as dicts with 1 for
++        # values.
++        assert isinstance(start, NFAState)
++        assert isinstance(finish, NFAState)
++        def closure(state):
++            base = {}
++            addclosure(state, base)
++            return base
++        def addclosure(state, base):
++            assert isinstance(state, NFAState)
++            if state in base:
++                return
++            base[state] = 1
++            for label, next in state.arcs:
++                if label is None:
++                    addclosure(next, base)
++        states = [DFAState(closure(start), finish)]
++        for state in states: # NB states grows while we're iterating
++            arcs = {}
++            for nfastate in state.nfaset:
++                for label, next in nfastate.arcs:
++                    if label is not None:
++                        addclosure(next, arcs.setdefault(label, {}))
++            for label, nfaset in arcs.iteritems():
++                for st in states:
++                    if st.nfaset == nfaset:
++                        break
++                else:
++                    st = DFAState(nfaset, finish)
++                    states.append(st)
++                state.addarc(st, label)
++        return states # List of DFAState instances; first one is start
++
++    def dump_nfa(self, name, start, finish):
++        print "Dump of NFA for", name
++        todo = [start]
++        for i, state in enumerate(todo):
++            print "  State", i, state is finish and "(final)" or ""
++            for label, next in state.arcs:
++                if next in todo:
++                    j = todo.index(next)
++                else:
++                    j = len(todo)
++                    todo.append(next)
++                if label is None:
++                    print "    -> %d" % j
++                else:
++                    print "    %s -> %d" % (label, j)
++
++    def dump_dfa(self, name, dfa):
++        print "Dump of DFA for", name
++        for i, state in enumerate(dfa):
++            print "  State", i, state.isfinal and "(final)" or ""
++            for label, next in state.arcs.iteritems():
++                print "    %s -> %d" % (label, dfa.index(next))
++
++    def simplify_dfa(self, dfa):
++        # This is not theoretically optimal, but works well enough.
++        # Algorithm: repeatedly look for two states that have the same
++        # set of arcs (same labels pointing to the same nodes) and
++        # unify them, until things stop changing.
++
++        # dfa is a list of DFAState instances
++        changes = True
++        while changes:
++            changes = False
++            for i, state_i in enumerate(dfa):
++                for j in range(i+1, len(dfa)):
++                    state_j = dfa[j]
++                    if state_i == state_j:
++                        #print "  unify", i, j
++                        del dfa[j]
++                        for state in dfa:
++                            state.unifystate(state_j, state_i)
++                        changes = True
++                        break
++
++    def parse_rhs(self):
++        # RHS: ALT ('|' ALT)*
++        a, z = self.parse_alt()
++        if self.value != "|":
++            return a, z
++        else:
++            aa = NFAState()
++            zz = NFAState()
++            aa.addarc(a)
++            z.addarc(zz)
++            while self.value == "|":
++                self.gettoken()
++                a, z = self.parse_alt()
++                aa.addarc(a)
++                z.addarc(zz)
++            return aa, zz
++
++    def parse_alt(self):
++        # ALT: ITEM+
++        a, b = self.parse_item()
++        while (self.value in ("(", "[") or
++               self.type in (token.NAME, token.STRING)):
++            c, d = self.parse_item()
++            b.addarc(c)
++            b = d
++        return a, b
++
++    def parse_item(self):
++        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
++        if self.value == "[":
++            self.gettoken()
++            a, z = self.parse_rhs()
++            self.expect(token.OP, "]")
++            a.addarc(z)
++            return a, z
++        else:
++            a, z = self.parse_atom()
++            value = self.value
++            if value not in ("+", "*"):
++                return a, z
++            self.gettoken()
++            z.addarc(a)
++            if value == "+":
++                return a, z
++            else:
++                return a, a
++
++    def parse_atom(self):
++        # ATOM: '(' RHS ')' | NAME | STRING
++        if self.value == "(":
++            self.gettoken()
++            a, z = self.parse_rhs()
++            self.expect(token.OP, ")")
++            return a, z
++        elif self.type in (token.NAME, token.STRING):
++            a = NFAState()
++            z = NFAState()
++            a.addarc(z, self.value)
++            self.gettoken()
++            return a, z
++        else:
++            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
++                             self.type, self.value)
++
++    def expect(self, type, value=None):
++        if self.type != type or (value is not None and self.value != value):
++            self.raise_error("expected %s/%s, got %s/%s",
++                             type, value, self.type, self.value)
++        value = self.value
++        self.gettoken()
++        return value
++
++    def gettoken(self):
++        tup = self.generator.next()
++        while tup[0] in (tokenize.COMMENT, tokenize.NL):
++            tup = self.generator.next()
++        self.type, self.value, self.begin, self.end, self.line = tup
++        #print token.tok_name[self.type], repr(self.value)
++
++    def raise_error(self, msg, *args):
++        if args:
++            try:
++                msg = msg % args
++            except:
++                msg = " ".join([msg] + map(str, args))
++        raise SyntaxError(msg, (self.filename, self.end[0],
++                                self.end[1], self.line))
++
++class NFAState(object):
++
++    def __init__(self):
++        self.arcs = [] # list of (label, NFAState) pairs
++
++    def addarc(self, next, label=None):
++        assert label is None or isinstance(label, str)
++        assert isinstance(next, NFAState)
++        self.arcs.append((label, next))
++
++class DFAState(object):
++
++    def __init__(self, nfaset, final):
++        assert isinstance(nfaset, dict)
++        assert isinstance(iter(nfaset).next(), NFAState)
++        assert isinstance(final, NFAState)
++        self.nfaset = nfaset
++        self.isfinal = final in nfaset
++        self.arcs = {} # map from label to DFAState
++
++    def addarc(self, next, label):
++        assert isinstance(label, str)
++        assert label not in self.arcs
++        assert isinstance(next, DFAState)
++        self.arcs[label] = next
++
++    def unifystate(self, old, new):
++        for label, next in self.arcs.iteritems():
++            if next is old:
++                self.arcs[label] = new
++
++    def __eq__(self, other):
++        # Equality test -- ignore the nfaset instance variable
++        assert isinstance(other, DFAState)
++        if self.isfinal != other.isfinal:
++            return False
++        # Can't just return self.arcs == other.arcs, because that
++        # would invoke this method recursively, with cycles...
++        if len(self.arcs) != len(other.arcs):
++            return False
++        for label, next in self.arcs.iteritems():
++            if next is not other.arcs.get(label):
++                return False
++        return True
++
++def generate_grammar(filename="Grammar.txt"):
++    p = ParserGenerator(filename)
++    return p.make_grammar()
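
For readers skimming the diff: make_dfa() above is a textbook subset construction. A minimal, self-contained sketch of the same idea on a hypothetical toy NFA, using plain dicts and sets rather than the NFAState/DFAState classes (all data here is made up for illustration):

    # Sketch of the NFA-to-DFA subset construction that make_dfa() performs.
    # States and labels are hypothetical toy data, not pgen2 objects.
    nfa = {0: [(None, 1), ('a', 2)],   # state -> list of (label, next) arcs;
           1: [('b', 2)],              # label None means an epsilon arc
           2: []}

    def closure(states):
        # Epsilon-closure: follow all label-None arcs transitively.
        result = set(states)
        todo = list(states)
        while todo:
            s = todo.pop()
            for label, nxt in nfa[s]:
                if label is None and nxt not in result:
                    result.add(nxt)
                    todo.append(nxt)
        return frozenset(result)

    start = closure([0])
    dfa = {start: {}}
    queue = [start]
    while queue:                       # NB the worklist grows as we discover states
        state = queue.pop()
        arcs = {}
        for s in state:
            for label, nxt in nfa[s]:
                if label is not None:
                    arcs.setdefault(label, set()).add(nxt)
        for label, nfaset in arcs.items():
            target = closure(nfaset)
            dfa[state][label] = target
            if target not in dfa:
                dfa[target] = {}
                queue.append(target)

    print dfa   # maps frozenset-of-NFA-states -> {label: frozenset, ...}
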
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/token.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/token.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,82 @@
++#! /usr/bin/env python
++
++"""Token constants (from "token.h")."""
++
++#  Taken from Python (r53757) and modified to include some tokens
++#   originally monkeypatched in by pgen2.tokenize
++
++#--start constants--
++ENDMARKER = 0
++NAME = 1
++NUMBER = 2
++STRING = 3
++NEWLINE = 4
++INDENT = 5
++DEDENT = 6
++LPAR = 7
++RPAR = 8
++LSQB = 9
++RSQB = 10
++COLON = 11
++COMMA = 12
++SEMI = 13
++PLUS = 14
++MINUS = 15
++STAR = 16
++SLASH = 17
++VBAR = 18
++AMPER = 19
++LESS = 20
++GREATER = 21
++EQUAL = 22
++DOT = 23
++PERCENT = 24
++BACKQUOTE = 25
++LBRACE = 26
++RBRACE = 27
++EQEQUAL = 28
++NOTEQUAL = 29
++LESSEQUAL = 30
++GREATEREQUAL = 31
++TILDE = 32
++CIRCUMFLEX = 33
++LEFTSHIFT = 34
++RIGHTSHIFT = 35
++DOUBLESTAR = 36
++PLUSEQUAL = 37
++MINEQUAL = 38
++STAREQUAL = 39
++SLASHEQUAL = 40
++PERCENTEQUAL = 41
++AMPEREQUAL = 42
++VBAREQUAL = 43
++CIRCUMFLEXEQUAL = 44
++LEFTSHIFTEQUAL = 45
++RIGHTSHIFTEQUAL = 46
++DOUBLESTAREQUAL = 47
++DOUBLESLASH = 48
++DOUBLESLASHEQUAL = 49
++AT = 50
++OP = 51
++COMMENT = 52
++NL = 53
++RARROW = 54
++ERRORTOKEN = 55
++N_TOKENS = 56
++NT_OFFSET = 256
++#--end constants--
++
++tok_name = {}
++for _name, _value in globals().items():
++    if type(_value) is type(0):
++        tok_name[_value] = _name
++
++
++def ISTERMINAL(x):
++    return x < NT_OFFSET
++
++def ISNONTERMINAL(x):
++    return x >= NT_OFFSET
++
++def ISEOF(x):
++    return x == ENDMARKER
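
A quick usage sketch for the constants above; the refactor package from this checkin is assumed importable (adjust the import path otherwise):

    from refactor.pgen2 import token   # assumed import path

    print token.tok_name[token.NAME]      # -> 'NAME'
    print token.ISTERMINAL(token.NAME)    # True: token numbers are < NT_OFFSET
    print token.ISNONTERMINAL(256)        # True: symbol numbers start at 256
    print token.ISEOF(token.ENDMARKER)    # True
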
+diff -r 531f2e948299 refactor/pgen2/.svn/text-base/tokenize.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/.svn/text-base/tokenize.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,405 @@
++# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
++# All rights reserved.
++
++"""Tokenization help for Python programs.
++
++generate_tokens(readline) is a generator that breaks a stream of
++text into Python tokens.  It accepts a readline-like method which is called
++repeatedly to get the next line of input (or "" for EOF).  It generates
++5-tuples with these members:
++
++    the token type (see token.py)
++    the token (a string)
++    the starting (row, column) indices of the token (a 2-tuple of ints)
++    the ending (row, column) indices of the token (a 2-tuple of ints)
++    the original line (string)
++
++It is designed to match the working of the Python tokenizer exactly, except
++that it produces COMMENT tokens for comments and gives type OP for all
++operators.
++
++Older entry points
++    tokenize_loop(readline, tokeneater)
++    tokenize(readline, tokeneater=printtoken)
++are the same, except instead of generating tokens, tokeneater is a callback
++function to which the 5 fields described above are passed as 5 arguments,
++each time a new token is found."""
++
++__author__ = 'Ka-Ping Yee <ping at lfw.org>'
++__credits__ = \
++    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
++
++import string, re
++from lib2to3.pgen2.token import *
++
++from . import token
++__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
++           "generate_tokens", "untokenize"]
++del token
++
++def group(*choices): return '(' + '|'.join(choices) + ')'
++def any(*choices): return group(*choices) + '*'
++def maybe(*choices): return group(*choices) + '?'
++
++Whitespace = r'[ \f\t]*'
++Comment = r'#[^\r\n]*'
++Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
++Name = r'[a-zA-Z_]\w*'
++
++Binnumber = r'0[bB][01]*'
++Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
++Octnumber = r'0[oO]?[0-7]*[lL]?'
++Decnumber = r'[1-9]\d*[lL]?'
++Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
++Exponent = r'[eE][-+]?\d+'
++Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
++Expfloat = r'\d+' + Exponent
++Floatnumber = group(Pointfloat, Expfloat)
++Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
++Number = group(Imagnumber, Floatnumber, Intnumber)
++
++# Tail end of ' string.
++Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
++# Tail end of " string.
++Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
++# Tail end of ''' string.
++Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
++# Tail end of """ string.
++Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
++Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
++# Single-line ' or " string.
++String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
++               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
++
++# Because of leftmost-then-longest match semantics, be sure to put the
++# longest operators first (e.g., if = came before ==, == would get
++# recognized as two instances of =).
++Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
++                 r"//=?", r"->",
++                 r"[+\-*/%&|^=<>]=?",
++                 r"~")
++
++Bracket = '[][(){}]'
++Special = group(r'\r?\n', r'[:;.,`@]')
++Funny = group(Operator, Bracket, Special)
++
++PlainToken = group(Number, Funny, String, Name)
++Token = Ignore + PlainToken
++
++# First (or only) line of ' or " string.
++ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
++                group("'", r'\\\r?\n'),
++                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
++                group('"', r'\\\r?\n'))
++PseudoExtras = group(r'\\\r?\n', Comment, Triple)
++PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
++
++tokenprog, pseudoprog, single3prog, double3prog = map(
++    re.compile, (Token, PseudoToken, Single3, Double3))
++endprogs = {"'": re.compile(Single), '"': re.compile(Double),
++            "'''": single3prog, '"""': double3prog,
++            "r'''": single3prog, 'r"""': double3prog,
++            "u'''": single3prog, 'u"""': double3prog,
++            "b'''": single3prog, 'b"""': double3prog,
++            "ur'''": single3prog, 'ur"""': double3prog,
++            "br'''": single3prog, 'br"""': double3prog,
++            "R'''": single3prog, 'R"""': double3prog,
++            "U'''": single3prog, 'U"""': double3prog,
++            "B'''": single3prog, 'B"""': double3prog,
++            "uR'''": single3prog, 'uR"""': double3prog,
++            "Ur'''": single3prog, 'Ur"""': double3prog,
++            "UR'''": single3prog, 'UR"""': double3prog,
++            "bR'''": single3prog, 'bR"""': double3prog,
++            "Br'''": single3prog, 'Br"""': double3prog,
++            "BR'''": single3prog, 'BR"""': double3prog,
++            'r': None, 'R': None,
++            'u': None, 'U': None,
++            'b': None, 'B': None}
++
++triple_quoted = {}
++for t in ("'''", '"""',
++          "r'''", 'r"""', "R'''", 'R"""',
++          "u'''", 'u"""', "U'''", 'U"""',
++          "b'''", 'b"""', "B'''", 'B"""',
++          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
++          "uR'''", 'uR"""', "UR'''", 'UR"""',
++          "br'''", 'br"""', "Br'''", 'Br"""',
++          "bR'''", 'bR"""', "BR'''", 'BR"""',):
++    triple_quoted[t] = t
++single_quoted = {}
++for t in ("'", '"',
++          "r'", 'r"', "R'", 'R"',
++          "u'", 'u"', "U'", 'U"',
++          "b'", 'b"', "B'", 'B"',
++          "ur'", 'ur"', "Ur'", 'Ur"',
++          "uR'", 'uR"', "UR'", 'UR"',
++          "br'", 'br"', "Br'", 'Br"',
++          "bR'", 'bR"', "BR'", 'BR"', ):
++    single_quoted[t] = t
++
++tabsize = 8
++
++class TokenError(Exception): pass
++
++class StopTokenizing(Exception): pass
++
++def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
++    print "%d,%d-%d,%d:\t%s\t%s" % \
++        (srow, scol, erow, ecol, tok_name[type], repr(token))
++
++def tokenize(readline, tokeneater=printtoken):
++    """
++    The tokenize() function accepts two parameters: one representing the
++    input stream, and one providing an output mechanism for tokenize().
++
++    The first parameter, readline, must be a callable object which provides
++    the same interface as the readline() method of built-in file objects.
++    Each call to the function should return one line of input as a string.
++
++    The second parameter, tokeneater, must also be a callable object. It is
++    called once for each token, with five arguments, corresponding to the
++    tuples generated by generate_tokens().
++    """
++    try:
++        tokenize_loop(readline, tokeneater)
++    except StopTokenizing:
++        pass
++
++# backwards compatible interface
++def tokenize_loop(readline, tokeneater):
++    for token_info in generate_tokens(readline):
++        tokeneater(*token_info)
++
++class Untokenizer:
++
++    def __init__(self):
++        self.tokens = []
++        self.prev_row = 1
++        self.prev_col = 0
++
++    def add_whitespace(self, start):
++        row, col = start
++        assert row <= self.prev_row
++        col_offset = col - self.prev_col
++        if col_offset:
++            self.tokens.append(" " * col_offset)
++
++    def untokenize(self, iterable):
++        for t in iterable:
++            if len(t) == 2:
++                self.compat(t, iterable)
++                break
++            tok_type, token, start, end, line = t
++            self.add_whitespace(start)
++            self.tokens.append(token)
++            self.prev_row, self.prev_col = end
++            if tok_type in (NEWLINE, NL):
++                self.prev_row += 1
++                self.prev_col = 0
++        return "".join(self.tokens)
++
++    def compat(self, token, iterable):
++        startline = False
++        indents = []
++        toks_append = self.tokens.append
++        toknum, tokval = token
++        if toknum in (NAME, NUMBER):
++            tokval += ' '
++        if toknum in (NEWLINE, NL):
++            startline = True
++        for tok in iterable:
++            toknum, tokval = tok[:2]
++
++            if toknum in (NAME, NUMBER):
++                tokval += ' '
++
++            if toknum == INDENT:
++                indents.append(tokval)
++                continue
++            elif toknum == DEDENT:
++                indents.pop()
++                continue
++            elif toknum in (NEWLINE, NL):
++                startline = True
++            elif startline and indents:
++                toks_append(indents[-1])
++                startline = False
++            toks_append(tokval)
++
++def untokenize(iterable):
++    """Transform tokens back into Python source code.
++
++    Each element returned by the iterable must be a token sequence
++    with at least two elements, a token number and token value.  If
++    only two tokens are passed, the resulting output is poor.
++
++    Round-trip invariant for full input:
++        Untokenized source will match input source exactly
++
++    Round-trip invariant for limited input:
++        # Output text will tokenize back to the input
++        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
++        newcode = untokenize(t1)
++        readline = iter(newcode.splitlines(1)).next
++        t2 = [tok[:2] for tok in generate_tokens(readline)]
++        assert t1 == t2
++    """
++    ut = Untokenizer()
++    return ut.untokenize(iterable)
++
++def generate_tokens(readline):
++    """
++    The generate_tokens() generator requires one argument, readline, which
++    must be a callable object which provides the same interface as the
++    readline() method of built-in file objects. Each call to the function
++    should return one line of input as a string.  Alternately, readline
++    can be a callable function terminating with StopIteration:
++        readline = open(myfile).next    # Example of alternate readline
++
++    The generator produces 5-tuples with these members: the token type; the
++    token string; a 2-tuple (srow, scol) of ints specifying the row and
++    column where the token begins in the source; a 2-tuple (erow, ecol) of
++    ints specifying the row and column where the token ends in the source;
++    and the line on which the token was found. The line passed is the
++    logical line; continuation lines are included.
++    """
++    lnum = parenlev = continued = 0
++    namechars, numchars = string.ascii_letters + '_', '0123456789'
++    contstr, needcont = '', 0
++    contline = None
++    indents = [0]
++
++    while 1:                                   # loop over lines in stream
++        try:
++            line = readline()
++        except StopIteration:
++            line = ''
++        lnum = lnum + 1
++        pos, max = 0, len(line)
++
++        if contstr:                            # continued string
++            if not line:
++                raise TokenError, ("EOF in multi-line string", strstart)
++            endmatch = endprog.match(line)
++            if endmatch:
++                pos = end = endmatch.end(0)
++                yield (STRING, contstr + line[:end],
++                       strstart, (lnum, end), contline + line)
++                contstr, needcont = '', 0
++                contline = None
++            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
++                yield (ERRORTOKEN, contstr + line,
++                           strstart, (lnum, len(line)), contline)
++                contstr = ''
++                contline = None
++                continue
++            else:
++                contstr = contstr + line
++                contline = contline + line
++                continue
++
++        elif parenlev == 0 and not continued:  # new statement
++            if not line: break
++            column = 0
++            while pos < max:                   # measure leading whitespace
++                if line[pos] == ' ': column = column + 1
++                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
++                elif line[pos] == '\f': column = 0
++                else: break
++                pos = pos + 1
++            if pos == max: break
++
++            if line[pos] in '#\r\n':           # skip comments or blank lines
++                if line[pos] == '#':
++                    comment_token = line[pos:].rstrip('\r\n')
++                    nl_pos = pos + len(comment_token)
++                    yield (COMMENT, comment_token,
++                           (lnum, pos), (lnum, pos + len(comment_token)), line)
++                    yield (NL, line[nl_pos:],
++                           (lnum, nl_pos), (lnum, len(line)), line)
++                else:
++                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
++                           (lnum, pos), (lnum, len(line)), line)
++                continue
++
++            if column > indents[-1]:           # count indents or dedents
++                indents.append(column)
++                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
++            while column < indents[-1]:
++                if column not in indents:
++                    raise IndentationError(
++                        "unindent does not match any outer indentation level",
++                        ("<tokenize>", lnum, pos, line))
++                indents = indents[:-1]
++                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
++
++        else:                                  # continued statement
++            if not line:
++                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
++            continued = 0
++
++        while pos < max:
++            pseudomatch = pseudoprog.match(line, pos)
++            if pseudomatch:                                # scan for tokens
++                start, end = pseudomatch.span(1)
++                spos, epos, pos = (lnum, start), (lnum, end), end
++                token, initial = line[start:end], line[start]
++
++                if initial in numchars or \
++                   (initial == '.' and token != '.'):      # ordinary number
++                    yield (NUMBER, token, spos, epos, line)
++                elif initial in '\r\n':
++                    newline = NEWLINE
++                    if parenlev > 0:
++                        newline = NL
++                    yield (newline, token, spos, epos, line)
++                elif initial == '#':
++                    assert not token.endswith("\n")
++                    yield (COMMENT, token, spos, epos, line)
++                elif token in triple_quoted:
++                    endprog = endprogs[token]
++                    endmatch = endprog.match(line, pos)
++                    if endmatch:                           # all on one line
++                        pos = endmatch.end(0)
++                        token = line[start:pos]
++                        yield (STRING, token, spos, (lnum, pos), line)
++                    else:
++                        strstart = (lnum, start)           # multiple lines
++                        contstr = line[start:]
++                        contline = line
++                        break
++                elif initial in single_quoted or \
++                    token[:2] in single_quoted or \
++                    token[:3] in single_quoted:
++                    if token[-1] == '\n':                  # continued string
++                        strstart = (lnum, start)
++                        endprog = (endprogs[initial] or endprogs[token[1]] or
++                                   endprogs[token[2]])
++                        contstr, needcont = line[start:], 1
++                        contline = line
++                        break
++                    else:                                  # ordinary string
++                        yield (STRING, token, spos, epos, line)
++                elif initial in namechars:                 # ordinary name
++                    yield (NAME, token, spos, epos, line)
++                elif initial == '\\':                      # continued stmt
++                    # This yield is new; needed for better idempotency:
++                    yield (NL, token, spos, (lnum, pos), line)
++                    continued = 1
++                else:
++                    if initial in '([{': parenlev = parenlev + 1
++                    elif initial in ')]}': parenlev = parenlev - 1
++                    yield (OP, token, spos, epos, line)
++            else:
++                yield (ERRORTOKEN, line[pos],
++                           (lnum, pos), (lnum, pos+1), line)
++                pos = pos + 1
++
++    for indent in indents[1:]:                 # pop remaining indent levels
++        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
++    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
++
++if __name__ == '__main__':                     # testing
++    import sys
++    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
++    else: tokenize(sys.stdin.readline)
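
A minimal sketch of driving generate_tokens() with a readline callable, plus a check of the full-input round-trip invariant from the untokenize() docstring (assumes the refactor package is importable):

    from StringIO import StringIO
    from refactor.pgen2 import tokenize          # assumed import path
    from refactor.pgen2.token import tok_name

    source = "x = 1  # comment\n"
    toks = list(tokenize.generate_tokens(StringIO(source).readline))
    for type_, value, start, end, line in toks:
        print tok_name[type_], repr(value), start, end

    # With full 5-tuples, untokenize() reproduces the input exactly:
    print tokenize.untokenize(toks) == source    # True
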
+diff -r 531f2e948299 refactor/pgen2/__init__.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,12 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""The pgen2 package."""
++import conv
++import driver
++import grammar
++import literals
++import parse
++import pgen
++import tokenize
++import token
+diff -r 531f2e948299 refactor/pgen2/conv.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/conv.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,257 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Convert graminit.[ch] spit out by pgen to Python code.
++
++Pgen is the Python parser generator.  It is useful to quickly create a
++parser from a grammar file in Python's grammar notation.  But I don't
++want my parsers to be written in C (yet), so I'm translating the
++parsing tables to Python data structures and writing a Python parse
++engine.
++
++Note that the token numbers are constants determined by the standard
++Python tokenizer.  The standard token module defines these numbers and
++their names (the names are not used much).  The token numbers are
++hardcoded into the Python tokenizer and into pgen.  A Python
++implementation of the Python tokenizer is also available, in the
++standard tokenize module.
++
++On the other hand, symbol numbers (representing the grammar's
++non-terminals) are assigned by pgen based on the actual grammar
++input.
++
++Note: this module is pretty much obsolete; the pgen module generates
++equivalent grammar tables directly from the Grammar.txt input file
++without having to invoke the Python pgen C program.
++
++"""
++
++# Python imports
++import re
++
++# Local imports
++from . import grammar, token
++
++
++class Converter(grammar.Grammar):
++    """Grammar subclass that reads classic pgen output files.
++
++    The run() method reads the tables as produced by the pgen parser
++    generator, typically contained in two C files, graminit.h and
++    graminit.c.  The other methods are for internal use only.
++
++    See the base class for more documentation.
++
++    """
++
++    def run(self, graminit_h, graminit_c):
++        """Load the grammar tables from the text files written by pgen."""
++        self.parse_graminit_h(graminit_h)
++        self.parse_graminit_c(graminit_c)
++        self.finish_off()
++
++    def parse_graminit_h(self, filename):
++        """Parse the .h file writen by pgen.  (Internal)
++
++        This file is a sequence of #define statements defining the
++        nonterminals of the grammar as numbers.  We build two tables
++        mapping the numbers to names and back.
++
++        """
++        try:
++            f = open(filename)
++        except IOError, err:
++            print "Can't open %s: %s" % (filename, err)
++            return False
++        self.symbol2number = {}
++        self.number2symbol = {}
++        lineno = 0
++        for line in f:
++            lineno += 1
++            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
++            if not mo and line.strip():
++                print "%s(%s): can't parse %s" % (filename, lineno,
++                                                  line.strip())
++            else:
++                symbol, number = mo.groups()
++                number = int(number)
++                assert symbol not in self.symbol2number
++                assert number not in self.number2symbol
++                self.symbol2number[symbol] = number
++                self.number2symbol[number] = symbol
++        return True
++
++    def parse_graminit_c(self, filename):
++        """Parse the .c file writen by pgen.  (Internal)
++
++        The file looks as follows.  The first two lines are always this:
++
++        #include "pgenheaders.h"
++        #include "grammar.h"
++
++        After that come four blocks:
++
++        1) one or more state definitions
++        2) a table defining dfas
++        3) a table defining labels
++        4) a struct defining the grammar
++
++        A state definition has the following form:
++        - one or more arc arrays, each of the form:
++          static arc arcs_<n>_<m>[<k>] = {
++                  {<i>, <j>},
++                  ...
++          };
++        - followed by a state array, of the form:
++          static state states_<s>[<t>] = {
++                  {<k>, arcs_<n>_<m>},
++                  ...
++          };
++
++        """
++        try:
++            f = open(filename)
++        except IOError, err:
++            print "Can't open %s: %s" % (filename, err)
++            return False
++        # The code below essentially uses f's iterator-ness!
++        lineno = 0
++
++        # Expect the two #include lines
++        lineno, line = lineno+1, f.next()
++        assert line == '#include "pgenheaders.h"\n', (lineno, line)
++        lineno, line = lineno+1, f.next()
++        assert line == '#include "grammar.h"\n', (lineno, line)
++
++        # Parse the state definitions
++        lineno, line = lineno+1, f.next()
++        allarcs = {}
++        states = []
++        while line.startswith("static arc "):
++            while line.startswith("static arc "):
++                mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
++                              line)
++                assert mo, (lineno, line)
++                n, m, k = map(int, mo.groups())
++                arcs = []
++                for _ in range(k):
++                    lineno, line = lineno+1, f.next()
++                    mo = re.match(r"\s+{(\d+), (\d+)},$", line)
++                    assert mo, (lineno, line)
++                    i, j = map(int, mo.groups())
++                    arcs.append((i, j))
++                lineno, line = lineno+1, f.next()
++                assert line == "};\n", (lineno, line)
++                allarcs[(n, m)] = arcs
++                lineno, line = lineno+1, f.next()
++            mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
++            assert mo, (lineno, line)
++            s, t = map(int, mo.groups())
++            assert s == len(states), (lineno, line)
++            state = []
++            for _ in range(t):
++                lineno, line = lineno+1, f.next()
++                mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
++                assert mo, (lineno, line)
++                k, n, m = map(int, mo.groups())
++                arcs = allarcs[n, m]
++                assert k == len(arcs), (lineno, line)
++                state.append(arcs)
++            states.append(state)
++            lineno, line = lineno+1, f.next()
++            assert line == "};\n", (lineno, line)
++            lineno, line = lineno+1, f.next()
++        self.states = states
++
++        # Parse the dfas
++        dfas = {}
++        mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
++        assert mo, (lineno, line)
++        ndfas = int(mo.group(1))
++        for i in range(ndfas):
++            lineno, line = lineno+1, f.next()
++            mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
++                          line)
++            assert mo, (lineno, line)
++            symbol = mo.group(2)
++            number, x, y, z = map(int, mo.group(1, 3, 4, 5))
++            assert self.symbol2number[symbol] == number, (lineno, line)
++            assert self.number2symbol[number] == symbol, (lineno, line)
++            assert x == 0, (lineno, line)
++            state = states[z]
++            assert y == len(state), (lineno, line)
++            lineno, line = lineno+1, f.next()
++            mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
++            assert mo, (lineno, line)
++            first = {}
++            rawbitset = eval(mo.group(1))
++            for i, c in enumerate(rawbitset):
++                byte = ord(c)
++                for j in range(8):
++                    if byte & (1<<j):
++                        first[i*8 + j] = 1
++            dfas[number] = (state, first)
++        lineno, line = lineno+1, f.next()
++        assert line == "};\n", (lineno, line)
++        self.dfas = dfas
++
++        # Parse the labels
++        labels = []
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"static label labels\[(\d+)\] = {$", line)
++        assert mo, (lineno, line)
++        nlabels = int(mo.group(1))
++        for i in range(nlabels):
++            lineno, line = lineno+1, f.next()
++            mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
++            assert mo, (lineno, line)
++            x, y = mo.groups()
++            x = int(x)
++            if y == "0":
++                y = None
++            else:
++                y = eval(y)
++            labels.append((x, y))
++        lineno, line = lineno+1, f.next()
++        assert line == "};\n", (lineno, line)
++        self.labels = labels
++
++        # Parse the grammar struct
++        lineno, line = lineno+1, f.next()
++        assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"\s+(\d+),$", line)
++        assert mo, (lineno, line)
++        ndfas = int(mo.group(1))
++        assert ndfas == len(self.dfas)
++        lineno, line = lineno+1, f.next()
++        assert line == "\tdfas,\n", (lineno, line)
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"\s+{(\d+), labels},$", line)
++        assert mo, (lineno, line)
++        nlabels = int(mo.group(1))
++        assert nlabels == len(self.labels), (lineno, line)
++        lineno, line = lineno+1, f.next()
++        mo = re.match(r"\s+(\d+)$", line)
++        assert mo, (lineno, line)
++        start = int(mo.group(1))
++        assert start in self.number2symbol, (lineno, line)
++        self.start = start
++        lineno, line = lineno+1, f.next()
++        assert line == "};\n", (lineno, line)
++        try:
++            lineno, line = lineno+1, f.next()
++        except StopIteration:
++            pass
++        else:
++            assert 0, (lineno, line)
++
++    def finish_off(self):
++        """Create additional useful structures.  (Internal)."""
++        self.keywords = {} # map from keyword strings to arc labels
++        self.tokens = {}   # map from numeric token values to arc labels
++        for ilabel, (type, value) in enumerate(self.labels):
++            if type == token.NAME and value is not None:
++                self.keywords[value] = ilabel
++            elif value is None:
++                self.tokens[type] = ilabel
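
For illustration, the #define scraping that parse_graminit_h() performs, run over a hypothetical graminit.h fragment instead of a real file:

    # Mirrors the regex used by parse_graminit_h(); the fragment is made up.
    import re

    fragment = ['#define single_input 256\n',
                '#define file_input 257\n']
    symbol2number = {}
    for line in fragment:
        mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line.rstrip())
        if mo:
            symbol, number = mo.group(1), int(mo.group(2))
            symbol2number[symbol] = number
    print symbol2number   # e.g. {'single_input': 256, 'file_input': 257}
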
+diff -r 531f2e948299 refactor/pgen2/driver.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/driver.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,146 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++# Modifications:
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Parser driver.
++
++This provides a high-level interface to parse a file into a syntax tree.
++
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++__all__ = ["Driver", "load_grammar"]
++
++# Python imports
++import os
++import logging
++import sys
++
++# Pgen imports
++from . import grammar, parse, token, tokenize, pgen
++
++
++class Driver(object):
++
++    def __init__(self, grammar, convert=None, logger=None):
++        self.grammar = grammar
++        if logger is None:
++            logger = logging.getLogger()
++        self.logger = logger
++        self.convert = convert
++
++    def parse_tokens(self, tokens, debug=False):
++        """Parse a series of tokens and return the syntax tree."""
++        # XXX Move the prefix computation into a wrapper around tokenize.
++        p = parse.Parser(self.grammar, self.convert)
++        p.setup()
++        lineno = 1
++        column = 0
++        type = value = start = end = line_text = None
++        prefix = ""
++        for quintuple in tokens:
++            type, value, start, end, line_text = quintuple
++            if start != (lineno, column):
++                assert (lineno, column) <= start, ((lineno, column), start)
++                s_lineno, s_column = start
++                if lineno < s_lineno:
++                    prefix += "\n" * (s_lineno - lineno)
++                    lineno = s_lineno
++                    column = 0
++                if column < s_column:
++                    prefix += line_text[column:s_column]
++                    column = s_column
++            if type in (tokenize.COMMENT, tokenize.NL):
++                prefix += value
++                lineno, column = end
++                if value.endswith("\n"):
++                    lineno += 1
++                    column = 0
++                continue
++            if type == token.OP:
++                type = grammar.opmap[value]
++            if debug:
++                self.logger.debug("%s %r (prefix=%r)",
++                                  token.tok_name[type], value, prefix)
++            if p.addtoken(type, value, (prefix, start)):
++                if debug:
++                    self.logger.debug("Stop.")
++                break
++            prefix = ""
++            lineno, column = end
++            if value.endswith("\n"):
++                lineno += 1
++                column = 0
++        else:
++            # We never broke out -- EOF is too soon (how can this happen???)
++            raise parse.ParseError("incomplete input",
++                                   type, value, (prefix, start))
++        return p.rootnode
++
++    def parse_stream_raw(self, stream, debug=False):
++        """Parse a stream and return the syntax tree."""
++        tokens = tokenize.generate_tokens(stream.readline)
++        return self.parse_tokens(tokens, debug)
++
++    def parse_stream(self, stream, debug=False):
++        """Parse a stream and return the syntax tree."""
++        return self.parse_stream_raw(stream, debug)
++
++    def parse_file(self, filename, debug=False):
++        """Parse a file and return the syntax tree."""
++        stream = open(filename)
++        try:
++            return self.parse_stream(stream, debug)
++        finally:
++            stream.close()
++
++    def parse_string(self, text, debug=False):
++        """Parse a string and return the syntax tree."""
++        tokens = tokenize.generate_tokens(generate_lines(text).next)
++        return self.parse_tokens(tokens, debug)
++
++
++def generate_lines(text):
++    """Generator that behaves like readline without using StringIO."""
++    for line in text.splitlines(True):
++        yield line
++    while True:
++        yield ""
++
++
++def load_grammar(gt="Grammar.txt", gp=None,
++                 save=True, force=False, logger=None):
++    """Load the grammar (maybe from a pickle)."""
++    if logger is None:
++        logger = logging.getLogger()
++    if gp is None:
++        head, tail = os.path.splitext(gt)
++        if tail == ".txt":
++            tail = ""
++        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
++    if force or not _newer(gp, gt):
++        logger.info("Generating grammar tables from %s", gt)
++        g = pgen.generate_grammar(gt)
++        if save:
++            logger.info("Writing grammar tables to %s", gp)
++            try:
++                g.dump(gp)
++            except IOError, e:
++                logger.info("Writing failed:"+str(e))
++    else:
++        g = grammar.Grammar()
++        g.load(gp)
++    return g
++
++
++def _newer(a, b):
++    """Inquire whether file a was written since file b."""
++    if not os.path.exists(a):
++        return False
++    if not os.path.exists(b):
++        return True
++    return os.path.getmtime(a) >= os.path.getmtime(b)
+diff -r 531f2e948299 refactor/pgen2/grammar.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/grammar.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,171 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""This module defines the data structures used to represent a grammar.
++
++These are a bit arcane because they are derived from the data
++structures used by Python's 'pgen' parser generator.
++
++There's also a table here mapping operators to their names in the
++token module; the Python tokenize module reports all operators as the
++fallback token code OP, but the parser needs the actual token code.
++
++"""
++
++# Python imports
++import pickle
++
++# Local imports
++from . import token, tokenize
++
++
++class Grammar(object):
++    """Pgen parsing tables tables conversion class.
++
++    Once initialized, this class supplies the grammar tables for the
++    parsing engine implemented by parse.py.  The parsing engine
++    accesses the instance variables directly.  The class here does not
++    provide initialization of the tables; several subclasses exist to
++    do this (see the conv and pgen modules).
++
++    The load() method reads the tables from a pickle file, which is
++    much faster than the other ways offered by subclasses.  The pickle
++    file is written by calling dump() (after loading the grammar
++    tables using a subclass).  The report() method prints a readable
++    representation of the tables to stdout, for debugging.
++
++    The instance variables are as follows:
++
++    symbol2number -- a dict mapping symbol names to numbers.  Symbol
++                     numbers are always 256 or higher, to distinguish
++                     them from token numbers, which are between 0 and
++                     255 (inclusive).
++
++    number2symbol -- a dict mapping numbers to symbol names;
++                     these two are each other's inverse.
++
++    states        -- a list of DFAs, where each DFA is a list of
++                     states, each state is a list of arcs, and each
++                     arc is an (i, j) pair where i is a label and j is
++                     a state number.  The DFA number is the index into
++                     this list.  (This name is slightly confusing.)
++                     Final states are represented by a special arc of
++                     the form (0, j) where j is its own state number.
++
++    dfas          -- a dict mapping symbol numbers to (DFA, first)
++                     pairs, where DFA is an item from the states list
++                     above, and first is a set of tokens that can
++                     begin this grammar rule (represented by a dict
++                     whose values are always 1).
++
++    labels        -- a list of (x, y) pairs where x is either a token
++                     number or a symbol number, and y is either None
++                     or a string; the strings are keywords.  The label
++                     number is the index in this list; label numbers
++                     are used to mark state transitions (arcs) in the
++                     DFAs.
++
++    start         -- the number of the grammar's start symbol.
++
++    keywords      -- a dict mapping keyword strings to arc labels.
++
++    tokens        -- a dict mapping token numbers to arc labels.
++
++    """
++
++    def __init__(self):
++        self.symbol2number = {}
++        self.number2symbol = {}
++        self.states = []
++        self.dfas = {}
++        self.labels = [(0, "EMPTY")]
++        self.keywords = {}
++        self.tokens = {}
++        self.symbol2label = {}
++        self.start = 256
++
++    def dump(self, filename):
++        """Dump the grammar tables to a pickle file."""
++        f = open(filename, "wb")
++        pickle.dump(self.__dict__, f, 2)
++        f.close()
++
++    def load(self, filename):
++        """Load the grammar tables from a pickle file."""
++        f = open(filename, "rb")
++        d = pickle.load(f)
++        f.close()
++        self.__dict__.update(d)
++
++    def report(self):
++        """Dump the grammar tables to standard output, for debugging."""
++        from pprint import pprint
++        print "s2n"
++        pprint(self.symbol2number)
++        print "n2s"
++        pprint(self.number2symbol)
++        print "states"
++        pprint(self.states)
++        print "dfas"
++        pprint(self.dfas)
++        print "labels"
++        pprint(self.labels)
++        print "start", self.start
++
++
++# Map from operator to number (since tokenize doesn't do this)
++
++opmap_raw = """
++( LPAR
++) RPAR
++[ LSQB
++] RSQB
++: COLON
++, COMMA
++; SEMI
+++ PLUS
++- MINUS
++* STAR
++/ SLASH
++| VBAR
++& AMPER
++< LESS
++> GREATER
++= EQUAL
++. DOT
++% PERCENT
++` BACKQUOTE
++{ LBRACE
++} RBRACE
++@ AT
++== EQEQUAL
++!= NOTEQUAL
++<> NOTEQUAL
++<= LESSEQUAL
++>= GREATEREQUAL
++~ TILDE
++^ CIRCUMFLEX
++<< LEFTSHIFT
++>> RIGHTSHIFT
++** DOUBLESTAR
+++= PLUSEQUAL
++-= MINEQUAL
++*= STAREQUAL
++/= SLASHEQUAL
++%= PERCENTEQUAL
++&= AMPEREQUAL
++|= VBAREQUAL
++^= CIRCUMFLEXEQUAL
++<<= LEFTSHIFTEQUAL
++>>= RIGHTSHIFTEQUAL
++**= DOUBLESTAREQUAL
++// DOUBLESLASH
++//= DOUBLESLASHEQUAL
++-> RARROW
++"""
++
++opmap = {}
++for line in opmap_raw.splitlines():
++    if line:
++        op, name = line.split()
++        opmap[op] = getattr(token, name)
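
A small sketch of how the opmap table is meant to be used: the tokenizer reports every operator as OP, and the parser remaps it to the specific token number via this table (cf. parse_tokens() in driver.py; refactor package assumed importable):

    from refactor.pgen2 import grammar, token   # assumed import path

    print grammar.opmap["=="] == token.EQEQUAL   # True
    print grammar.opmap["->"] == token.RARROW    # True
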
+diff -r 531f2e948299 refactor/pgen2/literals.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/literals.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,60 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Safely evaluate Python string literals without using eval()."""
++
++import re
++
++simple_escapes = {"a": "\a",
++                  "b": "\b",
++                  "f": "\f",
++                  "n": "\n",
++                  "r": "\r",
++                  "t": "\t",
++                  "v": "\v",
++                  "'": "'",
++                  '"': '"',
++                  "\\": "\\"}
++
++def escape(m):
++    all, tail = m.group(0, 1)
++    assert all.startswith("\\")
++    esc = simple_escapes.get(tail)
++    if esc is not None:
++        return esc
++    if tail.startswith("x"):
++        hexes = tail[1:]
++        if len(hexes) < 2:
++            raise ValueError("invalid hex string escape ('\\%s')" % tail)
++        try:
++            i = int(hexes, 16)
++        except ValueError:
++            raise ValueError("invalid hex string escape ('\\%s')" % tail)
++    else:
++        try:
++            i = int(tail, 8)
++        except ValueError:
++            raise ValueError("invalid octal string escape ('\\%s')" % tail)
++    return chr(i)
++
++def evalString(s):
++    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
++    q = s[0]
++    if s[:3] == q*3:
++        q = q*3
++    assert s.endswith(q), repr(s[-len(q):])
++    assert len(s) >= 2*len(q)
++    s = s[len(q):-len(q)]
++    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
++
++def test():
++    for i in range(256):
++        c = chr(i)
++        s = repr(c)
++        e = evalString(s)
++        if e != c:
++            print i, c, s, e
++
++
++if __name__ == "__main__":
++    test()
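
Usage sketch for evalString() above (refactor package assumed importable): it decodes a quoted Python literal, handling simple, hex, and octal escapes, without calling eval().

    from refactor.pgen2 import literals   # assumed import path

    # \t stays a tab, \x41 and \101 both decode to 'A':
    print literals.evalString(r"'a\tb\x41\101'") == "a\tbAA"   # True
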
+diff -r 531f2e948299 refactor/pgen2/parse.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/parse.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,201 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Parser engine for the grammar tables generated by pgen.
++
++The grammar table must be loaded first.
++
++See Parser/parser.c in the Python distribution for additional info on
++how this parsing engine works.
++
++"""
++
++# Local imports
++from . import token
++
++class ParseError(Exception):
++    """Exception to signal the parser is stuck."""
++
++    def __init__(self, msg, type, value, context):
++        Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
++                           (msg, type, value, context))
++        self.msg = msg
++        self.type = type
++        self.value = value
++        self.context = context
++
++class Parser(object):
++    """Parser engine.
++
++    The proper usage sequence is:
++
++    p = Parser(grammar, [converter])  # create instance
++    p.setup([start])                  # prepare for parsing
++    <for each input token>:
++        if p.addtoken(...):           # parse a token; may raise ParseError
++            break
++    root = p.rootnode                 # root of abstract syntax tree
++
++    A Parser instance may be reused by calling setup() repeatedly.
++
++    A Parser instance contains state pertaining to the current token
++    sequence, and should not be used concurrently by different threads
++    to parse separate token sequences.
++
++    See driver.py for how to get input tokens by tokenizing a file or
++    string.
++
++    Parsing is complete when addtoken() returns True; the root of the
++    abstract syntax tree can then be retrieved from the rootnode
++    instance variable.  When a syntax error occurs, addtoken() raises
++    the ParseError exception.  There is no error recovery; the parser
++    cannot be used after a syntax error was reported (but it can be
++    reinitialized by calling setup()).
++
++    """
++
++    def __init__(self, grammar, convert=None):
++        """Constructor.
++
++        The grammar argument is a grammar.Grammar instance; see the
++        grammar module for more information.
++
++        The parser is not ready yet for parsing; you must call the
++        setup() method to get it started.
++
++        The optional convert argument is a function mapping concrete
++        syntax tree nodes to abstract syntax tree nodes.  If not
++        given, no conversion is done and the syntax tree produced is
++        the concrete syntax tree.  If given, it must be a function of
++        two arguments, the first being the grammar (a grammar.Grammar
++        instance), and the second being the concrete syntax tree node
++        to be converted.  The syntax tree is converted from the bottom
++        up.
++
++        A concrete syntax tree node is a (type, value, context, nodes)
++        tuple, where type is the node type (a token or symbol number),
++        value is None for symbols and a string for tokens, context is
++        None or an opaque value used for error reporting (typically a
++        (lineno, offset) pair), and nodes is a list of children for
++        symbols, and None for tokens.
++
++        An abstract syntax tree node may be anything; this is entirely
++        up to the converter function.
++
++        """
++        self.grammar = grammar
++        self.convert = convert or (lambda grammar, node: node)
++
++    def setup(self, start=None):
++        """Prepare for parsing.
++
++        This *must* be called before starting to parse.
++
++        The optional argument is an alternative start symbol; it
++        defaults to the grammar's start symbol.
++
++        You can use a Parser instance to parse any number of programs;
++        each time you call setup() the parser is reset to an initial
++        state determined by the (implicit or explicit) start symbol.
++
++        """
++        if start is None:
++            start = self.grammar.start
++        # Each stack entry is a tuple: (dfa, state, node).
++        # A node is a tuple: (type, value, context, children),
++        # where children is a list of nodes or None, and context may be None.
++        newnode = (start, None, None, [])
++        stackentry = (self.grammar.dfas[start], 0, newnode)
++        self.stack = [stackentry]
++        self.rootnode = None
++        self.used_names = set() # Aliased to self.rootnode.used_names in pop()
++
++    def addtoken(self, type, value, context):
++        """Add a token; return True iff this is the end of the program."""
++        # Map from token to label
++        ilabel = self.classify(type, value, context)
++        # Loop until the token is shifted; may raise exceptions
++        while True:
++            dfa, state, node = self.stack[-1]
++            states, first = dfa
++            arcs = states[state]
++            # Look for a state with this label
++            for i, newstate in arcs:
++                t, v = self.grammar.labels[i]
++                if ilabel == i:
++                    # Look it up in the list of labels
++                    assert t < 256
++                    # Shift a token; we're done with it
++                    self.shift(type, value, newstate, context)
++                    # Pop while we are in an accept-only state
++                    state = newstate
++                    while states[state] == [(0, state)]:
++                        self.pop()
++                        if not self.stack:
++                            # Done parsing!
++                            return True
++                        dfa, state, node = self.stack[-1]
++                        states, first = dfa
++                    # Done with this token
++                    return False
++                elif t >= 256:
++                    # See if it's a symbol and if we're in its first set
++                    itsdfa = self.grammar.dfas[t]
++                    itsstates, itsfirst = itsdfa
++                    if ilabel in itsfirst:
++                        # Push a symbol
++                        self.push(t, self.grammar.dfas[t], newstate, context)
++                        break # To continue the outer while loop
++            else:
++                if (0, state) in arcs:
++                    # An accepting state, pop it and try something else
++                    self.pop()
++                    if not self.stack:
++                        # Done parsing, but another token is input
++                        raise ParseError("too much input",
++                                         type, value, context)
++                else:
++                    # No success finding a transition
++                    raise ParseError("bad input", type, value, context)
++
++    def classify(self, type, value, context):
++        """Turn a token into a label.  (Internal)"""
++        if type == token.NAME:
++            # Keep a listing of all used names
++            self.used_names.add(value)
++            # Check for reserved words
++            ilabel = self.grammar.keywords.get(value)
++            if ilabel is not None:
++                return ilabel
++        ilabel = self.grammar.tokens.get(type)
++        if ilabel is None:
++            raise ParseError("bad token", type, value, context)
++        return ilabel
++
++    def shift(self, type, value, newstate, context):
++        """Shift a token.  (Internal)"""
++        dfa, state, node = self.stack[-1]
++        newnode = (type, value, context, None)
++        newnode = self.convert(self.grammar, newnode)
++        if newnode is not None:
++            node[-1].append(newnode)
++        self.stack[-1] = (dfa, newstate, node)
++
++    def push(self, type, newdfa, newstate, context):
++        """Push a nonterminal.  (Internal)"""
++        dfa, state, node = self.stack[-1]
++        newnode = (type, None, context, [])
++        self.stack[-1] = (dfa, newstate, node)
++        self.stack.append((newdfa, 0, newnode))
++
++    def pop(self):
++        """Pop a nonterminal.  (Internal)"""
++        popdfa, popstate, popnode = self.stack.pop()
++        newnode = self.convert(self.grammar, popnode)
++        if newnode is not None:
++            if self.stack:
++                dfa, state, node = self.stack[-1]
++                node[-1].append(newnode)
++            else:
++                self.rootnode = newnode
++                self.rootnode.used_names = self.used_names
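
The methods above implement a table-driven LL(1) parser: classify() turns a
token into a grammar label, shift() consumes it, and push()/pop() grow and
shrink the stack of (dfa, state, node) triples until the start symbol is
reduced.  A minimal usage sketch (load_grammar() is confirmed by pygram.py
later in this diff; the Driver class and its parse_string() method are
assumed to keep the usual pgen2 interface):

    from refactor.pgen2 import driver
    from refactor import pytree

    g = driver.load_grammar("Grammar.txt")        # build the parse tables
    d = driver.Driver(g, convert=pytree.convert)  # feeds tokens to the parser
    tree = d.parse_string("x = 1\n")              # root pytree.Node
    print str(tree)                               # reproduces the source text
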
+diff -r 531f2e948299 refactor/pgen2/pgen.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/pgen.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,384 @@
++# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++# Pgen imports
++from . import grammar, token, tokenize
++
++class PgenGrammar(grammar.Grammar):
++    pass
++
++class ParserGenerator(object):
++
++    def __init__(self, filename, stream=None):
++        close_stream = None
++        if stream is None:
++            stream = open(filename)
++            close_stream = stream.close
++        self.filename = filename
++        self.stream = stream
++        self.generator = tokenize.generate_tokens(stream.readline)
++        self.gettoken() # Initialize lookahead
++        self.dfas, self.startsymbol = self.parse()
++        if close_stream is not None:
++            close_stream()
++        self.first = {} # map from symbol name to set of tokens
++        self.addfirstsets()
++
++    def make_grammar(self):
++        c = PgenGrammar()
++        names = self.dfas.keys()
++        names.sort()
++        names.remove(self.startsymbol)
++        names.insert(0, self.startsymbol)
++        for name in names:
++            i = 256 + len(c.symbol2number)
++            c.symbol2number[name] = i
++            c.number2symbol[i] = name
++        for name in names:
++            dfa = self.dfas[name]
++            states = []
++            for state in dfa:
++                arcs = []
++                for label, next in state.arcs.iteritems():
++                    arcs.append((self.make_label(c, label), dfa.index(next)))
++                if state.isfinal:
++                    arcs.append((0, dfa.index(state)))
++                states.append(arcs)
++            c.states.append(states)
++            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
++        c.start = c.symbol2number[self.startsymbol]
++        return c
++
++    def make_first(self, c, name):
++        rawfirst = self.first[name]
++        first = {}
++        for label in rawfirst:
++            ilabel = self.make_label(c, label)
++            ##assert ilabel not in first # XXX failed on <> ... !=
++            first[ilabel] = 1
++        return first
++
++    def make_label(self, c, label):
++        # XXX Maybe this should be a method on a subclass of converter?
++        ilabel = len(c.labels)
++        if label[0].isalpha():
++            # Either a symbol name or a named token
++            if label in c.symbol2number:
++                # A symbol name (a non-terminal)
++                if label in c.symbol2label:
++                    return c.symbol2label[label]
++                else:
++                    c.labels.append((c.symbol2number[label], None))
++                    c.symbol2label[label] = ilabel
++                    return ilabel
++            else:
++                # A named token (NAME, NUMBER, STRING)
++                itoken = getattr(token, label, None)
++                assert isinstance(itoken, int), label
++                assert itoken in token.tok_name, label
++                if itoken in c.tokens:
++                    return c.tokens[itoken]
++                else:
++                    c.labels.append((itoken, None))
++                    c.tokens[itoken] = ilabel
++                    return ilabel
++        else:
++            # Either a keyword or an operator
++            assert label[0] in ('"', "'"), label
++            value = eval(label)
++            if value[0].isalpha():
++                # A keyword
++                if value in c.keywords:
++                    return c.keywords[value]
++                else:
++                    c.labels.append((token.NAME, value))
++                    c.keywords[value] = ilabel
++                    return ilabel
++            else:
++                # An operator (any non-numeric token)
++                itoken = grammar.opmap[value] # Fails if unknown token
++                if itoken in c.tokens:
++                    return c.tokens[itoken]
++                else:
++                    c.labels.append((itoken, None))
++                    c.tokens[itoken] = ilabel
++                    return ilabel
++
++    def addfirstsets(self):
++        names = self.dfas.keys()
++        names.sort()
++        for name in names:
++            if name not in self.first:
++                self.calcfirst(name)
++            #print name, self.first[name].keys()
++
++    def calcfirst(self, name):
++        dfa = self.dfas[name]
++        self.first[name] = None # dummy to detect left recursion
++        state = dfa[0]
++        totalset = {}
++        overlapcheck = {}
++        for label, next in state.arcs.iteritems():
++            if label in self.dfas:
++                if label in self.first:
++                    fset = self.first[label]
++                    if fset is None:
++                        raise ValueError("recursion for rule %r" % name)
++                else:
++                    self.calcfirst(label)
++                    fset = self.first[label]
++                totalset.update(fset)
++                overlapcheck[label] = fset
++            else:
++                totalset[label] = 1
++                overlapcheck[label] = {label: 1}
++        inverse = {}
++        for label, itsfirst in overlapcheck.iteritems():
++            for symbol in itsfirst:
++                if symbol in inverse:
++                    raise ValueError("rule %s is ambiguous; %s is in the"
++                                     " first sets of %s as well as %s" %
++                                     (name, symbol, label, inverse[symbol]))
++                inverse[symbol] = label
++        self.first[name] = totalset
++
++    def parse(self):
++        dfas = {}
++        startsymbol = None
++        # MSTART: (NEWLINE | RULE)* ENDMARKER
++        while self.type != token.ENDMARKER:
++            while self.type == token.NEWLINE:
++                self.gettoken()
++            # RULE: NAME ':' RHS NEWLINE
++            name = self.expect(token.NAME)
++            self.expect(token.OP, ":")
++            a, z = self.parse_rhs()
++            self.expect(token.NEWLINE)
++            #self.dump_nfa(name, a, z)
++            dfa = self.make_dfa(a, z)
++            #self.dump_dfa(name, dfa)
++            oldlen = len(dfa)
++            self.simplify_dfa(dfa)
++            newlen = len(dfa)
++            dfas[name] = dfa
++            #print name, oldlen, newlen
++            if startsymbol is None:
++                startsymbol = name
++        return dfas, startsymbol
++
++    def make_dfa(self, start, finish):
++        # To turn an NFA into a DFA, we define the states of the DFA
++        # to correspond to *sets* of states of the NFA.  Then do some
++        # state reduction.  Let's represent sets as dicts with 1 for
++        # values.
++        assert isinstance(start, NFAState)
++        assert isinstance(finish, NFAState)
++        def closure(state):
++            base = {}
++            addclosure(state, base)
++            return base
++        def addclosure(state, base):
++            assert isinstance(state, NFAState)
++            if state in base:
++                return
++            base[state] = 1
++            for label, next in state.arcs:
++                if label is None:
++                    addclosure(next, base)
++        states = [DFAState(closure(start), finish)]
++        for state in states: # NB states grows while we're iterating
++            arcs = {}
++            for nfastate in state.nfaset:
++                for label, next in nfastate.arcs:
++                    if label is not None:
++                        addclosure(next, arcs.setdefault(label, {}))
++            for label, nfaset in arcs.iteritems():
++                for st in states:
++                    if st.nfaset == nfaset:
++                        break
++                else:
++                    st = DFAState(nfaset, finish)
++                    states.append(st)
++                state.addarc(st, label)
++        return states # List of DFAState instances; first one is start
++
++    def dump_nfa(self, name, start, finish):
++        print "Dump of NFA for", name
++        todo = [start]
++        for i, state in enumerate(todo):
++            print "  State", i, state is finish and "(final)" or ""
++            for label, next in state.arcs:
++                if next in todo:
++                    j = todo.index(next)
++                else:
++                    j = len(todo)
++                    todo.append(next)
++                if label is None:
++                    print "    -> %d" % j
++                else:
++                    print "    %s -> %d" % (label, j)
++
++    def dump_dfa(self, name, dfa):
++        print "Dump of DFA for", name
++        for i, state in enumerate(dfa):
++            print "  State", i, state.isfinal and "(final)" or ""
++            for label, next in state.arcs.iteritems():
++                print "    %s -> %d" % (label, dfa.index(next))
++
++    def simplify_dfa(self, dfa):
++        # This is not theoretically optimal, but works well enough.
++        # Algorithm: repeatedly look for two states that have the same
++        # set of arcs (same labels pointing to the same nodes) and
++        # unify them, until things stop changing.
++
++        # dfa is a list of DFAState instances
++        changes = True
++        while changes:
++            changes = False
++            for i, state_i in enumerate(dfa):
++                for j in range(i+1, len(dfa)):
++                    state_j = dfa[j]
++                    if state_i == state_j:
++                        #print "  unify", i, j
++                        del dfa[j]
++                        for state in dfa:
++                            state.unifystate(state_j, state_i)
++                        changes = True
++                        break
++
++    def parse_rhs(self):
++        # RHS: ALT ('|' ALT)*
++        a, z = self.parse_alt()
++        if self.value != "|":
++            return a, z
++        else:
++            aa = NFAState()
++            zz = NFAState()
++            aa.addarc(a)
++            z.addarc(zz)
++            while self.value == "|":
++                self.gettoken()
++                a, z = self.parse_alt()
++                aa.addarc(a)
++                z.addarc(zz)
++            return aa, zz
++
++    def parse_alt(self):
++        # ALT: ITEM+
++        a, b = self.parse_item()
++        while (self.value in ("(", "[") or
++               self.type in (token.NAME, token.STRING)):
++            c, d = self.parse_item()
++            b.addarc(c)
++            b = d
++        return a, b
++
++    def parse_item(self):
++        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
++        if self.value == "[":
++            self.gettoken()
++            a, z = self.parse_rhs()
++            self.expect(token.OP, "]")
++            a.addarc(z)
++            return a, z
++        else:
++            a, z = self.parse_atom()
++            value = self.value
++            if value not in ("+", "*"):
++                return a, z
++            self.gettoken()
++            z.addarc(a)
++            if value == "+":
++                return a, z
++            else:
++                return a, a
++
++    def parse_atom(self):
++        # ATOM: '(' RHS ')' | NAME | STRING
++        if self.value == "(":
++            self.gettoken()
++            a, z = self.parse_rhs()
++            self.expect(token.OP, ")")
++            return a, z
++        elif self.type in (token.NAME, token.STRING):
++            a = NFAState()
++            z = NFAState()
++            a.addarc(z, self.value)
++            self.gettoken()
++            return a, z
++        else:
++            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
++                             self.type, self.value)
++
++    def expect(self, type, value=None):
++        if self.type != type or (value is not None and self.value != value):
++            self.raise_error("expected %s/%s, got %s/%s",
++                             type, value, self.type, self.value)
++        value = self.value
++        self.gettoken()
++        return value
++
++    def gettoken(self):
++        tup = self.generator.next()
++        while tup[0] in (tokenize.COMMENT, tokenize.NL):
++            tup = self.generator.next()
++        self.type, self.value, self.begin, self.end, self.line = tup
++        #print token.tok_name[self.type], repr(self.value)
++
++    def raise_error(self, msg, *args):
++        if args:
++            try:
++                msg = msg % args
++            except:
++                msg = " ".join([msg] + map(str, args))
++        raise SyntaxError(msg, (self.filename, self.end[0],
++                                self.end[1], self.line))
++
++class NFAState(object):
++
++    def __init__(self):
++        self.arcs = [] # list of (label, NFAState) pairs
++
++    def addarc(self, next, label=None):
++        assert label is None or isinstance(label, str)
++        assert isinstance(next, NFAState)
++        self.arcs.append((label, next))
++
++class DFAState(object):
++
++    def __init__(self, nfaset, final):
++        assert isinstance(nfaset, dict)
++        assert isinstance(iter(nfaset).next(), NFAState)
++        assert isinstance(final, NFAState)
++        self.nfaset = nfaset
++        self.isfinal = final in nfaset
++        self.arcs = {} # map from label to DFAState
++
++    def addarc(self, next, label):
++        assert isinstance(label, str)
++        assert label not in self.arcs
++        assert isinstance(next, DFAState)
++        self.arcs[label] = next
++
++    def unifystate(self, old, new):
++        for label, next in self.arcs.iteritems():
++            if next is old:
++                self.arcs[label] = new
++
++    def __eq__(self, other):
++        # Equality test -- ignore the nfaset instance variable
++        assert isinstance(other, DFAState)
++        if self.isfinal != other.isfinal:
++            return False
++        # Can't just return self.arcs == other.arcs, because that
++        # would invoke this method recursively, with cycles...
++        if len(self.arcs) != len(other.arcs):
++            return False
++        for label, next in self.arcs.iteritems():
++            if next is not other.arcs.get(label):
++                return False
++        return True
++
++def generate_grammar(filename="Grammar.txt"):
++    p = ParserGenerator(filename)
++    return p.make_grammar()
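
generate_grammar() runs the whole pipeline defined above: parse() reads the
grammar file into NFAs, make_dfa() applies the subset construction,
simplify_dfa() merges equivalent states, and make_grammar() numbers the
symbols.  A small self-contained sketch (the grammar text and file name are
illustrative):

    from refactor.pgen2 import pgen

    open("tiny.txt", "w").write(
        "start: expr NEWLINE ENDMARKER\n"
        "expr: NAME ('+' NAME)*\n")
    g = pgen.generate_grammar("tiny.txt")
    print g.start                  # number of 'start', the first symbol
    print sorted(g.symbol2number)  # ['expr', 'start']
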
+diff -r 531f2e948299 refactor/pgen2/token.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/token.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,82 @@
++#! /usr/bin/env python
++
++"""Token constants (from "token.h")."""
++
++#  Taken from Python (r53757) and modified to include some tokens
++#   originally monkeypatched in by pgen2.tokenize
++
++#--start constants--
++ENDMARKER = 0
++NAME = 1
++NUMBER = 2
++STRING = 3
++NEWLINE = 4
++INDENT = 5
++DEDENT = 6
++LPAR = 7
++RPAR = 8
++LSQB = 9
++RSQB = 10
++COLON = 11
++COMMA = 12
++SEMI = 13
++PLUS = 14
++MINUS = 15
++STAR = 16
++SLASH = 17
++VBAR = 18
++AMPER = 19
++LESS = 20
++GREATER = 21
++EQUAL = 22
++DOT = 23
++PERCENT = 24
++BACKQUOTE = 25
++LBRACE = 26
++RBRACE = 27
++EQEQUAL = 28
++NOTEQUAL = 29
++LESSEQUAL = 30
++GREATEREQUAL = 31
++TILDE = 32
++CIRCUMFLEX = 33
++LEFTSHIFT = 34
++RIGHTSHIFT = 35
++DOUBLESTAR = 36
++PLUSEQUAL = 37
++MINEQUAL = 38
++STAREQUAL = 39
++SLASHEQUAL = 40
++PERCENTEQUAL = 41
++AMPEREQUAL = 42
++VBAREQUAL = 43
++CIRCUMFLEXEQUAL = 44
++LEFTSHIFTEQUAL = 45
++RIGHTSHIFTEQUAL = 46
++DOUBLESTAREQUAL = 47
++DOUBLESLASH = 48
++DOUBLESLASHEQUAL = 49
++AT = 50
++OP = 51
++COMMENT = 52
++NL = 53
++RARROW = 54
++ERRORTOKEN = 55
++N_TOKENS = 56
++NT_OFFSET = 256
++#--end constants--
++
++tok_name = {}
++for _name, _value in globals().items():
++    if type(_value) is type(0):
++        tok_name[_value] = _name
++
++
++def ISTERMINAL(x):
++    return x < NT_OFFSET
++
++def ISNONTERMINAL(x):
++    return x >= NT_OFFSET
++
++def ISEOF(x):
++    return x == ENDMARKER
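
Since tok_name inverts the constant table at import time, numeric token
types can be reported symbolically; for example:

    from refactor.pgen2 import token

    print token.tok_name[token.NAME]     # 'NAME'
    print token.ISTERMINAL(token.NAME)   # True: token numbers are < 256
    print token.ISNONTERMINAL(256)       # True: symbol numbers start at 256
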
+diff -r 531f2e948299 refactor/pgen2/tokenize.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pgen2/tokenize.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,405 @@
++# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
++# All rights reserved.
++
++"""Tokenization help for Python programs.
++
++generate_tokens(readline) is a generator that breaks a stream of
++text into Python tokens.  It accepts a readline-like method which is called
++repeatedly to get the next line of input (or "" for EOF).  It generates
++5-tuples with these members:
++
++    the token type (see token.py)
++    the token (a string)
++    the starting (row, column) indices of the token (a 2-tuple of ints)
++    the ending (row, column) indices of the token (a 2-tuple of ints)
++    the original line (string)
++
++It is designed to match the working of the Python tokenizer exactly, except
++that it produces COMMENT tokens for comments and gives type OP for all
++operators.
++
++Older entry points
++    tokenize_loop(readline, tokeneater)
++    tokenize(readline, tokeneater=printtoken)
++are the same, except instead of generating tokens, tokeneater is a callback
++function to which the 5 fields described above are passed as 5 arguments,
++each time a new token is found."""
++
++__author__ = 'Ka-Ping Yee <ping at lfw.org>'
++__credits__ = \
++    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
++
++import string, re
++from .token import *
++
++from . import token
++__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
++           "generate_tokens", "untokenize"]
++del token
++
++def group(*choices): return '(' + '|'.join(choices) + ')'
++def any(*choices): return group(*choices) + '*'
++def maybe(*choices): return group(*choices) + '?'
++
++Whitespace = r'[ \f\t]*'
++Comment = r'#[^\r\n]*'
++Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
++Name = r'[a-zA-Z_]\w*'
++
++Binnumber = r'0[bB][01]*'
++Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
++Octnumber = r'0[oO]?[0-7]*[lL]?'
++Decnumber = r'[1-9]\d*[lL]?'
++Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
++Exponent = r'[eE][-+]?\d+'
++Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
++Expfloat = r'\d+' + Exponent
++Floatnumber = group(Pointfloat, Expfloat)
++Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
++Number = group(Imagnumber, Floatnumber, Intnumber)
++
++# Tail end of ' string.
++Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
++# Tail end of " string.
++Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
++# Tail end of ''' string.
++Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
++# Tail end of """ string.
++Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
++Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
++# Single-line ' or " string.
++String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
++               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
++
++# Because of leftmost-then-longest match semantics, be sure to put the
++# longest operators first (e.g., if = came before ==, == would get
++# recognized as two instances of =).
++Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
++                 r"//=?", r"->",
++                 r"[+\-*/%&|^=<>]=?",
++                 r"~")
++
++Bracket = '[][(){}]'
++Special = group(r'\r?\n', r'[:;.,`@]')
++Funny = group(Operator, Bracket, Special)
++
++PlainToken = group(Number, Funny, String, Name)
++Token = Ignore + PlainToken
++
++# First (or only) line of ' or " string.
++ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
++                group("'", r'\\\r?\n'),
++                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
++                group('"', r'\\\r?\n'))
++PseudoExtras = group(r'\\\r?\n', Comment, Triple)
++PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
++
++tokenprog, pseudoprog, single3prog, double3prog = map(
++    re.compile, (Token, PseudoToken, Single3, Double3))
++endprogs = {"'": re.compile(Single), '"': re.compile(Double),
++            "'''": single3prog, '"""': double3prog,
++            "r'''": single3prog, 'r"""': double3prog,
++            "u'''": single3prog, 'u"""': double3prog,
++            "b'''": single3prog, 'b"""': double3prog,
++            "ur'''": single3prog, 'ur"""': double3prog,
++            "br'''": single3prog, 'br"""': double3prog,
++            "R'''": single3prog, 'R"""': double3prog,
++            "U'''": single3prog, 'U"""': double3prog,
++            "B'''": single3prog, 'B"""': double3prog,
++            "uR'''": single3prog, 'uR"""': double3prog,
++            "Ur'''": single3prog, 'Ur"""': double3prog,
++            "UR'''": single3prog, 'UR"""': double3prog,
++            "bR'''": single3prog, 'bR"""': double3prog,
++            "Br'''": single3prog, 'Br"""': double3prog,
++            "BR'''": single3prog, 'BR"""': double3prog,
++            'r': None, 'R': None,
++            'u': None, 'U': None,
++            'b': None, 'B': None}
++
++triple_quoted = {}
++for t in ("'''", '"""',
++          "r'''", 'r"""', "R'''", 'R"""',
++          "u'''", 'u"""', "U'''", 'U"""',
++          "b'''", 'b"""', "B'''", 'B"""',
++          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
++          "uR'''", 'uR"""', "UR'''", 'UR"""',
++          "br'''", 'br"""', "Br'''", 'Br"""',
++          "bR'''", 'bR"""', "BR'''", 'BR"""',):
++    triple_quoted[t] = t
++single_quoted = {}
++for t in ("'", '"',
++          "r'", 'r"', "R'", 'R"',
++          "u'", 'u"', "U'", 'U"',
++          "b'", 'b"', "B'", 'B"',
++          "ur'", 'ur"', "Ur'", 'Ur"',
++          "uR'", 'uR"', "UR'", 'UR"',
++          "br'", 'br"', "Br'", 'Br"',
++          "bR'", 'bR"', "BR'", 'BR"', ):
++    single_quoted[t] = t
++
++tabsize = 8
++
++class TokenError(Exception): pass
++
++class StopTokenizing(Exception): pass
++
++def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
++    print "%d,%d-%d,%d:\t%s\t%s" % \
++        (srow, scol, erow, ecol, tok_name[type], repr(token))
++
++def tokenize(readline, tokeneater=printtoken):
++    """
++    The tokenize() function accepts two parameters: one representing the
++    input stream, and one providing an output mechanism for tokenize().
++
++    The first parameter, readline, must be a callable object which provides
++    the same interface as the readline() method of built-in file objects.
++    Each call to the function should return one line of input as a string.
++
++    The second parameter, tokeneater, must also be a callable object. It is
++    called once for each token, with five arguments, corresponding to the
++    tuples generated by generate_tokens().
++    """
++    try:
++        tokenize_loop(readline, tokeneater)
++    except StopTokenizing:
++        pass
++
++# backwards compatible interface
++def tokenize_loop(readline, tokeneater):
++    for token_info in generate_tokens(readline):
++        tokeneater(*token_info)
++
++class Untokenizer:
++
++    def __init__(self):
++        self.tokens = []
++        self.prev_row = 1
++        self.prev_col = 0
++
++    def add_whitespace(self, start):
++        row, col = start
++        assert row <= self.prev_row
++        col_offset = col - self.prev_col
++        if col_offset:
++            self.tokens.append(" " * col_offset)
++
++    def untokenize(self, iterable):
++        for t in iterable:
++            if len(t) == 2:
++                self.compat(t, iterable)
++                break
++            tok_type, token, start, end, line = t
++            self.add_whitespace(start)
++            self.tokens.append(token)
++            self.prev_row, self.prev_col = end
++            if tok_type in (NEWLINE, NL):
++                self.prev_row += 1
++                self.prev_col = 0
++        return "".join(self.tokens)
++
++    def compat(self, token, iterable):
++        startline = False
++        indents = []
++        toks_append = self.tokens.append
++        toknum, tokval = token
++        if toknum in (NAME, NUMBER):
++            tokval += ' '
++        if toknum in (NEWLINE, NL):
++            startline = True
++        for tok in iterable:
++            toknum, tokval = tok[:2]
++
++            if toknum in (NAME, NUMBER):
++                tokval += ' '
++
++            if toknum == INDENT:
++                indents.append(tokval)
++                continue
++            elif toknum == DEDENT:
++                indents.pop()
++                continue
++            elif toknum in (NEWLINE, NL):
++                startline = True
++            elif startline and indents:
++                toks_append(indents[-1])
++                startline = False
++            toks_append(tokval)
++
++def untokenize(iterable):
++    """Transform tokens back into Python source code.
++
++    Each element returned by the iterable must be a token sequence
++    with at least two elements, a token number and token value.  If
++    only two tokens are passed, the resulting output is poor.
++
++    Round-trip invariant for full input:
++        Untokenized source will match input source exactly
++
++    Round-trip invariant for limited input:
++        # Output text will tokenize back to the input
++        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
++        newcode = untokenize(t1)
++        readline = iter(newcode.splitlines(1)).next
++        t2 = [tok[:2] for tok in generate_tokens(readline)]
++        assert t1 == t2
++    """
++    ut = Untokenizer()
++    return ut.untokenize(iterable)
++
++def generate_tokens(readline):
++    """
++    The generate_tokens() generator requires one argument, readline, which
++    must be a callable object which provides the same interface as the
++    readline() method of built-in file objects. Each call to the function
++    should return one line of input as a string.  Alternately, readline
++    can be a callable function terminating with StopIteration:
++        readline = open(myfile).next    # Example of alternate readline
++
++    The generator produces 5-tuples with these members: the token type; the
++    token string; a 2-tuple (srow, scol) of ints specifying the row and
++    column where the token begins in the source; a 2-tuple (erow, ecol) of
++    ints specifying the row and column where the token ends in the source;
++    and the line on which the token was found. The line passed is the
++    logical line; continuation lines are included.
++    """
++    lnum = parenlev = continued = 0
++    namechars, numchars = string.ascii_letters + '_', '0123456789'
++    contstr, needcont = '', 0
++    contline = None
++    indents = [0]
++
++    while 1:                                   # loop over lines in stream
++        try:
++            line = readline()
++        except StopIteration:
++            line = ''
++        lnum = lnum + 1
++        pos, max = 0, len(line)
++
++        if contstr:                            # continued string
++            if not line:
++                raise TokenError, ("EOF in multi-line string", strstart)
++            endmatch = endprog.match(line)
++            if endmatch:
++                pos = end = endmatch.end(0)
++                yield (STRING, contstr + line[:end],
++                       strstart, (lnum, end), contline + line)
++                contstr, needcont = '', 0
++                contline = None
++            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
++                yield (ERRORTOKEN, contstr + line,
++                           strstart, (lnum, len(line)), contline)
++                contstr = ''
++                contline = None
++                continue
++            else:
++                contstr = contstr + line
++                contline = contline + line
++                continue
++
++        elif parenlev == 0 and not continued:  # new statement
++            if not line: break
++            column = 0
++            while pos < max:                   # measure leading whitespace
++                if line[pos] == ' ': column = column + 1
++                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
++                elif line[pos] == '\f': column = 0
++                else: break
++                pos = pos + 1
++            if pos == max: break
++
++            if line[pos] in '#\r\n':           # skip comments or blank lines
++                if line[pos] == '#':
++                    comment_token = line[pos:].rstrip('\r\n')
++                    nl_pos = pos + len(comment_token)
++                    yield (COMMENT, comment_token,
++                           (lnum, pos), (lnum, pos + len(comment_token)), line)
++                    yield (NL, line[nl_pos:],
++                           (lnum, nl_pos), (lnum, len(line)), line)
++                else:
++                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
++                           (lnum, pos), (lnum, len(line)), line)
++                continue
++
++            if column > indents[-1]:           # count indents or dedents
++                indents.append(column)
++                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
++            while column < indents[-1]:
++                if column not in indents:
++                    raise IndentationError(
++                        "unindent does not match any outer indentation level",
++                        ("<tokenize>", lnum, pos, line))
++                indents = indents[:-1]
++                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
++
++        else:                                  # continued statement
++            if not line:
++                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
++            continued = 0
++
++        while pos < max:
++            pseudomatch = pseudoprog.match(line, pos)
++            if pseudomatch:                                # scan for tokens
++                start, end = pseudomatch.span(1)
++                spos, epos, pos = (lnum, start), (lnum, end), end
++                token, initial = line[start:end], line[start]
++
++                if initial in numchars or \
++                   (initial == '.' and token != '.'):      # ordinary number
++                    yield (NUMBER, token, spos, epos, line)
++                elif initial in '\r\n':
++                    newline = NEWLINE
++                    if parenlev > 0:
++                        newline = NL
++                    yield (newline, token, spos, epos, line)
++                elif initial == '#':
++                    assert not token.endswith("\n")
++                    yield (COMMENT, token, spos, epos, line)
++                elif token in triple_quoted:
++                    endprog = endprogs[token]
++                    endmatch = endprog.match(line, pos)
++                    if endmatch:                           # all on one line
++                        pos = endmatch.end(0)
++                        token = line[start:pos]
++                        yield (STRING, token, spos, (lnum, pos), line)
++                    else:
++                        strstart = (lnum, start)           # multiple lines
++                        contstr = line[start:]
++                        contline = line
++                        break
++                elif initial in single_quoted or \
++                    token[:2] in single_quoted or \
++                    token[:3] in single_quoted:
++                    if token[-1] == '\n':                  # continued string
++                        strstart = (lnum, start)
++                        endprog = (endprogs[initial] or endprogs[token[1]] or
++                                   endprogs[token[2]])
++                        contstr, needcont = line[start:], 1
++                        contline = line
++                        break
++                    else:                                  # ordinary string
++                        yield (STRING, token, spos, epos, line)
++                elif initial in namechars:                 # ordinary name
++                    yield (NAME, token, spos, epos, line)
++                elif initial == '\\':                      # continued stmt
++                    # This yield is new; needed for better idempotency:
++                    yield (NL, token, spos, (lnum, pos), line)
++                    continued = 1
++                else:
++                    if initial in '([{': parenlev = parenlev + 1
++                    elif initial in ')]}': parenlev = parenlev - 1
++                    yield (OP, token, spos, epos, line)
++            else:
++                yield (ERRORTOKEN, line[pos],
++                           (lnum, pos), (lnum, pos+1), line)
++                pos = pos + 1
++
++    for indent in indents[1:]:                 # pop remaining indent levels
++        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
++    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
++
++if __name__ == '__main__':                     # testing
++    import sys
++    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
++    else: tokenize(sys.stdin.readline)
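
A short round-trip sketch of the two public entry points (per the docstrings
above, full 5-tuples untokenize back to the exact input text):

    from StringIO import StringIO
    from refactor.pgen2 import tokenize

    src = "x = 1  # note\n"
    toks = list(tokenize.generate_tokens(StringIO(src).readline))
    for typ, val, start, end, line in toks:
        print tokenize.tok_name[typ], repr(val)   # NAME 'x', OP '=', ...
    assert tokenize.untokenize(toks) == src       # exact round-trip
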
+diff -r 531f2e948299 refactor/pygram.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pygram.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,31 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Export the Python grammar and symbols."""
++
++# Python imports
++import os
++
++# Local imports
++from .pgen2 import token
++from .pgen2 import driver
++from . import pytree
++
++# The grammar file
++_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
++
++
++class Symbols(object):
++
++    def __init__(self, grammar):
++        """Initializer.
++
++        Creates an attribute for each grammar symbol (nonterminal),
++        whose value is the symbol's type (an int >= 256).
++        """
++        for name, symbol in grammar.symbol2number.iteritems():
++            setattr(self, name, symbol)
++
++
++python_grammar = driver.load_grammar(_GRAMMAR_FILE)
++python_symbols = Symbols(python_grammar)
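
Symbols simply lifts every nonterminal into an attribute, so client code can
compare node types by name.  The exact numbers depend on the grammar, so the
values shown here are illustrative:

    from refactor import pygram

    syms = pygram.python_symbols
    print syms.file_input                    # e.g. 257
    print pygram.python_grammar.number2symbol[syms.file_input]  # 'file_input'
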
+diff -r 531f2e948299 refactor/pytree.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/pytree.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,846 @@
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""
++Python parse tree definitions.
++
++This is a very concrete parse tree; we need to keep every token and
++even the comments and whitespace between tokens.
++
++There's also a pattern matching implementation here.
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++import sys
++from StringIO import StringIO
++
++
++HUGE = 0x7FFFFFFF  # maximum repeat count, default max
++
++_type_reprs = {}
++def type_repr(type_num):
++    global _type_reprs
++    if not _type_reprs:
++        from .pygram import python_symbols
++        # printing tokens is possible but not as useful
++        # from .pgen2 import token // token.__dict__.items():
++        for name, val in python_symbols.__dict__.items():
++            if type(val) == int: _type_reprs[val] = name
++    return _type_reprs.setdefault(type_num, type_num)
++
++
++class Base(object):
++
++    """
++    Abstract base class for Node and Leaf.
++
++    This provides some default functionality and boilerplate using the
++    template pattern.
++
++    A node may be a subnode of at most one parent.
++    """
++
++    # Default values for instance variables
++    type = None    # int: token number (< 256) or symbol number (>= 256)
++    parent = None  # Parent node pointer, or None
++    children = ()  # Tuple of subnodes
++    was_changed = False
++
++    def __new__(cls, *args, **kwds):
++        """Constructor that prevents Base from being instantiated."""
++        assert cls is not Base, "Cannot instantiate Base"
++        return object.__new__(cls)
++
++    def __eq__(self, other):
++        """
++        Compare two nodes for equality.
++
++        This calls the method _eq().
++        """
++        if self.__class__ is not other.__class__:
++            return NotImplemented
++        return self._eq(other)
++
++    def __ne__(self, other):
++        """
++        Compare two nodes for inequality.
++
++        This calls the method _eq().
++        """
++        if self.__class__ is not other.__class__:
++            return NotImplemented
++        return not self._eq(other)
++
++    def _eq(self, other):
++        """
++        Compare two nodes for equality.
++
++        This is called by __eq__ and __ne__.  It is only called if the two nodes
++        have the same type.  This must be implemented by the concrete subclass.
++        Nodes should be considered equal if they have the same structure,
++        ignoring the prefix string and other context information.
++        """
++        raise NotImplementedError
++
++    def clone(self):
++        """
++        Return a cloned (deep) copy of self.
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def post_order(self):
++        """
++        Return a post-order iterator for the tree.
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def pre_order(self):
++        """
++        Return a pre-order iterator for the tree.
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def set_prefix(self, prefix):
++        """
++        Set the prefix for the node (see Leaf class).
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def get_prefix(self):
++        """
++        Return the prefix for the node (see Leaf class).
++
++        This must be implemented by the concrete subclass.
++        """
++        raise NotImplementedError
++
++    def replace(self, new):
++        """Replace this node with a new one in the parent."""
++        assert self.parent is not None, str(self)
++        assert new is not None
++        if not isinstance(new, list):
++            new = [new]
++        l_children = []
++        found = False
++        for ch in self.parent.children:
++            if ch is self:
++                assert not found, (self.parent.children, self, new)
++                if new is not None:
++                    l_children.extend(new)
++                found = True
++            else:
++                l_children.append(ch)
++        assert found, (self.children, self, new)
++        self.parent.changed()
++        self.parent.children = l_children
++        for x in new:
++            x.parent = self.parent
++        self.parent = None
++
++    def get_lineno(self):
++        """Return the line number which generated the invocant node."""
++        node = self
++        while not isinstance(node, Leaf):
++            if not node.children:
++                return
++            node = node.children[0]
++        return node.lineno
++
++    def changed(self):
++        if self.parent:
++            self.parent.changed()
++        self.was_changed = True
++
++    def remove(self):
++        """
++        Remove the node from the tree. Returns the position of the node in its
++        parent's children before it was removed.
++        """
++        if self.parent:
++            for i, node in enumerate(self.parent.children):
++                if node is self:
++                    self.parent.changed()
++                    del self.parent.children[i]
++                    self.parent = None
++                    return i
++
++    @property
++    def next_sibling(self):
++        """
++        The node immediately following the invocant in their parent's children
++    list. If the invocant does not have a next sibling, it is None.
++        """
++        if self.parent is None:
++            return None
++
++        # Can't use index(); we need to test by identity
++        for i, child in enumerate(self.parent.children):
++            if child is self:
++                try:
++                    return self.parent.children[i+1]
++                except IndexError:
++                    return None
++
++    @property
++    def prev_sibling(self):
++        """
++        The node immediately preceding the invocant in their parent's children
++        list. If the invocant does not have a previous sibling, it is None.
++        """
++        if self.parent is None:
++            return None
++
++        # Can't use index(); we need to test by identity
++        for i, child in enumerate(self.parent.children):
++            if child is self:
++                if i == 0:
++                    return None
++                return self.parent.children[i-1]
++
++    def get_suffix(self):
++        """
++        Return the string immediately following the invocant node. This is
++        effectively equivalent to node.next_sibling.get_prefix()
++        """
++        next_sib = self.next_sibling
++        if next_sib is None:
++            return ""
++        return next_sib.get_prefix()
++
++
++class Node(Base):
++
++    """Concrete implementation for interior nodes."""
++
++    def __init__(self, type, children, context=None, prefix=None):
++        """
++        Initializer.
++
++        Takes a type constant (a symbol number >= 256), a sequence of
++        child nodes, and an optional context keyword argument.
++
++        As a side effect, the parent pointers of the children are updated.
++        """
++        assert type >= 256, type
++        self.type = type
++        self.children = list(children)
++        for ch in self.children:
++            assert ch.parent is None, repr(ch)
++            ch.parent = self
++        if prefix is not None:
++            self.set_prefix(prefix)
++
++    def __repr__(self):
++        """Return a canonical string representation."""
++        return "%s(%s, %r)" % (self.__class__.__name__,
++                               type_repr(self.type),
++                               self.children)
++
++    def __str__(self):
++        """
++        Return a pretty string representation.
++
++        This reproduces the input source exactly.
++        """
++        return "".join(map(str, self.children))
++
++    def _eq(self, other):
++        """Compare two nodes for equality."""
++        return (self.type, self.children) == (other.type, other.children)
++
++    def clone(self):
++        """Return a cloned (deep) copy of self."""
++        return Node(self.type, [ch.clone() for ch in self.children])
++
++    def post_order(self):
++        """Return a post-order iterator for the tree."""
++        for child in self.children:
++            for node in child.post_order():
++                yield node
++        yield self
++
++    def pre_order(self):
++        """Return a pre-order iterator for the tree."""
++        yield self
++        for child in self.children:
++            for node in child.pre_order():
++                yield node
++
++    def set_prefix(self, prefix):
++        """
++        Set the prefix for the node.
++
++        This passes the responsibility on to the first child.
++        """
++        if self.children:
++            self.children[0].set_prefix(prefix)
++
++    def get_prefix(self):
++        """
++        Return the prefix for the node.
++
++        This passes the call on to the first child.
++        """
++        if not self.children:
++            return ""
++        return self.children[0].get_prefix()
++
++    def set_child(self, i, child):
++        """
++        Equivalent to 'node.children[i] = child'. This method also sets the
++        child's parent attribute appropriately.
++        """
++        child.parent = self
++        self.children[i].parent = None
++        self.children[i] = child
++        self.changed()
++
++    def insert_child(self, i, child):
++        """
++        Equivalent to 'node.children.insert(i, child)'. This method also sets
++        the child's parent attribute appropriately.
++        """
++        child.parent = self
++        self.children.insert(i, child)
++        self.changed()
++
++    def append_child(self, child):
++        """
++        Equivalent to 'node.children.append(child)'. This method also sets the
++        child's parent attribute appropriately.
++        """
++        child.parent = self
++        self.children.append(child)
++        self.changed()
++
++
++class Leaf(Base):
++
++    """Concrete implementation for leaf nodes."""
++
++    # Default values for instance variables
++    prefix = ""  # Whitespace and comments preceding this token in the input
++    lineno = 0   # Line where this token starts in the input
++    column = 0   # Column where this token starts in the input
++
++    def __init__(self, type, value, context=None, prefix=None):
++        """
++        Initializer.
++
++        Takes a type constant (a token number < 256), a string value, and an
++        optional context keyword argument.
++        """
++        assert 0 <= type < 256, type
++        if context is not None:
++            self.prefix, (self.lineno, self.column) = context
++        self.type = type
++        self.value = value
++        if prefix is not None:
++            self.prefix = prefix
++
++    def __repr__(self):
++        """Return a canonical string representation."""
++        return "%s(%r, %r)" % (self.__class__.__name__,
++                               self.type,
++                               self.value)
++
++    def __str__(self):
++        """
++        Return a pretty string representation.
++
++        This reproduces the input source exactly.
++        """
++        return self.prefix + str(self.value)
++
++    def _eq(self, other):
++        """Compare two nodes for equality."""
++        return (self.type, self.value) == (other.type, other.value)
++
++    def clone(self):
++        """Return a cloned (deep) copy of self."""
++        return Leaf(self.type, self.value,
++                    (self.prefix, (self.lineno, self.column)))
++
++    def post_order(self):
++        """Return a post-order iterator for the tree."""
++        yield self
++
++    def pre_order(self):
++        """Return a pre-order iterator for the tree."""
++        yield self
++
++    def set_prefix(self, prefix):
++        """Set the prefix for the node."""
++        self.changed()
++        self.prefix = prefix
++
++    def get_prefix(self):
++        """Return the prefix for the node."""
++        return self.prefix
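++
++
++# A quick sketch of the Node/Leaf contract (here "syms" is assumed to be
++# pygram.python_symbols and "token" pgen2.token): str() on the root
++# reproduces the source text, because every Leaf carries the whitespace
++# that preceded it.
++#
++#   n = Node(syms.expr_stmt, [Leaf(token.NAME, "x"),
++#                             Leaf(token.EQUAL, "=", prefix=" "),
++#                             Leaf(token.NUMBER, "1", prefix=" ")])
++#   assert str(n) == "x = 1"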
++
++
++def convert(gr, raw_node):
++    """
++    Convert raw node information to a Node or Leaf instance.
++
++    This is passed to the parser driver which calls it whenever a reduction of a
++    grammar rule produces a new complete node, so that the tree is build
++    strictly bottom-up.
++    """
++    type, value, context, children = raw_node
++    if children or type in gr.number2symbol:
++        # If there's exactly one child, return that child instead of
++        # creating a new node.
++        if len(children) == 1:
++            return children[0]
++        return Node(type, children, context=context)
++    else:
++        return Leaf(type, value, context=context)
++
++
++class BasePattern(object):
++
++    """
++    A pattern is a tree matching pattern.
++
++    It looks for a specific node type (token or symbol), and
++    optionally for a specific content.
++
++    This is an abstract base class.  There are three concrete
++    subclasses:
++
++    - LeafPattern matches a single leaf node;
++    - NodePattern matches a single node (usually non-leaf);
++    - WildcardPattern matches a sequence of nodes of variable length.
++    """
++
++    # Defaults for instance variables
++    type = None     # Node type (token if < 256, symbol if >= 256)
++    content = None  # Optional content matching pattern
++    name = None     # Optional name used to store match in results dict
++
++    def __new__(cls, *args, **kwds):
++        """Constructor that prevents BasePattern from being instantiated."""
++        assert cls is not BasePattern, "Cannot instantiate BasePattern"
++        return object.__new__(cls)
++
++    def __repr__(self):
++        args = [type_repr(self.type), self.content, self.name]
++        while args and args[-1] is None:
++            del args[-1]
++        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
++
++    def optimize(self):
++        """
++        A subclass can define this as a hook for optimizations.
++
++        Returns either self or another node with the same effect.
++        """
++        return self
++
++    def match(self, node, results=None):
++        """
++        Does this pattern exactly match a node?
++
++        Returns True if it matches, False if not.
++
++        If results is not None, it must be a dict which will be
++        updated with the nodes matching named subpatterns.
++
++        Default implementation for non-wildcard patterns.
++        """
++        if self.type is not None and node.type != self.type:
++            return False
++        if self.content is not None:
++            r = None
++            if results is not None:
++                r = {}
++            if not self._submatch(node, r):
++                return False
++            if r:
++                results.update(r)
++        if results is not None and self.name:
++            results[self.name] = node
++        return True
++
++    def match_seq(self, nodes, results=None):
++        """
++        Does this pattern exactly match a sequence of nodes?
++
++        Default implementation for non-wildcard patterns.
++        """
++        if len(nodes) != 1:
++            return False
++        return self.match(nodes[0], results)
++
++    def generate_matches(self, nodes):
++        """
++        Generator yielding all matches for this pattern.
++
++        Default implementation for non-wildcard patterns.
++        """
++        r = {}
++        if nodes and self.match(nodes[0], r):
++            yield 1, r
++
++
++class LeafPattern(BasePattern):
++
++    def __init__(self, type=None, content=None, name=None):
++        """
++        Initializer.  Takes optional type, content, and name.
++
++        The type, if given, must be a token type (< 256).  If not given,
++        this matches any *leaf* node; the content may still be required.
++
++        The content, if given, must be a string.
++
++        If a name is given, the matching node is stored in the results
++        dict under that key.
++        """
++        if type is not None:
++            assert 0 <= type < 256, type
++        if content is not None:
++            assert isinstance(content, basestring), repr(content)
++        self.type = type
++        self.content = content
++        self.name = name
++
++    def match(self, node, results=None):
++        """Override match() to insist on a leaf node."""
++        if not isinstance(node, Leaf):
++            return False
++        return BasePattern.match(self, node, results)
++
++    def _submatch(self, node, results=None):
++        """
++        Match the pattern's content to the node's children.
++
++        This assumes the node type matches and self.content is not None.
++
++        Returns True if it matches, False if not.
++
++        If results is not None, it must be a dict which will be
++        updated with the nodes matching named subpatterns.
++
++        When returning False, the results dict may still be updated.
++        """
++        return self.content == node.value
++
++
++class NodePattern(BasePattern):
++
++    wildcards = False
++
++    def __init__(self, type=None, content=None, name=None):
++        """
++        Initializer.  Takes optional type, content, and name.
++
++        The type, if given, must be a symbol type (>= 256).  If the
++        type is None this matches *any* single node (leaf or not),
++        except if content is not None, in which case it only matches
++        non-leaf nodes that also match the content pattern.
++
++        The content, if not None, must be a sequence of Patterns that
++        must match the node's children exactly.  If the content is
++        given, the type must not be None.
++
++        If a name is given, the matching node is stored in the results
++        dict under that key.
++        """
++        if type is not None:
++            assert type >= 256, type
++        if content is not None:
++            assert not isinstance(content, basestring), repr(content)
++            content = list(content)
++            for i, item in enumerate(content):
++                assert isinstance(item, BasePattern), (i, item)
++                if isinstance(item, WildcardPattern):
++                    self.wildcards = True
++        self.type = type
++        self.content = content
++        self.name = name
++
++    def _submatch(self, node, results=None):
++        """
++        Match the pattern's content to the node's children.
++
++        This assumes the node type matches and self.content is not None.
++
++        Returns True if it matches, False if not.
++
++        If results is not None, it must be a dict which will be
++        updated with the nodes matching named subpatterns.
++
++        When returning False, the results dict may still be updated.
++        """
++        if self.wildcards:
++            for c, r in generate_matches(self.content, node.children):
++                if c == len(node.children):
++                    if results is not None:
++                        results.update(r)
++                    return True
++            return False
++        if len(self.content) != len(node.children):
++            return False
++        for subpattern, child in zip(self.content, node.children):
++            if not subpattern.match(child, results):
++                return False
++        return True
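++
++
++# An illustrative match (again assuming "syms" is pygram.python_symbols):
++# bind the NAME leaf of a call such as "f(x)" under the key "func".
++#
++#   pat = NodePattern(syms.power,
++#                     [LeafPattern(token.NAME, name="func"),
++#                      NodePattern(syms.trailer)])
++#   results = {}
++#   if pat.match(node, results):
++#       print results["func"].value   # -> "f"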
++
++
++class WildcardPattern(BasePattern):
++
++    """
++    A wildcard pattern can match zero or more nodes.
++
++    This has all the flexibility needed to implement patterns like:
++
++    .*      .+      .?      .{m,n}
++    (a b c | d e | f)
++    (...)*  (...)+  (...)?  (...){m,n}
++
++    except it always uses non-greedy matching.
++    """
++
++    def __init__(self, content=None, min=0, max=HUGE, name=None):
++        """
++        Initializer.
++
++        Args:
++            content: optional sequence of subsequences of patterns;
++                     if absent, matches one node;
++                     if present, each subsequence is an alternative [*]
++            min: optional minimum number of times to match, default 0
++            max: optional maximum number of times to match, default HUGE
++            name: optional name assigned to this match
++
++        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
++            equivalent to (a b c | d e | f g h); if content is None,
++            this is equivalent to '.' in regular expression terms.
++            The min and max parameters work as follows:
++                min=0, max=maxint: .*
++                min=1, max=maxint: .+
++                min=0, max=1: .?
++                min=1, max=1: .
++            If content is not None, replace the dot with the parenthesized
++            list of alternatives, e.g. (a b c | d e | f g h)*
++        """
++        assert 0 <= min <= max <= HUGE, (min, max)
++        if content is not None:
++            content = tuple(map(tuple, content))  # Protect against alterations
++            # Check sanity of alternatives
++            assert len(content), repr(content)  # Can't have zero alternatives
++            for alt in content:
++                assert len(alt), repr(alt) # Can't have empty alternatives
++        self.content = content
++        self.min = min
++        self.max = max
++        self.name = name
++
++    def optimize(self):
++        """Optimize certain stacked wildcard patterns."""
++        subpattern = None
++        if (self.content is not None and
++            len(self.content) == 1 and len(self.content[0]) == 1):
++            subpattern = self.content[0][0]
++        if self.min == 1 and self.max == 1:
++            if self.content is None:
++                return NodePattern(name=self.name)
++            if subpattern is not None and self.name == subpattern.name:
++                return subpattern.optimize()
++        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
++            subpattern.min <= 1 and self.name == subpattern.name):
++            return WildcardPattern(subpattern.content,
++                                   self.min*subpattern.min,
++                                   self.max*subpattern.max,
++                                   subpattern.name)
++        return self
++
++    def match(self, node, results=None):
++        """Does this pattern exactly match a node?"""
++        return self.match_seq([node], results)
++
++    def match_seq(self, nodes, results=None):
++        """Does this pattern exactly match a sequence of nodes?"""
++        for c, r in self.generate_matches(nodes):
++            if c == len(nodes):
++                if results is not None:
++                    results.update(r)
++                    if self.name:
++                        results[self.name] = list(nodes)
++                return True
++        return False
++
++    def generate_matches(self, nodes):
++        """
++        Generator yielding matches for a sequence of nodes.
++
++        Args:
++            nodes: sequence of nodes
++
++        Yields:
++            (count, results) tuples where:
++            count: the match comprises nodes[:count];
++            results: dict containing named submatches.
++        """
++        if self.content is None:
++            # Shortcut for special case (see __init__.__doc__)
++            for count in xrange(self.min, 1 + min(len(nodes), self.max)):
++                r = {}
++                if self.name:
++                    r[self.name] = nodes[:count]
++                yield count, r
++        elif self.name == "bare_name":
++            yield self._bare_name_matches(nodes)
++        else:
++            # Redirect stderr while matching: hitting the recursion limit
++            # otherwise produces ugly messages about RuntimeErrors being
++            # ignored.
++            save_stderr = sys.stderr
++            sys.stderr = StringIO()
++            try:
++                for count, r in self._recursive_matches(nodes, 0):
++                    if self.name:
++                        r[self.name] = nodes[:count]
++                    yield count, r
++            except RuntimeError:
++                # We fall back to the iterative pattern matching scheme if the recursive
++                # scheme hits the recursion limit.
++                for count, r in self._iterative_matches(nodes):
++                    if self.name:
++                        r[self.name] = nodes[:count]
++                    yield count, r
++            finally:
++                sys.stderr = save_stderr
++
++    def _iterative_matches(self, nodes):
++        """Helper to iteratively yield the matches."""
++        nodelen = len(nodes)
++        if 0 >= self.min:
++            yield 0, {}
++
++        results = []
++        # generate matches that use just one alt from self.content
++        for alt in self.content:
++            for c, r in generate_matches(alt, nodes):
++                yield c, r
++                results.append((c, r))
++
++        # for each match, iterate down the nodes
++        while results:
++            new_results = []
++            for c0, r0 in results:
++                # stop if the entire set of nodes has been matched
++                if c0 < nodelen and c0 <= self.max:
++                    for alt in self.content:
++                        for c1, r1 in generate_matches(alt, nodes[c0:]):
++                            if c1 > 0:
++                                r = {}
++                                r.update(r0)
++                                r.update(r1)
++                                yield c0 + c1, r
++                                new_results.append((c0 + c1, r))
++            results = new_results
++
++    def _bare_name_matches(self, nodes):
++        """Special optimized matcher for bare_name."""
++        count = 0
++        r = {}
++        done = False
++        max = len(nodes)
++        while not done and count < max:
++            done = True
++            for leaf in self.content:
++                if leaf[0].match(nodes[count], r):
++                    count += 1
++                    done = False
++                    break
++        r[self.name] = nodes[:count]
++        return count, r
++
++    def _recursive_matches(self, nodes, count):
++        """Helper to recursively yield the matches."""
++        assert self.content is not None
++        if count >= self.min:
++            yield 0, {}
++        if count < self.max:
++            for alt in self.content:
++                for c0, r0 in generate_matches(alt, nodes):
++                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
++                        r = {}
++                        r.update(r0)
++                        r.update(r1)
++                        yield c0 + c1, r
++
++
++class NegatedPattern(BasePattern):
++
++    def __init__(self, content=None):
++        """
++        Initializer.
++
++        The argument is either a pattern or None.  If it is None, this
++        only matches an empty sequence (effectively '$' in regex
++        lingo).  If it is not None, this matches whenever the argument
++        pattern doesn't have any matches.
++        """
++        if content is not None:
++            assert isinstance(content, BasePattern), repr(content)
++        self.content = content
++
++    def match(self, node):
++        # We never match a node in its entirety
++        return False
++
++    def match_seq(self, nodes):
++        # We only match an empty sequence of nodes in its entirety
++        return len(nodes) == 0
++
++    def generate_matches(self, nodes):
++        if self.content is None:
++            # Return a match if there is an empty sequence
++            if len(nodes) == 0:
++                yield 0, {}
++        else:
++            # Return a match if the argument pattern has no matches
++            for c, r in self.content.generate_matches(nodes):
++                return
++            yield 0, {}
++
++
++def generate_matches(patterns, nodes):
++    """
++    Generator yielding matches for a sequence of patterns and nodes.
++
++    Args:
++        patterns: a sequence of patterns
++        nodes: a sequence of nodes
++
++    Yields:
++        (count, results) tuples where:
++        count: the entire sequence of patterns matches nodes[:count];
++        results: dict containing named submatches.
++        """
++    if not patterns:
++        yield 0, {}
++    else:
++        p, rest = patterns[0], patterns[1:]
++        for c0, r0 in p.generate_matches(nodes):
++            if not rest:
++                yield c0, r0
++            else:
++                for c1, r1 in generate_matches(rest, nodes[c0:]):
++                    r = {}
++                    r.update(r0)
++                    r.update(r1)
++                    yield c0 + c1, r
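
The pattern classes above map regular-expression quantifiers onto tree
matching: WildcardPattern(min, max) behaves like .{m,n} over a run of
sibling nodes, and generate_matches() enumerates every prefix length a
pattern sequence can consume. A minimal sketch of those semantics,
assuming the module is importable as refactor.pytree and that token
type 1 is NAME (both assumptions, not guaranteed by this patch alone):

    from refactor import pytree

    leaf = pytree.Leaf(1, "x")                  # assumed: 1 == token.NAME
    star = pytree.WildcardPattern(min=0, max=pytree.HUGE)  # like '.*'
    # match_seq() succeeds only when the whole sequence is consumed:
    assert star.match_seq([leaf, leaf])
    # generate_matches() yields one (count, results) pair per prefix
    # length between min and max that the pattern can consume:
    assert [c for c, r in star.generate_matches([leaf, leaf])] == [0, 1, 2]
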
+diff -r 531f2e948299 refactor/refactor.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/refactor.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,515 @@
++#!/usr/bin/env python2.5
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Refactoring framework.
++
++Used as a main program, this can refactor any number of files and/or
++recursively descend down directories.  Imported as a module, this
++provides infrastructure to write your own refactoring tool.
++"""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++
++# Python imports
++import os
++import sys
++import difflib
++import logging
++import operator
++from collections import defaultdict
++from itertools import chain
++
++# Local imports
++from .pgen2 import driver
++from .pgen2 import tokenize
++
++from . import pytree
++from . import patcomp
++from . import fixes
++from . import pygram
++
++
++def get_all_fix_names(fixer_pkg, remove_prefix=True):
++    """Return a sorted list of all available fix names in the given package."""
++    pkg = __import__(fixer_pkg, [], [], ["*"])
++    fixer_dir = os.path.dirname(pkg.__file__)
++    fix_names = []
++    for name in sorted(os.listdir(fixer_dir)):
++        if name.startswith("fix_") and name.endswith(".py"):
++            if remove_prefix:
++                name = name[4:]
++            fix_names.append(name[:-3])
++    return fix_names
++
++def get_head_types(pat):
++    """ Accepts a pytree Pattern Node and returns a set
++        of the pattern types which will match first. """
++
++    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
++        # NodePatterns must either have no type and no content,
++        #   or both a type and content -- so we never recurse deeper.
++        # Leaf patterns always contribute their type.
++        return set([pat.type])
++
++    if isinstance(pat, pytree.NegatedPattern):
++        if pat.content:
++            return get_head_types(pat.content)
++        return set([None]) # Negated Patterns don't have a type
++
++    if isinstance(pat, pytree.WildcardPattern):
++        # Recurse on each node in content
++        r = set()
++        for p in pat.content:
++            for x in p:
++                r.update(get_head_types(x))
++        return r
++
++    raise Exception("Oh no! I don't understand pattern %s" %(pat))
++
++def get_headnode_dict(fixer_list):
++    """ Accepts a list of fixers and returns a dictionary
++        of head node type --> fixer list.  """
++    head_nodes = defaultdict(list)
++    for fixer in fixer_list:
++        if not fixer.pattern:
++            head_nodes[None].append(fixer)
++            continue
++        for t in get_head_types(fixer.pattern):
++            head_nodes[t].append(fixer)
++    return head_nodes
++
++def get_fixers_from_package(pkg_name):
++    """
++    Return the fully qualified names for fixers in the package pkg_name.
++    """
++    return [pkg_name + "." + fix_name
++            for fix_name in get_all_fix_names(pkg_name, False)]
++
++
++class FixerError(Exception):
++    """A fixer could not be loaded."""
++
++
++class RefactoringTool(object):
++
++    _default_options = {"print_function": False}
++
++    CLASS_PREFIX = "Fix" # The prefix for fixer classes
++    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
++
++    def __init__(self, fixer_names, options=None, explicit=None):
++        """Initializer.
++
++        Args:
++            fixer_names: a list of fixers to import
++            options: a dict of configuration options.
++            explicit: a list of fixers to run even though they are marked explicit.
++        """
++        self.fixers = fixer_names
++        self.explicit = explicit or []
++        self.options = self._default_options.copy()
++        if options is not None:
++            self.options.update(options)
++        self.errors = []
++        self.logger = logging.getLogger("RefactoringTool")
++        self.fixer_log = []
++        self.wrote = False
++        if self.options["print_function"]:
++            del pygram.python_grammar.keywords["print"]
++        self.driver = driver.Driver(pygram.python_grammar,
++                                    convert=pytree.convert,
++                                    logger=self.logger)
++        self.pre_order, self.post_order = self.get_fixers()
++
++        self.pre_order_heads = get_headnode_dict(self.pre_order)
++        self.post_order_heads = get_headnode_dict(self.post_order)
++
++        self.files = []  # List of files that were or should be modified
++
++    def get_fixers(self):
++        """Inspects the options to load the requested patterns and handlers.
++
++        Returns:
++          (pre_order, post_order), where pre_order is the list of fixers that
++          want a pre-order AST traversal, and post_order is the list that want
++          post-order traversal.
++        """
++        pre_order_fixers = []
++        post_order_fixers = []
++        for fix_mod_path in self.fixers:
++            mod = __import__(fix_mod_path, {}, {}, ["*"])
++            fix_name = fix_mod_path.rsplit(".", 1)[-1]
++            if fix_name.startswith(self.FILE_PREFIX):
++                fix_name = fix_name[len(self.FILE_PREFIX):]
++            parts = fix_name.split("_")
++            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
++            try:
++                fix_class = getattr(mod, class_name)
++            except AttributeError:
++                raise FixerError("Can't find %s.%s" % (fix_name, class_name))
++            fixer = fix_class(self.options, self.fixer_log)
++            if fixer.explicit and self.explicit is not True and \
++                    fix_mod_path not in self.explicit:
++                self.log_message("Skipping implicit fixer: %s", fix_name)
++                continue
++
++            self.log_debug("Adding transformation: %s", fix_name)
++            if fixer.order == "pre":
++                pre_order_fixers.append(fixer)
++            elif fixer.order == "post":
++                post_order_fixers.append(fixer)
++            else:
++                raise FixerError("Illegal fixer order: %r" % fixer.order)
++
++        key_func = operator.attrgetter("run_order")
++        pre_order_fixers.sort(key=key_func)
++        post_order_fixers.sort(key=key_func)
++        return (pre_order_fixers, post_order_fixers)
++
++    def log_error(self, msg, *args, **kwds):
++        """Called when an error occurs."""
++        raise
++
++    def log_message(self, msg, *args):
++        """Hook to log a message."""
++        if args:
++            msg = msg % args
++        self.logger.info(msg)
++
++    def log_debug(self, msg, *args):
++        if args:
++            msg = msg % args
++        self.logger.debug(msg)
++
++    def print_output(self, lines):
++        """Called with lines of output to give to the user."""
++        pass
++
++    def refactor(self, items, write=False, doctests_only=False):
++        """Refactor a list of files and directories."""
++        for dir_or_file in items:
++            if os.path.isdir(dir_or_file):
++                self.refactor_dir(dir_or_file, write, doctests_only)
++            else:
++                self.refactor_file(dir_or_file, write, doctests_only)
++
++    def refactor_dir(self, dir_name, write=False, doctests_only=False):
++        """Descends down a directory and refactor every Python file found.
++
++        Python files are assumed to have a .py extension.
++
++        Files and subdirectories starting with '.' are skipped.
++        """
++        for dirpath, dirnames, filenames in os.walk(dir_name):
++            self.log_debug("Descending into %s", dirpath)
++            dirnames.sort()
++            filenames.sort()
++            for name in filenames:
++                if not name.startswith(".") and name.endswith("py"):
++                    fullname = os.path.join(dirpath, name)
++                    self.refactor_file(fullname, write, doctests_only)
++            # Modify dirnames in-place to remove subdirs with leading dots
++            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
++
++    def refactor_file(self, filename, write=False, doctests_only=False):
++        """Refactors a file."""
++        try:
++            f = open(filename)
++        except IOError, err:
++            self.log_error("Can't open %s: %s", filename, err)
++            return
++        try:
++            input = f.read() + "\n" # Silence certain parse errors
++        finally:
++            f.close()
++        if doctests_only:
++            self.log_debug("Refactoring doctests in %s", filename)
++            output = self.refactor_docstring(input, filename)
++            if output != input:
++                self.processed_file(output, filename, input, write=write)
++            else:
++                self.log_debug("No doctest changes in %s", filename)
++        else:
++            tree = self.refactor_string(input, filename)
++            if tree and tree.was_changed:
++                # The [:-1] is to take off the \n we added earlier
++                self.processed_file(str(tree)[:-1], filename, write=write)
++            else:
++                self.log_debug("No changes in %s", filename)
++
++    def refactor_string(self, data, name):
++        """Refactor a given input string.
++
++        Args:
++            data: a string holding the code to be refactored.
++            name: a human-readable name for use in error/log messages.
++
++        Returns:
++            An AST corresponding to the refactored input stream; None if
++            there were errors during the parse.
++        """
++        try:
++            tree = self.driver.parse_string(data)
++        except Exception, err:
++            self.log_error("Can't parse %s: %s: %s",
++                           name, err.__class__.__name__, err)
++            return
++        self.log_debug("Refactoring %s", name)
++        self.refactor_tree(tree, name)
++        return tree
++
++    def refactor_stdin(self, doctests_only=False):
++        input = sys.stdin.read()
++        if doctests_only:
++            self.log_debug("Refactoring doctests in stdin")
++            output = self.refactor_docstring(input, "<stdin>")
++            if output != input:
++                self.processed_file(output, "<stdin>", input)
++            else:
++                self.log_debug("No doctest changes in stdin")
++        else:
++            tree = self.refactor_string(input, "<stdin>")
++            if tree and tree.was_changed:
++                self.processed_file(str(tree), "<stdin>", input)
++            else:
++                self.log_debug("No changes in stdin")
++
++    def refactor_tree(self, tree, name):
++        """Refactors a parse tree (modifying the tree in place).
++
++        Args:
++            tree: a pytree.Node instance representing the root of the tree
++                  to be refactored.
++            name: a human-readable name for this tree.
++
++        Returns:
++            True if the tree was modified, False otherwise.
++        """
++        for fixer in chain(self.pre_order, self.post_order):
++            fixer.start_tree(tree, name)
++
++        self.traverse_by(self.pre_order_heads, tree.pre_order())
++        self.traverse_by(self.post_order_heads, tree.post_order())
++
++        for fixer in chain(self.pre_order, self.post_order):
++            fixer.finish_tree(tree, name)
++        return tree.was_changed
++
++    def traverse_by(self, fixers, traversal):
++        """Traverse an AST, applying a set of fixers to each node.
++
++        This is a helper method for refactor_tree().
++
++        Args:
++            fixers: a list of fixer instances.
++            traversal: a generator that yields AST nodes.
++
++        Returns:
++            None
++        """
++        if not fixers:
++            return
++        for node in traversal:
++            for fixer in fixers[node.type] + fixers[None]:
++                results = fixer.match(node)
++                if results:
++                    new = fixer.transform(node, results)
++                    if new is not None and (new != node or
++                                            str(new) != str(node)):
++                        node.replace(new)
++                        node = new
++
++    def processed_file(self, new_text, filename, old_text=None, write=False):
++        """
++        Called when a file has been refactored, and there are changes.
++        """
++        self.files.append(filename)
++        if old_text is None:
++            try:
++                f = open(filename, "r")
++            except IOError, err:
++                self.log_error("Can't read %s: %s", filename, err)
++                return
++            try:
++                old_text = f.read()
++            finally:
++                f.close()
++        if old_text == new_text:
++            self.log_debug("No changes to %s", filename)
++            return
++        self.print_output(diff_texts(old_text, new_text, filename))
++        if write:
++            self.write_file(new_text, filename, old_text)
++        else:
++            self.log_debug("Not writing changes to %s", filename)
++
++    def write_file(self, new_text, filename, old_text):
++        """Writes a string to a file.
++
++        It first shows a unified diff between the old text and the new text, and
++        then rewrites the file; the latter is only done if the write option is
++        set.
++        """
++        try:
++            f = open(filename, "w")
++        except os.error, err:
++            self.log_error("Can't create %s: %s", filename, err)
++            return
++        try:
++            f.write(new_text)
++        except os.error, err:
++            self.log_error("Can't write %s: %s", filename, err)
++        finally:
++            f.close()
++        self.log_debug("Wrote changes to %s", filename)
++        self.wrote = True
++
++    PS1 = ">>> "
++    PS2 = "... "
++
++    def refactor_docstring(self, input, filename):
++        """Refactors a docstring, looking for doctests.
++
++        This returns a modified version of the input string.  It looks
++        for doctests, which start with a ">>>" prompt, and may be
++        continued with "..." prompts, as long as the "..." is indented
++        the same as the ">>>".
++
++        (Unfortunately we can't use the doctest module's parser,
++        since, like most parsers, it is not geared towards preserving
++        the original source.)
++        """
++        result = []
++        block = None
++        block_lineno = None
++        indent = None
++        lineno = 0
++        for line in input.splitlines(True):
++            lineno += 1
++            if line.lstrip().startswith(self.PS1):
++                if block is not None:
++                    result.extend(self.refactor_doctest(block, block_lineno,
++                                                        indent, filename))
++                block_lineno = lineno
++                block = [line]
++                i = line.find(self.PS1)
++                indent = line[:i]
++            elif (indent is not None and
++                  (line.startswith(indent + self.PS2) or
++                   line == indent + self.PS2.rstrip() + "\n")):
++                block.append(line)
++            else:
++                if block is not None:
++                    result.extend(self.refactor_doctest(block, block_lineno,
++                                                        indent, filename))
++                block = None
++                indent = None
++                result.append(line)
++        if block is not None:
++            result.extend(self.refactor_doctest(block, block_lineno,
++                                                indent, filename))
++        return "".join(result)
++
++    def refactor_doctest(self, block, lineno, indent, filename):
++        """Refactors one doctest.
++
++        A doctest is given as a block of lines, the first of which starts
++        with ">>>" (possibly indented), while the remaining lines start
++        with "..." (identically indented).
++
++        """
++        try:
++            tree = self.parse_block(block, lineno, indent)
++        except Exception, err:
++            if self.logger.isEnabledFor(logging.DEBUG):
++                for line in block:
++                    self.log_debug("Source: %s", line.rstrip("\n"))
++            self.log_error("Can't parse docstring in %s line %s: %s: %s",
++                           filename, lineno, err.__class__.__name__, err)
++            return block
++        if self.refactor_tree(tree, filename):
++            new = str(tree).splitlines(True)
++            # Undo the adjustment of the line numbers in wrap_toks() below.
++            clipped, new = new[:lineno-1], new[lineno-1:]
++            assert clipped == ["\n"] * (lineno-1), clipped
++            if not new[-1].endswith("\n"):
++                new[-1] += "\n"
++            block = [indent + self.PS1 + new.pop(0)]
++            if new:
++                block += [indent + self.PS2 + line for line in new]
++        return block
++
++    def summarize(self):
++        if self.wrote:
++            were = "were"
++        else:
++            were = "need to be"
++        if not self.files:
++            self.log_message("No files %s modified.", were)
++        else:
++            self.log_message("Files that %s modified:", were)
++            for file in self.files:
++                self.log_message(file)
++        if self.fixer_log:
++            self.log_message("Warnings/messages while refactoring:")
++            for message in self.fixer_log:
++                self.log_message(message)
++        if self.errors:
++            if len(self.errors) == 1:
++                self.log_message("There was 1 error:")
++            else:
++                self.log_message("There were %d errors:", len(self.errors))
++            for msg, args, kwds in self.errors:
++                self.log_message(msg, *args, **kwds)
++
++    def parse_block(self, block, lineno, indent):
++        """Parses a block into a tree.
++
++        This is necessary to get correct line number / offset information
++        in the parser diagnostics and embedded into the parse tree.
++        """
++        return self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
++
++    def wrap_toks(self, block, lineno, indent):
++        """Wraps a tokenize stream to systematically modify start/end."""
++        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
++        for type, value, (line0, col0), (line1, col1), line_text in tokens:
++            line0 += lineno - 1
++            line1 += lineno - 1
++            # Don't bother updating the columns; this is too complicated
++            # since line_text would also have to be updated and it would
++            # still break for tokens spanning lines.  Let the user guess
++            # that the column numbers for doctests are relative to the
++            # end of the prompt string (PS1 or PS2).
++            yield type, value, (line0, col0), (line1, col1), line_text
++
++
++    def gen_lines(self, block, indent):
++        """Generates lines as expected by tokenize from a list of lines.
++
++        This strips the first len(indent + self.PS1) characters off each line.
++        """
++        prefix1 = indent + self.PS1
++        prefix2 = indent + self.PS2
++        prefix = prefix1
++        for line in block:
++            if line.startswith(prefix):
++                yield line[len(prefix):]
++            elif line == prefix.rstrip() + "\n":
++                yield "\n"
++            else:
++                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
++            prefix = prefix2
++        while True:
++            yield ""
++
++
++def diff_texts(a, b, filename):
++    """Return a unified diff of two strings."""
++    a = a.splitlines()
++    b = b.splitlines()
++    return difflib.unified_diff(a, b, filename, filename,
++                                "(original)", "(refactored)",
++                                lineterm="")
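
refactor.py ties the pieces together: RefactoringTool imports each
requested fixer module, buckets the resulting fixer instances by the
head node types their patterns can match (get_headnode_dict), and then
walks the tree pre- and post-order, offering each node only to fixers
that could possibly match it. A minimal sketch of driving it
programmatically, assuming the package is importable as "refactor" and
that the from2 fixer package shipped with this commit is importable as
refactor.fixes.from2 (both assumptions):

    from refactor import refactor

    fixers = refactor.get_fixers_from_package("refactor.fixes.from2")
    tool = refactor.RefactoringTool(fixers)
    tree = tool.refactor_string("print 'hello'\n", "<example>")
    if tree is not None and tree.was_changed:
        print str(tree),        # expected output: print('hello')
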
+diff -r 531f2e948299 refactor/tests/.svn/text-base/__init__.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/__init__.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,24 @@
++"""Make tests/ into a package. This allows us to "import tests" and
++have tests.all_tests be a TestSuite representing all test cases
++from all test_*.py files in tests/."""
++# Author: Collin Winter
++
++import os
++import os.path
++import unittest
++import types
++
++from . import support
++
++all_tests = unittest.TestSuite()
++
++tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
++tests = [t[0:-3] for t in os.listdir(tests_dir)
++                        if t.startswith('test_') and t.endswith('.py')]
++
++loader = unittest.TestLoader()
++
++for t in tests:
++    __import__("",globals(),locals(),[t],level=1)
++    mod = globals()[t]
++    all_tests.addTests(loader.loadTestsFromModule(mod))
+diff -r 531f2e948299 refactor/tests/.svn/text-base/pytree_idempotency.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/pytree_idempotency.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,92 @@
++#!/usr/bin/env python2.5
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Main program for testing the infrastructure."""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++# Support imports (need to be imported first)
++from . import support
++
++# Python imports
++import os
++import sys
++import logging
++
++# Local imports
++from .. import pytree
++import pgen2
++from pgen2 import driver
++
++logging.basicConfig()
++
++def main():
++    gr = driver.load_grammar("Grammar.txt")
++    dr = driver.Driver(gr, convert=pytree.convert)
++
++    fn = "example.py"
++    tree = dr.parse_file(fn, debug=True)
++    if not diff(fn, tree):
++        print "No diffs."
++    if not sys.argv[1:]:
++        return # Pass a dummy argument to run the complete test suite below
++
++    problems = []
++
++    # Process every imported module
++    for name in sys.modules:
++        mod = sys.modules[name]
++        if mod is None or not hasattr(mod, "__file__"):
++            continue
++        fn = mod.__file__
++        if fn.endswith(".pyc"):
++            fn = fn[:-1]
++        if not fn.endswith(".py"):
++            continue
++        print >>sys.stderr, "Parsing", fn
++        tree = dr.parse_file(fn, debug=True)
++        if diff(fn, tree):
++            problems.append(fn)
++
++    # Process every single module on sys.path (but not in packages)
++    for dir in sys.path:
++        try:
++            names = os.listdir(dir)
++        except os.error:
++            continue
++        print >>sys.stderr, "Scanning", dir, "..."
++        for name in names:
++            if not name.endswith(".py"):
++                continue
++            print >>sys.stderr, "Parsing", name
++            fn = os.path.join(dir, name)
++            try:
++                tree = dr.parse_file(fn, debug=True)
++            except pgen2.parse.ParseError, err:
++                print "ParseError:", err
++            else:
++                if diff(fn, tree):
++                    problems.append(fn)
++
++    # Show summary of problem files
++    if not problems:
++        print "No problems.  Congratulations!"
++    else:
++        print "Problems in following files:"
++        for fn in problems:
++            print "***", fn
++
++def diff(fn, tree):
++    f = open("@", "w")
++    try:
++        f.write(str(tree))
++    finally:
++        f.close()
++    try:
++        return os.system("diff -u %s @" % fn)
++    finally:
++        os.remove("@")
++
++if __name__ == "__main__":
++    main()
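
The idempotency script above checks a single invariant: parsing a file
and stringifying the resulting tree must reproduce the source exactly.
A minimal one-file sketch of that invariant, assuming a Grammar.txt in
the current directory (the same assumption the script itself makes):

    from refactor import pytree
    from refactor.pgen2 import driver

    gr = driver.load_grammar("Grammar.txt")   # assumed to be present
    drv = driver.Driver(gr, convert=pytree.convert)
    source = open("example.py").read()
    # str() of the parse tree must round-trip the original source:
    assert str(drv.parse_file("example.py")) == source
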
+diff -r 531f2e948299 refactor/tests/.svn/text-base/support.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/support.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,63 @@
++"""Support code for test_*.py files"""
++# Author: Collin Winter
++
++# Python imports
++import unittest
++import sys
++import os
++import os.path
++import re
++from textwrap import dedent
++
++#sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
++
++# Local imports
++from .. import pytree
++from .. import refactor
++from ..pgen2 import driver
++
++test_dir = os.path.dirname(__file__)
++proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
++grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
++grammar = driver.load_grammar(grammar_path)
++driver = driver.Driver(grammar, convert=pytree.convert)
++
++def parse_string(string):
++    return driver.parse_string(reformat(string), debug=True)
++
++# Python 2.3's TestSuite is not iter()-able
++if sys.version_info < (2, 4):
++    def TestSuite_iter(self):
++        return iter(self._tests)
++    unittest.TestSuite.__iter__ = TestSuite_iter
++
++def run_all_tests(test_mod=None, tests=None):
++    if tests is None:
++        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
++    unittest.TextTestRunner(verbosity=2).run(tests)
++
++def reformat(string):
++    return dedent(string) + "\n\n"
++
++def get_refactorer(fixers=None, options=None):
++    """
++    A convenience function for creating a RefactoringTool for tests.
++
++    fixers is a list of fixers for the RefactoringTool to use. By default
++    "lib2to3.fixes.*" is used. options is an optional dictionary of options to
++    be passed to the RefactoringTool.
++    """
++    if fixers is not None:
++        fixers = ["lib2to3.fixes.fix_" + fix for fix in fixers]
++    else:
++        fixers = refactor.get_fixers_from_package("lib2to3.fixes")
++    options = options or {}
++    return refactor.RefactoringTool(fixers, options, explicit=True)
++
++def all_project_files():
++    for dirpath, dirnames, filenames in os.walk(proj_dir):
++        for filename in filenames:
++            if filename.endswith(".py"):
++                yield os.path.join(dirpath, filename)
++
++TestCase = unittest.TestCase
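
support.get_refactorer() is the seam the fixer tests hang on: pass a
list of short fixer names and it expands them to full module paths, or
pass nothing to load the whole fixer package. A minimal sketch, under
the assumption that this pristine copy matches the importable
tests.support module and that the "print" fixer resolves on the path
(the fixer name is only an illustration):

    from refactor.tests import support

    r = support.get_refactorer(fixers=["print"])
    tree = r.refactor_string(support.reformat("print 'x'"), "<test>")
    print str(tree),        # expected output: print('x')
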
+diff -r 531f2e948299 refactor/tests/.svn/text-base/test_all_fixers.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/test_all_fixers.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,35 @@
++#!/usr/bin/env python2.5
++"""Tests that run all fixer modules over an input stream.
++
++This has been broken out into its own test module because of its
++running time.
++"""
++# Author: Collin Winter
++
++# Testing imports
++try:
++    from . import support
++except ImportError:
++    import support
++
++# Python imports
++import unittest
++
++# Local imports
++from .. import pytree
++from .. import refactor
++
++class Test_all(support.TestCase):
++    def setUp(self):
++        options = {"print_function" : False}
++        self.refactor = support.get_refactorer(options=options)
++
++    def test_all_project_files(self):
++        for filepath in support.all_project_files():
++            print "Fixing %s..." % filepath
++            self.refactor.refactor_string(open(filepath).read(), filepath)
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
+diff -r 531f2e948299 refactor/tests/.svn/text-base/test_fixers.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/test_fixers.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,4033 @@
++#!/usr/bin/env python2.5
++""" Test suite for the fixer modules """
++# Author: Collin Winter
++
++# Testing imports
++try:
++    from tests import support
++except ImportError:
++    import support
++
++# Python imports
++import os
++import unittest
++from itertools import chain
++from operator import itemgetter
++
++# Local imports
++from lib2to3 import pygram, pytree, refactor, fixer_util
++
++
++class FixerTestCase(support.TestCase):
++    def setUp(self, fix_list=None):
++        if fix_list is None:
++            fix_list = [self.fixer]
++        options = {"print_function" : False}
++        self.refactor = support.get_refactorer(fix_list, options)
++        self.fixer_log = []
++        self.filename = "<string>"
++
++        for fixer in chain(self.refactor.pre_order,
++                           self.refactor.post_order):
++            fixer.log = self.fixer_log
++
++    def _check(self, before, after):
++        before = support.reformat(before)
++        after = support.reformat(after)
++        tree = self.refactor.refactor_string(before, self.filename)
++        self.failUnlessEqual(after, str(tree))
++        return tree
++
++    def check(self, before, after, ignore_warnings=False):
++        tree = self._check(before, after)
++        self.failUnless(tree.was_changed)
++        if not ignore_warnings:
++            self.failUnlessEqual(self.fixer_log, [])
++
++    def warns(self, before, after, message, unchanged=False):
++        tree = self._check(before, after)
++        self.failUnless(message in "".join(self.fixer_log))
++        if not unchanged:
++            self.failUnless(tree.was_changed)
++
++    def warns_unchanged(self, before, message):
++        self.warns(before, before, message, unchanged=True)
++
++    def unchanged(self, before, ignore_warnings=False):
++        self._check(before, before)
++        if not ignore_warnings:
++            self.failUnlessEqual(self.fixer_log, [])
++
++    def assert_runs_after(self, *names):
++        fixes = [self.fixer]
++        fixes.extend(names)
++        options = {"print_function" : False}
++        r = support.get_refactorer(fixes, options)
++        (pre, post) = r.get_fixers()
++        n = "fix_" + self.fixer
++        if post and post[-1].__class__.__module__.endswith(n):
++            # We're the last fixer to run
++            return
++        if pre and pre[-1].__class__.__module__.endswith(n) and not post:
++            # We're the last in pre and post is empty
++            return
++        self.fail("Fixer run order (%s) is incorrect; %s should be last."\
++               %(", ".join([x.__class__.__module__ for x in (pre+post)]), n))
++
++class Test_ne(FixerTestCase):
++    fixer = "ne"
++
++    def test_basic(self):
++        b = """if x <> y:
++            pass"""
++
++        a = """if x != y:
++            pass"""
++        self.check(b, a)
++
++    def test_no_spaces(self):
++        b = """if x<>y:
++            pass"""
++
++        a = """if x!=y:
++            pass"""
++        self.check(b, a)
++
++    def test_chained(self):
++        b = """if x<>y<>z:
++            pass"""
++
++        a = """if x!=y!=z:
++            pass"""
++        self.check(b, a)
++
++class Test_has_key(FixerTestCase):
++    fixer = "has_key"
++
++    def test_1(self):
++        b = """x = d.has_key("x") or d.has_key("y")"""
++        a = """x = "x" in d or "y" in d"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """x = a.b.c.d.has_key("x") ** 3"""
++        a = """x = ("x" in a.b.c.d) ** 3"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """x = a.b.has_key(1 + 2).__repr__()"""
++        a = """x = (1 + 2 in a.b).__repr__()"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """x = a.b.has_key(1 + 2).__repr__() ** -3 ** 4"""
++        a = """x = (1 + 2 in a.b).__repr__() ** -3 ** 4"""
++        self.check(b, a)
++
++    def test_5(self):
++        b = """x = a.has_key(f or g)"""
++        a = """x = (f or g) in a"""
++        self.check(b, a)
++
++    def test_6(self):
++        b = """x = a + b.has_key(c)"""
++        a = """x = a + (c in b)"""
++        self.check(b, a)
++
++    def test_7(self):
++        b = """x = a.has_key(lambda: 12)"""
++        a = """x = (lambda: 12) in a"""
++        self.check(b, a)
++
++    def test_8(self):
++        b = """x = a.has_key(a for a in b)"""
++        a = """x = (a for a in b) in a"""
++        self.check(b, a)
++
++    def test_9(self):
++        b = """if not a.has_key(b): pass"""
++        a = """if b not in a: pass"""
++        self.check(b, a)
++
++    def test_10(self):
++        b = """if not a.has_key(b).__repr__(): pass"""
++        a = """if not (b in a).__repr__(): pass"""
++        self.check(b, a)
++
++    def test_11(self):
++        b = """if not a.has_key(b) ** 2: pass"""
++        a = """if not (b in a) ** 2: pass"""
++        self.check(b, a)
++
++class Test_apply(FixerTestCase):
++    fixer = "apply"
++
++    def test_1(self):
++        b = """x = apply(f, g + h)"""
++        a = """x = f(*g + h)"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """y = apply(f, g, h)"""
++        a = """y = f(*g, **h)"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """z = apply(fs[0], g or h, h or g)"""
++        a = """z = fs[0](*g or h, **h or g)"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """apply(f, (x, y) + t)"""
++        a = """f(*(x, y) + t)"""
++        self.check(b, a)
++
++    def test_5(self):
++        b = """apply(f, args,)"""
++        a = """f(*args)"""
++        self.check(b, a)
++
++    def test_6(self):
++        b = """apply(f, args, kwds,)"""
++        a = """f(*args, **kwds)"""
++        self.check(b, a)
++
++    # Test that complex functions are parenthesized
++
++    def test_complex_1(self):
++        b = """x = apply(f+g, args)"""
++        a = """x = (f+g)(*args)"""
++        self.check(b, a)
++
++    def test_complex_2(self):
++        b = """x = apply(f*g, args)"""
++        a = """x = (f*g)(*args)"""
++        self.check(b, a)
++
++    def test_complex_3(self):
++        b = """x = apply(f**g, args)"""
++        a = """x = (f**g)(*args)"""
++        self.check(b, a)
++
++    # But dotted names etc. not
++
++    def test_dotted_name(self):
++        b = """x = apply(f.g, args)"""
++        a = """x = f.g(*args)"""
++        self.check(b, a)
++
++    def test_subscript(self):
++        b = """x = apply(f[x], args)"""
++        a = """x = f[x](*args)"""
++        self.check(b, a)
++
++    def test_call(self):
++        b = """x = apply(f(), args)"""
++        a = """x = f()(*args)"""
++        self.check(b, a)
++
++    # Extreme case
++    def test_extreme(self):
++        b = """x = apply(a.b.c.d.e.f, args, kwds)"""
++        a = """x = a.b.c.d.e.f(*args, **kwds)"""
++        self.check(b, a)
++
++    # XXX Comments in weird places still get lost
++    def test_weird_comments(self):
++        b = """apply(   # foo
++          f, # bar
++          args)"""
++        a = """f(*args)"""
++        self.check(b, a)
++
++    # These should *not* be touched
++
++    def test_unchanged_1(self):
++        s = """apply()"""
++        self.unchanged(s)
++
++    def test_unchanged_2(self):
++        s = """apply(f)"""
++        self.unchanged(s)
++
++    def test_unchanged_3(self):
++        s = """apply(f,)"""
++        self.unchanged(s)
++
++    def test_unchanged_4(self):
++        s = """apply(f, args, kwds, extras)"""
++        self.unchanged(s)
++
++    def test_unchanged_5(self):
++        s = """apply(f, *args, **kwds)"""
++        self.unchanged(s)
++
++    def test_unchanged_6(self):
++        s = """apply(f, *args)"""
++        self.unchanged(s)
++
++    def test_unchanged_7(self):
++        s = """apply(func=f, args=args, kwds=kwds)"""
++        self.unchanged(s)
++
++    def test_unchanged_8(self):
++        s = """apply(f, args=args, kwds=kwds)"""
++        self.unchanged(s)
++
++    def test_unchanged_9(self):
++        s = """apply(f, args, kwds=kwds)"""
++        self.unchanged(s)
++
++    def test_space_1(self):
++        a = """apply(  f,  args,   kwds)"""
++        b = """f(*args, **kwds)"""
++        self.check(a, b)
++
++    def test_space_2(self):
++        a = """apply(  f  ,args,kwds   )"""
++        b = """f(*args, **kwds)"""
++        self.check(a, b)
++
++class Test_intern(FixerTestCase):
++    fixer = "intern"
++
++    def test_prefix_preservation(self):
++        b = """x =   intern(  a  )"""
++        a = """import sys\nx =   sys.intern(  a  )"""
++        self.check(b, a)
++
++        b = """y = intern("b" # test
++              )"""
++        a = """import sys\ny = sys.intern("b" # test
++              )"""
++        self.check(b, a)
++
++        b = """z = intern(a+b+c.d,   )"""
++        a = """import sys\nz = sys.intern(a+b+c.d,   )"""
++        self.check(b, a)
++
++    def test(self):
++        b = """x = intern(a)"""
++        a = """import sys\nx = sys.intern(a)"""
++        self.check(b, a)
++
++        b = """z = intern(a+b+c.d,)"""
++        a = """import sys\nz = sys.intern(a+b+c.d,)"""
++        self.check(b, a)
++
++        b = """intern("y%s" % 5).replace("y", "")"""
++        a = """import sys\nsys.intern("y%s" % 5).replace("y", "")"""
++        self.check(b, a)
++
++    # These should not be refactored
++
++    def test_unchanged(self):
++        s = """intern(a=1)"""
++        self.unchanged(s)
++
++        s = """intern(f, g)"""
++        self.unchanged(s)
++
++        s = """intern(*h)"""
++        self.unchanged(s)
++
++        s = """intern(**i)"""
++        self.unchanged(s)
++
++        s = """intern()"""
++        self.unchanged(s)
++
++class Test_reduce(FixerTestCase):
++    fixer = "reduce"
++
++    def test_simple_call(self):
++        b = "reduce(a, b, c)"
++        a = "from functools import reduce\nreduce(a, b, c)"
++        self.check(b, a)
++
++    def test_call_with_lambda(self):
++        b = "reduce(lambda x, y: x + y, seq)"
++        a = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
++        self.check(b, a)
++
++    def test_unchanged(self):
++        s = "reduce(a)"
++        self.unchanged(s)
++
++        s = "reduce(a, b=42)"
++        self.unchanged(s)
++
++        s = "reduce(a, b, c, d)"
++        self.unchanged(s)
++
++        s = "reduce(**c)"
++        self.unchanged(s)
++
++        s = "reduce()"
++        self.unchanged(s)
++
++class Test_print(FixerTestCase):
++    fixer = "print"
++
++    def test_prefix_preservation(self):
++        b = """print 1,   1+1,   1+1+1"""
++        a = """print(1,   1+1,   1+1+1)"""
++        self.check(b, a)
++
++    def test_idempotency(self):
++        s = """print()"""
++        self.unchanged(s)
++
++        s = """print('')"""
++        self.unchanged(s)
++
++    def test_idempotency_print_as_function(self):
++        print_stmt = pygram.python_grammar.keywords.pop("print")
++        try:
++            s = """print(1, 1+1, 1+1+1)"""
++            self.unchanged(s)
++
++            s = """print()"""
++            self.unchanged(s)
++
++            s = """print('')"""
++            self.unchanged(s)
++        finally:
++            pygram.python_grammar.keywords["print"] = print_stmt
++
++    def test_1(self):
++        b = """print 1, 1+1, 1+1+1"""
++        a = """print(1, 1+1, 1+1+1)"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """print 1, 2"""
++        a = """print(1, 2)"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """print"""
++        a = """print()"""
++        self.check(b, a)
++
++    def test_4(self):
++        # from bug 3000
++        b = """print whatever; print"""
++        a = """print(whatever); print()"""
++        self.check(b, a)
++
++    def test_5(self):
++        b = """print; print whatever;"""
++        a = """print(); print(whatever);"""
++
++    def test_tuple(self):
++        b = """print (a, b, c)"""
++        a = """print((a, b, c))"""
++        self.check(b, a)
++
++    # trailing commas
++
++    def test_trailing_comma_1(self):
++        b = """print 1, 2, 3,"""
++        a = """print(1, 2, 3, end=' ')"""
++        self.check(b, a)
++
++    def test_trailing_comma_2(self):
++        b = """print 1, 2,"""
++        a = """print(1, 2, end=' ')"""
++        self.check(b, a)
++
++    def test_trailing_comma_3(self):
++        b = """print 1,"""
++        a = """print(1, end=' ')"""
++        self.check(b, a)
++
++    # >> stuff
++
++    def test_vargs_without_trailing_comma(self):
++        b = """print >>sys.stderr, 1, 2, 3"""
++        a = """print(1, 2, 3, file=sys.stderr)"""
++        self.check(b, a)
++
++    def test_with_trailing_comma(self):
++        b = """print >>sys.stderr, 1, 2,"""
++        a = """print(1, 2, end=' ', file=sys.stderr)"""
++        self.check(b, a)
++
++    def test_no_trailing_comma(self):
++        b = """print >>sys.stderr, 1+1"""
++        a = """print(1+1, file=sys.stderr)"""
++        self.check(b, a)
++
++    def test_spaces_before_file(self):
++        b = """print >>  sys.stderr"""
++        a = """print(file=sys.stderr)"""
++        self.check(b, a)
++
++    # With from __future__ import print_function
++    def test_with_future_print_function(self):
++        # XXX: These tests won't actually do anything until the parser
++        #      is fixed so it won't crash when it sees print(x=y).
++        #      When #2412 is fixed, the try/except block can be taken
++        #      out and the tests can be run like normal.
++        try:
++            s = "from __future__ import print_function\n"\
++                "print('Hai!', end=' ')"
++            self.unchanged(s)
++
++            b = "print 'Hello, world!'"
++            a = "print('Hello, world!')"
++            self.check(b, a)
++
++            s = "from __future__ import *\n"\
++                "print('Hai!', end=' ')"
++            self.unchanged(s)
++        except:
++            return
++        else:
++            self.assertFalse(True, "#2421 has been fixed -- printing tests "\
++                                   "need to be updated!")
++
++class Test_exec(FixerTestCase):
++    fixer = "exec"
++
++    def test_prefix_preservation(self):
++        b = """  exec code in ns1,   ns2"""
++        a = """  exec(code, ns1,   ns2)"""
++        self.check(b, a)
++
++    def test_basic(self):
++        b = """exec code"""
++        a = """exec(code)"""
++        self.check(b, a)
++
++    def test_with_globals(self):
++        b = """exec code in ns"""
++        a = """exec(code, ns)"""
++        self.check(b, a)
++
++    def test_with_globals_locals(self):
++        b = """exec code in ns1, ns2"""
++        a = """exec(code, ns1, ns2)"""
++        self.check(b, a)
++
++    def test_complex_1(self):
++        b = """exec (a.b()) in ns"""
++        a = """exec((a.b()), ns)"""
++        self.check(b, a)
++
++    def test_complex_2(self):
++        b = """exec a.b() + c in ns"""
++        a = """exec(a.b() + c, ns)"""
++        self.check(b, a)
++
++    # These should not be touched
++
++    def test_unchanged_1(self):
++        s = """exec(code)"""
++        self.unchanged(s)
++
++    def test_unchanged_2(self):
++        s = """exec (code)"""
++        self.unchanged(s)
++
++    def test_unchanged_3(self):
++        s = """exec(code, ns)"""
++        self.unchanged(s)
++
++    def test_unchanged_4(self):
++        s = """exec(code, ns1, ns2)"""
++        self.unchanged(s)
++
++class Test_repr(FixerTestCase):
++    fixer = "repr"
++
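++    # (backtick expressions were removed in Python 3; `x` becomes repr(x),
++    # and a bare tuple inside the backticks gains its own parentheses)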
++    def test_prefix_preservation(self):
++        b = """x =   `1 + 2`"""
++        a = """x =   repr(1 + 2)"""
++        self.check(b, a)
++
++    def test_simple_1(self):
++        b = """x = `1 + 2`"""
++        a = """x = repr(1 + 2)"""
++        self.check(b, a)
++
++    def test_simple_2(self):
++        b = """y = `x`"""
++        a = """y = repr(x)"""
++        self.check(b, a)
++
++    def test_complex(self):
++        b = """z = `y`.__repr__()"""
++        a = """z = repr(y).__repr__()"""
++        self.check(b, a)
++
++    def test_tuple(self):
++        b = """x = `1, 2, 3`"""
++        a = """x = repr((1, 2, 3))"""
++        self.check(b, a)
++
++    def test_nested(self):
++        b = """x = `1 + `2``"""
++        a = """x = repr(1 + repr(2))"""
++        self.check(b, a)
++
++    def test_nested_tuples(self):
++        b = """x = `1, 2 + `3, 4``"""
++        a = """x = repr((1, 2 + repr((3, 4))))"""
++        self.check(b, a)
++
++class Test_except(FixerTestCase):
++    fixer = "except"
++
++    def test_prefix_preservation(self):
++        b = """
++            try:
++                pass
++            except (RuntimeError, ImportError),    e:
++                pass"""
++        a = """
++            try:
++                pass
++            except (RuntimeError, ImportError) as    e:
++                pass"""
++        self.check(b, a)
++
++    def test_simple(self):
++        b = """
++            try:
++                pass
++            except Foo, e:
++                pass"""
++        a = """
++            try:
++                pass
++            except Foo as e:
++                pass"""
++        self.check(b, a)
++
++    def test_simple_no_space_before_target(self):
++        b = """
++            try:
++                pass
++            except Foo,e:
++                pass"""
++        a = """
++            try:
++                pass
++            except Foo as e:
++                pass"""
++        self.check(b, a)
++
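++    # Python 3 allows only a bare name after "except ... as", so for any
++    # other target the fixer binds a placeholder name and assigns from it
++    # on the next line (unpacking exc.args for tuple or list patterns).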
++    def test_tuple_unpack(self):
++        b = """
++            def foo():
++                try:
++                    pass
++                except Exception, (f, e):
++                    pass
++                except ImportError, e:
++                    pass"""
++
++        a = """
++            def foo():
++                try:
++                    pass
++                except Exception as xxx_todo_changeme:
++                    (f, e) = xxx_todo_changeme.args
++                    pass
++                except ImportError as e:
++                    pass"""
++        self.check(b, a)
++
++    def test_multi_class(self):
++        b = """
++            try:
++                pass
++            except (RuntimeError, ImportError), e:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except (RuntimeError, ImportError) as e:
++                pass"""
++        self.check(b, a)
++
++    def test_list_unpack(self):
++        b = """
++            try:
++                pass
++            except Exception, [a, b]:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except Exception as xxx_todo_changeme:
++                [a, b] = xxx_todo_changeme.args
++                pass"""
++        self.check(b, a)
++
++    def test_weird_target_1(self):
++        b = """
++            try:
++                pass
++            except Exception, d[5]:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except Exception as xxx_todo_changeme:
++                d[5] = xxx_todo_changeme
++                pass"""
++        self.check(b, a)
++
++    def test_weird_target_2(self):
++        b = """
++            try:
++                pass
++            except Exception, a.foo:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except Exception as xxx_todo_changeme:
++                a.foo = xxx_todo_changeme
++                pass"""
++        self.check(b, a)
++
++    def test_weird_target_3(self):
++        b = """
++            try:
++                pass
++            except Exception, a().foo:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except Exception as xxx_todo_changeme:
++                a().foo = xxx_todo_changeme
++                pass"""
++        self.check(b, a)
++
++    def test_bare_except(self):
++        b = """
++            try:
++                pass
++            except Exception, a:
++                pass
++            except:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except Exception as a:
++                pass
++            except:
++                pass"""
++        self.check(b, a)
++
++    def test_bare_except_and_else_finally(self):
++        b = """
++            try:
++                pass
++            except Exception, a:
++                pass
++            except:
++                pass
++            else:
++                pass
++            finally:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except Exception as a:
++                pass
++            except:
++                pass
++            else:
++                pass
++            finally:
++                pass"""
++        self.check(b, a)
++
++    def test_multi_fixed_excepts_before_bare_except(self):
++        b = """
++            try:
++                pass
++            except TypeError, b:
++                pass
++            except Exception, a:
++                pass
++            except:
++                pass"""
++
++        a = """
++            try:
++                pass
++            except TypeError as b:
++                pass
++            except Exception as a:
++                pass
++            except:
++                pass"""
++        self.check(b, a)
++
++    # These should not be touched:
++
++    def test_unchanged_1(self):
++        s = """
++            try:
++                pass
++            except:
++                pass"""
++        self.unchanged(s)
++
++    def test_unchanged_2(self):
++        s = """
++            try:
++                pass
++            except Exception:
++                pass"""
++        self.unchanged(s)
++
++    def test_unchanged_3(self):
++        s = """
++            try:
++                pass
++            except (Exception, SystemExit):
++                pass"""
++        self.unchanged(s)
++
++class Test_raise(FixerTestCase):
++    fixer = "raise"
++
++    def test_basic(self):
++        b = """raise Exception, 5"""
++        a = """raise Exception(5)"""
++        self.check(b, a)
++
++    def test_prefix_preservation(self):
++        b = """raise Exception,5"""
++        a = """raise Exception(5)"""
++        self.check(b, a)
++
++        b = """raise   Exception,    5"""
++        a = """raise   Exception(5)"""
++        self.check(b, a)
++
++    def test_with_comments(self):
++        b = """raise Exception, 5 # foo"""
++        a = """raise Exception(5) # foo"""
++        self.check(b, a)
++
++        b = """raise E, (5, 6) % (a, b) # foo"""
++        a = """raise E((5, 6) % (a, b)) # foo"""
++        self.check(b, a)
++
++        b = """def foo():
++                    raise Exception, 5, 6 # foo"""
++        a = """def foo():
++                    raise Exception(5).with_traceback(6) # foo"""
++        self.check(b, a)
++
++    def test_tuple_value(self):
++        b = """raise Exception, (5, 6, 7)"""
++        a = """raise Exception(5, 6, 7)"""
++        self.check(b, a)
++
++    def test_tuple_detection(self):
++        b = """raise E, (5, 6) % (a, b)"""
++        a = """raise E((5, 6) % (a, b))"""
++        self.check(b, a)
++
++    def test_tuple_exc_1(self):
++        b = """raise (((E1, E2), E3), E4), V"""
++        a = """raise E1(V)"""
++        self.check(b, a)
++
++    def test_tuple_exc_2(self):
++        b = """raise (E1, (E2, E3), E4), V"""
++        a = """raise E1(V)"""
++        self.check(b, a)
++
++    # These should produce a warning
++
++    def test_string_exc(self):
++        s = """raise 'foo'"""
++        self.warns_unchanged(s, "Python 3 does not support string exceptions")
++
++    def test_string_exc_val(self):
++        s = """raise "foo", 5"""
++        self.warns_unchanged(s, "Python 3 does not support string exceptions")
++
++    def test_string_exc_val_tb(self):
++        s = """raise "foo", 5, 6"""
++        self.warns_unchanged(s, "Python 3 does not support string exceptions")
++
++    # These should result in traceback-assignment
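++    # (raise E, V, T carried an explicit traceback in Python 2; the
++    # Python 3 spelling is raise E(V).with_traceback(T))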
++
++    def test_tb_1(self):
++        b = """def foo():
++                    raise Exception, 5, 6"""
++        a = """def foo():
++                    raise Exception(5).with_traceback(6)"""
++        self.check(b, a)
++
++    def test_tb_2(self):
++        b = """def foo():
++                    a = 5
++                    raise Exception, 5, 6
++                    b = 6"""
++        a = """def foo():
++                    a = 5
++                    raise Exception(5).with_traceback(6)
++                    b = 6"""
++        self.check(b, a)
++
++    def test_tb_3(self):
++        b = """def foo():
++                    raise Exception,5,6"""
++        a = """def foo():
++                    raise Exception(5).with_traceback(6)"""
++        self.check(b, a)
++
++    def test_tb_4(self):
++        b = """def foo():
++                    a = 5
++                    raise Exception,5,6
++                    b = 6"""
++        a = """def foo():
++                    a = 5
++                    raise Exception(5).with_traceback(6)
++                    b = 6"""
++        self.check(b, a)
++
++    def test_tb_5(self):
++        b = """def foo():
++                    raise Exception, (5, 6, 7), 6"""
++        a = """def foo():
++                    raise Exception(5, 6, 7).with_traceback(6)"""
++        self.check(b, a)
++
++    def test_tb_6(self):
++        b = """def foo():
++                    a = 5
++                    raise Exception, (5, 6, 7), 6
++                    b = 6"""
++        a = """def foo():
++                    a = 5
++                    raise Exception(5, 6, 7).with_traceback(6)
++                    b = 6"""
++        self.check(b, a)
++
++class Test_throw(FixerTestCase):
++    fixer = "throw"
++
++    def test_1(self):
++        b = """g.throw(Exception, 5)"""
++        a = """g.throw(Exception(5))"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """g.throw(Exception,5)"""
++        a = """g.throw(Exception(5))"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """g.throw(Exception, (5, 6, 7))"""
++        a = """g.throw(Exception(5, 6, 7))"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """5 + g.throw(Exception, 5)"""
++        a = """5 + g.throw(Exception(5))"""
++        self.check(b, a)
++
++    # These should produce warnings
++
++    def test_warn_1(self):
++        s = """g.throw("foo")"""
++        self.warns_unchanged(s, "Python 3 does not support string exceptions")
++
++    def test_warn_2(self):
++        s = """g.throw("foo", 5)"""
++        self.warns_unchanged(s, "Python 3 does not support string exceptions")
++
++    def test_warn_3(self):
++        s = """g.throw("foo", 5, 6)"""
++        self.warns_unchanged(s, "Python 3 does not support string exceptions")
++
++    # These should not be touched
++
++    def test_untouched_1(self):
++        s = """g.throw(Exception)"""
++        self.unchanged(s)
++
++    def test_untouched_2(self):
++        s = """g.throw(Exception(5, 6))"""
++        self.unchanged(s)
++
++    def test_untouched_3(self):
++        s = """5 + g.throw(Exception(5, 6))"""
++        self.unchanged(s)
++
++    # These should result in traceback-assignment
++
++    def test_tb_1(self):
++        b = """def foo():
++                    g.throw(Exception, 5, 6)"""
++        a = """def foo():
++                    g.throw(Exception(5).with_traceback(6))"""
++        self.check(b, a)
++
++    def test_tb_2(self):
++        b = """def foo():
++                    a = 5
++                    g.throw(Exception, 5, 6)
++                    b = 6"""
++        a = """def foo():
++                    a = 5
++                    g.throw(Exception(5).with_traceback(6))
++                    b = 6"""
++        self.check(b, a)
++
++    def test_tb_3(self):
++        b = """def foo():
++                    g.throw(Exception,5,6)"""
++        a = """def foo():
++                    g.throw(Exception(5).with_traceback(6))"""
++        self.check(b, a)
++
++    def test_tb_4(self):
++        b = """def foo():
++                    a = 5
++                    g.throw(Exception,5,6)
++                    b = 6"""
++        a = """def foo():
++                    a = 5
++                    g.throw(Exception(5).with_traceback(6))
++                    b = 6"""
++        self.check(b, a)
++
++    def test_tb_5(self):
++        b = """def foo():
++                    g.throw(Exception, (5, 6, 7), 6)"""
++        a = """def foo():
++                    g.throw(Exception(5, 6, 7).with_traceback(6))"""
++        self.check(b, a)
++
++    def test_tb_6(self):
++        b = """def foo():
++                    a = 5
++                    g.throw(Exception, (5, 6, 7), 6)
++                    b = 6"""
++        a = """def foo():
++                    a = 5
++                    g.throw(Exception(5, 6, 7).with_traceback(6))
++                    b = 6"""
++        self.check(b, a)
++
++    def test_tb_7(self):
++        b = """def foo():
++                    a + g.throw(Exception, 5, 6)"""
++        a = """def foo():
++                    a + g.throw(Exception(5).with_traceback(6))"""
++        self.check(b, a)
++
++    def test_tb_8(self):
++        b = """def foo():
++                    a = 5
++                    a + g.throw(Exception, 5, 6)
++                    b = 6"""
++        a = """def foo():
++                    a = 5
++                    a + g.throw(Exception(5).with_traceback(6))
++                    b = 6"""
++        self.check(b, a)
++
++class Test_long(FixerTestCase):
++    fixer = "long"
++
++    def test_1(self):
++        b = """x = long(x)"""
++        a = """x = int(x)"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """y = isinstance(x, long)"""
++        a = """y = isinstance(x, int)"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """z = type(x) in (int, long)"""
++        a = """z = type(x) in (int, int)"""
++        self.check(b, a)
++
++    def test_unchanged(self):
++        s = """long = True"""
++        self.unchanged(s)
++
++        s = """s.long = True"""
++        self.unchanged(s)
++
++        s = """def long(): pass"""
++        self.unchanged(s)
++
++        s = """class long(): pass"""
++        self.unchanged(s)
++
++        s = """def f(long): pass"""
++        self.unchanged(s)
++
++        s = """def f(g, long): pass"""
++        self.unchanged(s)
++
++        s = """def f(x, long=True): pass"""
++        self.unchanged(s)
++
++    def test_prefix_preservation(self):
++        b = """x =   long(  x  )"""
++        a = """x =   int(  x  )"""
++        self.check(b, a)
++
++
++class Test_execfile(FixerTestCase):
++    fixer = "execfile"
++
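++    # (execfile() is gone in Python 3; the fixer emits the closest
++    # equivalent: compile the file's text and pass the result to exec())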
++    def test_conversion(self):
++        b = """execfile("fn")"""
++        a = """exec(compile(open("fn").read(), "fn", 'exec'))"""
++        self.check(b, a)
++
++        b = """execfile("fn", glob)"""
++        a = """exec(compile(open("fn").read(), "fn", 'exec'), glob)"""
++        self.check(b, a)
++
++        b = """execfile("fn", glob, loc)"""
++        a = """exec(compile(open("fn").read(), "fn", 'exec'), glob, loc)"""
++        self.check(b, a)
++
++        b = """execfile("fn", globals=glob)"""
++        a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob)"""
++        self.check(b, a)
++
++        b = """execfile("fn", locals=loc)"""
++        a = """exec(compile(open("fn").read(), "fn", 'exec'), locals=loc)"""
++        self.check(b, a)
++
++        b = """execfile("fn", globals=glob, locals=loc)"""
++        a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob, locals=loc)"""
++        self.check(b, a)
++
++    def test_spacing(self):
++        b = """execfile( "fn" )"""
++        a = """exec(compile(open( "fn" ).read(), "fn", 'exec'))"""
++        self.check(b, a)
++
++        b = """execfile("fn",  globals = glob)"""
++        a = """exec(compile(open("fn").read(), "fn", 'exec'),  globals = glob)"""
++        self.check(b, a)
++
++
++class Test_isinstance(FixerTestCase):
++    fixer = "isinstance"
++
++    def test_remove_multiple_items(self):
++        b = """isinstance(x, (int, int, int))"""
++        a = """isinstance(x, int)"""
++        self.check(b, a)
++
++        b = """isinstance(x, (int, float, int, int, float))"""
++        a = """isinstance(x, (int, float))"""
++        self.check(b, a)
++
++        b = """isinstance(x, (int, float, int, int, float, str))"""
++        a = """isinstance(x, (int, float, str))"""
++        self.check(b, a)
++
++        b = """isinstance(foo() + bar(), (x(), y(), x(), int, int))"""
++        a = """isinstance(foo() + bar(), (x(), y(), x(), int))"""
++        self.check(b, a)
++
++    def test_prefix_preservation(self):
++        b = """if    isinstance(  foo(), (  bar, bar, baz )) : pass"""
++        a = """if    isinstance(  foo(), (  bar, baz )) : pass"""
++        self.check(b, a)
++
++    def test_unchanged(self):
++        self.unchanged("isinstance(x, (str, int))")
++
++class Test_dict(FixerTestCase):
++    fixer = "dict"
++
++    def test_prefix_preservation(self):
++        b = "if   d. keys  (  )  : pass"
++        a = "if   list(d. keys  (  ))  : pass"
++        self.check(b, a)
++
++        b = "if   d. items  (  )  : pass"
++        a = "if   list(d. items  (  ))  : pass"
++        self.check(b, a)
++
++        b = "if   d. iterkeys  ( )  : pass"
++        a = "if   iter(d. keys  ( ))  : pass"
++        self.check(b, a)
++
++        b = "[i for i in    d.  iterkeys(  )  ]"
++        a = "[i for i in    d.  keys(  )  ]"
++        self.check(b, a)
++
++    def test_trailing_comment(self):
++        b = "d.keys() # foo"
++        a = "list(d.keys()) # foo"
++        self.check(b, a)
++
++        b = "d.items()  # foo"
++        a = "list(d.items())  # foo"
++        self.check(b, a)
++
++        b = "d.iterkeys()  # foo"
++        a = "iter(d.keys())  # foo"
++        self.check(b, a)
++
++        b = """[i for i in d.iterkeys() # foo
++               ]"""
++        a = """[i for i in d.keys() # foo
++               ]"""
++        self.check(b, a)
++
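++    # fixer_util.consuming_calls lists the builtins that fully consume an
++    # iterable (list, sorted, sum, ...); inside such a call d.keys() needs
++    # no extra list() wrapper.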
++    def test_unchanged(self):
++        for wrapper in fixer_util.consuming_calls:
++            s = "s = %s(d.keys())" % wrapper
++            self.unchanged(s)
++
++            s = "s = %s(d.values())" % wrapper
++            self.unchanged(s)
++
++            s = "s = %s(d.items())" % wrapper
++            self.unchanged(s)
++
++    def test_01(self):
++        b = "d.keys()"
++        a = "list(d.keys())"
++        self.check(b, a)
++
++        b = "a[0].foo().keys()"
++        a = "list(a[0].foo().keys())"
++        self.check(b, a)
++
++    def test_02(self):
++        b = "d.items()"
++        a = "list(d.items())"
++        self.check(b, a)
++
++    def test_03(self):
++        b = "d.values()"
++        a = "list(d.values())"
++        self.check(b, a)
++
++    def test_04(self):
++        b = "d.iterkeys()"
++        a = "iter(d.keys())"
++        self.check(b, a)
++
++    def test_05(self):
++        b = "d.iteritems()"
++        a = "iter(d.items())"
++        self.check(b, a)
++
++    def test_06(self):
++        b = "d.itervalues()"
++        a = "iter(d.values())"
++        self.check(b, a)
++
++    def test_07(self):
++        s = "list(d.keys())"
++        self.unchanged(s)
++
++    def test_08(self):
++        s = "sorted(d.keys())"
++        self.unchanged(s)
++
++    def test_09(self):
++        b = "iter(d.keys())"
++        a = "iter(list(d.keys()))"
++        self.check(b, a)
++
++    def test_10(self):
++        b = "foo(d.keys())"
++        a = "foo(list(d.keys()))"
++        self.check(b, a)
++
++    def test_11(self):
++        b = "for i in d.keys(): print i"
++        a = "for i in list(d.keys()): print i"
++        self.check(b, a)
++
++    def test_12(self):
++        b = "for i in d.iterkeys(): print i"
++        a = "for i in d.keys(): print i"
++        self.check(b, a)
++
++    def test_13(self):
++        b = "[i for i in d.keys()]"
++        a = "[i for i in list(d.keys())]"
++        self.check(b, a)
++
++    def test_14(self):
++        b = "[i for i in d.iterkeys()]"
++        a = "[i for i in d.keys()]"
++        self.check(b, a)
++
++    def test_15(self):
++        b = "(i for i in d.keys())"
++        a = "(i for i in list(d.keys()))"
++        self.check(b, a)
++
++    def test_16(self):
++        b = "(i for i in d.iterkeys())"
++        a = "(i for i in d.keys())"
++        self.check(b, a)
++
++    def test_17(self):
++        b = "iter(d.iterkeys())"
++        a = "iter(d.keys())"
++        self.check(b, a)
++
++    def test_18(self):
++        b = "list(d.iterkeys())"
++        a = "list(d.keys())"
++        self.check(b, a)
++
++    def test_19(self):
++        b = "sorted(d.iterkeys())"
++        a = "sorted(d.keys())"
++        self.check(b, a)
++
++    def test_20(self):
++        b = "foo(d.iterkeys())"
++        a = "foo(iter(d.keys()))"
++        self.check(b, a)
++
++    def test_21(self):
++        b = "print h.iterkeys().next()"
++        a = "print iter(h.keys()).next()"
++        self.check(b, a)
++
++    def test_22(self):
++        b = "print h.keys()[0]"
++        a = "print list(h.keys())[0]"
++        self.check(b, a)
++
++    def test_23(self):
++        b = "print list(h.iterkeys().next())"
++        a = "print list(iter(h.keys()).next())"
++        self.check(b, a)
++
++    def test_24(self):
++        b = "for x in h.keys()[0]: print x"
++        a = "for x in list(h.keys())[0]: print x"
++        self.check(b, a)
++
++class Test_xrange(FixerTestCase):
++    fixer = "xrange"
++
++    def test_prefix_preservation(self):
++        b = """x =    xrange(  10  )"""
++        a = """x =    range(  10  )"""
++        self.check(b, a)
++
++        b = """x = xrange(  1  ,  10   )"""
++        a = """x = range(  1  ,  10   )"""
++        self.check(b, a)
++
++        b = """x = xrange(  0  ,  10 ,  2 )"""
++        a = """x = range(  0  ,  10 ,  2 )"""
++        self.check(b, a)
++
++    def test_single_arg(self):
++        b = """x = xrange(10)"""
++        a = """x = range(10)"""
++        self.check(b, a)
++
++    def test_two_args(self):
++        b = """x = xrange(1, 10)"""
++        a = """x = range(1, 10)"""
++        self.check(b, a)
++
++    def test_three_args(self):
++        b = """x = xrange(0, 10, 2)"""
++        a = """x = range(0, 10, 2)"""
++        self.check(b, a)
++
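++    # range() is lazy in Python 3, so outside an iterating or consuming
++    # context the fixer wraps the call in list() to preserve Python 2's
++    # list semantics.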
++    def test_wrap_in_list(self):
++        b = """x = range(10, 3, 9)"""
++        a = """x = list(range(10, 3, 9))"""
++        self.check(b, a)
++
++        b = """x = foo(range(10, 3, 9))"""
++        a = """x = foo(list(range(10, 3, 9)))"""
++        self.check(b, a)
++
++        b = """x = range(10, 3, 9) + [4]"""
++        a = """x = list(range(10, 3, 9)) + [4]"""
++        self.check(b, a)
++
++        b = """x = range(10)[::-1]"""
++        a = """x = list(range(10))[::-1]"""
++        self.check(b, a)
++
++        b = """x = range(10)  [3]"""
++        a = """x = list(range(10))  [3]"""
++        self.check(b, a)
++
++    def test_xrange_in_for(self):
++        b = """for i in xrange(10):\n    j=i"""
++        a = """for i in range(10):\n    j=i"""
++        self.check(b, a)
++
++        b = """[i for i in xrange(10)]"""
++        a = """[i for i in range(10)]"""
++        self.check(b, a)
++
++    def test_range_in_for(self):
++        self.unchanged("for i in range(10): pass")
++        self.unchanged("[i for i in range(10)]")
++
++    def test_in_contains_test(self):
++        self.unchanged("x in range(10, 3, 9)")
++
++    def test_in_consuming_context(self):
++        for call in fixer_util.consuming_calls:
++            self.unchanged("a = %s(range(10))" % call)
++
++class Test_raw_input(FixerTestCase):
++    fixer = "raw_input"
++
++    def test_prefix_preservation(self):
++        b = """x =    raw_input(   )"""
++        a = """x =    input(   )"""
++        self.check(b, a)
++
++        b = """x = raw_input(   ''   )"""
++        a = """x = input(   ''   )"""
++        self.check(b, a)
++
++    def test_1(self):
++        b = """x = raw_input()"""
++        a = """x = input()"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """x = raw_input('')"""
++        a = """x = input('')"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """x = raw_input('prompt')"""
++        a = """x = input('prompt')"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """x = raw_input(foo(a) + 6)"""
++        a = """x = input(foo(a) + 6)"""
++        self.check(b, a)
++
++    def test_5(self):
++        b = """x = raw_input(invite).split()"""
++        a = """x = input(invite).split()"""
++        self.check(b, a)
++
++    def test_6(self):
++        b = """x = raw_input(invite) . split ()"""
++        a = """x = input(invite) . split ()"""
++        self.check(b, a)
++
++    def test_8(self):
++        b = "x = int(raw_input())"
++        a = "x = int(input())"
++        self.check(b, a)
++
++class Test_funcattrs(FixerTestCase):
++    fixer = "funcattrs"
++
++    attrs = ["closure", "doc", "name", "defaults", "code", "globals", "dict"]
++
++    def test(self):
++        for attr in self.attrs:
++            b = "a.func_%s" % attr
++            a = "a.__%s__" % attr
++            self.check(b, a)
++
++            b = "self.foo.func_%s.foo_bar" % attr
++            a = "self.foo.__%s__.foo_bar" % attr
++            self.check(b, a)
++
++    def test_unchanged(self):
++        for attr in self.attrs:
++            s = "foo(func_%s + 5)" % attr
++            self.unchanged(s)
++
++            s = "f(foo.__%s__)" % attr
++            self.unchanged(s)
++
++            s = "f(foo.__%s__.foo)" % attr
++            self.unchanged(s)
++
++class Test_xreadlines(FixerTestCase):
++    fixer = "xreadlines"
++
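++    # (file objects are their own iterators in Python 3, so f.xreadlines()
++    # becomes plain f and the bound attribute becomes f.__iter__)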
++    def test_call(self):
++        b = "for x in f.xreadlines(): pass"
++        a = "for x in f: pass"
++        self.check(b, a)
++
++        b = "for x in foo().xreadlines(): pass"
++        a = "for x in foo(): pass"
++        self.check(b, a)
++
++        b = "for x in (5 + foo()).xreadlines(): pass"
++        a = "for x in (5 + foo()): pass"
++        self.check(b, a)
++
++    def test_attr_ref(self):
++        b = "foo(f.xreadlines + 5)"
++        a = "foo(f.__iter__ + 5)"
++        self.check(b, a)
++
++        b = "foo(f().xreadlines + 5)"
++        a = "foo(f().__iter__ + 5)"
++        self.check(b, a)
++
++        b = "foo((5 + f()).xreadlines + 5)"
++        a = "foo((5 + f()).__iter__ + 5)"
++        self.check(b, a)
++
++    def test_unchanged(self):
++        s = "for x in f.xreadlines(5): pass"
++        self.unchanged(s)
++
++        s = "for x in f.xreadlines(k=5): pass"
++        self.unchanged(s)
++
++        s = "for x in f.xreadlines(*k, **v): pass"
++        self.unchanged(s)
++
++        s = "foo(xreadlines)"
++        self.unchanged(s)
++
++
++class ImportsFixerTests:
++
++    def test_import_module(self):
++        for old, new in self.modules.items():
++            b = "import %s" % old
++            a = "import %s" % new
++            self.check(b, a)
++
++            b = "import foo, %s, bar" % old
++            a = "import foo, %s, bar" % new
++            self.check(b, a)
++
++    def test_import_from(self):
++        for old, new in self.modules.items():
++            b = "from %s import foo" % old
++            a = "from %s import foo" % new
++            self.check(b, a)
++
++            b = "from %s import foo, bar" % old
++            a = "from %s import foo, bar" % new
++            self.check(b, a)
++
++            b = "from %s import (yes, no)" % old
++            a = "from %s import (yes, no)" % new
++            self.check(b, a)
++
++    def test_import_module_as(self):
++        for old, new in self.modules.items():
++            b = "import %s as foo_bar" % old
++            a = "import %s as foo_bar" % new
++            self.check(b, a)
++
++    def test_import_from_as(self):
++        for old, new in self.modules.items():
++            b = "from %s import foo as bar" % old
++            a = "from %s import foo as bar" % new
++            self.check(b, a)
++
++    def test_star(self):
++        for old, new in self.modules.items():
++            b = "from %s import *" % old
++            a = "from %s import *" % new
++            self.check(b, a)
++
++    def test_import_module_usage(self):
++        for old, new in self.modules.items():
++            b = """
++                import %s
++                foo(%s.bar)
++                """ % (old, old)
++            a = """
++                import %s
++                foo(%s.bar)
++                """ % (new, new)
++            self.check(b, a)
++
++            b = """
++                from %s import x
++                %s = 23
++                """ % (old, old)
++            a = """
++                from %s import x
++                %s = 23
++                """ % (new, old)
++            self.check(b, a)
++
++            s = """
++                def f():
++                    %s.method()
++                """ % (old,)
++            self.unchanged(s)
++
++            # test nested usage
++            b = """
++                import %s
++                %s.bar(%s.foo)
++                """ % (old, old, old)
++            a = """
++                import %s
++                %s.bar(%s.foo)
++                """ % (new, new, new)
++            self.check(b, a)
++
++            b = """
++                import %s
++                x.%s
++                """ % (old, old)
++            a = """
++                import %s
++                x.%s
++                """ % (new, old)
++            self.check(b, a)
++
++
++class Test_imports(FixerTestCase, ImportsFixerTests):
++    fixer = "imports"
++    from ..fixes.fix_imports import MAPPING as modules
++
++    def test_multiple_imports(self):
++        b = """import urlparse, cStringIO"""
++        a = """import urllib.parse, io"""
++        self.check(b, a)
++
++    def test_multiple_imports_as(self):
++        b = """
++            import copy_reg as bar, HTMLParser as foo, urlparse
++            s = urlparse.spam(bar.foo())
++            """
++        a = """
++            import copyreg as bar, html.parser as foo, urllib.parse
++            s = urllib.parse.spam(bar.foo())
++            """
++        self.check(b, a)
++
++
++class Test_imports2(FixerTestCase, ImportsFixerTests):
++    fixer = "imports2"
++    from ..fixes.fix_imports2 import MAPPING as modules
++
++
++class Test_imports_fixer_order(FixerTestCase, ImportsFixerTests):
++
++    def setUp(self):
++        super(Test_imports_fixer_order, self).setUp(['imports', 'imports2'])
++        from ..fixes.fix_imports2 import MAPPING as mapping2
++        self.modules = mapping2.copy()
++        from ..fixes.fix_imports import MAPPING as mapping1
++        for key in ('dbhash', 'dumbdbm', 'dbm', 'gdbm'):
++            self.modules[key] = mapping1[key]
++
++
++class Test_urllib(FixerTestCase):
++    fixer = "urllib"
++    from ..fixes.fix_urllib import MAPPING as modules
++
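++    # Python 2's urllib family was split across several Python 3 modules,
++    # so MAPPING takes each old module to a list of (new_module, members)
++    # pairs and imports may have to be broken apart.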
++    def test_import_module(self):
++        for old, changes in self.modules.items():
++            b = "import %s" % old
++            a = "import %s" % ", ".join(map(itemgetter(0), changes))
++            self.check(b, a)
++
++    def test_import_from(self):
++        for old, changes in self.modules.items():
++            all_members = []
++            for new, members in changes:
++                for member in members:
++                    all_members.append(member)
++                    b = "from %s import %s" % (old, member)
++                    a = "from %s import %s" % (new, member)
++                    self.check(b, a)
++
++                    s = "from foo import %s" % member
++                    self.unchanged(s)
++
++                b = "from %s import %s" % (old, ", ".join(members))
++                a = "from %s import %s" % (new, ", ".join(members))
++                self.check(b, a)
++
++                s = "from foo import %s" % ", ".join(members)
++                self.unchanged(s)
++
++            # test the breaking of a module into multiple replacements
++            b = "from %s import %s" % (old, ", ".join(all_members))
++            a = "\n".join(["from %s import %s" % (new, ", ".join(members))
++                            for (new, members) in changes])
++            self.check(b, a)
++
++    def test_import_module_as(self):
++        for old in self.modules:
++            s = "import %s as foo" % old
++            self.warns_unchanged(s, "This module is now multiple modules")
++
++    def test_import_from_as(self):
++        for old, changes in self.modules.items():
++            for new, members in changes:
++                for member in members:
++                    b = "from %s import %s as foo_bar" % (old, member)
++                    a = "from %s import %s as foo_bar" % (new, member)
++                    self.check(b, a)
++
++    def test_star(self):
++        for old in self.modules:
++            s = "from %s import *" % old
++            self.warns_unchanged(s, "Cannot handle star imports")
++
++    def test_import_module_usage(self):
++        for old, changes in self.modules.items():
++            for new, members in changes:
++                for member in members:
++                    b = """
++                        import %s
++                        foo(%s.%s)
++                        """ % (old, old, member)
++                    a = """
++                        import %s
++                        foo(%s.%s)
++                        """ % (", ".join([n for (n, mems)
++                                           in self.modules[old]]),
++                                         new, member)
++                    self.check(b, a)
++
++
++class Test_input(FixerTestCase):
++    fixer = "input"
++
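++    # (Python 2's input() evaluated what the user typed; Python 3's does
++    # not, so the fixer wraps the call in eval() to keep the old behavior)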
++    def test_prefix_preservation(self):
++        b = """x =   input(   )"""
++        a = """x =   eval(input(   ))"""
++        self.check(b, a)
++
++        b = """x = input(   ''   )"""
++        a = """x = eval(input(   ''   ))"""
++        self.check(b, a)
++
++    def test_trailing_comment(self):
++        b = """x = input()  #  foo"""
++        a = """x = eval(input())  #  foo"""
++        self.check(b, a)
++
++    def test_idempotency(self):
++        s = """x = eval(input())"""
++        self.unchanged(s)
++
++        s = """x = eval(input(''))"""
++        self.unchanged(s)
++
++        s = """x = eval(input(foo(5) + 9))"""
++        self.unchanged(s)
++
++    def test_1(self):
++        b = """x = input()"""
++        a = """x = eval(input())"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """x = input('')"""
++        a = """x = eval(input(''))"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """x = input('prompt')"""
++        a = """x = eval(input('prompt'))"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """x = input(foo(5) + 9)"""
++        a = """x = eval(input(foo(5) + 9))"""
++        self.check(b, a)
++
++class Test_tuple_params(FixerTestCase):
++    fixer = "tuple_params"
++
++    def test_unchanged_1(self):
++        s = """def foo(): pass"""
++        self.unchanged(s)
++
++    def test_unchanged_2(self):
++        s = """def foo(a, b, c): pass"""
++        self.unchanged(s)
++
++    def test_unchanged_3(self):
++        s = """def foo(a=3, b=4, c=5): pass"""
++        self.unchanged(s)
++
++    def test_1(self):
++        b = """
++            def foo(((a, b), c)):
++                x = 5"""
++
++        a = """
++            def foo(xxx_todo_changeme):
++                ((a, b), c) = xxx_todo_changeme
++                x = 5"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """
++            def foo(((a, b), c), d):
++                x = 5"""
++
++        a = """
++            def foo(xxx_todo_changeme, d):
++                ((a, b), c) = xxx_todo_changeme
++                x = 5"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """
++            def foo(((a, b), c), d) -> e:
++                x = 5"""
++
++        a = """
++            def foo(xxx_todo_changeme, d) -> e:
++                ((a, b), c) = xxx_todo_changeme
++                x = 5"""
++        self.check(b, a)
++
++    def test_semicolon(self):
++        b = """
++            def foo(((a, b), c)): x = 5; y = 7"""
++
++        a = """
++            def foo(xxx_todo_changeme): ((a, b), c) = xxx_todo_changeme; x = 5; y = 7"""
++        self.check(b, a)
++
++    def test_keywords(self):
++        b = """
++            def foo(((a, b), c), d, e=5) -> z:
++                x = 5"""
++
++        a = """
++            def foo(xxx_todo_changeme, d, e=5) -> z:
++                ((a, b), c) = xxx_todo_changeme
++                x = 5"""
++        self.check(b, a)
++
++    def test_varargs(self):
++        b = """
++            def foo(((a, b), c), d, *vargs, **kwargs) -> z:
++                x = 5"""
++
++        a = """
++            def foo(xxx_todo_changeme, d, *vargs, **kwargs) -> z:
++                ((a, b), c) = xxx_todo_changeme
++                x = 5"""
++        self.check(b, a)
++
++    def test_multi_1(self):
++        b = """
++            def foo(((a, b), c), (d, e, f)) -> z:
++                x = 5"""
++
++        a = """
++            def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
++                ((a, b), c) = xxx_todo_changeme
++                (d, e, f) = xxx_todo_changeme1
++                x = 5"""
++        self.check(b, a)
++
++    def test_multi_2(self):
++        b = """
++            def foo(x, ((a, b), c), d, (e, f, g), y) -> z:
++                x = 5"""
++
++        a = """
++            def foo(x, xxx_todo_changeme, d, xxx_todo_changeme1, y) -> z:
++                ((a, b), c) = xxx_todo_changeme
++                (e, f, g) = xxx_todo_changeme1
++                x = 5"""
++        self.check(b, a)
++
++    def test_docstring(self):
++        b = """
++            def foo(((a, b), c), (d, e, f)) -> z:
++                "foo foo foo foo"
++                x = 5"""
++
++        a = """
++            def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
++                "foo foo foo foo"
++                ((a, b), c) = xxx_todo_changeme
++                (d, e, f) = xxx_todo_changeme1
++                x = 5"""
++        self.check(b, a)
++
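++    # A lambda body cannot contain an assignment, so tuple parameters are
++    # collapsed into a single argument (named by joining the identifiers)
++    # that the rewritten body indexes into.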
++    def test_lambda_no_change(self):
++        s = """lambda x: x + 5"""
++        self.unchanged(s)
++
++    def test_lambda_parens_single_arg(self):
++        b = """lambda (x): x + 5"""
++        a = """lambda x: x + 5"""
++        self.check(b, a)
++
++        b = """lambda(x): x + 5"""
++        a = """lambda x: x + 5"""
++        self.check(b, a)
++
++        b = """lambda ((((x)))): x + 5"""
++        a = """lambda x: x + 5"""
++        self.check(b, a)
++
++        b = """lambda((((x)))): x + 5"""
++        a = """lambda x: x + 5"""
++        self.check(b, a)
++
++    def test_lambda_simple(self):
++        b = """lambda (x, y): x + f(y)"""
++        a = """lambda x_y: x_y[0] + f(x_y[1])"""
++        self.check(b, a)
++
++        b = """lambda(x, y): x + f(y)"""
++        a = """lambda x_y: x_y[0] + f(x_y[1])"""
++        self.check(b, a)
++
++        b = """lambda (((x, y))): x + f(y)"""
++        a = """lambda x_y: x_y[0] + f(x_y[1])"""
++        self.check(b, a)
++
++        b = """lambda(((x, y))): x + f(y)"""
++        a = """lambda x_y: x_y[0] + f(x_y[1])"""
++        self.check(b, a)
++
++    def test_lambda_one_tuple(self):
++        b = """lambda (x,): x + f(x)"""
++        a = """lambda x1: x1[0] + f(x1[0])"""
++        self.check(b, a)
++
++        b = """lambda (((x,))): x + f(x)"""
++        a = """lambda x1: x1[0] + f(x1[0])"""
++        self.check(b, a)
++
++    def test_lambda_simple_multi_use(self):
++        b = """lambda (x, y): x + x + f(x) + x"""
++        a = """lambda x_y: x_y[0] + x_y[0] + f(x_y[0]) + x_y[0]"""
++        self.check(b, a)
++
++    def test_lambda_simple_reverse(self):
++        b = """lambda (x, y): y + x"""
++        a = """lambda x_y: x_y[1] + x_y[0]"""
++        self.check(b, a)
++
++    def test_lambda_nested(self):
++        b = """lambda (x, (y, z)): x + y + z"""
++        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
++        self.check(b, a)
++
++        b = """lambda (((x, (y, z)))): x + y + z"""
++        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
++        self.check(b, a)
++
++    def test_lambda_nested_multi_use(self):
++        b = """lambda (x, (y, z)): x + y + f(y)"""
++        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + f(x_y_z[1][0])"""
++        self.check(b, a)
++
++class Test_methodattrs(FixerTestCase):
++    fixer = "methodattrs"
++
++    attrs = ["func", "self", "class"]
++
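++    # im_func and im_self map directly to __func__ and __self__; im_class
++    # has no Python 3 counterpart and becomes __self__.__class__.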
++    def test(self):
++        for attr in self.attrs:
++            b = "a.im_%s" % attr
++            if attr == "class":
++                a = "a.__self__.__class__"
++            else:
++                a = "a.__%s__" % attr
++            self.check(b, a)
++
++            b = "self.foo.im_%s.foo_bar" % attr
++            if attr == "class":
++                a = "self.foo.__self__.__class__.foo_bar"
++            else:
++                a = "self.foo.__%s__.foo_bar" % attr
++            self.check(b, a)
++
++    def test_unchanged(self):
++        for attr in self.attrs:
++            s = "foo(im_%s + 5)" % attr
++            self.unchanged(s)
++
++            s = "f(foo.__%s__)" % attr
++            self.unchanged(s)
++
++            s = "f(foo.__%s__.foo)" % attr
++            self.unchanged(s)
++
++class Test_next(FixerTestCase):
++    fixer = "next"
++
++    def test_1(self):
++        b = """it.next()"""
++        a = """next(it)"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """a.b.c.d.next()"""
++        a = """next(a.b.c.d)"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """(a + b).next()"""
++        a = """next((a + b))"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """a().next()"""
++        a = """next(a())"""
++        self.check(b, a)
++
++    def test_5(self):
++        b = """a().next() + b"""
++        a = """next(a()) + b"""
++        self.check(b, a)
++
++    def test_6(self):
++        b = """c(      a().next() + b)"""
++        a = """c(      next(a()) + b)"""
++        self.check(b, a)
++
++    def test_prefix_preservation_1(self):
++        b = """
++            for a in b:
++                foo(a)
++                a.next()
++            """
++        a = """
++            for a in b:
++                foo(a)
++                next(a)
++            """
++        self.check(b, a)
++
++    def test_prefix_preservation_2(self):
++        b = """
++            for a in b:
++                foo(a) # abc
++                # def
++                a.next()
++            """
++        a = """
++            for a in b:
++                foo(a) # abc
++                # def
++                next(a)
++            """
++        self.check(b, a)
++
++    def test_prefix_preservation_3(self):
++        b = """
++            next = 5
++            for a in b:
++                foo(a)
++                a.next()
++            """
++        a = """
++            next = 5
++            for a in b:
++                foo(a)
++                a.__next__()
++            """
++        self.check(b, a, ignore_warnings=True)
++
++    def test_prefix_preservation_4(self):
++        b = """
++            next = 5
++            for a in b:
++                foo(a) # abc
++                # def
++                a.next()
++            """
++        a = """
++            next = 5
++            for a in b:
++                foo(a) # abc
++                # def
++                a.__next__()
++            """
++        self.check(b, a, ignore_warnings=True)
++
++    def test_prefix_preservation_5(self):
++        b = """
++            next = 5
++            for a in b:
++                foo(foo(a), # abc
++                    a.next())
++            """
++        a = """
++            next = 5
++            for a in b:
++                foo(foo(a), # abc
++                    a.__next__())
++            """
++        self.check(b, a, ignore_warnings=True)
++
++    def test_prefix_preservation_6(self):
++        b = """
++            for a in b:
++                foo(foo(a), # abc
++                    a.next())
++            """
++        a = """
++            for a in b:
++                foo(foo(a), # abc
++                    next(a))
++            """
++        self.check(b, a)
++
++    def test_method_1(self):
++        b = """
++            class A:
++                def next(self):
++                    pass
++            """
++        a = """
++            class A:
++                def __next__(self):
++                    pass
++            """
++        self.check(b, a)
++
++    def test_method_2(self):
++        b = """
++            class A(object):
++                def next(self):
++                    pass
++            """
++        a = """
++            class A(object):
++                def __next__(self):
++                    pass
++            """
++        self.check(b, a)
++
++    def test_method_3(self):
++        b = """
++            class A:
++                def next(x):
++                    pass
++            """
++        a = """
++            class A:
++                def __next__(x):
++                    pass
++            """
++        self.check(b, a)
++
++    def test_method_4(self):
++        b = """
++            class A:
++                def __init__(self, foo):
++                    self.foo = foo
++
++                def next(self):
++                    pass
++
++                def __iter__(self):
++                    return self
++            """
++        a = """
++            class A:
++                def __init__(self, foo):
++                    self.foo = foo
++
++                def __next__(self):
++                    pass
++
++                def __iter__(self):
++                    return self
++            """
++        self.check(b, a)
++
++    def test_method_unchanged(self):
++        s = """
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.unchanged(s)
++
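++    # Any binding of the name "next" makes x.next() ambiguous, so the fixer
++    # stops rewriting calls to the next() builtin and emits a "possibly
++    # shadowed" warning instead.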
++    def test_shadowing_assign_simple(self):
++        s = """
++            next = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_assign_tuple_1(self):
++        s = """
++            (next, a) = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_assign_tuple_2(self):
++        s = """
++            (a, (b, (next, c)), a) = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_assign_list_1(self):
++        s = """
++            [next, a] = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_assign_list_2(self):
++        s = """
++            [a, [b, [next, c]], a] = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_builtin_assign(self):
++        s = """
++            def foo():
++                __builtin__.next = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_builtin_assign_in_tuple(self):
++        s = """
++            def foo():
++                (a, __builtin__.next) = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_builtin_assign_in_list(self):
++        s = """
++            def foo():
++                [a, __builtin__.next] = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_assign_to_next(self):
++        s = """
++            def foo():
++                A.next = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.unchanged(s)
++
++    def test_assign_to_next_in_tuple(self):
++        s = """
++            def foo():
++                (a, A.next) = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.unchanged(s)
++
++    def test_assign_to_next_in_list(self):
++        s = """
++            def foo():
++                [a, A.next] = foo
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.unchanged(s)
++
++    def test_shadowing_import_1(self):
++        s = """
++            import foo.bar as next
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_import_2(self):
++        s = """
++            import bar, bar.foo as next
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_import_3(self):
++        s = """
++            import bar, bar.foo as next, baz
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_import_from_1(self):
++        s = """
++            from x import next
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_import_from_2(self):
++        s = """
++            from x.a import next
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_import_from_3(self):
++        s = """
++            from x import a, next, b
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_import_from_4(self):
++        s = """
++            from x.a import a, next, b
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_funcdef_1(self):
++        s = """
++            def next(a):
++                pass
++
++            class A:
++                def next(self, a, b):
++                    pass
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_funcdef_2(self):
++        b = """
++            def next(a):
++                pass
++
++            class A:
++                def next(self):
++                    pass
++
++            it.next()
++            """
++        a = """
++            def next(a):
++                pass
++
++            class A:
++                def __next__(self):
++                    pass
++
++            it.__next__()
++            """
++        self.warns(b, a, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_global_1(self):
++        s = """
++            def f():
++                global next
++                next = 5
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_global_2(self):
++        s = """
++            def f():
++                global a, next, b
++                next = 5
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_for_simple(self):
++        s = """
++            for next in it():
++                pass
++
++            b = 5
++            c = 6
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_for_tuple_1(self):
++        s = """
++            for next, b in it():
++                pass
++
++            b = 5
++            c = 6
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_shadowing_for_tuple_2(self):
++        s = """
++            for a, (next, c), b in it():
++                pass
++
++            b = 5
++            c = 6
++            """
++        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
++
++    def test_noncall_access_1(self):
++        b = """gnext = g.next"""
++        a = """gnext = g.__next__"""
++        self.check(b, a)
++
++    def test_noncall_access_2(self):
++        b = """f(g.next + 5)"""
++        a = """f(g.__next__ + 5)"""
++        self.check(b, a)
++
++    def test_noncall_access_3(self):
++        b = """f(g().next + 5)"""
++        a = """f(g().__next__ + 5)"""
++        self.check(b, a)
++
++class Test_nonzero(FixerTestCase):
++    fixer = "nonzero"
++
++    def test_1(self):
++        b = """
++            class A:
++                def __nonzero__(self):
++                    pass
++            """
++        a = """
++            class A:
++                def __bool__(self):
++                    pass
++            """
++        self.check(b, a)
++
++    def test_2(self):
++        b = """
++            class A(object):
++                def __nonzero__(self):
++                    pass
++            """
++        a = """
++            class A(object):
++                def __bool__(self):
++                    pass
++            """
++        self.check(b, a)
++
++    def test_unchanged_1(self):
++        s = """
++            class A(object):
++                def __bool__(self):
++                    pass
++            """
++        self.unchanged(s)
++
++    def test_unchanged_2(self):
++        s = """
++            class A(object):
++                def __nonzero__(self, a):
++                    pass
++            """
++        self.unchanged(s)
++
++    def test_unchanged_func(self):
++        s = """
++            def __nonzero__(self):
++                pass
++            """
++        self.unchanged(s)
++
++class Test_numliterals(FixerTestCase):
++    fixer = "numliterals"
++
++    def test_octal_1(self):
++        b = """0755"""
++        a = """0o755"""
++        self.check(b, a)
++
++    def test_long_int_1(self):
++        b = """a = 12L"""
++        a = """a = 12"""
++        self.check(b, a)
++
++    def test_long_int_2(self):
++        b = """a = 12l"""
++        a = """a = 12"""
++        self.check(b, a)
++
++    def test_long_hex(self):
++        b = """b = 0x12l"""
++        a = """b = 0x12"""
++        self.check(b, a)
++
++    def test_comments_and_spacing(self):
++        b = """b =   0x12L"""
++        a = """b =   0x12"""
++        self.check(b, a)
++
++        b = """b = 0755 # spam"""
++        a = """b = 0o755 # spam"""
++        self.check(b, a)
++
++    def test_unchanged_int(self):
++        s = """5"""
++        self.unchanged(s)
++
++    def test_unchanged_float(self):
++        s = """5.0"""
++        self.unchanged(s)
++
++    def test_unchanged_octal(self):
++        s = """0o755"""
++        self.unchanged(s)
++
++    def test_unchanged_hex(self):
++        s = """0xABC"""
++        self.unchanged(s)
++
++    def test_unchanged_exp(self):
++        s = """5.0e10"""
++        self.unchanged(s)
++
++    def test_unchanged_complex_int(self):
++        s = """5 + 4j"""
++        self.unchanged(s)
++
++    def test_unchanged_complex_float(self):
++        s = """5.4 + 4.9j"""
++        self.unchanged(s)
++
++    def test_unchanged_complex_bare(self):
++        s = """4j"""
++        self.unchanged(s)
++        s = """4.4j"""
++        self.unchanged(s)
++
++class Test_renames(FixerTestCase):
++    fixer = "renames"
++
++    modules = {"sys":  ("maxint", "maxsize"),
++              }
++
++    def test_import_from(self):
++        for mod, (old, new) in self.modules.items():
++            b = "from %s import %s" % (mod, old)
++            a = "from %s import %s" % (mod, new)
++            self.check(b, a)
++
++            s = "from foo import %s" % old
++            self.unchanged(s)
++
++    def test_import_from_as(self):
++        for mod, (old, new) in self.modules.items():
++            b = "from %s import %s as foo_bar" % (mod, old)
++            a = "from %s import %s as foo_bar" % (mod, new)
++            self.check(b, a)
++
++    def test_import_module_usage(self):
++        for mod, (old, new) in self.modules.items():
++            b = """
++                import %s
++                foo(%s, %s.%s)
++                """ % (mod, mod, mod, old)
++            a = """
++                import %s
++                foo(%s, %s.%s)
++                """ % (mod, mod, mod, new)
++            self.check(b, a)
++
++    def XXX_test_from_import_usage(self):
++        # not implemented yet
++        for mod, (old, new) in self.modules.items():
++            b = """
++                from %s import %s
++                foo(%s, %s)
++                """ % (mod, old, mod, old)
++            a = """
++                from %s import %s
++                foo(%s, %s)
++                """ % (mod, new, mod, new)
++            self.check(b, a)
++
++class Test_unicode(FixerTestCase):
++    fixer = "unicode"
++
++    def test_unicode_call(self):
++        b = """unicode(x, y, z)"""
++        a = """str(x, y, z)"""
++        self.check(b, a)
++
++    def test_unicode_literal_1(self):
++        b = '''u"x"'''
++        a = '''"x"'''
++        self.check(b, a)
++
++    def test_unicode_literal_2(self):
++        b = """ur'x'"""
++        a = """r'x'"""
++        self.check(b, a)
++
++    def test_unicode_literal_3(self):
++        b = """UR'''x'''"""
++        a = """R'''x'''"""
++        self.check(b, a)
++
++class Test_callable(FixerTestCase):
++    fixer = "callable"
++
++    def test_prefix_preservation(self):
++        b = """callable(    x)"""
++        a = """hasattr(    x, '__call__')"""
++        self.check(b, a)
++
++        b = """if     callable(x): pass"""
++        a = """if     hasattr(x, '__call__'): pass"""
++        self.check(b, a)
++
++    def test_callable_call(self):
++        b = """callable(x)"""
++        a = """hasattr(x, '__call__')"""
++        self.check(b, a)
++
++    def test_callable_should_not_change(self):
++        a = """callable(*x)"""
++        self.unchanged(a)
++
++        a = """callable(x, y)"""
++        self.unchanged(a)
++
++        a = """callable(x, kw=y)"""
++        self.unchanged(a)
++
++        a = """callable()"""
++        self.unchanged(a)
++
++class Test_filter(FixerTestCase):
++    fixer = "filter"
++
++    def test_prefix_preservation(self):
++        b = """x =   filter(    foo,     'abc'   )"""
++        a = """x =   list(filter(    foo,     'abc'   ))"""
++        self.check(b, a)
++
++        b = """x =   filter(  None , 'abc'  )"""
++        a = """x =   [_f for _f in 'abc' if _f]"""
++        self.check(b, a)
++
++    def test_filter_basic(self):
++        b = """x = filter(None, 'abc')"""
++        a = """x = [_f for _f in 'abc' if _f]"""
++        self.check(b, a)
++
++        b = """x = len(filter(f, 'abc'))"""
++        a = """x = len(list(filter(f, 'abc')))"""
++        self.check(b, a)
++
++        b = """x = filter(lambda x: x%2 == 0, range(10))"""
++        a = """x = [x for x in range(10) if x%2 == 0]"""
++        self.check(b, a)
++
++        # Note the parens around x
++        b = """x = filter(lambda (x): x%2 == 0, range(10))"""
++        a = """x = [x for x in range(10) if x%2 == 0]"""
++        self.check(b, a)
++
++        # XXX This (rare) case is not supported
++##         b = """x = filter(f, 'abc')[0]"""
++##         a = """x = list(filter(f, 'abc'))[0]"""
++##         self.check(b, a)
++
++    def test_filter_nochange(self):
++        a = """b.join(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """(a + foo(5)).join(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """iter(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """list(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """list(filter(f, 'abc'))[0]"""
++        self.unchanged(a)
++        a = """set(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """set(filter(f, 'abc')).pop()"""
++        self.unchanged(a)
++        a = """tuple(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """any(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """all(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """sum(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """sorted(filter(f, 'abc'))"""
++        self.unchanged(a)
++        a = """sorted(filter(f, 'abc'), key=blah)"""
++        self.unchanged(a)
++        a = """sorted(filter(f, 'abc'), key=blah)[0]"""
++        self.unchanged(a)
++        a = """for i in filter(f, 'abc'): pass"""
++        self.unchanged(a)
++        a = """[x for x in filter(f, 'abc')]"""
++        self.unchanged(a)
++        a = """(x for x in filter(f, 'abc'))"""
++        self.unchanged(a)
++
++    def test_future_builtins(self):
++        a = "from future_builtins import spam, filter; filter(f, 'ham')"
++        self.unchanged(a)
++
++        b = """from future_builtins import spam; x = filter(f, 'abc')"""
++        a = """from future_builtins import spam; x = list(filter(f, 'abc'))"""
++        self.check(b, a)
++
++        a = "from future_builtins import *; filter(f, 'ham')"
++        self.unchanged(a)
++
++class Test_map(FixerTestCase):
++    fixer = "map"
++
++    def check(self, b, a):
++        self.unchanged("from future_builtins import map; " + b, a)
++        super(Test_map, self).check(b, a)
++
++    def test_prefix_preservation(self):
++        b = """x =    map(   f,    'abc'   )"""
++        a = """x =    list(map(   f,    'abc'   ))"""
++        self.check(b, a)
++
++    def test_trailing_comment(self):
++        b = """x = map(f, 'abc')   #   foo"""
++        a = """x = list(map(f, 'abc'))   #   foo"""
++        self.check(b, a)
++
++    def test_map_basic(self):
++        b = """x = map(f, 'abc')"""
++        a = """x = list(map(f, 'abc'))"""
++        self.check(b, a)
++
++        b = """x = len(map(f, 'abc', 'def'))"""
++        a = """x = len(list(map(f, 'abc', 'def')))"""
++        self.check(b, a)
++
++        b = """x = map(None, 'abc')"""
++        a = """x = list('abc')"""
++        self.check(b, a)
++
++        b = """x = map(None, 'abc', 'def')"""
++        a = """x = list(map(None, 'abc', 'def'))"""
++        self.check(b, a)
++
++        b = """x = map(lambda x: x+1, range(4))"""
++        a = """x = [x+1 for x in range(4)]"""
++        self.check(b, a)
++
++        # Note the parens around x
++        b = """x = map(lambda (x): x+1, range(4))"""
++        a = """x = [x+1 for x in range(4)]"""
++        self.check(b, a)
++
++        b = """
++            foo()
++            # foo
++            map(f, x)
++            """
++        a = """
++            foo()
++            # foo
++            list(map(f, x))
++            """
++        self.warns(b, a, "You should use a for loop here")
++
++        # XXX This (rare) case is not supported
++##         b = """x = map(f, 'abc')[0]"""
++##         a = """x = list(map(f, 'abc'))[0]"""
++##         self.check(b, a)
++
++    def test_map_nochange(self):
++        a = """b.join(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """(a + foo(5)).join(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """iter(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """list(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """list(map(f, 'abc'))[0]"""
++        self.unchanged(a)
++        a = """set(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """set(map(f, 'abc')).pop()"""
++        self.unchanged(a)
++        a = """tuple(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """any(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """all(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """sum(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """sorted(map(f, 'abc'))"""
++        self.unchanged(a)
++        a = """sorted(map(f, 'abc'), key=blah)"""
++        self.unchanged(a)
++        a = """sorted(map(f, 'abc'), key=blah)[0]"""
++        self.unchanged(a)
++        a = """for i in map(f, 'abc'): pass"""
++        self.unchanged(a)
++        a = """[x for x in map(f, 'abc')]"""
++        self.unchanged(a)
++        a = """(x for x in map(f, 'abc'))"""
++        self.unchanged(a)
++
++    def test_future_builtins(self):
++        a = "from future_builtins import spam, map, eggs; map(f, 'ham')"
++        self.unchanged(a)
++
++        b = """from future_builtins import spam, eggs; x = map(f, 'abc')"""
++        a = """from future_builtins import spam, eggs; x = list(map(f, 'abc'))"""
++        self.check(b, a)
++
++        a = "from future_builtins import *; map(f, 'ham')"
++        self.unchanged(a)
++
++class Test_zip(FixerTestCase):
++    fixer = "zip"
++
++    def check(self, b, a):
++        self.unchanged("from future_builtins import zip; " + b, a)
++        super(Test_zip, self).check(b, a)
++
++    def test_zip_basic(self):
++        b = """x = zip(a, b, c)"""
++        a = """x = list(zip(a, b, c))"""
++        self.check(b, a)
++
++        b = """x = len(zip(a, b))"""
++        a = """x = len(list(zip(a, b)))"""
++        self.check(b, a)
++
++    def test_zip_nochange(self):
++        a = """b.join(zip(a, b))"""
++        self.unchanged(a)
++        a = """(a + foo(5)).join(zip(a, b))"""
++        self.unchanged(a)
++        a = """iter(zip(a, b))"""
++        self.unchanged(a)
++        a = """list(zip(a, b))"""
++        self.unchanged(a)
++        a = """list(zip(a, b))[0]"""
++        self.unchanged(a)
++        a = """set(zip(a, b))"""
++        self.unchanged(a)
++        a = """set(zip(a, b)).pop()"""
++        self.unchanged(a)
++        a = """tuple(zip(a, b))"""
++        self.unchanged(a)
++        a = """any(zip(a, b))"""
++        self.unchanged(a)
++        a = """all(zip(a, b))"""
++        self.unchanged(a)
++        a = """sum(zip(a, b))"""
++        self.unchanged(a)
++        a = """sorted(zip(a, b))"""
++        self.unchanged(a)
++        a = """sorted(zip(a, b), key=blah)"""
++        self.unchanged(a)
++        a = """sorted(zip(a, b), key=blah)[0]"""
++        self.unchanged(a)
++        a = """for i in zip(a, b): pass"""
++        self.unchanged(a)
++        a = """[x for x in zip(a, b)]"""
++        self.unchanged(a)
++        a = """(x for x in zip(a, b))"""
++        self.unchanged(a)
++
++    def test_future_builtins(self):
++        a = "from future_builtins import spam, zip, eggs; zip(a, b)"
++        self.unchanged(a)
++
++        b = """from future_builtins import spam, eggs; x = zip(a, b)"""
++        a = """from future_builtins import spam, eggs; x = list(zip(a, b))"""
++        self.check(b, a)
++
++        a = "from future_builtins import *; zip(a, b)"
++        self.unchanged(a)
++
++class Test_standarderror(FixerTestCase):
++    fixer = "standarderror"
++
++    def test(self):
++        b = """x =    StandardError()"""
++        a = """x =    Exception()"""
++        self.check(b, a)
++
++        b = """x = StandardError(a, b, c)"""
++        a = """x = Exception(a, b, c)"""
++        self.check(b, a)
++
++        b = """f(2 + StandardError(a, b, c))"""
++        a = """f(2 + Exception(a, b, c))"""
++        self.check(b, a)
++
++class Test_types(FixerTestCase):
++    fixer = "types"
++
++    def test_basic_types_convert(self):
++        b = """types.StringType"""
++        a = """bytes"""
++        self.check(b, a)
++
++        b = """types.DictType"""
++        a = """dict"""
++        self.check(b, a)
++
++        b = """types . IntType"""
++        a = """int"""
++        self.check(b, a)
++
++        b = """types.ListType"""
++        a = """list"""
++        self.check(b, a)
++
++        b = """types.LongType"""
++        a = """int"""
++        self.check(b, a)
++
++        b = """types.NoneType"""
++        a = """type(None)"""
++        self.check(b, a)
++
++class Test_idioms(FixerTestCase):
++    fixer = "idioms"
++
++    def test_while(self):
++        b = """while 1: foo()"""
++        a = """while True: foo()"""
++        self.check(b, a)
++
++        b = """while   1: foo()"""
++        a = """while   True: foo()"""
++        self.check(b, a)
++
++        b = """
++            while 1:
++                foo()
++            """
++        a = """
++            while True:
++                foo()
++            """
++        self.check(b, a)
++
++    def test_while_unchanged(self):
++        s = """while 11: foo()"""
++        self.unchanged(s)
++
++        s = """while 0: foo()"""
++        self.unchanged(s)
++
++        s = """while foo(): foo()"""
++        self.unchanged(s)
++
++        s = """while []: foo()"""
++        self.unchanged(s)
++
++    def test_eq_simple(self):
++        b = """type(x) == T"""
++        a = """isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   type(x) == T: pass"""
++        a = """if   isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_eq_reverse(self):
++        b = """T == type(x)"""
++        a = """isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   T == type(x): pass"""
++        a = """if   isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_eq_expression(self):
++        b = """type(x+y) == d.get('T')"""
++        a = """isinstance(x+y, d.get('T'))"""
++        self.check(b, a)
++
++        b = """type(   x  +  y) == d.get('T')"""
++        a = """isinstance(x  +  y, d.get('T'))"""
++        self.check(b, a)
++
++    def test_is_simple(self):
++        b = """type(x) is T"""
++        a = """isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   type(x) is T: pass"""
++        a = """if   isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_is_reverse(self):
++        b = """T is type(x)"""
++        a = """isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   T is type(x): pass"""
++        a = """if   isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_is_expression(self):
++        b = """type(x+y) is d.get('T')"""
++        a = """isinstance(x+y, d.get('T'))"""
++        self.check(b, a)
++
++        b = """type(   x  +  y) is d.get('T')"""
++        a = """isinstance(x  +  y, d.get('T'))"""
++        self.check(b, a)
++
++    def test_is_not_simple(self):
++        b = """type(x) is not T"""
++        a = """not isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   type(x) is not T: pass"""
++        a = """if   not isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_is_not_reverse(self):
++        b = """T is not type(x)"""
++        a = """not isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   T is not type(x): pass"""
++        a = """if   not isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_is_not_expression(self):
++        b = """type(x+y) is not d.get('T')"""
++        a = """not isinstance(x+y, d.get('T'))"""
++        self.check(b, a)
++
++        b = """type(   x  +  y) is not d.get('T')"""
++        a = """not isinstance(x  +  y, d.get('T'))"""
++        self.check(b, a)
++
++    def test_ne_simple(self):
++        b = """type(x) != T"""
++        a = """not isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   type(x) != T: pass"""
++        a = """if   not isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_ne_reverse(self):
++        b = """T != type(x)"""
++        a = """not isinstance(x, T)"""
++        self.check(b, a)
++
++        b = """if   T != type(x): pass"""
++        a = """if   not isinstance(x, T): pass"""
++        self.check(b, a)
++
++    def test_ne_expression(self):
++        b = """type(x+y) != d.get('T')"""
++        a = """not isinstance(x+y, d.get('T'))"""
++        self.check(b, a)
++
++        b = """type(   x  +  y) != d.get('T')"""
++        a = """not isinstance(x  +  y, d.get('T'))"""
++        self.check(b, a)
++
++    def test_type_unchanged(self):
++        a = """type(x).__name__"""
++        self.unchanged(a)
++
++    def test_sort_list_call(self):
++        b = """
++            v = list(t)
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(t)
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            v = list(foo(b) + d)
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(foo(b) + d)
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            while x:
++                v = list(t)
++                v.sort()
++                foo(v)
++            """
++        a = """
++            while x:
++                v = sorted(t)
++                foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            v = list(t)
++            # foo
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(t)
++            # foo
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = r"""
++            v = list(   t)
++            v.sort()
++            foo(v)
++            """
++        a = r"""
++            v = sorted(   t)
++            foo(v)
++            """
++        self.check(b, a)
++
++    def test_sort_simple_expr(self):
++        b = """
++            v = t
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(t)
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            v = foo(b)
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(foo(b))
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            v = b.keys()
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(b.keys())
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            v = foo(b) + d
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(foo(b) + d)
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            while x:
++                v = t
++                v.sort()
++                foo(v)
++            """
++        a = """
++            while x:
++                v = sorted(t)
++                foo(v)
++            """
++        self.check(b, a)
++
++        b = """
++            v = t
++            # foo
++            v.sort()
++            foo(v)
++            """
++        a = """
++            v = sorted(t)
++            # foo
++            foo(v)
++            """
++        self.check(b, a)
++
++        b = r"""
++            v =   t
++            v.sort()
++            foo(v)
++            """
++        a = r"""
++            v =   sorted(t)
++            foo(v)
++            """
++        self.check(b, a)
++
++    def test_sort_unchanged(self):
++        s = """
++            v = list(t)
++            w.sort()
++            foo(w)
++            """
++        self.unchanged(s)
++
++        s = """
++            v = list(t)
++            v.sort(u)
++            foo(v)
++            """
++        self.unchanged(s)
++
++class Test_basestring(FixerTestCase):
++    fixer = "basestring"
++
++    def test_basestring(self):
++        b = """isinstance(x, basestring)"""
++        a = """isinstance(x, str)"""
++        self.check(b, a)
++
++class Test_buffer(FixerTestCase):
++    fixer = "buffer"
++
++    def test_buffer(self):
++        b = """x = buffer(y)"""
++        a = """x = memoryview(y)"""
++        self.check(b, a)
++
++class Test_future(FixerTestCase):
++    fixer = "future"
++
++    def test_future(self):
++        b = """from __future__ import braces"""
++        a = """"""
++        self.check(b, a)
++
++        b = """# comment\nfrom __future__ import braces"""
++        a = """# comment\n"""
++        self.check(b, a)
++
++        b = """from __future__ import braces\n# comment"""
++        a = """\n# comment"""
++        self.check(b, a)
++
++    def test_run_order(self):
++        self.assert_runs_after('print')
++
++class Test_itertools(FixerTestCase):
++    fixer = "itertools"
++
++    def checkall(self, before, after):
++        # Each of the three functions must be checked both with and
++        # without the "itertools." prefix; these loops cover all six
++        # combinations.
++        for i in ('itertools.', ''):
++            for f in ('map', 'filter', 'zip'):
++                b = before %(i+'i'+f)
++                a = after %(f)
++                self.check(b, a)
++
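++    # For example, with before = "%s(f, a)" and after = "%s(f, a)",
++    # checkall() verifies that "itertools.imap(f, a)" becomes
++    # "map(f, a)", and likewise for ifilter/izip, with and without
++    # the "itertools." prefix.
++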
++    def test_0(self):
++        # A simple example -- test_1 covers exactly the same thing,
++        # but it's not quite as clear.
++        b = "itertools.izip(a, b)"
++        a = "zip(a, b)"
++        self.check(b, a)
++
++    def test_1(self):
++        b = """%s(f, a)"""
++        a = """%s(f, a)"""
++        self.checkall(b, a)
++
++    def test_2(self):
++        b = """itertools.ifilterfalse(a, b)"""
++        a = """itertools.filterfalse(a, b)"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """ifilterfalse(a, b)"""
++        a = """filterfalse(a, b)"""
++        self.check(b, a)
++
++    def test_space_1(self):
++        b = """    %s(f, a)"""
++        a = """    %s(f, a)"""
++        self.checkall(b, a)
++
++    def test_space_2(self):
++        b = """    itertools.ifilterfalse(a, b)"""
++        a = """    itertools.filterfalse(a, b)"""
++        self.check(b, a)
++
++    def test_run_order(self):
++        self.assert_runs_after('map', 'zip', 'filter')
++
++class Test_itertools_imports(FixerTestCase):
++    fixer = 'itertools_imports'
++
++    def test_reduced(self):
++        b = "from itertools import imap, izip, foo"
++        a = "from itertools import foo"
++        self.check(b, a)
++
++        b = "from itertools import bar, imap, izip, foo"
++        a = "from itertools import bar, foo"
++        self.check(b, a)
++
++    def test_comments(self):
++        b = "#foo\nfrom itertools import imap, izip"
++        a = "#foo\n"
++        self.check(b, a)
++
++    def test_none(self):
++        b = "from itertools import imap, izip"
++        a = ""
++        self.check(b, a)
++
++        b = "from itertools import izip"
++        a = ""
++        self.check(b, a)
++
++    def test_import_as(self):
++        b = "from itertools import izip, bar as bang, imap"
++        a = "from itertools import bar as bang"
++        self.check(b, a)
++
++        b = "from itertools import izip as _zip, imap, bar"
++        a = "from itertools import bar"
++        self.check(b, a)
++
++        b = "from itertools import imap as _map"
++        a = ""
++        self.check(b, a)
++
++        b = "from itertools import imap as _map, izip as _zip"
++        a = ""
++        self.check(b, a)
++
++        s = "from itertools import bar as bang"
++        self.unchanged(s)
++
++    def test_ifilter(self):
++        b = "from itertools import ifilterfalse"
++        a = "from itertools import filterfalse"
++        self.check(b, a)
++
++        b = "from itertools import imap, ifilterfalse, foo"
++        a = "from itertools import filterfalse, foo"
++        self.check(b, a)
++
++        b = "from itertools import bar, ifilterfalse, foo"
++        a = "from itertools import bar, filterfalse, foo"
++        self.check(b, a)
++
++
++    def test_unchanged(self):
++        s = "from itertools import foo"
++        self.unchanged(s)
++
++class Test_import(FixerTestCase):
++    fixer = "import"
++
++    def setUp(self):
++        super(Test_import, self).setUp()
++        # Replace fix_import's exists() with a stub so the tests can
++        # record which paths are probed and control which ones "exist"
++        self.files_checked = []
++        self.present_files = set()
++        self.always_exists = True
++        def fake_exists(name):
++            self.files_checked.append(name)
++            return self.always_exists or (name in self.present_files)
++
++        from ..fixes import fix_import
++        fix_import.exists = fake_exists
++
++    def tearDown(self):
++        from ..fixes import fix_import
++        fix_import.exists = os.path.exists
++
++    def check_both(self, b, a):
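++        # The import should be rewritten when the target is visible on
++        # disk, and left alone when it is not.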
++        self.always_exists = True
++        super(Test_import, self).check(b, a)
++        self.always_exists = False
++        super(Test_import, self).unchanged(b)
++
++    def test_files_checked(self):
++        def p(path):
++            # Takes a unix path and returns a path with correct separators
++            return os.path.sep.join(path.split("/"))
++
++        self.always_exists = False
++        self.present_files = set(['__init__.py'])
++        expected_extensions = ('.py', os.path.sep, '.pyc', '.so',
++                               '.sl', '.pyd')
++        names_to_test = (p("/spam/eggs.py"), "ni.py", p("../../shrubbery.py"))
++
++        for name in names_to_test:
++            self.files_checked = []
++            self.filename = name
++            self.unchanged("import jam")
++
++            if os.path.dirname(name):
++                name = os.path.dirname(name) + '/jam'
++            else:
++                name = 'jam'
++            expected_checks = set(name + ext for ext in expected_extensions)
++            expected_checks.add("__init__.py")
++
++            self.assertEqual(set(self.files_checked), expected_checks)
++
++    def test_not_in_package(self):
++        s = "import bar"
++        self.always_exists = False
++        self.present_files = set(["bar.py"])
++        self.unchanged(s)
++
++    def test_in_package(self):
++        b = "import bar"
++        a = "from . import bar"
++        self.always_exists = False
++        self.present_files = set(["__init__.py", "bar.py"])
++        self.check(b, a)
++
++    def test_comments_and_indent(self):
++        b = "import bar # Foo"
++        a = "from . import bar # Foo"
++        self.check(b, a)
++
++    def test_from(self):
++        b = "from foo import bar, baz"
++        a = "from .foo import bar, baz"
++        self.check_both(b, a)
++
++        b = "from foo import bar"
++        a = "from .foo import bar"
++        self.check_both(b, a)
++
++        b = "from foo import (bar, baz)"
++        a = "from .foo import (bar, baz)"
++        self.check_both(b, a)
++
++    def test_dotted_from(self):
++        b = "from green.eggs import ham"
++        a = "from .green.eggs import ham"
++        self.check_both(b, a)
++
++    def test_from_as(self):
++        b = "from green.eggs import ham as spam"
++        a = "from .green.eggs import ham as spam"
++        self.check_both(b, a)
++
++    def test_import(self):
++        b = "import foo"
++        a = "from . import foo"
++        self.check_both(b, a)
++
++        b = "import foo, bar"
++        a = "from . import foo, bar"
++        self.check_both(b, a)
++
++        b = "import foo, bar, x"
++        a = "from . import foo, bar, x"
++        self.check_both(b, a)
++
++        b = "import x, y, z"
++        a = "from . import x, y, z"
++        self.check_both(b, a)
++
++    def test_import_as(self):
++        b = "import foo as x"
++        a = "from . import foo as x"
++        self.check_both(b, a)
++
++        b = "import a as b, b as c, c as d"
++        a = "from . import a as b, b as c, c as d"
++        self.check_both(b, a)
++
++    def test_local_and_absolute(self):
++        self.always_exists = False
++        self.present_files = set(["foo.py", "__init__.py"])
++
++        s = "import foo, bar"
++        self.warns_unchanged(s, "absolute and local imports together")
++
++    def test_dotted_import(self):
++        b = "import foo.bar"
++        a = "from . import foo.bar"
++        self.check_both(b, a)
++
++    def test_dotted_import_as(self):
++        b = "import foo.bar as bang"
++        a = "from . import foo.bar as bang"
++        self.check_both(b, a)
++
++    def test_prefix(self):
++        b = """
++        # prefix
++        import foo.bar
++        """
++        a = """
++        # prefix
++        from . import foo.bar
++        """
++        self.check_both(b, a)
++
++
++class Test_set_literal(FixerTestCase):
++
++    fixer = "set_literal"
++
++    def test_basic(self):
++        b = """set([1, 2, 3])"""
++        a = """{1, 2, 3}"""
++        self.check(b, a)
++
++        b = """set((1, 2, 3))"""
++        a = """{1, 2, 3}"""
++        self.check(b, a)
++
++        b = """set((1,))"""
++        a = """{1}"""
++        self.check(b, a)
++
++        b = """set([1])"""
++        self.check(b, a)
++
++        b = """set((a, b))"""
++        a = """{a, b}"""
++        self.check(b, a)
++
++        b = """set([a, b])"""
++        self.check(b, a)
++
++        b = """set((a*234, f(args=23)))"""
++        a = """{a*234, f(args=23)}"""
++        self.check(b, a)
++
++        b = """set([a*23, f(23)])"""
++        a = """{a*23, f(23)}"""
++        self.check(b, a)
++
++        b = """set([a-234**23])"""
++        a = """{a-234**23}"""
++        self.check(b, a)
++
++    def test_listcomps(self):
++        b = """set([x for x in y])"""
++        a = """{x for x in y}"""
++        self.check(b, a)
++
++        b = """set([x for x in y if x == m])"""
++        a = """{x for x in y if x == m}"""
++        self.check(b, a)
++
++        b = """set([x for x in y for a in b])"""
++        a = """{x for x in y for a in b}"""
++        self.check(b, a)
++
++        b = """set([f(x) - 23 for x in y])"""
++        a = """{f(x) - 23 for x in y}"""
++        self.check(b, a)
++
++    def test_whitespace(self):
++        b = """set( [1, 2])"""
++        a = """{1, 2}"""
++        self.check(b, a)
++
++        b = """set([1 ,  2])"""
++        a = """{1 ,  2}"""
++        self.check(b, a)
++
++        b = """set([ 1 ])"""
++        a = """{ 1 }"""
++        self.check(b, a)
++
++        b = """set( [1] )"""
++        a = """{1}"""
++        self.check(b, a)
++
++        b = """set([  1,  2  ])"""
++        a = """{  1,  2  }"""
++        self.check(b, a)
++
++        b = """set([x  for x in y ])"""
++        a = """{x  for x in y }"""
++        self.check(b, a)
++
++        b = """set(
++                   [1, 2]
++               )
++            """
++        a = """{1, 2}\n"""
++        self.check(b, a)
++
++    def test_comments(self):
++        b = """set((1, 2)) # Hi"""
++        a = """{1, 2} # Hi"""
++        self.check(b, a)
++
++        # This isn't optimal behavior, but the fixer is optional.
++        b = """
++            # Foo
++            set( # Bar
++               (1, 2)
++            )
++            """
++        a = """
++            # Foo
++            {1, 2}
++            """
++        self.check(b, a)
++
++    def test_unchanged(self):
++        s = """set()"""
++        self.unchanged(s)
++
++        s = """set(a)"""
++        self.unchanged(s)
++
++        s = """set(a, b, c)"""
++        self.unchanged(s)
++
++        # Don't transform generators because they might have to be lazy.
++        s = """set(x for x in y)"""
++        self.unchanged(s)
++
++        s = """set(x for x in y if z)"""
++        self.unchanged(s)
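++        # For instance, set(line for line in fp) keeps its generator
++        # argument, which might have to stay lazy.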
++
++        s = """set(a*823-23**2 + f(23))"""
++        self.unchanged(s)
++
++
++class Test_sys_exc(FixerTestCase):
++    fixer = "sys_exc"
++
++    def test_0(self):
++        b = "sys.exc_type"
++        a = "sys.exc_info()[0]"
++        self.check(b, a)
++
++    def test_1(self):
++        b = "sys.exc_value"
++        a = "sys.exc_info()[1]"
++        self.check(b, a)
++
++    def test_2(self):
++        b = "sys.exc_traceback"
++        a = "sys.exc_info()[2]"
++        self.check(b, a)
++
++    def test_3(self):
++        b = "sys.exc_type # Foo"
++        a = "sys.exc_info()[0] # Foo"
++        self.check(b, a)
++
++    def test_4(self):
++        b = "sys.  exc_type"
++        a = "sys.  exc_info()[0]"
++        self.check(b, a)
++
++    def test_5(self):
++        b = "sys  .exc_type"
++        a = "sys  .exc_info()[0]"
++        self.check(b, a)
++
++
++class Test_paren(FixerTestCase):
++    fixer = "paren"
++
++    def test_0(self):
++        b = """[i for i in 1, 2 ]"""
++        a = """[i for i in (1, 2) ]"""
++        self.check(b, a)
++
++    def test_1(self):
++        b = """[i for i in 1, 2, ]"""
++        a = """[i for i in (1, 2,) ]"""
++        self.check(b, a)
++
++    def test_2(self):
++        b = """[i for i  in     1, 2 ]"""
++        a = """[i for i  in     (1, 2) ]"""
++        self.check(b, a)
++
++    def test_3(self):
++        b = """[i for i in 1, 2 if i]"""
++        a = """[i for i in (1, 2) if i]"""
++        self.check(b, a)
++
++    def test_4(self):
++        b = """[i for i in 1,    2    ]"""
++        a = """[i for i in (1,    2)    ]"""
++        self.check(b, a)
++
++    def test_5(self):
++        b = """(i for i in 1, 2)"""
++        a = """(i for i in (1, 2))"""
++        self.check(b, a)
++
++    def test_6(self):
++        b = """(i for i in 1   ,2   if i)"""
++        a = """(i for i in (1   ,2)   if i)"""
++        self.check(b, a)
++
++    def test_unchanged_0(self):
++        s = """[i for i in (1, 2)]"""
++        self.unchanged(s)
++
++    def test_unchanged_1(self):
++        s = """[i for i in foo()]"""
++        self.unchanged(s)
++
++    def test_unchanged_2(self):
++        s = """[i for i in (1, 2) if nothing]"""
++        self.unchanged(s)
++
++    def test_unchanged_3(self):
++        s = """(i for i in (1, 2))"""
++        self.unchanged(s)
++
++    def test_unchanged_4(self):
++        s = """[i for i in m]"""
++        self.unchanged(s)
++
++class Test_metaclass(FixerTestCase):
++
++    fixer = 'metaclass'
++
++    def test_unchanged(self):
++        self.unchanged("class X(): pass")
++        self.unchanged("class X(object): pass")
++        self.unchanged("class X(object1, object2): pass")
++        self.unchanged("class X(object1, object2, object3): pass")
++        self.unchanged("class X(metaclass=Meta): pass")
++        self.unchanged("class X(b, arg=23, metclass=Meta): pass")
++        self.unchanged("class X(b, arg=23, metaclass=Meta, other=42): pass")
++
++        s = """
++        class X:
++            def __metaclass__(self): pass
++        """
++        self.unchanged(s)
++
++        s = """
++        class X:
++            a[23] = 74
++        """
++        self.unchanged(s)
++
++    def test_comments(self):
++        b = """
++        class X:
++            # hi
++            __metaclass__ = AppleMeta
++        """
++        a = """
++        class X(metaclass=AppleMeta):
++            # hi
++            pass
++        """
++        self.check(b, a)
++
++        b = """
++        class X:
++            __metaclass__ = Meta
++            # Bedtime!
++        """
++        a = """
++        class X(metaclass=Meta):
++            pass
++            # Bedtime!
++        """
++        self.check(b, a)
++
++    def test_meta(self):
++        # no-parent class, odd body
++        b = """
++        class X():
++            __metaclass__ = Q
++            pass
++        """
++        a = """
++        class X(metaclass=Q):
++            pass
++        """
++        self.check(b, a)
++
++        # one parent class, no body
++        b = """class X(object): __metaclass__ = Q"""
++        a = """class X(object, metaclass=Q): pass"""
++        self.check(b, a)
++
++
++        # one parent, simple body
++        b = """
++        class X(object):
++            __metaclass__ = Meta
++            bar = 7
++        """
++        a = """
++        class X(object, metaclass=Meta):
++            bar = 7
++        """
++        self.check(b, a)
++
++        b = """
++        class X:
++            __metaclass__ = Meta; x = 4; g = 23
++        """
++        a = """
++        class X(metaclass=Meta):
++            x = 4; g = 23
++        """
++        self.check(b, a)
++
++        # one parent, simple body, __metaclass__ last
++        b = """
++        class X(object):
++            bar = 7
++            __metaclass__ = Meta
++        """
++        a = """
++        class X(object, metaclass=Meta):
++            bar = 7
++        """
++        self.check(b, a)
++
++        # redefining __metaclass__
++        b = """
++        class X():
++            __metaclass__ = A
++            __metaclass__ = B
++            bar = 7
++        """
++        a = """
++        class X(metaclass=B):
++            bar = 7
++        """
++        self.check(b, a)
++
++        # multiple inheritance, simple body
++        b = """
++        class X(clsA, clsB):
++            __metaclass__ = Meta
++            bar = 7
++        """
++        a = """
++        class X(clsA, clsB, metaclass=Meta):
++            bar = 7
++        """
++        self.check(b, a)
++
++        # keywords in the class statement
++        b = """class m(a, arg=23): __metaclass__ = Meta"""
++        a = """class m(a, arg=23, metaclass=Meta): pass"""
++        self.check(b, a)
++
++        b = """
++        class X(expression(2 + 4)):
++            __metaclass__ = Meta
++        """
++        a = """
++        class X(expression(2 + 4), metaclass=Meta):
++            pass
++        """
++        self.check(b, a)
++
++        b = """
++        class X(expression(2 + 4), x**4):
++            __metaclass__ = Meta
++        """
++        a = """
++        class X(expression(2 + 4), x**4, metaclass=Meta):
++            pass
++        """
++        self.check(b, a)
++
++        b = """
++        class X:
++            __metaclass__ = Meta
++            save.py = 23
++        """
++        a = """
++        class X(metaclass=Meta):
++            save.py = 23
++        """
++        self.check(b, a)
++
++
++class Test_getcwdu(FixerTestCase):
++
++    fixer = 'getcwdu'
++
++    def test_basic(self):
++        b = """os.getcwdu"""
++        a = """os.getcwd"""
++        self.check(b, a)
++
++        b = """os.getcwdu()"""
++        a = """os.getcwd()"""
++        self.check(b, a)
++
++        b = """meth = os.getcwdu"""
++        a = """meth = os.getcwd"""
++        self.check(b, a)
++
++        b = """os.getcwdu(args)"""
++        a = """os.getcwd(args)"""
++        self.check(b, a)
++
++    def test_comment(self):
++        b = """os.getcwdu() # Foo"""
++        a = """os.getcwd() # Foo"""
++        self.check(b, a)
++
++    def test_unchanged(self):
++        s = """os.getcwd()"""
++        self.unchanged(s)
++
++        s = """getcwdu()"""
++        self.unchanged(s)
++
++        s = """os.getcwdb()"""
++        self.unchanged(s)
++
++    def test_indentation(self):
++        b = """
++            if 1:
++                os.getcwdu()
++            """
++        a = """
++            if 1:
++                os.getcwd()
++            """
++        self.check(b, a)
++
++    def test_mutilation(self):
++        b = """os .getcwdu()"""
++        a = """os .getcwd()"""
++        self.check(b, a)
++
++        b = """os.  getcwdu"""
++        a = """os.  getcwd"""
++        self.check(b, a)
++
++        b = """os.getcwdu (  )"""
++        a = """os.getcwd (  )"""
++        self.check(b, a)
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
+diff -r 531f2e948299 refactor/tests/.svn/text-base/test_parser.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/test_parser.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,202 @@
++#!/usr/bin/env python2.5
++"""Test suite for 2to3's parser and grammar files.
++
++This is the place to add tests for changes to 2to3's grammar, such as those
++merging the grammars for Python 2 and 3. In addition to specific tests for
++parts of the grammar we've changed, we also make sure we can parse the
++test_grammar.py files from both Python 2 and Python 3.
++"""
++# Author: Collin Winter
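++#
++# A typical test just round-trips source through support.parse_string();
++# both of these, for example, must parse under the merged grammar:
++#
++#     support.parse_string("raise E, V, T")     # 2.x-only syntax
++#     support.parse_string("raise E1 from E2")  # 3.x-only syntax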
++
++# Testing imports
++from . import support
++from .support import driver, test_dir
++
++# Python imports
++import os
++import os.path
++
++# Local imports
++from ..pgen2.parse import ParseError
++
++
++class GrammarTest(support.TestCase):
++    def validate(self, code):
++        support.parse_string(code)
++
++    def invalid_syntax(self, code):
++        try:
++            self.validate(code)
++        except ParseError:
++            pass
++        else:
++            raise AssertionError("Syntax shouldn't have been valid")
++
++
++class TestRaiseChanges(GrammarTest):
++    def test_2x_style_1(self):
++        self.validate("raise")
++
++    def test_2x_style_2(self):
++        self.validate("raise E, V")
++
++    def test_2x_style_3(self):
++        self.validate("raise E, V, T")
++
++    def test_2x_style_invalid_1(self):
++        self.invalid_syntax("raise E, V, T, Z")
++
++    def test_3x_style(self):
++        self.validate("raise E1 from E2")
++
++    def test_3x_style_invalid_1(self):
++        self.invalid_syntax("raise E, V from E1")
++
++    def test_3x_style_invalid_2(self):
++        self.invalid_syntax("raise E from E1, E2")
++
++    def test_3x_style_invalid_3(self):
++        self.invalid_syntax("raise from E1, E2")
++
++    def test_3x_style_invalid_4(self):
++        self.invalid_syntax("raise E from")
++
++
++# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
++class TestFunctionAnnotations(GrammarTest):
++    def test_1(self):
++        self.validate("""def f(x) -> list: pass""")
++
++    def test_2(self):
++        self.validate("""def f(x:int): pass""")
++
++    def test_3(self):
++        self.validate("""def f(*x:str): pass""")
++
++    def test_4(self):
++        self.validate("""def f(**x:float): pass""")
++
++    def test_5(self):
++        self.validate("""def f(x, y:1+2): pass""")
++
++    def test_6(self):
++        self.validate("""def f(a, (b:1, c:2, d)): pass""")
++
++    def test_7(self):
++        self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
++
++    def test_8(self):
++        s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
++                        *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
++        self.validate(s)
++
++
++class TestExcept(GrammarTest):
++    def test_new(self):
++        s = """
++            try:
++                x
++            except E as N:
++                y"""
++        self.validate(s)
++
++    def test_old(self):
++        s = """
++            try:
++                x
++            except E, N:
++                y"""
++        self.validate(s)
++
++
++# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
++class TestSetLiteral(GrammarTest):
++    def test_1(self):
++        self.validate("""x = {'one'}""")
++
++    def test_2(self):
++        self.validate("""x = {'one', 1,}""")
++
++    def test_3(self):
++        self.validate("""x = {'one', 'two', 'three'}""")
++
++    def test_4(self):
++        self.validate("""x = {2, 3, 4,}""")
++
++
++class TestNumericLiterals(GrammarTest):
++    def test_new_octal_notation(self):
++        self.validate("""0o7777777777777""")
++        self.invalid_syntax("""0o7324528887""")
++
++    def test_new_binary_notation(self):
++        self.validate("""0b101010""")
++        self.invalid_syntax("""0b0101021""")
++
++
++class TestClassDef(GrammarTest):
++    def test_new_syntax(self):
++        self.validate("class B(t=7): pass")
++        self.validate("class B(t, *args): pass")
++        self.validate("class B(t, **kwargs): pass")
++        self.validate("class B(t, *args, **kwargs): pass")
++        self.validate("class B(t, y=9, *args, **kwargs): pass")
++
++
++class TestParserIdempotency(support.TestCase):
++
++    """A cut-down version of pytree_idempotency.py."""
++
++    def test_all_project_files(self):
++        for filepath in support.all_project_files():
++            print "Parsing %s..." % filepath
++            tree = driver.parse_file(filepath, debug=True)
++            if diff(filepath, tree):
++                self.fail("Idempotency failed: %s" % filepath)
++
++
++class TestLiterals(GrammarTest):
++
++    def test_multiline_bytes_literals(self):
++        s = """
++            md5test(b"\xaa" * 80,
++                    (b"Test Using Larger Than Block-Size Key "
++                     b"and Larger Than One Block-Size Data"),
++                    "6f630fad67cda0ee1fb1f562db3aa53e")
++            """
++        self.validate(s)
++
++    def test_multiline_bytes_tripquote_literals(self):
++        s = '''
++            b"""
++            <?xml version="1.0" encoding="UTF-8"?>
++            <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
++            """
++            '''
++        self.validate(s)
++
++    def test_multiline_str_literals(self):
++        s = """
++            md5test("\xaa" * 80,
++                    ("Test Using Larger Than Block-Size Key "
++                     "and Larger Than One Block-Size Data"),
++                    "6f630fad67cda0ee1fb1f562db3aa53e")
++            """
++        self.validate(s)
++
++
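++# Render the tree to a scratch file named "@" and compare it against the
++# original source with diff(1); a nonzero exit status means the reparse
++# was not idempotent.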
++def diff(fn, tree):
++    f = open("@", "w")
++    try:
++        f.write(str(tree))
++    finally:
++        f.close()
++    try:
++        return os.system("diff -u %s @" % fn)
++    finally:
++        os.remove("@")
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
+diff -r 531f2e948299 refactor/tests/.svn/text-base/test_pytree.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/test_pytree.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,458 @@
++#!/usr/bin/env python2.5
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Unit tests for pytree.py.
++
++NOTE: Please *don't* add doc strings to individual test methods!
++In verbose mode, printing of the module, class and method name is much
++more helpful than printing of (the first line of) the docstring,
++especially when debugging a test.
++"""
++
++# Testing imports
++from . import support
++
++# Local imports (XXX should become a package)
++from .. import pytree
++
++try:
++    sorted
++except NameError:
++    def sorted(lst):
++        l = list(lst)
++        l.sort()
++        return l
++
++class TestNodes(support.TestCase):
++
++    """Unit tests for nodes (Base, Leaf, Node)."""
++
++    def testBaseCantConstruct(self):
++        if __debug__:
++            # Test that instantiating Base() raises an AssertionError
++            self.assertRaises(AssertionError, pytree.Base)
++
++    def testLeaf(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(l1.type, 100)
++        self.assertEqual(l1.value, "foo")
++
++    def testLeafRepr(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(repr(l1), "Leaf(100, 'foo')")
++
++    def testLeafStr(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(str(l1), "foo")
++        l2 = pytree.Leaf(100, "foo", context=(" ", (10, 1)))
++        self.assertEqual(str(l2), " foo")
++
++    def testLeafStrNumericValue(self):
++        # Make sure that the Leaf's value is stringified. Failing to
++        # do this can cause a TypeError in certain situations.
++        l1 = pytree.Leaf(2, 5)
++        l1.set_prefix("foo_")
++        self.assertEqual(str(l1), "foo_5")
++
++    def testLeafEq(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "foo", context=(" ", (1, 0)))
++        self.assertEqual(l1, l2)
++        l3 = pytree.Leaf(101, "foo")
++        l4 = pytree.Leaf(100, "bar")
++        self.assertNotEqual(l1, l3)
++        self.assertNotEqual(l1, l4)
++
++    def testLeafPrefix(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(l1.get_prefix(), "")
++        self.failIf(l1.was_changed)
++        l1.set_prefix("  ##\n\n")
++        self.assertEqual(l1.get_prefix(), "  ##\n\n")
++        self.failUnless(l1.was_changed)
++
++    def testNode(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(200, "bar")
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(n1.type, 1000)
++        self.assertEqual(n1.children, [l1, l2])
++
++    def testNodeRepr(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(repr(n1),
++                         "Node(1000, [%s, %s])" % (repr(l1), repr(l2)))
++
++    def testNodeStr(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(str(n1), "foo bar")
++
++    def testNodePrefix(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(l1.get_prefix(), "")
++        n1 = pytree.Node(1000, [l1])
++        self.assertEqual(n1.get_prefix(), "")
++        n1.set_prefix(" ")
++        self.assertEqual(n1.get_prefix(), " ")
++        self.assertEqual(l1.get_prefix(), " ")
++
++    def testGetSuffix(self):
++        l1 = pytree.Leaf(100, "foo", prefix="a")
++        l2 = pytree.Leaf(100, "bar", prefix="b")
++        n1 = pytree.Node(1000, [l1, l2])
++
++        self.assertEqual(l1.get_suffix(), l2.get_prefix())
++        self.assertEqual(l2.get_suffix(), "")
++        self.assertEqual(n1.get_suffix(), "")
++
++        l3 = pytree.Leaf(100, "bar", prefix="c")
++        n2 = pytree.Node(1000, [n1, l3])
++
++        self.assertEqual(n1.get_suffix(), l3.get_prefix())
++        self.assertEqual(l3.get_suffix(), "")
++        self.assertEqual(n2.get_suffix(), "")
++
++    def testNodeEq(self):
++        n1 = pytree.Node(1000, ())
++        n2 = pytree.Node(1000, [], context=(" ", (1, 0)))
++        self.assertEqual(n1, n2)
++        n3 = pytree.Node(1001, ())
++        self.assertNotEqual(n1, n3)
++
++    def testNodeEqRecursive(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1])
++        n2 = pytree.Node(1000, [l2])
++        self.assertEqual(n1, n2)
++        l3 = pytree.Leaf(100, "bar")
++        n3 = pytree.Node(1000, [l3])
++        self.assertNotEqual(n1, n3)
++
++    def testReplace(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "+")
++        l3 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2, l3])
++        self.assertEqual(n1.children, [l1, l2, l3])
++        self.failUnless(isinstance(n1.children, list))
++        self.failIf(n1.was_changed)
++        l2new = pytree.Leaf(100, "-")
++        l2.replace(l2new)
++        self.assertEqual(n1.children, [l1, l2new, l3])
++        self.failUnless(isinstance(n1.children, list))
++        self.failUnless(n1.was_changed)
++
++    def testReplaceWithList(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "+")
++        l3 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2, l3])
++
++        l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
++        self.assertEqual(str(n1), "foo**bar")
++        self.failUnless(isinstance(n1.children, list))
++
++    def testPostOrder(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(list(n1.post_order()), [l1, l2, n1])
++
++    def testPreOrder(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(list(n1.pre_order()), [n1, l1, l2])
++
++    def testChangedLeaf(self):
++        l1 = pytree.Leaf(100, "f")
++        self.failIf(l1.was_changed)
++
++        l1.changed()
++        self.failUnless(l1.was_changed)
++
++    def testChangedNode(self):
++        l1 = pytree.Leaf(100, "f")
++        n1 = pytree.Node(1000, [l1])
++        self.failIf(n1.was_changed)
++
++        n1.changed()
++        self.failUnless(n1.was_changed)
++
++    def testChangedRecursive(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "+")
++        l3 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2, l3])
++        n2 = pytree.Node(1000, [n1])
++        self.failIf(l1.was_changed)
++        self.failIf(n1.was_changed)
++        self.failIf(n2.was_changed)
++
++        n1.changed()
++        self.failUnless(n1.was_changed)
++        self.failUnless(n2.was_changed)
++        self.failIf(l1.was_changed)
++
++    def testLeafConstructorPrefix(self):
++        for prefix in ("xyz_", ""):
++            l1 = pytree.Leaf(100, "self", prefix=prefix)
++            self.assertEqual(str(l1), prefix + "self")
++            self.assertEqual(l1.get_prefix(), prefix)
++
++    def testNodeConstructorPrefix(self):
++        for prefix in ("xyz_", ""):
++            l1 = pytree.Leaf(100, "self")
++            l2 = pytree.Leaf(100, "foo", prefix="_")
++            n1 = pytree.Node(1000, [l1, l2], prefix=prefix)
++            self.assertEqual(str(n1), prefix + "self_foo")
++            self.assertEqual(n1.get_prefix(), prefix)
++            self.assertEqual(l1.get_prefix(), prefix)
++            self.assertEqual(l2.get_prefix(), "_")
++
++    def testRemove(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1, l2])
++        n2 = pytree.Node(1000, [n1])
++
++        self.assertEqual(n1.remove(), 0)
++        self.assertEqual(n2.children, [])
++        self.assertEqual(l1.parent, n1)
++        self.assertEqual(n1.parent, None)
++        self.assertEqual(n2.parent, None)
++        self.failIf(n1.was_changed)
++        self.failUnless(n2.was_changed)
++
++        self.assertEqual(l2.remove(), 1)
++        self.assertEqual(l1.remove(), 0)
++        self.assertEqual(n1.children, [])
++        self.assertEqual(l1.parent, None)
++        self.assertEqual(n1.parent, None)
++        self.assertEqual(n2.parent, None)
++        self.failUnless(n1.was_changed)
++        self.failUnless(n2.was_changed)
++
++    def testRemoveParentless(self):
++        n1 = pytree.Node(1000, [])
++        n1.remove()
++        self.assertEqual(n1.parent, None)
++
++        l1 = pytree.Leaf(100, "foo")
++        l1.remove()
++        self.assertEqual(l1.parent, None)
++
++    def testNodeSetChild(self):
++        l1 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1])
++
++        l2 = pytree.Leaf(100, "bar")
++        n1.set_child(0, l2)
++        self.assertEqual(l1.parent, None)
++        self.assertEqual(l2.parent, n1)
++        self.assertEqual(n1.children, [l2])
++
++        n2 = pytree.Node(1000, [l1])
++        n2.set_child(0, n1)
++        self.assertEqual(l1.parent, None)
++        self.assertEqual(n1.parent, n2)
++        self.assertEqual(n2.parent, None)
++        self.assertEqual(n2.children, [n1])
++
++        self.assertRaises(IndexError, n1.set_child, 4, l2)
++        # I don't care what it raises, so long as it's an exception
++        self.assertRaises(Exception, n1.set_child, 0, list)
++
++    def testNodeInsertChild(self):
++        l1 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1])
++
++        l2 = pytree.Leaf(100, "bar")
++        n1.insert_child(0, l2)
++        self.assertEqual(l2.parent, n1)
++        self.assertEqual(n1.children, [l2, l1])
++
++        l3 = pytree.Leaf(100, "abc")
++        n1.insert_child(2, l3)
++        self.assertEqual(n1.children, [l2, l1, l3])
++
++        # I don't care what it raises, so long as it's an exception
++        self.assertRaises(Exception, n1.insert_child, 0, list)
++
++    def testNodeAppendChild(self):
++        n1 = pytree.Node(1000, [])
++
++        l1 = pytree.Leaf(100, "foo")
++        n1.append_child(l1)
++        self.assertEqual(l1.parent, n1)
++        self.assertEqual(n1.children, [l1])
++
++        l2 = pytree.Leaf(100, "bar")
++        n1.append_child(l2)
++        self.assertEqual(l2.parent, n1)
++        self.assertEqual(n1.children, [l1, l2])
++
++        # I don't care what it raises, so long as it's an exception
++        self.assertRaises(Exception, n1.append_child, list)
++
++    def testNodeNextSibling(self):
++        n1 = pytree.Node(1000, [])
++        n2 = pytree.Node(1000, [])
++        p1 = pytree.Node(1000, [n1, n2])
++
++        self.failUnless(n1.next_sibling is n2)
++        self.assertEqual(n2.next_sibling, None)
++        self.assertEqual(p1.next_sibling, None)
++
++    def testLeafNextSibling(self):
++        l1 = pytree.Leaf(100, "a")
++        l2 = pytree.Leaf(100, "b")
++        p1 = pytree.Node(1000, [l1, l2])
++
++        self.failUnless(l1.next_sibling is l2)
++        self.assertEqual(l2.next_sibling, None)
++        self.assertEqual(p1.next_sibling, None)
++
++    def testNodePrevSibling(self):
++        n1 = pytree.Node(1000, [])
++        n2 = pytree.Node(1000, [])
++        p1 = pytree.Node(1000, [n1, n2])
++
++        self.failUnless(n2.prev_sibling is n1)
++        self.assertEqual(n1.prev_sibling, None)
++        self.assertEqual(p1.prev_sibling, None)
++
++    def testLeafPrevSibling(self):
++        l1 = pytree.Leaf(100, "a")
++        l2 = pytree.Leaf(100, "b")
++        p1 = pytree.Node(1000, [l1, l2])
++
++        self.failUnless(l2.prev_sibling is l1)
++        self.assertEqual(l1.prev_sibling, None)
++        self.assertEqual(p1.prev_sibling, None)
++
++
++class TestPatterns(support.TestCase):
++
++    """Unit tests for tree matching patterns."""
++
++    def testBasicPatterns(self):
++        # Build a tree
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        l3 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1, l2])
++        n2 = pytree.Node(1000, [l3])
++        root = pytree.Node(1000, [n1, n2])
++        # Build a pattern matching a leaf
++        pl = pytree.LeafPattern(100, "foo", name="pl")
++        r = {}
++        self.assertFalse(pl.match(root, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pl.match(n1, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pl.match(n2, results=r))
++        self.assertEqual(r, {})
++        self.assertTrue(pl.match(l1, results=r))
++        self.assertEqual(r, {"pl": l1})
++        r = {}
++        self.assertFalse(pl.match(l2, results=r))
++        self.assertEqual(r, {})
++        # Build a pattern matching a node
++        pn = pytree.NodePattern(1000, [pl], name="pn")
++        self.assertFalse(pn.match(root, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pn.match(n1, results=r))
++        self.assertEqual(r, {})
++        self.assertTrue(pn.match(n2, results=r))
++        self.assertEqual(r, {"pn": n2, "pl": l3})
++        r = {}
++        self.assertFalse(pn.match(l1, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pn.match(l2, results=r))
++        self.assertEqual(r, {})
++
++    def testWildcardPatterns(self):
++        # Build a tree for testing
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        l3 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1, l2])
++        n2 = pytree.Node(1000, [l3])
++        root = pytree.Node(1000, [n1, n2])
++        # Build a pattern
++        pl = pytree.LeafPattern(100, "foo", name="pl")
++        pn = pytree.NodePattern(1000, [pl], name="pn")
++        pw = pytree.WildcardPattern([[pn], [pl, pl]], name="pw")
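++        # A WildcardPattern's content is a list of alternatives, each itself
++        # a sequence of sub-patterns; this one accepts either a single
++        # node-pattern hit ([pn]) or two consecutive "foo" leaves ([pl, pl]),
++        # which is exactly what the two positive cases below exercise.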
++        r = {}
++        self.assertFalse(pw.match_seq([root], r))
++        self.assertEqual(r, {})
++        self.assertFalse(pw.match_seq([n1], r))
++        self.assertEqual(r, {})
++        self.assertTrue(pw.match_seq([n2], r))
++        # These are easier to debug
++        self.assertEqual(sorted(r.keys()), ["pl", "pn", "pw"])
++        self.assertEqual(r["pl"], l1)
++        self.assertEqual(r["pn"], n2)
++        self.assertEqual(r["pw"], [n2])
++        # But this is equivalent
++        self.assertEqual(r, {"pl": l1, "pn": n2, "pw": [n2]})
++        r = {}
++        self.assertTrue(pw.match_seq([l1, l3], r))
++        self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
++        self.assert_(r["pl"] is l3)
++        r = {}
++
++    def testGenerateMatches(self):
++        la = pytree.Leaf(1, "a")
++        lb = pytree.Leaf(1, "b")
++        lc = pytree.Leaf(1, "c")
++        ld = pytree.Leaf(1, "d")
++        le = pytree.Leaf(1, "e")
++        lf = pytree.Leaf(1, "f")
++        leaves = [la, lb, lc, ld, le, lf]
++        root = pytree.Node(1000, leaves)
++        pa = pytree.LeafPattern(1, "a", "pa")
++        pb = pytree.LeafPattern(1, "b", "pb")
++        pc = pytree.LeafPattern(1, "c", "pc")
++        pd = pytree.LeafPattern(1, "d", "pd")
++        pe = pytree.LeafPattern(1, "e", "pe")
++        pf = pytree.LeafPattern(1, "f", "pf")
++        pw = pytree.WildcardPattern([[pa, pb, pc], [pd, pe],
++                                     [pa, pb], [pc, pd], [pe, pf]],
++                                    min=1, max=4, name="pw")
++        self.assertEqual([x[0] for x in pw.generate_matches(leaves)],
++                         [3, 5, 2, 4, 6])
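++        # generate_matches yields (count, results) pairs, where count is how
++        # many leaves were consumed.  With min=1 and max=4, the counts above
++        # come from repeating the alternatives: abc=3, abc+de=5, ab=2,
++        # ab+cd=4, and ab+cd+ef=6.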
++        pr = pytree.NodePattern(type=1000, content=[pw], name="pr")
++        matches = list(pytree.generate_matches([pr], [root]))
++        self.assertEqual(len(matches), 1)
++        c, r = matches[0]
++        self.assertEqual(c, 1)
++        self.assertEqual(str(r["pr"]), "abcdef")
++        self.assertEqual(r["pw"], [la, lb, lc, ld, le, lf])
++        for c in "abcdef":
++            self.assertEqual(r["p" + c], pytree.Leaf(1, c))
++
++    def testHasKeyExample(self):
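++        # The magic numbers below assume the pgen2 token numbering: 7 and 8
++        # are token.LPAR and token.RPAR, 3 is token.STRING, and 331 is taken
++        # to be the grammar's trailer symbol, so the pattern matches
++        # "( ... )" with everything in between captured as "args".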
++        pattern = pytree.NodePattern(331,
++                                     (pytree.LeafPattern(7),
++                                      pytree.WildcardPattern(name="args"),
++                                      pytree.LeafPattern(8)))
++        l1 = pytree.Leaf(7, "(")
++        l2 = pytree.Leaf(3, "x")
++        l3 = pytree.Leaf(8, ")")
++        node = pytree.Node(331, [l1, l2, l3])
++        r = {}
++        self.assert_(pattern.match(node, r))
++        self.assertEqual(r["args"], [l2])
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
+diff -r 531f2e948299 refactor/tests/.svn/text-base/test_refactor.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/test_refactor.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,168 @@
++"""
++Unit tests for refactor.py.
++"""
++
++import sys
++import os
++import operator
++import StringIO
++import tempfile
++import unittest
++
++from lib2to3 import refactor, pygram, fixer_base
++
++from . import support
++
++
++FIXER_DIR = os.path.join(os.path.dirname(__file__), "data/fixers")
++
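++# The fixer package has to be importable while get_fixers_from_package runs,
++# so FIXER_DIR is pushed onto sys.path just long enough to collect the
++# default fixer names, then popped again in the finally clause.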
++sys.path.append(FIXER_DIR)
++try:
++    _DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
++finally:
++    sys.path.pop()
++
++class TestRefactoringTool(unittest.TestCase):
++
++    def setUp(self):
++        sys.path.append(FIXER_DIR)
++
++    def tearDown(self):
++        sys.path.pop()
++
++    def check_instances(self, instances, classes):
++        for inst, cls in zip(instances, classes):
++            if not isinstance(inst, cls):
++                self.fail("%s are not instances of %s" % instances, classes)
++
++    def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
++        return refactor.RefactoringTool(fixers, options, explicit)
++
++    def test_print_function_option(self):
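++        # Enabling print_function is expected to drop "print" from the
++        # grammar's keyword table so that print() parses as a plain call;
++        # the entry is restored afterwards because pygram.python_grammar is
++        # module-level state shared by other tests.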
++        gram = pygram.python_grammar
++        save = gram.keywords["print"]
++        try:
++            rt = self.rt({"print_function" : True})
++            self.assertRaises(KeyError, operator.itemgetter("print"),
++                              gram.keywords)
++        finally:
++            gram.keywords["print"] = save
++
++    def test_fixer_loading_helpers(self):
++        contents = ["explicit", "first", "last", "parrot", "preorder"]
++        non_prefixed = refactor.get_all_fix_names("myfixes")
++        prefixed = refactor.get_all_fix_names("myfixes", False)
++        full_names = refactor.get_fixers_from_package("myfixes")
++        self.assertEqual(prefixed, ["fix_" + name for name in contents])
++        self.assertEqual(non_prefixed, contents)
++        self.assertEqual(full_names,
++                         ["myfixes.fix_" + name for name in contents])
++
++    def test_get_headnode_dict(self):
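++        # get_headnode_dict buckets fixers by the first node type their
++        # PATTERN can match; fixers without a pattern land in the None
++        # bucket, presumably so they can be tried against any node.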
++        class NoneFix(fixer_base.BaseFix):
++            PATTERN = None
++
++        class FileInputFix(fixer_base.BaseFix):
++            PATTERN = "file_input< any * >"
++
++        no_head = NoneFix({}, [])
++        with_head = FileInputFix({}, [])
++        d = refactor.get_headnode_dict([no_head, with_head])
++        expected = {None: [no_head],
++                    pygram.python_symbols.file_input : [with_head]}
++        self.assertEqual(d, expected)
++
++    def test_fixer_loading(self):
++        from myfixes.fix_first import FixFirst
++        from myfixes.fix_last import FixLast
++        from myfixes.fix_parrot import FixParrot
++        from myfixes.fix_preorder import FixPreorder
++
++        rt = self.rt()
++        pre, post = rt.get_fixers()
++
++        self.check_instances(pre, [FixPreorder])
++        self.check_instances(post, [FixFirst, FixParrot, FixLast])
++
++    def test_naughty_fixers(self):
++        self.assertRaises(ImportError, self.rt, fixers=["not_here"])
++        self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
++        self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
++
++    def test_refactor_string(self):
++        rt = self.rt()
++        input = "def parrot(): pass\n\n"
++        tree = rt.refactor_string(input, "<test>")
++        self.assertNotEqual(str(tree), input)
++
++        input = "def f(): pass\n\n"
++        tree = rt.refactor_string(input, "<test>")
++        self.assertEqual(str(tree), input)
++
++    def test_refactor_stdin(self):
++
++        class MyRT(refactor.RefactoringTool):
++
++            def print_output(self, lines):
++                diff_lines.extend(lines)
++
++        diff_lines = []
++        rt = MyRT(_DEFAULT_FIXERS)
++        save = sys.stdin
++        sys.stdin = StringIO.StringIO("def parrot(): pass\n\n")
++        try:
++            rt.refactor_stdin()
++        finally:
++            sys.stdin = save
++        expected = """--- <stdin> (original)
+++++ <stdin> (refactored)
++@@ -1,2 +1,2 @@
++-def parrot(): pass
+++def cheese(): pass""".splitlines()
++        self.assertEqual(diff_lines[:-1], expected)
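++        # The captured diff carries one trailing line beyond the fixture
++        # (presumably a blank terminator), hence the [:-1] slice above.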
++
++    def test_refactor_file(self):
++        test_file = os.path.join(FIXER_DIR, "parrot_example.py")
++        old_contents = open(test_file, "r").read()
++        rt = self.rt()
++
++        rt.refactor_file(test_file)
++        self.assertEqual(old_contents, open(test_file, "r").read())
++
++        rt.refactor_file(test_file, True)
++        try:
++            self.assertNotEqual(old_contents, open(test_file, "r").read())
++        finally:
++            open(test_file, "w").write(old_contents)
++
++    def test_refactor_docstring(self):
++        rt = self.rt()
++
++        def example():
++            """
++            >>> example()
++            42
++            """
++        out = rt.refactor_docstring(example.__doc__, "<test>")
++        self.assertEqual(out, example.__doc__)
++
++        def parrot():
++            """
++            >>> def parrot():
++            ...      return 43
++            """
++        out = rt.refactor_docstring(parrot.__doc__, "<test>")
++        self.assertNotEqual(out, parrot.__doc__)
++
++    def test_explicit(self):
++        from myfixes.fix_explicit import FixExplicit
++
++        rt = self.rt(fixers=["myfixes.fix_explicit"])
++        self.assertEqual(len(rt.post_order), 0)
++
++        rt = self.rt(explicit=["myfixes.fix_explicit"])
++        for fix in rt.post_order:
++            if isinstance(fix, FixExplicit):
++                break
++        else:
++            self.fail("explicit fixer not loaded")
+diff -r 531f2e948299 refactor/tests/.svn/text-base/test_util.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/.svn/text-base/test_util.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,559 @@
++#!/usr/bin/env python2.5
++""" Test suite for the code in fixes.util """
++# Author: Collin Winter
++
++# Testing imports
++from . import support
++
++# Python imports
++import os.path
++
++# Local imports
++from .. import pytree
++from .. import fixer_util
++from ..fixer_util import Attr, Name
++
++
++def parse(code, strip_levels=0):
++    # The topmost node is file_input, which we don't care about.
++    # The next-topmost node is a *_stmt node, which we also don't care about.
++    tree = support.parse_string(code)
++    for i in range(strip_levels):
++        tree = tree.children[0]
++    tree.parent = None
++    return tree
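++
++# For example, parse("(a, b)", strip_levels=2) peels off the file_input and
++# simple_stmt wrappers and returns the bare atom node, which is what the
++# is_tuple/is_list helpers below inspect.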
++
++class MacroTestCase(support.TestCase):
++    def assertStr(self, node, string):
++        if isinstance(node, (tuple, list)):
++            node = pytree.Node(fixer_util.syms.simple_stmt, node)
++        self.assertEqual(str(node), string)
++
++
++class Test_is_tuple(support.TestCase):
++    def is_tuple(self, string):
++        return fixer_util.is_tuple(parse(string, strip_levels=2))
++
++    def test_valid(self):
++        self.failUnless(self.is_tuple("(a, b)"))
++        self.failUnless(self.is_tuple("(a, (b, c))"))
++        self.failUnless(self.is_tuple("((a, (b, c)),)"))
++        self.failUnless(self.is_tuple("(a,)"))
++        self.failUnless(self.is_tuple("()"))
++
++    def test_invalid(self):
++        self.failIf(self.is_tuple("(a)"))
++        self.failIf(self.is_tuple("('foo') % (b, c)"))
++
++
++class Test_is_list(support.TestCase):
++    def is_list(self, string):
++        return fixer_util.is_list(parse(string, strip_levels=2))
++
++    def test_valid(self):
++        self.failUnless(self.is_list("[]"))
++        self.failUnless(self.is_list("[a]"))
++        self.failUnless(self.is_list("[a, b]"))
++        self.failUnless(self.is_list("[a, [b, c]]"))
++        self.failUnless(self.is_list("[[a, [b, c]],]"))
++
++    def test_invalid(self):
++        self.failIf(self.is_list("[]+[]"))
++
++
++class Test_Attr(MacroTestCase):
++    def test(self):
++        call = parse("foo()", strip_levels=2)
++
++        self.assertStr(Attr(Name("a"), Name("b")), "a.b")
++        self.assertStr(Attr(call, Name("b")), "foo().b")
++
++    def test_returns(self):
++        attr = Attr(Name("a"), Name("b"))
++        self.assertEqual(type(attr), list)
++
++
++class Test_Name(MacroTestCase):
++    def test(self):
++        self.assertStr(Name("a"), "a")
++        self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
++        self.assertStr(Name("a", prefix="b"), "ba")
++
++
++class Test_does_tree_import(support.TestCase):
++    def _find_bind_rec(self, name, node):
++        # Search a tree for a binding -- used to find the starting
++        # point for these tests.
++        c = fixer_util.find_binding(name, node)
++        if c: return c
++        for child in node.children:
++            c = self._find_bind_rec(name, child)
++            if c: return c
++
++    def does_tree_import(self, package, name, string):
++        node = parse(string)
++        # Find the binding of start -- that's what we'll go from
++        node = self._find_bind_rec('start', node)
++        return fixer_util.does_tree_import(package, name, node)
++
++    def try_with(self, string):
++        failing_tests = (("a", "a", "from a import b"),
++                         ("a.d", "a", "from a.d import b"),
++                         ("d.a", "a", "from d.a import b"),
++                         (None, "a", "import b"),
++                         (None, "a", "import b, c, d"))
++        for package, name, import_ in failing_tests:
++            n = self.does_tree_import(package, name, import_ + "\n" + string)
++            self.failIf(n)
++            n = self.does_tree_import(package, name, string + "\n" + import_)
++            self.failIf(n)
++
++        passing_tests = (("a", "a", "from a import a"),
++                         ("x", "a", "from x import a"),
++                         ("x", "a", "from x import b, c, a, d"),
++                         ("x.b", "a", "from x.b import a"),
++                         ("x.b", "a", "from x.b import b, c, a, d"),
++                         (None, "a", "import a"),
++                         (None, "a", "import b, c, a, d"))
++        for package, name, import_ in passing_tests:
++            n = self.does_tree_import(package, name, import_ + "\n" + string)
++            self.failUnless(n)
++            n = self.does_tree_import(package, name, string + "\n" + import_)
++            self.failUnless(n)
++
++    def test_in_function(self):
++        self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
++
++class Test_find_binding(support.TestCase):
++    def find_binding(self, name, string, package=None):
++        return fixer_util.find_binding(name, parse(string), package)
++
++    def test_simple_assignment(self):
++        self.failUnless(self.find_binding("a", "a = b"))
++        self.failUnless(self.find_binding("a", "a = [b, c, d]"))
++        self.failUnless(self.find_binding("a", "a = foo()"))
++        self.failUnless(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
++        self.failIf(self.find_binding("a", "foo = a"))
++        self.failIf(self.find_binding("a", "foo = (a, b, c)"))
++
++    def test_tuple_assignment(self):
++        self.failUnless(self.find_binding("a", "(a,) = b"))
++        self.failUnless(self.find_binding("a", "(a, b, c) = [b, c, d]"))
++        self.failUnless(self.find_binding("a", "(c, (d, a), b) = foo()"))
++        self.failUnless(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
++        self.failIf(self.find_binding("a", "(foo, b) = (b, a)"))
++        self.failIf(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
++
++    def test_list_assignment(self):
++        self.failUnless(self.find_binding("a", "[a] = b"))
++        self.failUnless(self.find_binding("a", "[a, b, c] = [b, c, d]"))
++        self.failUnless(self.find_binding("a", "[c, [d, a], b] = foo()"))
++        self.failUnless(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
++        self.failIf(self.find_binding("a", "[foo, b] = (b, a)"))
++        self.failIf(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
++
++    def test_invalid_assignments(self):
++        self.failIf(self.find_binding("a", "foo.a = 5"))
++        self.failIf(self.find_binding("a", "foo[a] = 5"))
++        self.failIf(self.find_binding("a", "foo(a) = 5"))
++        self.failIf(self.find_binding("a", "foo(a, b) = 5"))
++
++    def test_simple_import(self):
++        self.failUnless(self.find_binding("a", "import a"))
++        self.failUnless(self.find_binding("a", "import b, c, a, d"))
++        self.failIf(self.find_binding("a", "import b"))
++        self.failIf(self.find_binding("a", "import b, c, d"))
++
++    def test_from_import(self):
++        self.failUnless(self.find_binding("a", "from x import a"))
++        self.failUnless(self.find_binding("a", "from a import a"))
++        self.failUnless(self.find_binding("a", "from x import b, c, a, d"))
++        self.failUnless(self.find_binding("a", "from x.b import a"))
++        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d"))
++        self.failIf(self.find_binding("a", "from a import b"))
++        self.failIf(self.find_binding("a", "from a.d import b"))
++        self.failIf(self.find_binding("a", "from d.a import b"))
++
++    def test_import_as(self):
++        self.failUnless(self.find_binding("a", "import b as a"))
++        self.failUnless(self.find_binding("a", "import b as a, c, a as f, d"))
++        self.failIf(self.find_binding("a", "import a as f"))
++        self.failIf(self.find_binding("a", "import b, c as f, d as e"))
++
++    def test_from_import_as(self):
++        self.failUnless(self.find_binding("a", "from x import b as a"))
++        self.failUnless(self.find_binding("a", "from x import g as a, d as b"))
++        self.failUnless(self.find_binding("a", "from x.b import t as a"))
++        self.failUnless(self.find_binding("a", "from x.b import g as a, d"))
++        self.failIf(self.find_binding("a", "from a import b as t"))
++        self.failIf(self.find_binding("a", "from a.d import b as t"))
++        self.failIf(self.find_binding("a", "from d.a import b as t"))
++
++    def test_simple_import_with_package(self):
++        self.failUnless(self.find_binding("b", "import b"))
++        self.failUnless(self.find_binding("b", "import b, c, d"))
++        self.failIf(self.find_binding("b", "import b", "b"))
++        self.failIf(self.find_binding("b", "import b, c, d", "c"))
++
++    def test_from_import_with_package(self):
++        self.failUnless(self.find_binding("a", "from x import a", "x"))
++        self.failUnless(self.find_binding("a", "from a import a", "a"))
++        self.failUnless(self.find_binding("a", "from x import *", "x"))
++        self.failUnless(self.find_binding("a", "from x import b, c, a, d", "x"))
++        self.failUnless(self.find_binding("a", "from x.b import a", "x.b"))
++        self.failUnless(self.find_binding("a", "from x.b import *", "x.b"))
++        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
++        self.failIf(self.find_binding("a", "from a import b", "a"))
++        self.failIf(self.find_binding("a", "from a.d import b", "a.d"))
++        self.failIf(self.find_binding("a", "from d.a import b", "a.d"))
++        self.failIf(self.find_binding("a", "from x.y import *", "a.b"))
++
++    def test_import_as_with_package(self):
++        self.failIf(self.find_binding("a", "import b.c as a", "b.c"))
++        self.failIf(self.find_binding("a", "import a as f", "f"))
++        self.failIf(self.find_binding("a", "import a as f", "a"))
++
++    def test_from_import_as_with_package(self):
++        # Because it would take a lot of special-case code in the fixers
++        # to deal with "from foo import bar as baz", we simply always
++        # fail if there is a "from ... import ... as ...".
++        self.failIf(self.find_binding("a", "from x import b as a", "x"))
++        self.failIf(self.find_binding("a", "from x import g as a, d as b", "x"))
++        self.failIf(self.find_binding("a", "from x.b import t as a", "x.b"))
++        self.failIf(self.find_binding("a", "from x.b import g as a, d", "x.b"))
++        self.failIf(self.find_binding("a", "from a import b as t", "a"))
++        self.failIf(self.find_binding("a", "from a import b as t", "b"))
++        self.failIf(self.find_binding("a", "from a import b as t", "t"))
++
++    def test_function_def(self):
++        self.failUnless(self.find_binding("a", "def a(): pass"))
++        self.failUnless(self.find_binding("a", "def a(b, c, d): pass"))
++        self.failUnless(self.find_binding("a", "def a(): b = 7"))
++        self.failIf(self.find_binding("a", "def d(b, (c, a), e): pass"))
++        self.failIf(self.find_binding("a", "def d(a=7): pass"))
++        self.failIf(self.find_binding("a", "def d(a): pass"))
++        self.failIf(self.find_binding("a", "def d(): a = 7"))
++
++        s = """
++            def d():
++                def a():
++                    pass"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_class_def(self):
++        self.failUnless(self.find_binding("a", "class a: pass"))
++        self.failUnless(self.find_binding("a", "class a(): pass"))
++        self.failUnless(self.find_binding("a", "class a(b): pass"))
++        self.failUnless(self.find_binding("a", "class a(b, c=8): pass"))
++        self.failIf(self.find_binding("a", "class d: pass"))
++        self.failIf(self.find_binding("a", "class d(a): pass"))
++        self.failIf(self.find_binding("a", "class d(b, a=7): pass"))
++        self.failIf(self.find_binding("a", "class d(b, *a): pass"))
++        self.failIf(self.find_binding("a", "class d(b, **a): pass"))
++        self.failIf(self.find_binding("a", "class d: a = 7"))
++
++        s = """
++            class d():
++                class a():
++                    pass"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_for(self):
++        self.failUnless(self.find_binding("a", "for a in r: pass"))
++        self.failUnless(self.find_binding("a", "for a, b in r: pass"))
++        self.failUnless(self.find_binding("a", "for (a, b) in r: pass"))
++        self.failUnless(self.find_binding("a", "for c, (a,) in r: pass"))
++        self.failUnless(self.find_binding("a", "for c, (a, b) in r: pass"))
++        self.failUnless(self.find_binding("a", "for c in r: a = c"))
++        self.failIf(self.find_binding("a", "for c in a: pass"))
++
++    def test_for_nested(self):
++        s = """
++            for b in r:
++                for a in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for a, c in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for (a, c) in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for (a,) in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c, (a, d) in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c in b:
++                    a = 7"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c in b:
++                    d = a"""
++        self.failIf(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c in a:
++                    d = 7"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_if(self):
++        self.failUnless(self.find_binding("a", "if b in r: a = c"))
++        self.failIf(self.find_binding("a", "if a in r: d = e"))
++
++    def test_if_nested(self):
++        s = """
++            if b in r:
++                if c in d:
++                    a = c"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            if b in r:
++                if c in d:
++                    c = a"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_while(self):
++        self.failUnless(self.find_binding("a", "while b in r: a = c"))
++        self.failIf(self.find_binding("a", "while a in r: d = e"))
++
++    def test_while_nested(self):
++        s = """
++            while b in r:
++                while c in d:
++                    a = c"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            while b in r:
++                while c in d:
++                    c = a"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except(self):
++        s = """
++            try:
++                a = 6
++            except:
++                b = 8"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except KeyError:
++                pass
++            except:
++                a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except_nested(self):
++        s = """
++            try:
++                try:
++                    a = 6
++                except:
++                    pass
++            except:
++                b = 8"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                try:
++                    a = 6
++                except:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                try:
++                    pass
++                except:
++                    a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                try:
++                    b = 8
++                except KeyError:
++                    pass
++                except:
++                    a = 6
++            except:
++                pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                pass
++            except:
++                try:
++                    b = 8
++                except KeyError:
++                    pass
++                except:
++                    a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++        s = """
++            try:
++                try:
++                    b = 8
++                except:
++                    c = d
++            except:
++                try:
++                    b = 6
++                except:
++                    t = 8
++                except:
++                    o = y"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except_finally(self):
++        s = """
++            try:
++                c = 6
++            except:
++                b = 8
++            finally:
++                a = 9"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                b = 9
++            finally:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except_finally_nested(self):
++        s = """
++            try:
++                c = 6
++            except:
++                b = 8
++            finally:
++                try:
++                    a = 9
++                except:
++                    b = 9
++                finally:
++                    c = 9"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                try:
++                    pass
++                finally:
++                    a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                try:
++                    b = 6
++                finally:
++                    b = 7"""
++        self.failIf(self.find_binding("a", s))
++
++class Test_touch_import(support.TestCase):
++
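++    # touch_import(package, name, node) is expected to insert the matching
++    # import statement near the top of the tree: after a module docstring
++    # and after any existing imports, as the cases below check.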
++    def test_after_docstring(self):
++        node = parse('"""foo"""\nbar()')
++        fixer_util.touch_import(None, "foo", node)
++        self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
++
++    def test_after_imports(self):
++        node = parse('"""foo"""\nimport bar\nbar()')
++        fixer_util.touch_import(None, "foo", node)
++        self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
++
++    def test_beginning(self):
++        node = parse('bar()')
++        fixer_util.touch_import(None, "foo", node)
++        self.assertEqual(str(node), 'import foo\nbar()\n\n')
++
++    def test_from_import(self):
++        node = parse('bar()')
++        fixer_util.touch_import("cgi", "escape", node)
++        self.assertEqual(str(node), 'from cgi import escape\nbar()\n\n')
++
++    def test_name_import(self):
++        node = parse('bar()')
++        fixer_util.touch_import(None, "cgi", node)
++        self.assertEqual(str(node), 'import cgi\nbar()\n\n')
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
+diff -r 531f2e948299 refactor/tests/__init__.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/__init__.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,24 @@
++"""Make tests/ into a package. This allows us to "import tests" and
++have tests.all_tests be a TestSuite representing all test cases
++from all test_*.py files in tests/."""
++# Author: Collin Winter
++
++import os
++import os.path
++import unittest
++import types
++
++from . import support
++
++all_tests = unittest.TestSuite()
++
++tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
++tests = [t[0:-3] for t in os.listdir(tests_dir)
++                        if t.startswith('test_') and t.endswith('.py')]
++
++loader = unittest.TestLoader()
++
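++# level=1 makes this a relative import, equivalent to "from . import t";
++# importing a submodule also binds it as an attribute of the package, and
++# since this file is the package's __init__, that attribute shows up
++# directly in globals() below.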
++for t in tests:
++    __import__("",globals(),locals(),[t],level=1)
++    mod = globals()[t]
++    all_tests.addTests(loader.loadTestsFromModule(mod))
+diff -r 531f2e948299 refactor/tests/data/.svn/all-wcprops
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/all-wcprops	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,29 @@
++K 25
++svn:wc:ra_dav:version-url
++V 62
++/projects/!svn/ver/67993/sandbox/trunk/2to3/lib2to3/tests/data
++END
++infinite_recursion.py
++K 25
++svn:wc:ra_dav:version-url
++V 84
++/projects/!svn/ver/67433/sandbox/trunk/2to3/lib2to3/tests/data/infinite_recursion.py
++END
++py2_test_grammar.py
++K 25
++svn:wc:ra_dav:version-url
++V 82
++/projects/!svn/ver/66191/sandbox/trunk/2to3/lib2to3/tests/data/py2_test_grammar.py
++END
++py3_test_grammar.py
++K 25
++svn:wc:ra_dav:version-url
++V 82
++/projects/!svn/ver/67993/sandbox/trunk/2to3/lib2to3/tests/data/py3_test_grammar.py
++END
++README
++K 25
++svn:wc:ra_dav:version-url
++V 69
++/projects/!svn/ver/66805/sandbox/trunk/2to3/lib2to3/tests/data/README
++END
+diff -r 531f2e948299 refactor/tests/data/.svn/dir-prop-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/dir-prop-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,7 @@
++K 10
++svn:ignore
++V 10
++*.py[co]
++
++
++END
+diff -r 531f2e948299 refactor/tests/data/.svn/entries
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,167 @@
++9
++
++dir
++70822
++http://svn.python.org/projects/sandbox/trunk/2to3/lib2to3/tests/data
++http://svn.python.org/projects
++
++
++
++2008-12-28T21:04:32.707707Z
++67993
++benjamin.peterson
++has-props
++
++svn:special svn:externals svn:needs-lock
++
++
++
++
++
++
++
++
++
++
++
++6015fed2-1504-0410-9fe1-9d1591cc4771
++
++fixers
++dir
++
++infinite_recursion.py
++file
++
++
++
++
++2009-03-31T00:29:26.000000Z
++1814d487c988fd5798c42a2f0cd95ddf
++2008-11-28T23:18:48.744865Z
++67433
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++93071
++
++py2_test_grammar.py
++file
++
++
++
++
++2009-03-31T00:29:26.000000Z
++e942fb49c8000cb9a6b412726618c3a1
++2008-09-03T22:00:52.351755Z
++66191
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++30527
++
++py3_test_grammar.py
++file
++
++
++
++
++2009-03-31T00:29:26.000000Z
++420f3727371001b7eeeb3375fbfa95aa
++2008-12-28T21:04:32.707707Z
++67993
++benjamin.peterson
++has-props
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++29861
++
++README
++file
++
++
++
++
++2009-03-31T00:29:26.000000Z
++49f64b8dc7eb5e8d1d21281604faaaa7
++2008-10-05T01:11:02.545241Z
++66805
++benjamin.peterson
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++404
++
+diff -r 531f2e948299 refactor/tests/data/.svn/format
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/format	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,1 @@
++9
+diff -r 531f2e948299 refactor/tests/data/.svn/prop-base/infinite_recursion.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/prop-base/infinite_recursion.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,5 @@
++K 13
++svn:eol-style
++V 6
++native
++END
+diff -r 531f2e948299 refactor/tests/data/.svn/prop-base/py2_test_grammar.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/prop-base/py2_test_grammar.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/tests/data/.svn/prop-base/py3_test_grammar.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/prop-base/py3_test_grammar.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,9 @@
++K 13
++svn:eol-style
++V 6
++native
++K 12
++svn:keywords
++V 2
++Id
++END
+diff -r 531f2e948299 refactor/tests/data/.svn/text-base/README.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/text-base/README.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++In this directory:
++- py2_test_grammar.py -- test file that exercises most/all of Python 2.x's grammar.
++- py3_test_grammar.py -- test file that exercises most/all of Python 3.x's grammar.
++- infinite_recursion.py -- test file that causes lib2to3's faster recursive pattern matching
++  scheme to fail, but passes when lib2to3 falls back to iterative pattern matching.
++- fixes/ -- for use by test_refactor.py
+diff -r 531f2e948299 refactor/tests/data/.svn/text-base/infinite_recursion.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/text-base/infinite_recursion.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,2669 @@
++# This file is used to verify that 2to3 falls back to a slower, iterative pattern matching
++# scheme in the event that the faster recursive system fails due to infinite recursion.
++from ctypes import *
++STRING = c_char_p
++
++
++OSUnknownByteOrder = 0
++UIT_PROMPT = 1
++P_PGID = 2
++P_PID = 1
++UIT_ERROR = 5
++UIT_INFO = 4
++UIT_NONE = 0
++P_ALL = 0
++UIT_VERIFY = 2
++OSBigEndian = 2
++UIT_BOOLEAN = 3
++OSLittleEndian = 1
++__darwin_nl_item = c_int
++__darwin_wctrans_t = c_int
++__darwin_wctype_t = c_ulong
++__int8_t = c_byte
++__uint8_t = c_ubyte
++__int16_t = c_short
++__uint16_t = c_ushort
++__int32_t = c_int
++__uint32_t = c_uint
++__int64_t = c_longlong
++__uint64_t = c_ulonglong
++__darwin_intptr_t = c_long
++__darwin_natural_t = c_uint
++__darwin_ct_rune_t = c_int
++class __mbstate_t(Union):
++    pass
++__mbstate_t._pack_ = 4
++__mbstate_t._fields_ = [
++    ('__mbstate8', c_char * 128),
++    ('_mbstateL', c_longlong),
++]
++assert sizeof(__mbstate_t) == 128, sizeof(__mbstate_t)
++assert alignment(__mbstate_t) == 4, alignment(__mbstate_t)
++__darwin_mbstate_t = __mbstate_t
++__darwin_ptrdiff_t = c_int
++__darwin_size_t = c_ulong
++__darwin_va_list = STRING
++__darwin_wchar_t = c_int
++__darwin_rune_t = __darwin_wchar_t
++__darwin_wint_t = c_int
++__darwin_clock_t = c_ulong
++__darwin_socklen_t = __uint32_t
++__darwin_ssize_t = c_long
++__darwin_time_t = c_long
++sig_atomic_t = c_int
++class sigcontext(Structure):
++    pass
++sigcontext._fields_ = [
++    ('sc_onstack', c_int),
++    ('sc_mask', c_int),
++    ('sc_eax', c_uint),
++    ('sc_ebx', c_uint),
++    ('sc_ecx', c_uint),
++    ('sc_edx', c_uint),
++    ('sc_edi', c_uint),
++    ('sc_esi', c_uint),
++    ('sc_ebp', c_uint),
++    ('sc_esp', c_uint),
++    ('sc_ss', c_uint),
++    ('sc_eflags', c_uint),
++    ('sc_eip', c_uint),
++    ('sc_cs', c_uint),
++    ('sc_ds', c_uint),
++    ('sc_es', c_uint),
++    ('sc_fs', c_uint),
++    ('sc_gs', c_uint),
++]
++assert sizeof(sigcontext) == 72, sizeof(sigcontext)
++assert alignment(sigcontext) == 4, alignment(sigcontext)
++u_int8_t = c_ubyte
++u_int16_t = c_ushort
++u_int32_t = c_uint
++u_int64_t = c_ulonglong
++int32_t = c_int
++register_t = int32_t
++user_addr_t = u_int64_t
++user_size_t = u_int64_t
++int64_t = c_longlong
++user_ssize_t = int64_t
++user_long_t = int64_t
++user_ulong_t = u_int64_t
++user_time_t = int64_t
++syscall_arg_t = u_int64_t
++
++# values for unnamed enumeration
++class aes_key_st(Structure):
++    pass
++aes_key_st._fields_ = [
++    ('rd_key', c_ulong * 60),
++    ('rounds', c_int),
++]
++assert sizeof(aes_key_st) == 244, sizeof(aes_key_st)
++assert alignment(aes_key_st) == 4, alignment(aes_key_st)
++AES_KEY = aes_key_st
++class asn1_ctx_st(Structure):
++    pass
++asn1_ctx_st._fields_ = [
++    ('p', POINTER(c_ubyte)),
++    ('eos', c_int),
++    ('error', c_int),
++    ('inf', c_int),
++    ('tag', c_int),
++    ('xclass', c_int),
++    ('slen', c_long),
++    ('max', POINTER(c_ubyte)),
++    ('q', POINTER(c_ubyte)),
++    ('pp', POINTER(POINTER(c_ubyte))),
++    ('line', c_int),
++]
++assert sizeof(asn1_ctx_st) == 44, sizeof(asn1_ctx_st)
++assert alignment(asn1_ctx_st) == 4, alignment(asn1_ctx_st)
++ASN1_CTX = asn1_ctx_st
++class asn1_object_st(Structure):
++    pass
++asn1_object_st._fields_ = [
++    ('sn', STRING),
++    ('ln', STRING),
++    ('nid', c_int),
++    ('length', c_int),
++    ('data', POINTER(c_ubyte)),
++    ('flags', c_int),
++]
++assert sizeof(asn1_object_st) == 24, sizeof(asn1_object_st)
++assert alignment(asn1_object_st) == 4, alignment(asn1_object_st)
++ASN1_OBJECT = asn1_object_st
++class asn1_string_st(Structure):
++    pass
++asn1_string_st._fields_ = [
++    ('length', c_int),
++    ('type', c_int),
++    ('data', POINTER(c_ubyte)),
++    ('flags', c_long),
++]
++assert sizeof(asn1_string_st) == 16, sizeof(asn1_string_st)
++assert alignment(asn1_string_st) == 4, alignment(asn1_string_st)
++ASN1_STRING = asn1_string_st
++class ASN1_ENCODING_st(Structure):
++    pass
++ASN1_ENCODING_st._fields_ = [
++    ('enc', POINTER(c_ubyte)),
++    ('len', c_long),
++    ('modified', c_int),
++]
++assert sizeof(ASN1_ENCODING_st) == 12, sizeof(ASN1_ENCODING_st)
++assert alignment(ASN1_ENCODING_st) == 4, alignment(ASN1_ENCODING_st)
++ASN1_ENCODING = ASN1_ENCODING_st
++class asn1_string_table_st(Structure):
++    pass
++asn1_string_table_st._fields_ = [
++    ('nid', c_int),
++    ('minsize', c_long),
++    ('maxsize', c_long),
++    ('mask', c_ulong),
++    ('flags', c_ulong),
++]
++assert sizeof(asn1_string_table_st) == 20, sizeof(asn1_string_table_st)
++assert alignment(asn1_string_table_st) == 4, alignment(asn1_string_table_st)
++ASN1_STRING_TABLE = asn1_string_table_st
++class ASN1_TEMPLATE_st(Structure):
++    pass
++ASN1_TEMPLATE_st._fields_ = [
++]
++ASN1_TEMPLATE = ASN1_TEMPLATE_st
++class ASN1_ITEM_st(Structure):
++    pass
++ASN1_ITEM = ASN1_ITEM_st
++ASN1_ITEM_st._fields_ = [
++]
++class ASN1_TLC_st(Structure):
++    pass
++ASN1_TLC = ASN1_TLC_st
++ASN1_TLC_st._fields_ = [
++]
++class ASN1_VALUE_st(Structure):
++    pass
++ASN1_VALUE_st._fields_ = [
++]
++ASN1_VALUE = ASN1_VALUE_st
++ASN1_ITEM_EXP = ASN1_ITEM
++class asn1_type_st(Structure):
++    pass
++class N12asn1_type_st4DOLLAR_11E(Union):
++    pass
++ASN1_BOOLEAN = c_int
++ASN1_INTEGER = asn1_string_st
++ASN1_ENUMERATED = asn1_string_st
++ASN1_BIT_STRING = asn1_string_st
++ASN1_OCTET_STRING = asn1_string_st
++ASN1_PRINTABLESTRING = asn1_string_st
++ASN1_T61STRING = asn1_string_st
++ASN1_IA5STRING = asn1_string_st
++ASN1_GENERALSTRING = asn1_string_st
++ASN1_BMPSTRING = asn1_string_st
++ASN1_UNIVERSALSTRING = asn1_string_st
++ASN1_UTCTIME = asn1_string_st
++ASN1_GENERALIZEDTIME = asn1_string_st
++ASN1_VISIBLESTRING = asn1_string_st
++ASN1_UTF8STRING = asn1_string_st
++N12asn1_type_st4DOLLAR_11E._fields_ = [
++    ('ptr', STRING),
++    ('boolean', ASN1_BOOLEAN),
++    ('asn1_string', POINTER(ASN1_STRING)),
++    ('object', POINTER(ASN1_OBJECT)),
++    ('integer', POINTER(ASN1_INTEGER)),
++    ('enumerated', POINTER(ASN1_ENUMERATED)),
++    ('bit_string', POINTER(ASN1_BIT_STRING)),
++    ('octet_string', POINTER(ASN1_OCTET_STRING)),
++    ('printablestring', POINTER(ASN1_PRINTABLESTRING)),
++    ('t61string', POINTER(ASN1_T61STRING)),
++    ('ia5string', POINTER(ASN1_IA5STRING)),
++    ('generalstring', POINTER(ASN1_GENERALSTRING)),
++    ('bmpstring', POINTER(ASN1_BMPSTRING)),
++    ('universalstring', POINTER(ASN1_UNIVERSALSTRING)),
++    ('utctime', POINTER(ASN1_UTCTIME)),
++    ('generalizedtime', POINTER(ASN1_GENERALIZEDTIME)),
++    ('visiblestring', POINTER(ASN1_VISIBLESTRING)),
++    ('utf8string', POINTER(ASN1_UTF8STRING)),
++    ('set', POINTER(ASN1_STRING)),
++    ('sequence', POINTER(ASN1_STRING)),
++]
++assert sizeof(N12asn1_type_st4DOLLAR_11E) == 4, sizeof(N12asn1_type_st4DOLLAR_11E)
++assert alignment(N12asn1_type_st4DOLLAR_11E) == 4, alignment(N12asn1_type_st4DOLLAR_11E)
++asn1_type_st._fields_ = [
++    ('type', c_int),
++    ('value', N12asn1_type_st4DOLLAR_11E),
++]
++assert sizeof(asn1_type_st) == 8, sizeof(asn1_type_st)
++assert alignment(asn1_type_st) == 4, alignment(asn1_type_st)
++ASN1_TYPE = asn1_type_st
++class asn1_method_st(Structure):
++    pass
++asn1_method_st._fields_ = [
++    ('i2d', CFUNCTYPE(c_int)),
++    ('d2i', CFUNCTYPE(STRING)),
++    ('create', CFUNCTYPE(STRING)),
++    ('destroy', CFUNCTYPE(None)),
++]
++assert sizeof(asn1_method_st) == 16, sizeof(asn1_method_st)
++assert alignment(asn1_method_st) == 4, alignment(asn1_method_st)
++ASN1_METHOD = asn1_method_st
++class asn1_header_st(Structure):
++    pass
++asn1_header_st._fields_ = [
++    ('header', POINTER(ASN1_OCTET_STRING)),
++    ('data', STRING),
++    ('meth', POINTER(ASN1_METHOD)),
++]
++assert sizeof(asn1_header_st) == 12, sizeof(asn1_header_st)
++assert alignment(asn1_header_st) == 4, alignment(asn1_header_st)
++ASN1_HEADER = asn1_header_st
++class BIT_STRING_BITNAME_st(Structure):
++    pass
++BIT_STRING_BITNAME_st._fields_ = [
++    ('bitnum', c_int),
++    ('lname', STRING),
++    ('sname', STRING),
++]
++assert sizeof(BIT_STRING_BITNAME_st) == 12, sizeof(BIT_STRING_BITNAME_st)
++assert alignment(BIT_STRING_BITNAME_st) == 4, alignment(BIT_STRING_BITNAME_st)
++BIT_STRING_BITNAME = BIT_STRING_BITNAME_st
++class bio_st(Structure):
++    pass
++BIO = bio_st
++bio_info_cb = CFUNCTYPE(None, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)
++class bio_method_st(Structure):
++    pass
++bio_method_st._fields_ = [
++    ('type', c_int),
++    ('name', STRING),
++    ('bwrite', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
++    ('bread', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
++    ('bputs', CFUNCTYPE(c_int, POINTER(BIO), STRING)),
++    ('bgets', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
++    ('ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, c_long, c_void_p)),
++    ('create', CFUNCTYPE(c_int, POINTER(BIO))),
++    ('destroy', CFUNCTYPE(c_int, POINTER(BIO))),
++    ('callback_ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, POINTER(bio_info_cb))),
++]
++assert sizeof(bio_method_st) == 40, sizeof(bio_method_st)
++assert alignment(bio_method_st) == 4, alignment(bio_method_st)
++BIO_METHOD = bio_method_st
++class crypto_ex_data_st(Structure):
++    pass
++class stack_st(Structure):
++    pass
++STACK = stack_st
++crypto_ex_data_st._fields_ = [
++    ('sk', POINTER(STACK)),
++    ('dummy', c_int),
++]
++assert sizeof(crypto_ex_data_st) == 8, sizeof(crypto_ex_data_st)
++assert alignment(crypto_ex_data_st) == 4, alignment(crypto_ex_data_st)
++CRYPTO_EX_DATA = crypto_ex_data_st
++bio_st._fields_ = [
++    ('method', POINTER(BIO_METHOD)),
++    ('callback', CFUNCTYPE(c_long, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)),
++    ('cb_arg', STRING),
++    ('init', c_int),
++    ('shutdown', c_int),
++    ('flags', c_int),
++    ('retry_reason', c_int),
++    ('num', c_int),
++    ('ptr', c_void_p),
++    ('next_bio', POINTER(bio_st)),
++    ('prev_bio', POINTER(bio_st)),
++    ('references', c_int),
++    ('num_read', c_ulong),
++    ('num_write', c_ulong),
++    ('ex_data', CRYPTO_EX_DATA),
++]
++assert sizeof(bio_st) == 64, sizeof(bio_st)
++assert alignment(bio_st) == 4, alignment(bio_st)
++class bio_f_buffer_ctx_struct(Structure):
++    pass
++bio_f_buffer_ctx_struct._fields_ = [
++    ('ibuf_size', c_int),
++    ('obuf_size', c_int),
++    ('ibuf', STRING),
++    ('ibuf_len', c_int),
++    ('ibuf_off', c_int),
++    ('obuf', STRING),
++    ('obuf_len', c_int),
++    ('obuf_off', c_int),
++]
++assert sizeof(bio_f_buffer_ctx_struct) == 32, sizeof(bio_f_buffer_ctx_struct)
++assert alignment(bio_f_buffer_ctx_struct) == 4, alignment(bio_f_buffer_ctx_struct)
++BIO_F_BUFFER_CTX = bio_f_buffer_ctx_struct
++class hostent(Structure):
++    pass
++hostent._fields_ = [
++]
++class bf_key_st(Structure):
++    pass
++bf_key_st._fields_ = [
++    ('P', c_uint * 18),
++    ('S', c_uint * 1024),
++]
++assert sizeof(bf_key_st) == 4168, sizeof(bf_key_st)
++assert alignment(bf_key_st) == 4, alignment(bf_key_st)
++BF_KEY = bf_key_st
++class bignum_st(Structure):
++    pass
++bignum_st._fields_ = [
++    ('d', POINTER(c_ulong)),
++    ('top', c_int),
++    ('dmax', c_int),
++    ('neg', c_int),
++    ('flags', c_int),
++]
++assert sizeof(bignum_st) == 20, sizeof(bignum_st)
++assert alignment(bignum_st) == 4, alignment(bignum_st)
++BIGNUM = bignum_st
++class bignum_ctx(Structure):
++    pass
++bignum_ctx._fields_ = [
++]
++BN_CTX = bignum_ctx
++class bn_blinding_st(Structure):
++    pass
++bn_blinding_st._fields_ = [
++    ('init', c_int),
++    ('A', POINTER(BIGNUM)),
++    ('Ai', POINTER(BIGNUM)),
++    ('mod', POINTER(BIGNUM)),
++    ('thread_id', c_ulong),
++]
++assert sizeof(bn_blinding_st) == 20, sizeof(bn_blinding_st)
++assert alignment(bn_blinding_st) == 4, alignment(bn_blinding_st)
++BN_BLINDING = bn_blinding_st
++class bn_mont_ctx_st(Structure):
++    pass
++bn_mont_ctx_st._fields_ = [
++    ('ri', c_int),
++    ('RR', BIGNUM),
++    ('N', BIGNUM),
++    ('Ni', BIGNUM),
++    ('n0', c_ulong),
++    ('flags', c_int),
++]
++assert sizeof(bn_mont_ctx_st) == 72, sizeof(bn_mont_ctx_st)
++assert alignment(bn_mont_ctx_st) == 4, alignment(bn_mont_ctx_st)
++BN_MONT_CTX = bn_mont_ctx_st
++class bn_recp_ctx_st(Structure):
++    pass
++bn_recp_ctx_st._fields_ = [
++    ('N', BIGNUM),
++    ('Nr', BIGNUM),
++    ('num_bits', c_int),
++    ('shift', c_int),
++    ('flags', c_int),
++]
++assert sizeof(bn_recp_ctx_st) == 52, sizeof(bn_recp_ctx_st)
++assert alignment(bn_recp_ctx_st) == 4, alignment(bn_recp_ctx_st)
++BN_RECP_CTX = bn_recp_ctx_st
++class buf_mem_st(Structure):
++    pass
++buf_mem_st._fields_ = [
++    ('length', c_int),
++    ('data', STRING),
++    ('max', c_int),
++]
++assert sizeof(buf_mem_st) == 12, sizeof(buf_mem_st)
++assert alignment(buf_mem_st) == 4, alignment(buf_mem_st)
++BUF_MEM = buf_mem_st
++class cast_key_st(Structure):
++    pass
++cast_key_st._fields_ = [
++    ('data', c_ulong * 32),
++    ('short_key', c_int),
++]
++assert sizeof(cast_key_st) == 132, sizeof(cast_key_st)
++assert alignment(cast_key_st) == 4, alignment(cast_key_st)
++CAST_KEY = cast_key_st
++class comp_method_st(Structure):
++    pass
++comp_method_st._fields_ = [
++    ('type', c_int),
++    ('name', STRING),
++    ('init', CFUNCTYPE(c_int)),
++    ('finish', CFUNCTYPE(None)),
++    ('compress', CFUNCTYPE(c_int)),
++    ('expand', CFUNCTYPE(c_int)),
++    ('ctrl', CFUNCTYPE(c_long)),
++    ('callback_ctrl', CFUNCTYPE(c_long)),
++]
++assert sizeof(comp_method_st) == 32, sizeof(comp_method_st)
++assert alignment(comp_method_st) == 4, alignment(comp_method_st)
++COMP_METHOD = comp_method_st
++class comp_ctx_st(Structure):
++    pass
++comp_ctx_st._fields_ = [
++    ('meth', POINTER(COMP_METHOD)),
++    ('compress_in', c_ulong),
++    ('compress_out', c_ulong),
++    ('expand_in', c_ulong),
++    ('expand_out', c_ulong),
++    ('ex_data', CRYPTO_EX_DATA),
++]
++assert sizeof(comp_ctx_st) == 28, sizeof(comp_ctx_st)
++assert alignment(comp_ctx_st) == 4, alignment(comp_ctx_st)
++COMP_CTX = comp_ctx_st
++class CRYPTO_dynlock_value(Structure):
++    pass
++CRYPTO_dynlock_value._fields_ = [
++]
++class CRYPTO_dynlock(Structure):
++    pass
++CRYPTO_dynlock._fields_ = [
++    ('references', c_int),
++    ('data', POINTER(CRYPTO_dynlock_value)),
++]
++assert sizeof(CRYPTO_dynlock) == 8, sizeof(CRYPTO_dynlock)
++assert alignment(CRYPTO_dynlock) == 4, alignment(CRYPTO_dynlock)
++BIO_dummy = bio_st
++CRYPTO_EX_new = CFUNCTYPE(c_int, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
++CRYPTO_EX_free = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
++CRYPTO_EX_dup = CFUNCTYPE(c_int, POINTER(CRYPTO_EX_DATA), POINTER(CRYPTO_EX_DATA), c_void_p, c_int, c_long, c_void_p)
++class crypto_ex_data_func_st(Structure):
++    pass
++crypto_ex_data_func_st._fields_ = [
++    ('argl', c_long),
++    ('argp', c_void_p),
++    ('new_func', POINTER(CRYPTO_EX_new)),
++    ('free_func', POINTER(CRYPTO_EX_free)),
++    ('dup_func', POINTER(CRYPTO_EX_dup)),
++]
++assert sizeof(crypto_ex_data_func_st) == 20, sizeof(crypto_ex_data_func_st)
++assert alignment(crypto_ex_data_func_st) == 4, alignment(crypto_ex_data_func_st)
++CRYPTO_EX_DATA_FUNCS = crypto_ex_data_func_st
++class st_CRYPTO_EX_DATA_IMPL(Structure):
++    pass
++CRYPTO_EX_DATA_IMPL = st_CRYPTO_EX_DATA_IMPL
++st_CRYPTO_EX_DATA_IMPL._fields_ = [
++]
++CRYPTO_MEM_LEAK_CB = CFUNCTYPE(c_void_p, c_ulong, STRING, c_int, c_int, c_void_p)
++DES_cblock = c_ubyte * 8
++const_DES_cblock = c_ubyte * 8
++class DES_ks(Structure):
++    pass
++class N6DES_ks3DOLLAR_9E(Union):
++    pass
++N6DES_ks3DOLLAR_9E._fields_ = [
++    ('cblock', DES_cblock),
++    ('deslong', c_ulong * 2),
++]
++assert sizeof(N6DES_ks3DOLLAR_9E) == 8, sizeof(N6DES_ks3DOLLAR_9E)
++assert alignment(N6DES_ks3DOLLAR_9E) == 4, alignment(N6DES_ks3DOLLAR_9E)
++DES_ks._fields_ = [
++    ('ks', N6DES_ks3DOLLAR_9E * 16),
++]
++assert sizeof(DES_ks) == 128, sizeof(DES_ks)
++assert alignment(DES_ks) == 4, alignment(DES_ks)
++DES_key_schedule = DES_ks
++_ossl_old_des_cblock = c_ubyte * 8
++class _ossl_old_des_ks_struct(Structure):
++    pass
++class N23_ossl_old_des_ks_struct4DOLLAR_10E(Union):
++    pass
++N23_ossl_old_des_ks_struct4DOLLAR_10E._fields_ = [
++    ('_', _ossl_old_des_cblock),
++    ('pad', c_ulong * 2),
++]
++assert sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 8, sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E)
++assert alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 4, alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E)
++_ossl_old_des_ks_struct._fields_ = [
++    ('ks', N23_ossl_old_des_ks_struct4DOLLAR_10E),
++]
++assert sizeof(_ossl_old_des_ks_struct) == 8, sizeof(_ossl_old_des_ks_struct)
++assert alignment(_ossl_old_des_ks_struct) == 4, alignment(_ossl_old_des_ks_struct)
++_ossl_old_des_key_schedule = _ossl_old_des_ks_struct * 16
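++
++# Diffie-Hellman: dh_method is the algorithm vtable, dh_st the key itself
++# (p, g, pub_key, priv_key, ...); engine_st stays opaque in these bindings.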
++class dh_st(Structure):
++    pass
++DH = dh_st
++class dh_method(Structure):
++    pass
++dh_method._fields_ = [
++    ('name', STRING),
++    ('generate_key', CFUNCTYPE(c_int, POINTER(DH))),
++    ('compute_key', CFUNCTYPE(c_int, POINTER(c_ubyte), POINTER(BIGNUM), POINTER(DH))),
++    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DH), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('init', CFUNCTYPE(c_int, POINTER(DH))),
++    ('finish', CFUNCTYPE(c_int, POINTER(DH))),
++    ('flags', c_int),
++    ('app_data', STRING),
++]
++assert sizeof(dh_method) == 32, sizeof(dh_method)
++assert alignment(dh_method) == 4, alignment(dh_method)
++DH_METHOD = dh_method
++class engine_st(Structure):
++    pass
++ENGINE = engine_st
++dh_st._fields_ = [
++    ('pad', c_int),
++    ('version', c_int),
++    ('p', POINTER(BIGNUM)),
++    ('g', POINTER(BIGNUM)),
++    ('length', c_long),
++    ('pub_key', POINTER(BIGNUM)),
++    ('priv_key', POINTER(BIGNUM)),
++    ('flags', c_int),
++    ('method_mont_p', STRING),
++    ('q', POINTER(BIGNUM)),
++    ('j', POINTER(BIGNUM)),
++    ('seed', POINTER(c_ubyte)),
++    ('seedlen', c_int),
++    ('counter', POINTER(BIGNUM)),
++    ('references', c_int),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('meth', POINTER(DH_METHOD)),
++    ('engine', POINTER(ENGINE)),
++]
++assert sizeof(dh_st) == 76, sizeof(dh_st)
++assert alignment(dh_st) == 4, alignment(dh_st)
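++
++# DSA: the (r, s) signature pair, the dsa_method vtable, and the dsa_st key
++# structure with its parameters and public/private components.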
++class dsa_st(Structure):
++    pass
++DSA = dsa_st
++class DSA_SIG_st(Structure):
++    pass
++DSA_SIG_st._fields_ = [
++    ('r', POINTER(BIGNUM)),
++    ('s', POINTER(BIGNUM)),
++]
++assert sizeof(DSA_SIG_st) == 8, sizeof(DSA_SIG_st)
++assert alignment(DSA_SIG_st) == 4, alignment(DSA_SIG_st)
++DSA_SIG = DSA_SIG_st
++class dsa_method(Structure):
++    pass
++dsa_method._fields_ = [
++    ('name', STRING),
++    ('dsa_do_sign', CFUNCTYPE(POINTER(DSA_SIG), POINTER(c_ubyte), c_int, POINTER(DSA))),
++    ('dsa_sign_setup', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BN_CTX), POINTER(POINTER(BIGNUM)), POINTER(POINTER(BIGNUM)))),
++    ('dsa_do_verify', CFUNCTYPE(c_int, POINTER(c_ubyte), c_int, POINTER(DSA_SIG), POINTER(DSA))),
++    ('dsa_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('init', CFUNCTYPE(c_int, POINTER(DSA))),
++    ('finish', CFUNCTYPE(c_int, POINTER(DSA))),
++    ('flags', c_int),
++    ('app_data', STRING),
++]
++assert sizeof(dsa_method) == 40, sizeof(dsa_method)
++assert alignment(dsa_method) == 4, alignment(dsa_method)
++DSA_METHOD = dsa_method
++dsa_st._fields_ = [
++    ('pad', c_int),
++    ('version', c_long),
++    ('write_params', c_int),
++    ('p', POINTER(BIGNUM)),
++    ('q', POINTER(BIGNUM)),
++    ('g', POINTER(BIGNUM)),
++    ('pub_key', POINTER(BIGNUM)),
++    ('priv_key', POINTER(BIGNUM)),
++    ('kinv', POINTER(BIGNUM)),
++    ('r', POINTER(BIGNUM)),
++    ('flags', c_int),
++    ('method_mont_p', STRING),
++    ('references', c_int),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('meth', POINTER(DSA_METHOD)),
++    ('engine', POINTER(ENGINE)),
++]
++assert sizeof(dsa_st) == 68, sizeof(dsa_st)
++assert alignment(dsa_st) == 4, alignment(dsa_st)
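++
++# EVP layer: evp_pkey_st wraps an RSA/DSA/DH key behind a tagged union,
++# env_md_st/env_md_ctx_st describe message digests, and
++# evp_cipher_st/evp_cipher_ctx_st describe symmetric ciphers.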
++class evp_pkey_st(Structure):
++    pass
++class N11evp_pkey_st4DOLLAR_12E(Union):
++    pass
++class rsa_st(Structure):
++    pass
++N11evp_pkey_st4DOLLAR_12E._fields_ = [
++    ('ptr', STRING),
++    ('rsa', POINTER(rsa_st)),
++    ('dsa', POINTER(dsa_st)),
++    ('dh', POINTER(dh_st)),
++]
++assert sizeof(N11evp_pkey_st4DOLLAR_12E) == 4, sizeof(N11evp_pkey_st4DOLLAR_12E)
++assert alignment(N11evp_pkey_st4DOLLAR_12E) == 4, alignment(N11evp_pkey_st4DOLLAR_12E)
++evp_pkey_st._fields_ = [
++    ('type', c_int),
++    ('save_type', c_int),
++    ('references', c_int),
++    ('pkey', N11evp_pkey_st4DOLLAR_12E),
++    ('save_parameters', c_int),
++    ('attributes', POINTER(STACK)),
++]
++assert sizeof(evp_pkey_st) == 24, sizeof(evp_pkey_st)
++assert alignment(evp_pkey_st) == 4, alignment(evp_pkey_st)
++class env_md_st(Structure):
++    pass
++class env_md_ctx_st(Structure):
++    pass
++EVP_MD_CTX = env_md_ctx_st
++env_md_st._fields_ = [
++    ('type', c_int),
++    ('pkey_type', c_int),
++    ('md_size', c_int),
++    ('flags', c_ulong),
++    ('init', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
++    ('update', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), c_void_p, c_ulong)),
++    ('final', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(c_ubyte))),
++    ('copy', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(EVP_MD_CTX))),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
++    ('sign', CFUNCTYPE(c_int)),
++    ('verify', CFUNCTYPE(c_int)),
++    ('required_pkey_type', c_int * 5),
++    ('block_size', c_int),
++    ('ctx_size', c_int),
++]
++assert sizeof(env_md_st) == 72, sizeof(env_md_st)
++assert alignment(env_md_st) == 4, alignment(env_md_st)
++EVP_MD = env_md_st
++env_md_ctx_st._fields_ = [
++    ('digest', POINTER(EVP_MD)),
++    ('engine', POINTER(ENGINE)),
++    ('flags', c_ulong),
++    ('md_data', c_void_p),
++]
++assert sizeof(env_md_ctx_st) == 16, sizeof(env_md_ctx_st)
++assert alignment(env_md_ctx_st) == 4, alignment(env_md_ctx_st)
++class evp_cipher_st(Structure):
++    pass
++class evp_cipher_ctx_st(Structure):
++    pass
++EVP_CIPHER_CTX = evp_cipher_ctx_st
++evp_cipher_st._fields_ = [
++    ('nid', c_int),
++    ('block_size', c_int),
++    ('key_len', c_int),
++    ('iv_len', c_int),
++    ('flags', c_ulong),
++    ('init', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_int)),
++    ('do_cipher', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_uint)),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX))),
++    ('ctx_size', c_int),
++    ('set_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
++    ('get_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
++    ('ctrl', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), c_int, c_int, c_void_p)),
++    ('app_data', c_void_p),
++]
++assert sizeof(evp_cipher_st) == 52, sizeof(evp_cipher_st)
++assert alignment(evp_cipher_st) == 4, alignment(evp_cipher_st)
++class evp_cipher_info_st(Structure):
++    pass
++EVP_CIPHER = evp_cipher_st
++evp_cipher_info_st._fields_ = [
++    ('cipher', POINTER(EVP_CIPHER)),
++    ('iv', c_ubyte * 16),
++]
++assert sizeof(evp_cipher_info_st) == 20, sizeof(evp_cipher_info_st)
++assert alignment(evp_cipher_info_st) == 4, alignment(evp_cipher_info_st)
++EVP_CIPHER_INFO = evp_cipher_info_st
++evp_cipher_ctx_st._fields_ = [
++    ('cipher', POINTER(EVP_CIPHER)),
++    ('engine', POINTER(ENGINE)),
++    ('encrypt', c_int),
++    ('buf_len', c_int),
++    ('oiv', c_ubyte * 16),
++    ('iv', c_ubyte * 16),
++    ('buf', c_ubyte * 32),
++    ('num', c_int),
++    ('app_data', c_void_p),
++    ('key_len', c_int),
++    ('flags', c_ulong),
++    ('cipher_data', c_void_p),
++    ('final_used', c_int),
++    ('block_mask', c_int),
++    ('final', c_ubyte * 32),
++]
++assert sizeof(evp_cipher_ctx_st) == 140, sizeof(evp_cipher_ctx_st)
++assert alignment(evp_cipher_ctx_st) == 4, alignment(evp_cipher_ctx_st)
++class evp_Encode_Ctx_st(Structure):
++    pass
++evp_Encode_Ctx_st._fields_ = [
++    ('num', c_int),
++    ('length', c_int),
++    ('enc_data', c_ubyte * 80),
++    ('line_num', c_int),
++    ('expect_nl', c_int),
++]
++assert sizeof(evp_Encode_Ctx_st) == 96, sizeof(evp_Encode_Ctx_st)
++assert alignment(evp_Encode_Ctx_st) == 4, alignment(evp_Encode_Ctx_st)
++EVP_ENCODE_CTX = evp_Encode_Ctx_st
++EVP_PBE_KEYGEN = CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), STRING, c_int, POINTER(ASN1_TYPE), POINTER(EVP_CIPHER), POINTER(EVP_MD), c_int)
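++
++# LHASH: OpenSSL's chained hash table, including its many usage counters
++# (inserts, deletes, retrieves, expand/contract reallocations).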
++class lhash_node_st(Structure):
++    pass
++lhash_node_st._fields_ = [
++    ('data', c_void_p),
++    ('next', POINTER(lhash_node_st)),
++    ('hash', c_ulong),
++]
++assert sizeof(lhash_node_st) == 12, sizeof(lhash_node_st)
++assert alignment(lhash_node_st) == 4, alignment(lhash_node_st)
++LHASH_NODE = lhash_node_st
++LHASH_COMP_FN_TYPE = CFUNCTYPE(c_int, c_void_p, c_void_p)
++LHASH_HASH_FN_TYPE = CFUNCTYPE(c_ulong, c_void_p)
++LHASH_DOALL_FN_TYPE = CFUNCTYPE(None, c_void_p)
++LHASH_DOALL_ARG_FN_TYPE = CFUNCTYPE(None, c_void_p, c_void_p)
++class lhash_st(Structure):
++    pass
++lhash_st._fields_ = [
++    ('b', POINTER(POINTER(LHASH_NODE))),
++    ('comp', LHASH_COMP_FN_TYPE),
++    ('hash', LHASH_HASH_FN_TYPE),
++    ('num_nodes', c_uint),
++    ('num_alloc_nodes', c_uint),
++    ('p', c_uint),
++    ('pmax', c_uint),
++    ('up_load', c_ulong),
++    ('down_load', c_ulong),
++    ('num_items', c_ulong),
++    ('num_expands', c_ulong),
++    ('num_expand_reallocs', c_ulong),
++    ('num_contracts', c_ulong),
++    ('num_contract_reallocs', c_ulong),
++    ('num_hash_calls', c_ulong),
++    ('num_comp_calls', c_ulong),
++    ('num_insert', c_ulong),
++    ('num_replace', c_ulong),
++    ('num_delete', c_ulong),
++    ('num_no_delete', c_ulong),
++    ('num_retrieve', c_ulong),
++    ('num_retrieve_miss', c_ulong),
++    ('num_hash_comps', c_ulong),
++    ('error', c_int),
++]
++assert sizeof(lhash_st) == 96, sizeof(lhash_st)
++assert alignment(lhash_st) == 4, alignment(lhash_st)
++LHASH = lhash_st
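++
++# Digest contexts for MD2, MD4, MD5 and the DES-based MDC2, plus OBJ_NAME.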
++class MD2state_st(Structure):
++    pass
++MD2state_st._fields_ = [
++    ('num', c_int),
++    ('data', c_ubyte * 16),
++    ('cksm', c_uint * 16),
++    ('state', c_uint * 16),
++]
++assert sizeof(MD2state_st) == 148, sizeof(MD2state_st)
++assert alignment(MD2state_st) == 4, alignment(MD2state_st)
++MD2_CTX = MD2state_st
++class MD4state_st(Structure):
++    pass
++MD4state_st._fields_ = [
++    ('A', c_uint),
++    ('B', c_uint),
++    ('C', c_uint),
++    ('D', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(MD4state_st) == 92, sizeof(MD4state_st)
++assert alignment(MD4state_st) == 4, alignment(MD4state_st)
++MD4_CTX = MD4state_st
++class MD5state_st(Structure):
++    pass
++MD5state_st._fields_ = [
++    ('A', c_uint),
++    ('B', c_uint),
++    ('C', c_uint),
++    ('D', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(MD5state_st) == 92, sizeof(MD5state_st)
++assert alignment(MD5state_st) == 4, alignment(MD5state_st)
++MD5_CTX = MD5state_st
++class mdc2_ctx_st(Structure):
++    pass
++mdc2_ctx_st._fields_ = [
++    ('num', c_int),
++    ('data', c_ubyte * 8),
++    ('h', DES_cblock),
++    ('hh', DES_cblock),
++    ('pad_type', c_int),
++]
++assert sizeof(mdc2_ctx_st) == 32, sizeof(mdc2_ctx_st)
++assert alignment(mdc2_ctx_st) == 4, alignment(mdc2_ctx_st)
++MDC2_CTX = mdc2_ctx_st
++class obj_name_st(Structure):
++    pass
++obj_name_st._fields_ = [
++    ('type', c_int),
++    ('alias', c_int),
++    ('name', STRING),
++    ('data', STRING),
++]
++assert sizeof(obj_name_st) == 16, sizeof(obj_name_st)
++assert alignment(obj_name_st) == 4, alignment(obj_name_st)
++OBJ_NAME = obj_name_st
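++
++# Aliases and forward declarations for the X.509 family, followed by the
++# PEM structures: the encode/seal context, recipients (PEM_USER) and the
++# pem_ctx_st bookkeeping record.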
++ASN1_TIME = asn1_string_st
++ASN1_NULL = c_int
++EVP_PKEY = evp_pkey_st
++class x509_st(Structure):
++    pass
++X509 = x509_st
++class X509_algor_st(Structure):
++    pass
++X509_ALGOR = X509_algor_st
++class X509_crl_st(Structure):
++    pass
++X509_CRL = X509_crl_st
++class X509_name_st(Structure):
++    pass
++X509_NAME = X509_name_st
++class x509_store_st(Structure):
++    pass
++X509_STORE = x509_store_st
++class x509_store_ctx_st(Structure):
++    pass
++X509_STORE_CTX = x509_store_ctx_st
++engine_st._fields_ = [
++]
++class PEM_Encode_Seal_st(Structure):
++    pass
++PEM_Encode_Seal_st._fields_ = [
++    ('encode', EVP_ENCODE_CTX),
++    ('md', EVP_MD_CTX),
++    ('cipher', EVP_CIPHER_CTX),
++]
++assert sizeof(PEM_Encode_Seal_st) == 252, sizeof(PEM_Encode_Seal_st)
++assert alignment(PEM_Encode_Seal_st) == 4, alignment(PEM_Encode_Seal_st)
++PEM_ENCODE_SEAL_CTX = PEM_Encode_Seal_st
++class pem_recip_st(Structure):
++    pass
++pem_recip_st._fields_ = [
++    ('name', STRING),
++    ('dn', POINTER(X509_NAME)),
++    ('cipher', c_int),
++    ('key_enc', c_int),
++]
++assert sizeof(pem_recip_st) == 16, sizeof(pem_recip_st)
++assert alignment(pem_recip_st) == 4, alignment(pem_recip_st)
++PEM_USER = pem_recip_st
++class pem_ctx_st(Structure):
++    pass
++class N10pem_ctx_st4DOLLAR_16E(Structure):
++    pass
++N10pem_ctx_st4DOLLAR_16E._fields_ = [
++    ('version', c_int),
++    ('mode', c_int),
++]
++assert sizeof(N10pem_ctx_st4DOLLAR_16E) == 8, sizeof(N10pem_ctx_st4DOLLAR_16E)
++assert alignment(N10pem_ctx_st4DOLLAR_16E) == 4, alignment(N10pem_ctx_st4DOLLAR_16E)
++class N10pem_ctx_st4DOLLAR_17E(Structure):
++    pass
++N10pem_ctx_st4DOLLAR_17E._fields_ = [
++    ('cipher', c_int),
++]
++assert sizeof(N10pem_ctx_st4DOLLAR_17E) == 4, sizeof(N10pem_ctx_st4DOLLAR_17E)
++assert alignment(N10pem_ctx_st4DOLLAR_17E) == 4, alignment(N10pem_ctx_st4DOLLAR_17E)
++pem_ctx_st._fields_ = [
++    ('type', c_int),
++    ('proc_type', N10pem_ctx_st4DOLLAR_16E),
++    ('domain', STRING),
++    ('DEK_info', N10pem_ctx_st4DOLLAR_17E),
++    ('originator', POINTER(PEM_USER)),
++    ('num_recipient', c_int),
++    ('recipient', POINTER(POINTER(PEM_USER))),
++    ('x509_chain', POINTER(STACK)),
++    ('md', POINTER(EVP_MD)),
++    ('md_enc', c_int),
++    ('md_len', c_int),
++    ('md_data', STRING),
++    ('dec', POINTER(EVP_CIPHER)),
++    ('key_len', c_int),
++    ('key', POINTER(c_ubyte)),
++    ('data_enc', c_int),
++    ('data_len', c_int),
++    ('data', POINTER(c_ubyte)),
++]
++assert sizeof(pem_ctx_st) == 76, sizeof(pem_ctx_st)
++assert alignment(pem_ctx_st) == 4, alignment(pem_ctx_st)
++PEM_CTX = pem_ctx_st
++pem_password_cb = CFUNCTYPE(c_int, STRING, c_int, c_int, c_void_p)
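++
++# PKCS#7 content types: signed, enveloped, signed-and-enveloped, digest and
++# encrypted data, unified in pkcs7_st through the tagged union 'd'.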
++class pkcs7_issuer_and_serial_st(Structure):
++    pass
++pkcs7_issuer_and_serial_st._fields_ = [
++    ('issuer', POINTER(X509_NAME)),
++    ('serial', POINTER(ASN1_INTEGER)),
++]
++assert sizeof(pkcs7_issuer_and_serial_st) == 8, sizeof(pkcs7_issuer_and_serial_st)
++assert alignment(pkcs7_issuer_and_serial_st) == 4, alignment(pkcs7_issuer_and_serial_st)
++PKCS7_ISSUER_AND_SERIAL = pkcs7_issuer_and_serial_st
++class pkcs7_signer_info_st(Structure):
++    pass
++pkcs7_signer_info_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
++    ('digest_alg', POINTER(X509_ALGOR)),
++    ('auth_attr', POINTER(STACK)),
++    ('digest_enc_alg', POINTER(X509_ALGOR)),
++    ('enc_digest', POINTER(ASN1_OCTET_STRING)),
++    ('unauth_attr', POINTER(STACK)),
++    ('pkey', POINTER(EVP_PKEY)),
++]
++assert sizeof(pkcs7_signer_info_st) == 32, sizeof(pkcs7_signer_info_st)
++assert alignment(pkcs7_signer_info_st) == 4, alignment(pkcs7_signer_info_st)
++PKCS7_SIGNER_INFO = pkcs7_signer_info_st
++class pkcs7_recip_info_st(Structure):
++    pass
++pkcs7_recip_info_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
++    ('key_enc_algor', POINTER(X509_ALGOR)),
++    ('enc_key', POINTER(ASN1_OCTET_STRING)),
++    ('cert', POINTER(X509)),
++]
++assert sizeof(pkcs7_recip_info_st) == 20, sizeof(pkcs7_recip_info_st)
++assert alignment(pkcs7_recip_info_st) == 4, alignment(pkcs7_recip_info_st)
++PKCS7_RECIP_INFO = pkcs7_recip_info_st
++class pkcs7_signed_st(Structure):
++    pass
++class pkcs7_st(Structure):
++    pass
++pkcs7_signed_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('md_algs', POINTER(STACK)),
++    ('cert', POINTER(STACK)),
++    ('crl', POINTER(STACK)),
++    ('signer_info', POINTER(STACK)),
++    ('contents', POINTER(pkcs7_st)),
++]
++assert sizeof(pkcs7_signed_st) == 24, sizeof(pkcs7_signed_st)
++assert alignment(pkcs7_signed_st) == 4, alignment(pkcs7_signed_st)
++PKCS7_SIGNED = pkcs7_signed_st
++class pkcs7_enc_content_st(Structure):
++    pass
++pkcs7_enc_content_st._fields_ = [
++    ('content_type', POINTER(ASN1_OBJECT)),
++    ('algorithm', POINTER(X509_ALGOR)),
++    ('enc_data', POINTER(ASN1_OCTET_STRING)),
++    ('cipher', POINTER(EVP_CIPHER)),
++]
++assert sizeof(pkcs7_enc_content_st) == 16, sizeof(pkcs7_enc_content_st)
++assert alignment(pkcs7_enc_content_st) == 4, alignment(pkcs7_enc_content_st)
++PKCS7_ENC_CONTENT = pkcs7_enc_content_st
++class pkcs7_enveloped_st(Structure):
++    pass
++pkcs7_enveloped_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('recipientinfo', POINTER(STACK)),
++    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
++]
++assert sizeof(pkcs7_enveloped_st) == 12, sizeof(pkcs7_enveloped_st)
++assert alignment(pkcs7_enveloped_st) == 4, alignment(pkcs7_enveloped_st)
++PKCS7_ENVELOPE = pkcs7_enveloped_st
++class pkcs7_signedandenveloped_st(Structure):
++    pass
++pkcs7_signedandenveloped_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('md_algs', POINTER(STACK)),
++    ('cert', POINTER(STACK)),
++    ('crl', POINTER(STACK)),
++    ('signer_info', POINTER(STACK)),
++    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
++    ('recipientinfo', POINTER(STACK)),
++]
++assert sizeof(pkcs7_signedandenveloped_st) == 28, sizeof(pkcs7_signedandenveloped_st)
++assert alignment(pkcs7_signedandenveloped_st) == 4, alignment(pkcs7_signedandenveloped_st)
++PKCS7_SIGN_ENVELOPE = pkcs7_signedandenveloped_st
++class pkcs7_digest_st(Structure):
++    pass
++pkcs7_digest_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('md', POINTER(X509_ALGOR)),
++    ('contents', POINTER(pkcs7_st)),
++    ('digest', POINTER(ASN1_OCTET_STRING)),
++]
++assert sizeof(pkcs7_digest_st) == 16, sizeof(pkcs7_digest_st)
++assert alignment(pkcs7_digest_st) == 4, alignment(pkcs7_digest_st)
++PKCS7_DIGEST = pkcs7_digest_st
++class pkcs7_encrypted_st(Structure):
++    pass
++pkcs7_encrypted_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
++]
++assert sizeof(pkcs7_encrypted_st) == 8, sizeof(pkcs7_encrypted_st)
++assert alignment(pkcs7_encrypted_st) == 4, alignment(pkcs7_encrypted_st)
++PKCS7_ENCRYPT = pkcs7_encrypted_st
++class N8pkcs7_st4DOLLAR_15E(Union):
++    pass
++N8pkcs7_st4DOLLAR_15E._fields_ = [
++    ('ptr', STRING),
++    ('data', POINTER(ASN1_OCTET_STRING)),
++    ('sign', POINTER(PKCS7_SIGNED)),
++    ('enveloped', POINTER(PKCS7_ENVELOPE)),
++    ('signed_and_enveloped', POINTER(PKCS7_SIGN_ENVELOPE)),
++    ('digest', POINTER(PKCS7_DIGEST)),
++    ('encrypted', POINTER(PKCS7_ENCRYPT)),
++    ('other', POINTER(ASN1_TYPE)),
++]
++assert sizeof(N8pkcs7_st4DOLLAR_15E) == 4, sizeof(N8pkcs7_st4DOLLAR_15E)
++assert alignment(N8pkcs7_st4DOLLAR_15E) == 4, alignment(N8pkcs7_st4DOLLAR_15E)
++pkcs7_st._fields_ = [
++    ('asn1', POINTER(c_ubyte)),
++    ('length', c_long),
++    ('state', c_int),
++    ('detached', c_int),
++    ('type', POINTER(ASN1_OBJECT)),
++    ('d', N8pkcs7_st4DOLLAR_15E),
++]
++assert sizeof(pkcs7_st) == 24, sizeof(pkcs7_st)
++assert alignment(pkcs7_st) == 4, alignment(pkcs7_st)
++PKCS7 = pkcs7_st
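++
++# Symmetric key schedules (RC2, RC4, RC5) and the RIPEMD-160 digest context.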
++class rc2_key_st(Structure):
++    pass
++rc2_key_st._fields_ = [
++    ('data', c_uint * 64),
++]
++assert sizeof(rc2_key_st) == 256, sizeof(rc2_key_st)
++assert alignment(rc2_key_st) == 4, alignment(rc2_key_st)
++RC2_KEY = rc2_key_st
++class rc4_key_st(Structure):
++    pass
++rc4_key_st._fields_ = [
++    ('x', c_ubyte),
++    ('y', c_ubyte),
++    ('data', c_ubyte * 256),
++]
++assert sizeof(rc4_key_st) == 258, sizeof(rc4_key_st)
++assert alignment(rc4_key_st) == 1, alignment(rc4_key_st)
++RC4_KEY = rc4_key_st
++class rc5_key_st(Structure):
++    pass
++rc5_key_st._fields_ = [
++    ('rounds', c_int),
++    ('data', c_ulong * 34),
++]
++assert sizeof(rc5_key_st) == 140, sizeof(rc5_key_st)
++assert alignment(rc5_key_st) == 4, alignment(rc5_key_st)
++RC5_32_KEY = rc5_key_st
++class RIPEMD160state_st(Structure):
++    pass
++RIPEMD160state_st._fields_ = [
++    ('A', c_uint),
++    ('B', c_uint),
++    ('C', c_uint),
++    ('D', c_uint),
++    ('E', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(RIPEMD160state_st) == 96, sizeof(RIPEMD160state_st)
++assert alignment(RIPEMD160state_st) == 4, alignment(RIPEMD160state_st)
++RIPEMD160_CTX = RIPEMD160state_st
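++
++# RSA: rsa_meth_st is the method vtable (public/private encrypt and decrypt,
++# sign/verify hooks), rsa_st holds the key components (n, e, d, p, q and the
++# CRT values dmp1/dmq1/iqmp) plus the blinding state.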
++RSA = rsa_st
++class rsa_meth_st(Structure):
++    pass
++rsa_meth_st._fields_ = [
++    ('name', STRING),
++    ('rsa_pub_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_pub_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_priv_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_priv_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(RSA))),
++    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('init', CFUNCTYPE(c_int, POINTER(RSA))),
++    ('finish', CFUNCTYPE(c_int, POINTER(RSA))),
++    ('flags', c_int),
++    ('app_data', STRING),
++    ('rsa_sign', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), POINTER(c_uint), POINTER(RSA))),
++    ('rsa_verify', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), c_uint, POINTER(RSA))),
++]
++assert sizeof(rsa_meth_st) == 52, sizeof(rsa_meth_st)
++assert alignment(rsa_meth_st) == 4, alignment(rsa_meth_st)
++RSA_METHOD = rsa_meth_st
++rsa_st._fields_ = [
++    ('pad', c_int),
++    ('version', c_long),
++    ('meth', POINTER(RSA_METHOD)),
++    ('engine', POINTER(ENGINE)),
++    ('n', POINTER(BIGNUM)),
++    ('e', POINTER(BIGNUM)),
++    ('d', POINTER(BIGNUM)),
++    ('p', POINTER(BIGNUM)),
++    ('q', POINTER(BIGNUM)),
++    ('dmp1', POINTER(BIGNUM)),
++    ('dmq1', POINTER(BIGNUM)),
++    ('iqmp', POINTER(BIGNUM)),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('references', c_int),
++    ('flags', c_int),
++    ('_method_mod_n', POINTER(BN_MONT_CTX)),
++    ('_method_mod_p', POINTER(BN_MONT_CTX)),
++    ('_method_mod_q', POINTER(BN_MONT_CTX)),
++    ('bignum_data', STRING),
++    ('blinding', POINTER(BN_BLINDING)),
++]
++assert sizeof(rsa_st) == 84, sizeof(rsa_st)
++assert alignment(rsa_st) == 4, alignment(rsa_st)
++openssl_fptr = CFUNCTYPE(None)
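++
++# SHA-1 context, then the core SSL/TLS objects: SSL_CIPHER, SSL_METHOD (the
++# protocol vtable), SSL_SESSION, SSL_CTX and the per-connection ssl_st.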
++class SHAstate_st(Structure):
++    pass
++SHAstate_st._fields_ = [
++    ('h0', c_uint),
++    ('h1', c_uint),
++    ('h2', c_uint),
++    ('h3', c_uint),
++    ('h4', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(SHAstate_st) == 96, sizeof(SHAstate_st)
++assert alignment(SHAstate_st) == 4, alignment(SHAstate_st)
++SHA_CTX = SHAstate_st
++class ssl_st(Structure):
++    pass
++ssl_crock_st = POINTER(ssl_st)
++class ssl_cipher_st(Structure):
++    pass
++ssl_cipher_st._fields_ = [
++    ('valid', c_int),
++    ('name', STRING),
++    ('id', c_ulong),
++    ('algorithms', c_ulong),
++    ('algo_strength', c_ulong),
++    ('algorithm2', c_ulong),
++    ('strength_bits', c_int),
++    ('alg_bits', c_int),
++    ('mask', c_ulong),
++    ('mask_strength', c_ulong),
++]
++assert sizeof(ssl_cipher_st) == 40, sizeof(ssl_cipher_st)
++assert alignment(ssl_cipher_st) == 4, alignment(ssl_cipher_st)
++SSL_CIPHER = ssl_cipher_st
++SSL = ssl_st
++class ssl_ctx_st(Structure):
++    pass
++SSL_CTX = ssl_ctx_st
++class ssl_method_st(Structure):
++    pass
++class ssl3_enc_method(Structure):
++    pass
++ssl_method_st._fields_ = [
++    ('version', c_int),
++    ('ssl_new', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_clear', CFUNCTYPE(None, POINTER(SSL))),
++    ('ssl_free', CFUNCTYPE(None, POINTER(SSL))),
++    ('ssl_accept', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_connect', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_read', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
++    ('ssl_peek', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
++    ('ssl_write', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
++    ('ssl_shutdown', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_renegotiate', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_renegotiate_check', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, c_long, c_void_p)),
++    ('ssl_ctx_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, c_long, c_void_p)),
++    ('get_cipher_by_char', CFUNCTYPE(POINTER(SSL_CIPHER), POINTER(c_ubyte))),
++    ('put_cipher_by_char', CFUNCTYPE(c_int, POINTER(SSL_CIPHER), POINTER(c_ubyte))),
++    ('ssl_pending', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('num_ciphers', CFUNCTYPE(c_int)),
++    ('get_cipher', CFUNCTYPE(POINTER(SSL_CIPHER), c_uint)),
++    ('get_ssl_method', CFUNCTYPE(POINTER(ssl_method_st), c_int)),
++    ('get_timeout', CFUNCTYPE(c_long)),
++    ('ssl3_enc', POINTER(ssl3_enc_method)),
++    ('ssl_version', CFUNCTYPE(c_int)),
++    ('ssl_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, CFUNCTYPE(None))),
++    ('ssl_ctx_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, CFUNCTYPE(None))),
++]
++assert sizeof(ssl_method_st) == 100, sizeof(ssl_method_st)
++assert alignment(ssl_method_st) == 4, alignment(ssl_method_st)
++ssl3_enc_method._fields_ = [
++]
++SSL_METHOD = ssl_method_st
++class ssl_session_st(Structure):
++    pass
++class sess_cert_st(Structure):
++    pass
++ssl_session_st._fields_ = [
++    ('ssl_version', c_int),
++    ('key_arg_length', c_uint),
++    ('key_arg', c_ubyte * 8),
++    ('master_key_length', c_int),
++    ('master_key', c_ubyte * 48),
++    ('session_id_length', c_uint),
++    ('session_id', c_ubyte * 32),
++    ('sid_ctx_length', c_uint),
++    ('sid_ctx', c_ubyte * 32),
++    ('not_resumable', c_int),
++    ('sess_cert', POINTER(sess_cert_st)),
++    ('peer', POINTER(X509)),
++    ('verify_result', c_long),
++    ('references', c_int),
++    ('timeout', c_long),
++    ('time', c_long),
++    ('compress_meth', c_int),
++    ('cipher', POINTER(SSL_CIPHER)),
++    ('cipher_id', c_ulong),
++    ('ciphers', POINTER(STACK)),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('prev', POINTER(ssl_session_st)),
++    ('next', POINTER(ssl_session_st)),
++]
++assert sizeof(ssl_session_st) == 200, sizeof(ssl_session_st)
++assert alignment(ssl_session_st) == 4, alignment(ssl_session_st)
++sess_cert_st._fields_ = [
++]
++SSL_SESSION = ssl_session_st
++GEN_SESSION_CB = CFUNCTYPE(c_int, POINTER(SSL), POINTER(c_ubyte), POINTER(c_uint))
++class ssl_comp_st(Structure):
++    pass
++ssl_comp_st._fields_ = [
++    ('id', c_int),
++    ('name', STRING),
++    ('method', POINTER(COMP_METHOD)),
++]
++assert sizeof(ssl_comp_st) == 12, sizeof(ssl_comp_st)
++assert alignment(ssl_comp_st) == 4, alignment(ssl_comp_st)
++SSL_COMP = ssl_comp_st
++class N10ssl_ctx_st4DOLLAR_18E(Structure):
++    pass
++N10ssl_ctx_st4DOLLAR_18E._fields_ = [
++    ('sess_connect', c_int),
++    ('sess_connect_renegotiate', c_int),
++    ('sess_connect_good', c_int),
++    ('sess_accept', c_int),
++    ('sess_accept_renegotiate', c_int),
++    ('sess_accept_good', c_int),
++    ('sess_miss', c_int),
++    ('sess_timeout', c_int),
++    ('sess_cache_full', c_int),
++    ('sess_hit', c_int),
++    ('sess_cb_hit', c_int),
++]
++assert sizeof(N10ssl_ctx_st4DOLLAR_18E) == 44, sizeof(N10ssl_ctx_st4DOLLAR_18E)
++assert alignment(N10ssl_ctx_st4DOLLAR_18E) == 4, alignment(N10ssl_ctx_st4DOLLAR_18E)
++class cert_st(Structure):
++    pass
++ssl_ctx_st._fields_ = [
++    ('method', POINTER(SSL_METHOD)),
++    ('cipher_list', POINTER(STACK)),
++    ('cipher_list_by_id', POINTER(STACK)),
++    ('cert_store', POINTER(x509_store_st)),
++    ('sessions', POINTER(lhash_st)),
++    ('session_cache_size', c_ulong),
++    ('session_cache_head', POINTER(ssl_session_st)),
++    ('session_cache_tail', POINTER(ssl_session_st)),
++    ('session_cache_mode', c_int),
++    ('session_timeout', c_long),
++    ('new_session_cb', CFUNCTYPE(c_int, POINTER(ssl_st), POINTER(SSL_SESSION))),
++    ('remove_session_cb', CFUNCTYPE(None, POINTER(ssl_ctx_st), POINTER(SSL_SESSION))),
++    ('get_session_cb', CFUNCTYPE(POINTER(SSL_SESSION), POINTER(ssl_st), POINTER(c_ubyte), c_int, POINTER(c_int))),
++    ('stats', N10ssl_ctx_st4DOLLAR_18E),
++    ('references', c_int),
++    ('app_verify_callback', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), c_void_p)),
++    ('app_verify_arg', c_void_p),
++    ('default_passwd_callback', POINTER(pem_password_cb)),
++    ('default_passwd_callback_userdata', c_void_p),
++    ('client_cert_cb', CFUNCTYPE(c_int, POINTER(SSL), POINTER(POINTER(X509)), POINTER(POINTER(EVP_PKEY)))),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('rsa_md5', POINTER(EVP_MD)),
++    ('md5', POINTER(EVP_MD)),
++    ('sha1', POINTER(EVP_MD)),
++    ('extra_certs', POINTER(STACK)),
++    ('comp_methods', POINTER(STACK)),
++    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
++    ('client_CA', POINTER(STACK)),
++    ('options', c_ulong),
++    ('mode', c_ulong),
++    ('max_cert_list', c_long),
++    ('cert', POINTER(cert_st)),
++    ('read_ahead', c_int),
++    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
++    ('msg_callback_arg', c_void_p),
++    ('verify_mode', c_int),
++    ('verify_depth', c_int),
++    ('sid_ctx_length', c_uint),
++    ('sid_ctx', c_ubyte * 32),
++    ('default_verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('generate_session_id', GEN_SESSION_CB),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('quiet_shutdown', c_int),
++]
++assert sizeof(ssl_ctx_st) == 248, sizeof(ssl_ctx_st)
++assert alignment(ssl_ctx_st) == 4, alignment(ssl_ctx_st)
++cert_st._fields_ = [
++]
++class ssl2_state_st(Structure):
++    pass
++class ssl3_state_st(Structure):
++    pass
++ssl_st._fields_ = [
++    ('version', c_int),
++    ('type', c_int),
++    ('method', POINTER(SSL_METHOD)),
++    ('rbio', POINTER(BIO)),
++    ('wbio', POINTER(BIO)),
++    ('bbio', POINTER(BIO)),
++    ('rwstate', c_int),
++    ('in_handshake', c_int),
++    ('handshake_func', CFUNCTYPE(c_int)),
++    ('server', c_int),
++    ('new_session', c_int),
++    ('quiet_shutdown', c_int),
++    ('shutdown', c_int),
++    ('state', c_int),
++    ('rstate', c_int),
++    ('init_buf', POINTER(BUF_MEM)),
++    ('init_msg', c_void_p),
++    ('init_num', c_int),
++    ('init_off', c_int),
++    ('packet', POINTER(c_ubyte)),
++    ('packet_length', c_uint),
++    ('s2', POINTER(ssl2_state_st)),
++    ('s3', POINTER(ssl3_state_st)),
++    ('read_ahead', c_int),
++    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
++    ('msg_callback_arg', c_void_p),
++    ('hit', c_int),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('cipher_list', POINTER(STACK)),
++    ('cipher_list_by_id', POINTER(STACK)),
++    ('enc_read_ctx', POINTER(EVP_CIPHER_CTX)),
++    ('read_hash', POINTER(EVP_MD)),
++    ('expand', POINTER(COMP_CTX)),
++    ('enc_write_ctx', POINTER(EVP_CIPHER_CTX)),
++    ('write_hash', POINTER(EVP_MD)),
++    ('compress', POINTER(COMP_CTX)),
++    ('cert', POINTER(cert_st)),
++    ('sid_ctx_length', c_uint),
++    ('sid_ctx', c_ubyte * 32),
++    ('session', POINTER(SSL_SESSION)),
++    ('generate_session_id', GEN_SESSION_CB),
++    ('verify_mode', c_int),
++    ('verify_depth', c_int),
++    ('verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
++    ('error', c_int),
++    ('error_code', c_int),
++    ('ctx', POINTER(SSL_CTX)),
++    ('debug', c_int),
++    ('verify_result', c_long),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('client_CA', POINTER(STACK)),
++    ('references', c_int),
++    ('options', c_ulong),
++    ('mode', c_ulong),
++    ('max_cert_list', c_long),
++    ('first_packet', c_int),
++    ('client_version', c_int),
++]
++assert sizeof(ssl_st) == 268, sizeof(ssl_st)
++assert alignment(ssl_st) == 4, alignment(ssl_st)
++class N13ssl2_state_st4DOLLAR_19E(Structure):
++    pass
++N13ssl2_state_st4DOLLAR_19E._fields_ = [
++    ('conn_id_length', c_uint),
++    ('cert_type', c_uint),
++    ('cert_length', c_uint),
++    ('csl', c_uint),
++    ('clear', c_uint),
++    ('enc', c_uint),
++    ('ccl', c_ubyte * 32),
++    ('cipher_spec_length', c_uint),
++    ('session_id_length', c_uint),
++    ('clen', c_uint),
++    ('rlen', c_uint),
++]
++assert sizeof(N13ssl2_state_st4DOLLAR_19E) == 72, sizeof(N13ssl2_state_st4DOLLAR_19E)
++assert alignment(N13ssl2_state_st4DOLLAR_19E) == 4, alignment(N13ssl2_state_st4DOLLAR_19E)
++ssl2_state_st._fields_ = [
++    ('three_byte_header', c_int),
++    ('clear_text', c_int),
++    ('escape', c_int),
++    ('ssl2_rollback', c_int),
++    ('wnum', c_uint),
++    ('wpend_tot', c_int),
++    ('wpend_buf', POINTER(c_ubyte)),
++    ('wpend_off', c_int),
++    ('wpend_len', c_int),
++    ('wpend_ret', c_int),
++    ('rbuf_left', c_int),
++    ('rbuf_offs', c_int),
++    ('rbuf', POINTER(c_ubyte)),
++    ('wbuf', POINTER(c_ubyte)),
++    ('write_ptr', POINTER(c_ubyte)),
++    ('padding', c_uint),
++    ('rlength', c_uint),
++    ('ract_data_length', c_int),
++    ('wlength', c_uint),
++    ('wact_data_length', c_int),
++    ('ract_data', POINTER(c_ubyte)),
++    ('wact_data', POINTER(c_ubyte)),
++    ('mac_data', POINTER(c_ubyte)),
++    ('read_key', POINTER(c_ubyte)),
++    ('write_key', POINTER(c_ubyte)),
++    ('challenge_length', c_uint),
++    ('challenge', c_ubyte * 32),
++    ('conn_id_length', c_uint),
++    ('conn_id', c_ubyte * 16),
++    ('key_material_length', c_uint),
++    ('key_material', c_ubyte * 48),
++    ('read_sequence', c_ulong),
++    ('write_sequence', c_ulong),
++    ('tmp', N13ssl2_state_st4DOLLAR_19E),
++]
++assert sizeof(ssl2_state_st) == 288, sizeof(ssl2_state_st)
++assert alignment(ssl2_state_st) == 4, alignment(ssl2_state_st)
++SSL2_STATE = ssl2_state_st
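++
++# SSLv3 record layer: SSL3_RECORD and SSL3_BUFFER, then the handshake state
++# in ssl3_state_st (sequence numbers, MAC secrets, finished-message digests).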
++class ssl3_record_st(Structure):
++    pass
++ssl3_record_st._fields_ = [
++    ('type', c_int),
++    ('length', c_uint),
++    ('off', c_uint),
++    ('data', POINTER(c_ubyte)),
++    ('input', POINTER(c_ubyte)),
++    ('comp', POINTER(c_ubyte)),
++]
++assert sizeof(ssl3_record_st) == 24, sizeof(ssl3_record_st)
++assert alignment(ssl3_record_st) == 4, alignment(ssl3_record_st)
++SSL3_RECORD = ssl3_record_st
++class ssl3_buffer_st(Structure):
++    pass
++size_t = __darwin_size_t
++ssl3_buffer_st._fields_ = [
++    ('buf', POINTER(c_ubyte)),
++    ('len', size_t),
++    ('offset', c_int),
++    ('left', c_int),
++]
++assert sizeof(ssl3_buffer_st) == 16, sizeof(ssl3_buffer_st)
++assert alignment(ssl3_buffer_st) == 4, alignment(ssl3_buffer_st)
++SSL3_BUFFER = ssl3_buffer_st
++class N13ssl3_state_st4DOLLAR_20E(Structure):
++    pass
++N13ssl3_state_st4DOLLAR_20E._fields_ = [
++    ('cert_verify_md', c_ubyte * 72),
++    ('finish_md', c_ubyte * 72),
++    ('finish_md_len', c_int),
++    ('peer_finish_md', c_ubyte * 72),
++    ('peer_finish_md_len', c_int),
++    ('message_size', c_ulong),
++    ('message_type', c_int),
++    ('new_cipher', POINTER(SSL_CIPHER)),
++    ('dh', POINTER(DH)),
++    ('next_state', c_int),
++    ('reuse_message', c_int),
++    ('cert_req', c_int),
++    ('ctype_num', c_int),
++    ('ctype', c_char * 7),
++    ('ca_names', POINTER(STACK)),
++    ('use_rsa_tmp', c_int),
++    ('key_block_length', c_int),
++    ('key_block', POINTER(c_ubyte)),
++    ('new_sym_enc', POINTER(EVP_CIPHER)),
++    ('new_hash', POINTER(EVP_MD)),
++    ('new_compression', POINTER(SSL_COMP)),
++    ('cert_request', c_int),
++]
++assert sizeof(N13ssl3_state_st4DOLLAR_20E) == 296, sizeof(N13ssl3_state_st4DOLLAR_20E)
++assert alignment(N13ssl3_state_st4DOLLAR_20E) == 4, alignment(N13ssl3_state_st4DOLLAR_20E)
++ssl3_state_st._fields_ = [
++    ('flags', c_long),
++    ('delay_buf_pop_ret', c_int),
++    ('read_sequence', c_ubyte * 8),
++    ('read_mac_secret', c_ubyte * 36),
++    ('write_sequence', c_ubyte * 8),
++    ('write_mac_secret', c_ubyte * 36),
++    ('server_random', c_ubyte * 32),
++    ('client_random', c_ubyte * 32),
++    ('need_empty_fragments', c_int),
++    ('empty_fragment_done', c_int),
++    ('rbuf', SSL3_BUFFER),
++    ('wbuf', SSL3_BUFFER),
++    ('rrec', SSL3_RECORD),
++    ('wrec', SSL3_RECORD),
++    ('alert_fragment', c_ubyte * 2),
++    ('alert_fragment_len', c_uint),
++    ('handshake_fragment', c_ubyte * 4),
++    ('handshake_fragment_len', c_uint),
++    ('wnum', c_uint),
++    ('wpend_tot', c_int),
++    ('wpend_type', c_int),
++    ('wpend_ret', c_int),
++    ('wpend_buf', POINTER(c_ubyte)),
++    ('finish_dgst1', EVP_MD_CTX),
++    ('finish_dgst2', EVP_MD_CTX),
++    ('change_cipher_spec', c_int),
++    ('warn_alert', c_int),
++    ('fatal_alert', c_int),
++    ('alert_dispatch', c_int),
++    ('send_alert', c_ubyte * 2),
++    ('renegotiate', c_int),
++    ('total_renegotiations', c_int),
++    ('num_renegotiations', c_int),
++    ('in_read_app_data', c_int),
++    ('tmp', N13ssl3_state_st4DOLLAR_20E),
++]
++assert sizeof(ssl3_state_st) == 648, sizeof(ssl3_state_st)
++assert alignment(ssl3_state_st) == 4, alignment(ssl3_state_st)
++SSL3_STATE = ssl3_state_st
++stack_st._fields_ = [
++    ('num', c_int),
++    ('data', POINTER(STRING)),
++    ('sorted', c_int),
++    ('num_alloc', c_int),
++    ('comp', CFUNCTYPE(c_int, POINTER(STRING), POINTER(STRING))),
++]
++assert sizeof(stack_st) == 20, sizeof(stack_st)
++assert alignment(stack_st) == 4, alignment(stack_st)
++class ui_st(Structure):
++    pass
++ui_st._fields_ = [
++]
++UI = ui_st
++class ui_method_st(Structure):
++    pass
++ui_method_st._fields_ = [
++]
++UI_METHOD = ui_method_st
++class ui_string_st(Structure):
++    pass
++ui_string_st._fields_ = [
++]
++UI_STRING = ui_string_st
++
++# values for enumeration 'UI_string_types'
++UI_string_types = c_int # enum
++class X509_objects_st(Structure):
++    pass
++X509_objects_st._fields_ = [
++    ('nid', c_int),
++    ('a2i', CFUNCTYPE(c_int)),
++    ('i2a', CFUNCTYPE(c_int)),
++]
++assert sizeof(X509_objects_st) == 12, sizeof(X509_objects_st)
++assert alignment(X509_objects_st) == 4, alignment(X509_objects_st)
++X509_OBJECTS = X509_objects_st
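++
++# X.509 internals: algorithm identifiers, validity window, public key,
++# names and name entries, extensions, attributes, requests (CSRs), the
++# certificate body x509_cinf_st, and the x509_st wrapper with its cached
++# extension data, plus CRL and private-key containers.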
++X509_algor_st._fields_ = [
++    ('algorithm', POINTER(ASN1_OBJECT)),
++    ('parameter', POINTER(ASN1_TYPE)),
++]
++assert sizeof(X509_algor_st) == 8, sizeof(X509_algor_st)
++assert alignment(X509_algor_st) == 4, alignment(X509_algor_st)
++class X509_val_st(Structure):
++    pass
++X509_val_st._fields_ = [
++    ('notBefore', POINTER(ASN1_TIME)),
++    ('notAfter', POINTER(ASN1_TIME)),
++]
++assert sizeof(X509_val_st) == 8, sizeof(X509_val_st)
++assert alignment(X509_val_st) == 4, alignment(X509_val_st)
++X509_VAL = X509_val_st
++class X509_pubkey_st(Structure):
++    pass
++X509_pubkey_st._fields_ = [
++    ('algor', POINTER(X509_ALGOR)),
++    ('public_key', POINTER(ASN1_BIT_STRING)),
++    ('pkey', POINTER(EVP_PKEY)),
++]
++assert sizeof(X509_pubkey_st) == 12, sizeof(X509_pubkey_st)
++assert alignment(X509_pubkey_st) == 4, alignment(X509_pubkey_st)
++X509_PUBKEY = X509_pubkey_st
++class X509_sig_st(Structure):
++    pass
++X509_sig_st._fields_ = [
++    ('algor', POINTER(X509_ALGOR)),
++    ('digest', POINTER(ASN1_OCTET_STRING)),
++]
++assert sizeof(X509_sig_st) == 8, sizeof(X509_sig_st)
++assert alignment(X509_sig_st) == 4, alignment(X509_sig_st)
++X509_SIG = X509_sig_st
++class X509_name_entry_st(Structure):
++    pass
++X509_name_entry_st._fields_ = [
++    ('object', POINTER(ASN1_OBJECT)),
++    ('value', POINTER(ASN1_STRING)),
++    ('set', c_int),
++    ('size', c_int),
++]
++assert sizeof(X509_name_entry_st) == 16, sizeof(X509_name_entry_st)
++assert alignment(X509_name_entry_st) == 4, alignment(X509_name_entry_st)
++X509_NAME_ENTRY = X509_name_entry_st
++X509_name_st._fields_ = [
++    ('entries', POINTER(STACK)),
++    ('modified', c_int),
++    ('bytes', POINTER(BUF_MEM)),
++    ('hash', c_ulong),
++]
++assert sizeof(X509_name_st) == 16, sizeof(X509_name_st)
++assert alignment(X509_name_st) == 4, alignment(X509_name_st)
++class X509_extension_st(Structure):
++    pass
++X509_extension_st._fields_ = [
++    ('object', POINTER(ASN1_OBJECT)),
++    ('critical', ASN1_BOOLEAN),
++    ('value', POINTER(ASN1_OCTET_STRING)),
++]
++assert sizeof(X509_extension_st) == 12, sizeof(X509_extension_st)
++assert alignment(X509_extension_st) == 4, alignment(X509_extension_st)
++X509_EXTENSION = X509_extension_st
++class x509_attributes_st(Structure):
++    pass
++class N18x509_attributes_st4DOLLAR_13E(Union):
++    pass
++N18x509_attributes_st4DOLLAR_13E._fields_ = [
++    ('ptr', STRING),
++    ('set', POINTER(STACK)),
++    ('single', POINTER(ASN1_TYPE)),
++]
++assert sizeof(N18x509_attributes_st4DOLLAR_13E) == 4, sizeof(N18x509_attributes_st4DOLLAR_13E)
++assert alignment(N18x509_attributes_st4DOLLAR_13E) == 4, alignment(N18x509_attributes_st4DOLLAR_13E)
++x509_attributes_st._fields_ = [
++    ('object', POINTER(ASN1_OBJECT)),
++    ('single', c_int),
++    ('value', N18x509_attributes_st4DOLLAR_13E),
++]
++assert sizeof(x509_attributes_st) == 12, sizeof(x509_attributes_st)
++assert alignment(x509_attributes_st) == 4, alignment(x509_attributes_st)
++X509_ATTRIBUTE = x509_attributes_st
++class X509_req_info_st(Structure):
++    pass
++X509_req_info_st._fields_ = [
++    ('enc', ASN1_ENCODING),
++    ('version', POINTER(ASN1_INTEGER)),
++    ('subject', POINTER(X509_NAME)),
++    ('pubkey', POINTER(X509_PUBKEY)),
++    ('attributes', POINTER(STACK)),
++]
++assert sizeof(X509_req_info_st) == 28, sizeof(X509_req_info_st)
++assert alignment(X509_req_info_st) == 4, alignment(X509_req_info_st)
++X509_REQ_INFO = X509_req_info_st
++class X509_req_st(Structure):
++    pass
++X509_req_st._fields_ = [
++    ('req_info', POINTER(X509_REQ_INFO)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++    ('references', c_int),
++]
++assert sizeof(X509_req_st) == 16, sizeof(X509_req_st)
++assert alignment(X509_req_st) == 4, alignment(X509_req_st)
++X509_REQ = X509_req_st
++class x509_cinf_st(Structure):
++    pass
++x509_cinf_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('serialNumber', POINTER(ASN1_INTEGER)),
++    ('signature', POINTER(X509_ALGOR)),
++    ('issuer', POINTER(X509_NAME)),
++    ('validity', POINTER(X509_VAL)),
++    ('subject', POINTER(X509_NAME)),
++    ('key', POINTER(X509_PUBKEY)),
++    ('issuerUID', POINTER(ASN1_BIT_STRING)),
++    ('subjectUID', POINTER(ASN1_BIT_STRING)),
++    ('extensions', POINTER(STACK)),
++]
++assert sizeof(x509_cinf_st) == 40, sizeof(x509_cinf_st)
++assert alignment(x509_cinf_st) == 4, alignment(x509_cinf_st)
++X509_CINF = x509_cinf_st
++class x509_cert_aux_st(Structure):
++    pass
++x509_cert_aux_st._fields_ = [
++    ('trust', POINTER(STACK)),
++    ('reject', POINTER(STACK)),
++    ('alias', POINTER(ASN1_UTF8STRING)),
++    ('keyid', POINTER(ASN1_OCTET_STRING)),
++    ('other', POINTER(STACK)),
++]
++assert sizeof(x509_cert_aux_st) == 20, sizeof(x509_cert_aux_st)
++assert alignment(x509_cert_aux_st) == 4, alignment(x509_cert_aux_st)
++X509_CERT_AUX = x509_cert_aux_st
++class AUTHORITY_KEYID_st(Structure):
++    pass
++x509_st._fields_ = [
++    ('cert_info', POINTER(X509_CINF)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++    ('valid', c_int),
++    ('references', c_int),
++    ('name', STRING),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('ex_pathlen', c_long),
++    ('ex_flags', c_ulong),
++    ('ex_kusage', c_ulong),
++    ('ex_xkusage', c_ulong),
++    ('ex_nscert', c_ulong),
++    ('skid', POINTER(ASN1_OCTET_STRING)),
++    ('akid', POINTER(AUTHORITY_KEYID_st)),
++    ('sha1_hash', c_ubyte * 20),
++    ('aux', POINTER(X509_CERT_AUX)),
++]
++assert sizeof(x509_st) == 84, sizeof(x509_st)
++assert alignment(x509_st) == 4, alignment(x509_st)
++AUTHORITY_KEYID_st._fields_ = [
++]
++class x509_trust_st(Structure):
++    pass
++x509_trust_st._fields_ = [
++    ('trust', c_int),
++    ('flags', c_int),
++    ('check_trust', CFUNCTYPE(c_int, POINTER(x509_trust_st), POINTER(X509), c_int)),
++    ('name', STRING),
++    ('arg1', c_int),
++    ('arg2', c_void_p),
++]
++assert sizeof(x509_trust_st) == 24, sizeof(x509_trust_st)
++assert alignment(x509_trust_st) == 4, alignment(x509_trust_st)
++X509_TRUST = x509_trust_st
++class X509_revoked_st(Structure):
++    pass
++X509_revoked_st._fields_ = [
++    ('serialNumber', POINTER(ASN1_INTEGER)),
++    ('revocationDate', POINTER(ASN1_TIME)),
++    ('extensions', POINTER(STACK)),
++    ('sequence', c_int),
++]
++assert sizeof(X509_revoked_st) == 16, sizeof(X509_revoked_st)
++assert alignment(X509_revoked_st) == 4, alignment(X509_revoked_st)
++X509_REVOKED = X509_revoked_st
++class X509_crl_info_st(Structure):
++    pass
++X509_crl_info_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('issuer', POINTER(X509_NAME)),
++    ('lastUpdate', POINTER(ASN1_TIME)),
++    ('nextUpdate', POINTER(ASN1_TIME)),
++    ('revoked', POINTER(STACK)),
++    ('extensions', POINTER(STACK)),
++    ('enc', ASN1_ENCODING),
++]
++assert sizeof(X509_crl_info_st) == 40, sizeof(X509_crl_info_st)
++assert alignment(X509_crl_info_st) == 4, alignment(X509_crl_info_st)
++X509_CRL_INFO = X509_crl_info_st
++X509_crl_st._fields_ = [
++    ('crl', POINTER(X509_CRL_INFO)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++    ('references', c_int),
++]
++assert sizeof(X509_crl_st) == 16, sizeof(X509_crl_st)
++assert alignment(X509_crl_st) == 4, alignment(X509_crl_st)
++class private_key_st(Structure):
++    pass
++private_key_st._fields_ = [
++    ('version', c_int),
++    ('enc_algor', POINTER(X509_ALGOR)),
++    ('enc_pkey', POINTER(ASN1_OCTET_STRING)),
++    ('dec_pkey', POINTER(EVP_PKEY)),
++    ('key_length', c_int),
++    ('key_data', STRING),
++    ('key_free', c_int),
++    ('cipher', EVP_CIPHER_INFO),
++    ('references', c_int),
++]
++assert sizeof(private_key_st) == 52, sizeof(private_key_st)
++assert alignment(private_key_st) == 4, alignment(private_key_st)
++X509_PKEY = private_key_st
++class X509_info_st(Structure):
++    pass
++X509_info_st._fields_ = [
++    ('x509', POINTER(X509)),
++    ('crl', POINTER(X509_CRL)),
++    ('x_pkey', POINTER(X509_PKEY)),
++    ('enc_cipher', EVP_CIPHER_INFO),
++    ('enc_len', c_int),
++    ('enc_data', STRING),
++    ('references', c_int),
++]
++assert sizeof(X509_info_st) == 44, sizeof(X509_info_st)
++assert alignment(X509_info_st) == 4, alignment(X509_info_st)
++X509_INFO = X509_info_st
++class Netscape_spkac_st(Structure):
++    pass
++Netscape_spkac_st._fields_ = [
++    ('pubkey', POINTER(X509_PUBKEY)),
++    ('challenge', POINTER(ASN1_IA5STRING)),
++]
++assert sizeof(Netscape_spkac_st) == 8, sizeof(Netscape_spkac_st)
++assert alignment(Netscape_spkac_st) == 4, alignment(Netscape_spkac_st)
++NETSCAPE_SPKAC = Netscape_spkac_st
++class Netscape_spki_st(Structure):
++    pass
++Netscape_spki_st._fields_ = [
++    ('spkac', POINTER(NETSCAPE_SPKAC)),
++    ('sig_algor', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++]
++assert sizeof(Netscape_spki_st) == 12, sizeof(Netscape_spki_st)
++assert alignment(Netscape_spki_st) == 4, alignment(Netscape_spki_st)
++NETSCAPE_SPKI = Netscape_spki_st
++class Netscape_certificate_sequence(Structure):
++    pass
++Netscape_certificate_sequence._fields_ = [
++    ('type', POINTER(ASN1_OBJECT)),
++    ('certs', POINTER(STACK)),
++]
++assert sizeof(Netscape_certificate_sequence) == 8, sizeof(Netscape_certificate_sequence)
++assert alignment(Netscape_certificate_sequence) == 4, alignment(Netscape_certificate_sequence)
++NETSCAPE_CERT_SEQUENCE = Netscape_certificate_sequence
++class PBEPARAM_st(Structure):
++    pass
++PBEPARAM_st._fields_ = [
++    ('salt', POINTER(ASN1_OCTET_STRING)),
++    ('iter', POINTER(ASN1_INTEGER)),
++]
++assert sizeof(PBEPARAM_st) == 8, sizeof(PBEPARAM_st)
++assert alignment(PBEPARAM_st) == 4, alignment(PBEPARAM_st)
++PBEPARAM = PBEPARAM_st
++class PBE2PARAM_st(Structure):
++    pass
++PBE2PARAM_st._fields_ = [
++    ('keyfunc', POINTER(X509_ALGOR)),
++    ('encryption', POINTER(X509_ALGOR)),
++]
++assert sizeof(PBE2PARAM_st) == 8, sizeof(PBE2PARAM_st)
++assert alignment(PBE2PARAM_st) == 4, alignment(PBE2PARAM_st)
++PBE2PARAM = PBE2PARAM_st
++class PBKDF2PARAM_st(Structure):
++    pass
++PBKDF2PARAM_st._fields_ = [
++    ('salt', POINTER(ASN1_TYPE)),
++    ('iter', POINTER(ASN1_INTEGER)),
++    ('keylength', POINTER(ASN1_INTEGER)),
++    ('prf', POINTER(X509_ALGOR)),
++]
++assert sizeof(PBKDF2PARAM_st) == 16, sizeof(PBKDF2PARAM_st)
++assert alignment(PBKDF2PARAM_st) == 4, alignment(PBKDF2PARAM_st)
++PBKDF2PARAM = PBKDF2PARAM_st
++class pkcs8_priv_key_info_st(Structure):
++    pass
++pkcs8_priv_key_info_st._fields_ = [
++    ('broken', c_int),
++    ('version', POINTER(ASN1_INTEGER)),
++    ('pkeyalg', POINTER(X509_ALGOR)),
++    ('pkey', POINTER(ASN1_TYPE)),
++    ('attributes', POINTER(STACK)),
++]
++assert sizeof(pkcs8_priv_key_info_st) == 20, sizeof(pkcs8_priv_key_info_st)
++assert alignment(pkcs8_priv_key_info_st) == 4, alignment(pkcs8_priv_key_info_st)
++PKCS8_PRIV_KEY_INFO = pkcs8_priv_key_info_st
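++
++# X509_STORE machinery: hash-dir/file lookup contexts, the lookup-method
++# vtable, and the store and verification contexts with their callback slots.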
++class x509_hash_dir_st(Structure):
++    pass
++x509_hash_dir_st._fields_ = [
++    ('num_dirs', c_int),
++    ('dirs', POINTER(STRING)),
++    ('dirs_type', POINTER(c_int)),
++    ('num_dirs_alloced', c_int),
++]
++assert sizeof(x509_hash_dir_st) == 16, sizeof(x509_hash_dir_st)
++assert alignment(x509_hash_dir_st) == 4, alignment(x509_hash_dir_st)
++X509_HASH_DIR_CTX = x509_hash_dir_st
++class x509_file_st(Structure):
++    pass
++x509_file_st._fields_ = [
++    ('num_paths', c_int),
++    ('num_alloced', c_int),
++    ('paths', POINTER(STRING)),
++    ('path_type', POINTER(c_int)),
++]
++assert sizeof(x509_file_st) == 16, sizeof(x509_file_st)
++assert alignment(x509_file_st) == 4, alignment(x509_file_st)
++X509_CERT_FILE_CTX = x509_file_st
++class x509_object_st(Structure):
++    pass
++class N14x509_object_st4DOLLAR_14E(Union):
++    pass
++N14x509_object_st4DOLLAR_14E._fields_ = [
++    ('ptr', STRING),
++    ('x509', POINTER(X509)),
++    ('crl', POINTER(X509_CRL)),
++    ('pkey', POINTER(EVP_PKEY)),
++]
++assert sizeof(N14x509_object_st4DOLLAR_14E) == 4, sizeof(N14x509_object_st4DOLLAR_14E)
++assert alignment(N14x509_object_st4DOLLAR_14E) == 4, alignment(N14x509_object_st4DOLLAR_14E)
++x509_object_st._fields_ = [
++    ('type', c_int),
++    ('data', N14x509_object_st4DOLLAR_14E),
++]
++assert sizeof(x509_object_st) == 8, sizeof(x509_object_st)
++assert alignment(x509_object_st) == 4, alignment(x509_object_st)
++X509_OBJECT = x509_object_st
++class x509_lookup_st(Structure):
++    pass
++X509_LOOKUP = x509_lookup_st
++class x509_lookup_method_st(Structure):
++    pass
++x509_lookup_method_st._fields_ = [
++    ('name', STRING),
++    ('new_item', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
++    ('free', CFUNCTYPE(None, POINTER(X509_LOOKUP))),
++    ('init', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
++    ('shutdown', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
++    ('ctrl', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_long, POINTER(STRING))),
++    ('get_by_subject', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(X509_OBJECT))),
++    ('get_by_issuer_serial', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(ASN1_INTEGER), POINTER(X509_OBJECT))),
++    ('get_by_fingerprint', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(c_ubyte), c_int, POINTER(X509_OBJECT))),
++    ('get_by_alias', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_int, POINTER(X509_OBJECT))),
++]
++assert sizeof(x509_lookup_method_st) == 40, sizeof(x509_lookup_method_st)
++assert alignment(x509_lookup_method_st) == 4, alignment(x509_lookup_method_st)
++X509_LOOKUP_METHOD = x509_lookup_method_st
++x509_store_st._fields_ = [
++    ('cache', c_int),
++    ('objs', POINTER(STACK)),
++    ('get_cert_methods', POINTER(STACK)),
++    ('flags', c_ulong),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
++    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
++    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
++    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
++    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('references', c_int),
++    ('depth', c_int),
++]
++assert sizeof(x509_store_st) == 76, sizeof(x509_store_st)
++assert alignment(x509_store_st) == 4, alignment(x509_store_st)
++x509_lookup_st._fields_ = [
++    ('init', c_int),
++    ('skip', c_int),
++    ('method', POINTER(X509_LOOKUP_METHOD)),
++    ('method_data', STRING),
++    ('store_ctx', POINTER(X509_STORE)),
++]
++assert sizeof(x509_lookup_st) == 20, sizeof(x509_lookup_st)
++assert alignment(x509_lookup_st) == 4, alignment(x509_lookup_st)
++time_t = __darwin_time_t
++x509_store_ctx_st._fields_ = [
++    ('ctx', POINTER(X509_STORE)),
++    ('current_method', c_int),
++    ('cert', POINTER(X509)),
++    ('untrusted', POINTER(STACK)),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('check_time', time_t),
++    ('flags', c_ulong),
++    ('other_ctx', c_void_p),
++    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
++    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
++    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
++    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
++    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('depth', c_int),
++    ('valid', c_int),
++    ('last_untrusted', c_int),
++    ('chain', POINTER(STACK)),
++    ('error_depth', c_int),
++    ('error', c_int),
++    ('current_cert', POINTER(X509)),
++    ('current_issuer', POINTER(X509)),
++    ('current_crl', POINTER(X509_CRL)),
++    ('ex_data', CRYPTO_EX_DATA),
++]
++assert sizeof(x509_store_ctx_st) == 116, sizeof(x509_store_ctx_st)
++assert alignment(x509_store_ctx_st) == 4, alignment(x509_store_ctx_st)
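++
++# From here on: Darwin libc types, apparently pulled in by the same header
++# scan that produced the OpenSSL bindings: stdio's FILE (__sFILE), the div_t
++# variants, and the opaque pthread types.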
++va_list = __darwin_va_list
++__darwin_off_t = __int64_t
++fpos_t = __darwin_off_t
++class __sbuf(Structure):
++    pass
++__sbuf._fields_ = [
++    ('_base', POINTER(c_ubyte)),
++    ('_size', c_int),
++]
++assert sizeof(__sbuf) == 8, sizeof(__sbuf)
++assert alignment(__sbuf) == 4, alignment(__sbuf)
++class __sFILEX(Structure):
++    pass
++__sFILEX._fields_ = [
++]
++class __sFILE(Structure):
++    pass
++__sFILE._pack_ = 4
++__sFILE._fields_ = [
++    ('_p', POINTER(c_ubyte)),
++    ('_r', c_int),
++    ('_w', c_int),
++    ('_flags', c_short),
++    ('_file', c_short),
++    ('_bf', __sbuf),
++    ('_lbfsize', c_int),
++    ('_cookie', c_void_p),
++    ('_close', CFUNCTYPE(c_int, c_void_p)),
++    ('_read', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
++    ('_seek', CFUNCTYPE(fpos_t, c_void_p, c_longlong, c_int)),
++    ('_write', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
++    ('_ub', __sbuf),
++    ('_extra', POINTER(__sFILEX)),
++    ('_ur', c_int),
++    ('_ubuf', c_ubyte * 3),
++    ('_nbuf', c_ubyte * 1),
++    ('_lb', __sbuf),
++    ('_blksize', c_int),
++    ('_offset', fpos_t),
++]
++assert sizeof(__sFILE) == 88, sizeof(__sFILE)
++assert alignment(__sFILE) == 4, alignment(__sFILE)
++FILE = __sFILE
++ct_rune_t = __darwin_ct_rune_t
++rune_t = __darwin_rune_t
++class div_t(Structure):
++    pass
++div_t._fields_ = [
++    ('quot', c_int),
++    ('rem', c_int),
++]
++assert sizeof(div_t) == 8, sizeof(div_t)
++assert alignment(div_t) == 4, alignment(div_t)
++class ldiv_t(Structure):
++    pass
++ldiv_t._fields_ = [
++    ('quot', c_long),
++    ('rem', c_long),
++]
++assert sizeof(ldiv_t) == 8, sizeof(ldiv_t)
++assert alignment(ldiv_t) == 4, alignment(ldiv_t)
++class lldiv_t(Structure):
++    pass
++lldiv_t._pack_ = 4
++lldiv_t._fields_ = [
++    ('quot', c_longlong),
++    ('rem', c_longlong),
++]
++assert sizeof(lldiv_t) == 16, sizeof(lldiv_t)
++assert alignment(lldiv_t) == 4, alignment(lldiv_t)
++__darwin_dev_t = __int32_t
++dev_t = __darwin_dev_t
++__darwin_mode_t = __uint16_t
++mode_t = __darwin_mode_t
++class mcontext(Structure):
++    pass
++mcontext._fields_ = [
++]
++class mcontext64(Structure):
++    pass
++mcontext64._fields_ = [
++]
++class __darwin_pthread_handler_rec(Structure):
++    pass
++__darwin_pthread_handler_rec._fields_ = [
++    ('__routine', CFUNCTYPE(None, c_void_p)),
++    ('__arg', c_void_p),
++    ('__next', POINTER(__darwin_pthread_handler_rec)),
++]
++assert sizeof(__darwin_pthread_handler_rec) == 12, sizeof(__darwin_pthread_handler_rec)
++assert alignment(__darwin_pthread_handler_rec) == 4, alignment(__darwin_pthread_handler_rec)
++class _opaque_pthread_attr_t(Structure):
++    pass
++_opaque_pthread_attr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 36),
++]
++assert sizeof(_opaque_pthread_attr_t) == 40, sizeof(_opaque_pthread_attr_t)
++assert alignment(_opaque_pthread_attr_t) == 4, alignment(_opaque_pthread_attr_t)
++class _opaque_pthread_cond_t(Structure):
++    pass
++_opaque_pthread_cond_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 24),
++]
++assert sizeof(_opaque_pthread_cond_t) == 28, sizeof(_opaque_pthread_cond_t)
++assert alignment(_opaque_pthread_cond_t) == 4, alignment(_opaque_pthread_cond_t)
++class _opaque_pthread_condattr_t(Structure):
++    pass
++_opaque_pthread_condattr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 4),
++]
++assert sizeof(_opaque_pthread_condattr_t) == 8, sizeof(_opaque_pthread_condattr_t)
++assert alignment(_opaque_pthread_condattr_t) == 4, alignment(_opaque_pthread_condattr_t)
++class _opaque_pthread_mutex_t(Structure):
++    pass
++_opaque_pthread_mutex_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 40),
++]
++assert sizeof(_opaque_pthread_mutex_t) == 44, sizeof(_opaque_pthread_mutex_t)
++assert alignment(_opaque_pthread_mutex_t) == 4, alignment(_opaque_pthread_mutex_t)
++class _opaque_pthread_mutexattr_t(Structure):
++    pass
++_opaque_pthread_mutexattr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 8),
++]
++assert sizeof(_opaque_pthread_mutexattr_t) == 12, sizeof(_opaque_pthread_mutexattr_t)
++assert alignment(_opaque_pthread_mutexattr_t) == 4, alignment(_opaque_pthread_mutexattr_t)
++class _opaque_pthread_once_t(Structure):
++    pass
++_opaque_pthread_once_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 4),
++]
++assert sizeof(_opaque_pthread_once_t) == 8, sizeof(_opaque_pthread_once_t)
++assert alignment(_opaque_pthread_once_t) == 4, alignment(_opaque_pthread_once_t)
++class _opaque_pthread_rwlock_t(Structure):
++    pass
++_opaque_pthread_rwlock_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 124),
++]
++assert sizeof(_opaque_pthread_rwlock_t) == 128, sizeof(_opaque_pthread_rwlock_t)
++assert alignment(_opaque_pthread_rwlock_t) == 4, alignment(_opaque_pthread_rwlock_t)
++class _opaque_pthread_rwlockattr_t(Structure):
++    pass
++_opaque_pthread_rwlockattr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 12),
++]
++assert sizeof(_opaque_pthread_rwlockattr_t) == 16, sizeof(_opaque_pthread_rwlockattr_t)
++assert alignment(_opaque_pthread_rwlockattr_t) == 4, alignment(_opaque_pthread_rwlockattr_t)
++class _opaque_pthread_t(Structure):
++    pass
++_opaque_pthread_t._fields_ = [
++    ('__sig', c_long),
++    ('__cleanup_stack', POINTER(__darwin_pthread_handler_rec)),
++    ('__opaque', c_char * 596),
++]
++assert sizeof(_opaque_pthread_t) == 604, sizeof(_opaque_pthread_t)
++assert alignment(_opaque_pthread_t) == 4, alignment(_opaque_pthread_t)
++__darwin_blkcnt_t = __int64_t
++__darwin_blksize_t = __int32_t
++__darwin_fsblkcnt_t = c_uint
++__darwin_fsfilcnt_t = c_uint
++__darwin_gid_t = __uint32_t
++__darwin_id_t = __uint32_t
++__darwin_ino_t = __uint32_t
++__darwin_mach_port_name_t = __darwin_natural_t
++__darwin_mach_port_t = __darwin_mach_port_name_t
++__darwin_mcontext_t = POINTER(mcontext)
++__darwin_mcontext64_t = POINTER(mcontext64)
++__darwin_pid_t = __int32_t
++__darwin_pthread_attr_t = _opaque_pthread_attr_t
++__darwin_pthread_cond_t = _opaque_pthread_cond_t
++__darwin_pthread_condattr_t = _opaque_pthread_condattr_t
++__darwin_pthread_key_t = c_ulong
++__darwin_pthread_mutex_t = _opaque_pthread_mutex_t
++__darwin_pthread_mutexattr_t = _opaque_pthread_mutexattr_t
++__darwin_pthread_once_t = _opaque_pthread_once_t
++__darwin_pthread_rwlock_t = _opaque_pthread_rwlock_t
++__darwin_pthread_rwlockattr_t = _opaque_pthread_rwlockattr_t
++__darwin_pthread_t = POINTER(_opaque_pthread_t)
++__darwin_sigset_t = __uint32_t
++__darwin_suseconds_t = __int32_t
++__darwin_uid_t = __uint32_t
++__darwin_useconds_t = __uint32_t
++__darwin_uuid_t = c_ubyte * 16
++class sigaltstack(Structure):
++    pass
++sigaltstack._fields_ = [
++    ('ss_sp', c_void_p),
++    ('ss_size', __darwin_size_t),
++    ('ss_flags', c_int),
++]
++assert sizeof(sigaltstack) == 12, sizeof(sigaltstack)
++assert alignment(sigaltstack) == 4, alignment(sigaltstack)
++__darwin_stack_t = sigaltstack
++class ucontext(Structure):
++    pass
++ucontext._fields_ = [
++    ('uc_onstack', c_int),
++    ('uc_sigmask', __darwin_sigset_t),
++    ('uc_stack', __darwin_stack_t),
++    ('uc_link', POINTER(ucontext)),
++    ('uc_mcsize', __darwin_size_t),
++    ('uc_mcontext', __darwin_mcontext_t),
++]
++assert sizeof(ucontext) == 32, sizeof(ucontext)
++assert alignment(ucontext) == 4, alignment(ucontext)
++__darwin_ucontext_t = ucontext
++class ucontext64(Structure):
++    pass
++ucontext64._fields_ = [
++    ('uc_onstack', c_int),
++    ('uc_sigmask', __darwin_sigset_t),
++    ('uc_stack', __darwin_stack_t),
++    ('uc_link', POINTER(ucontext64)),
++    ('uc_mcsize', __darwin_size_t),
++    ('uc_mcontext64', __darwin_mcontext64_t),
++]
++assert sizeof(ucontext64) == 32, sizeof(ucontext64)
++assert alignment(ucontext64) == 4, alignment(ucontext64)
++__darwin_ucontext64_t = ucontext64
++class timeval(Structure):
++    pass
++timeval._fields_ = [
++    ('tv_sec', __darwin_time_t),
++    ('tv_usec', __darwin_suseconds_t),
++]
++assert sizeof(timeval) == 8, sizeof(timeval)
++assert alignment(timeval) == 4, alignment(timeval)
++rlim_t = __int64_t
++class rusage(Structure):
++    pass
++rusage._fields_ = [
++    ('ru_utime', timeval),
++    ('ru_stime', timeval),
++    ('ru_maxrss', c_long),
++    ('ru_ixrss', c_long),
++    ('ru_idrss', c_long),
++    ('ru_isrss', c_long),
++    ('ru_minflt', c_long),
++    ('ru_majflt', c_long),
++    ('ru_nswap', c_long),
++    ('ru_inblock', c_long),
++    ('ru_oublock', c_long),
++    ('ru_msgsnd', c_long),
++    ('ru_msgrcv', c_long),
++    ('ru_nsignals', c_long),
++    ('ru_nvcsw', c_long),
++    ('ru_nivcsw', c_long),
++]
++assert sizeof(rusage) == 72, sizeof(rusage)
++assert alignment(rusage) == 4, alignment(rusage)
++class rlimit(Structure):
++    pass
++rlimit._pack_ = 4
++rlimit._fields_ = [
++    ('rlim_cur', rlim_t),
++    ('rlim_max', rlim_t),
++]
++assert sizeof(rlimit) == 16, sizeof(rlimit)
++assert alignment(rlimit) == 4, alignment(rlimit)
++mcontext_t = __darwin_mcontext_t
++mcontext64_t = __darwin_mcontext64_t
++pthread_attr_t = __darwin_pthread_attr_t
++sigset_t = __darwin_sigset_t
++ucontext_t = __darwin_ucontext_t
++ucontext64_t = __darwin_ucontext64_t
++uid_t = __darwin_uid_t
++class sigval(Union):
++    pass
++sigval._fields_ = [
++    ('sival_int', c_int),
++    ('sival_ptr', c_void_p),
++]
++assert sizeof(sigval) == 4, sizeof(sigval)
++assert alignment(sigval) == 4, alignment(sigval)
++class sigevent(Structure):
++    pass
++sigevent._fields_ = [
++    ('sigev_notify', c_int),
++    ('sigev_signo', c_int),
++    ('sigev_value', sigval),
++    ('sigev_notify_function', CFUNCTYPE(None, sigval)),
++    ('sigev_notify_attributes', POINTER(pthread_attr_t)),
++]
++assert sizeof(sigevent) == 20, sizeof(sigevent)
++assert alignment(sigevent) == 4, alignment(sigevent)
++class __siginfo(Structure):
++    pass
++pid_t = __darwin_pid_t
++__siginfo._fields_ = [
++    ('si_signo', c_int),
++    ('si_errno', c_int),
++    ('si_code', c_int),
++    ('si_pid', pid_t),
++    ('si_uid', uid_t),
++    ('si_status', c_int),
++    ('si_addr', c_void_p),
++    ('si_value', sigval),
++    ('si_band', c_long),
++    ('pad', c_ulong * 7),
++]
++assert sizeof(__siginfo) == 64, sizeof(__siginfo)
++assert alignment(__siginfo) == 4, alignment(__siginfo)
++siginfo_t = __siginfo
++class __sigaction_u(Union):
++    pass
++__sigaction_u._fields_ = [
++    ('__sa_handler', CFUNCTYPE(None, c_int)),
++    ('__sa_sigaction', CFUNCTYPE(None, c_int, POINTER(__siginfo), c_void_p)),
++]
++assert sizeof(__sigaction_u) == 4, sizeof(__sigaction_u)
++assert alignment(__sigaction_u) == 4, alignment(__sigaction_u)
++class __sigaction(Structure):
++    pass
++__sigaction._fields_ = [
++    ('__sigaction_u', __sigaction_u),
++    ('sa_tramp', CFUNCTYPE(None, c_void_p, c_int, c_int, POINTER(siginfo_t), c_void_p)),
++    ('sa_mask', sigset_t),
++    ('sa_flags', c_int),
++]
++assert sizeof(__sigaction) == 16, sizeof(__sigaction)
++assert alignment(__sigaction) == 4, alignment(__sigaction)
++class sigaction(Structure):
++    pass
++sigaction._fields_ = [
++    ('__sigaction_u', __sigaction_u),
++    ('sa_mask', sigset_t),
++    ('sa_flags', c_int),
++]
++assert sizeof(sigaction) == 12, sizeof(sigaction)
++assert alignment(sigaction) == 4, alignment(sigaction)
++sig_t = CFUNCTYPE(None, c_int)
++stack_t = __darwin_stack_t
++class sigvec(Structure):
++    pass
++sigvec._fields_ = [
++    ('sv_handler', CFUNCTYPE(None, c_int)),
++    ('sv_mask', c_int),
++    ('sv_flags', c_int),
++]
++assert sizeof(sigvec) == 12, sizeof(sigvec)
++assert alignment(sigvec) == 4, alignment(sigvec)
++class sigstack(Structure):
++    pass
++sigstack._fields_ = [
++    ('ss_sp', STRING),
++    ('ss_onstack', c_int),
++]
++assert sizeof(sigstack) == 8, sizeof(sigstack)
++assert alignment(sigstack) == 4, alignment(sigstack)
++u_char = c_ubyte
++u_short = c_ushort
++u_int = c_uint
++u_long = c_ulong
++ushort = c_ushort
++uint = c_uint
++u_quad_t = u_int64_t
++quad_t = int64_t
++qaddr_t = POINTER(quad_t)
++caddr_t = STRING
++daddr_t = int32_t
++fixpt_t = u_int32_t
++blkcnt_t = __darwin_blkcnt_t
++blksize_t = __darwin_blksize_t
++gid_t = __darwin_gid_t
++in_addr_t = __uint32_t
++in_port_t = __uint16_t
++ino_t = __darwin_ino_t
++key_t = __int32_t
++nlink_t = __uint16_t
++off_t = __darwin_off_t
++segsz_t = int32_t
++swblk_t = int32_t
++clock_t = __darwin_clock_t
++ssize_t = __darwin_ssize_t
++useconds_t = __darwin_useconds_t
++suseconds_t = __darwin_suseconds_t
++fd_mask = __int32_t
++class fd_set(Structure):
++    pass
++fd_set._fields_ = [
++    ('fds_bits', __int32_t * 32),
++]
++assert sizeof(fd_set) == 128, sizeof(fd_set)
++assert alignment(fd_set) == 4, alignment(fd_set)
++pthread_cond_t = __darwin_pthread_cond_t
++pthread_condattr_t = __darwin_pthread_condattr_t
++pthread_mutex_t = __darwin_pthread_mutex_t
++pthread_mutexattr_t = __darwin_pthread_mutexattr_t
++pthread_once_t = __darwin_pthread_once_t
++pthread_rwlock_t = __darwin_pthread_rwlock_t
++pthread_rwlockattr_t = __darwin_pthread_rwlockattr_t
++pthread_t = __darwin_pthread_t
++pthread_key_t = __darwin_pthread_key_t
++fsblkcnt_t = __darwin_fsblkcnt_t
++fsfilcnt_t = __darwin_fsfilcnt_t
++
++# values for enumeration 'idtype_t'
++idtype_t = c_int # enum
++id_t = __darwin_id_t
++class wait(Union):
++    pass
++class N4wait3DOLLAR_3E(Structure):
++    pass
++N4wait3DOLLAR_3E._fields_ = [
++    ('w_Termsig', c_uint, 7),
++    ('w_Coredump', c_uint, 1),
++    ('w_Retcode', c_uint, 8),
++    ('w_Filler', c_uint, 16),
++]
++assert sizeof(N4wait3DOLLAR_3E) == 4, sizeof(N4wait3DOLLAR_3E)
++assert alignment(N4wait3DOLLAR_3E) == 4, alignment(N4wait3DOLLAR_3E)
++class N4wait3DOLLAR_4E(Structure):
++    pass
++N4wait3DOLLAR_4E._fields_ = [
++    ('w_Stopval', c_uint, 8),
++    ('w_Stopsig', c_uint, 8),
++    ('w_Filler', c_uint, 16),
++]
++assert sizeof(N4wait3DOLLAR_4E) == 4, sizeof(N4wait3DOLLAR_4E)
++assert alignment(N4wait3DOLLAR_4E) == 4, alignment(N4wait3DOLLAR_4E)
++wait._fields_ = [
++    ('w_status', c_int),
++    ('w_T', N4wait3DOLLAR_3E),
++    ('w_S', N4wait3DOLLAR_4E),
++]
++assert sizeof(wait) == 4, sizeof(wait)
++assert alignment(wait) == 4, alignment(wait)
++class timespec(Structure):
++    pass
++timespec._fields_ = [
++    ('tv_sec', time_t),
++    ('tv_nsec', c_long),
++]
++assert sizeof(timespec) == 8, sizeof(timespec)
++assert alignment(timespec) == 4, alignment(timespec)
++class tm(Structure):
++    pass
++tm._fields_ = [
++    ('tm_sec', c_int),
++    ('tm_min', c_int),
++    ('tm_hour', c_int),
++    ('tm_mday', c_int),
++    ('tm_mon', c_int),
++    ('tm_year', c_int),
++    ('tm_wday', c_int),
++    ('tm_yday', c_int),
++    ('tm_isdst', c_int),
++    ('tm_gmtoff', c_long),
++    ('tm_zone', STRING),
++]
++assert sizeof(tm) == 44, sizeof(tm)
++assert alignment(tm) == 4, alignment(tm)
++__gnuc_va_list = STRING
++ptrdiff_t = c_int
++int8_t = c_byte
++int16_t = c_short
++uint8_t = c_ubyte
++uint16_t = c_ushort
++uint32_t = c_uint
++uint64_t = c_ulonglong
++int_least8_t = int8_t
++int_least16_t = int16_t
++int_least32_t = int32_t
++int_least64_t = int64_t
++uint_least8_t = uint8_t
++uint_least16_t = uint16_t
++uint_least32_t = uint32_t
++uint_least64_t = uint64_t
++int_fast8_t = int8_t
++int_fast16_t = int16_t
++int_fast32_t = int32_t
++int_fast64_t = int64_t
++uint_fast8_t = uint8_t
++uint_fast16_t = uint16_t
++uint_fast32_t = uint32_t
++uint_fast64_t = uint64_t
++intptr_t = c_long
++uintptr_t = c_ulong
++intmax_t = c_longlong
++uintmax_t = c_ulonglong
++__all__ = ['ENGINE', 'pkcs7_enc_content_st', '__int16_t',
++           'X509_REVOKED', 'SSL_CTX', 'UIT_BOOLEAN',
++           '__darwin_time_t', 'ucontext64_t', 'int_fast32_t',
++           'pem_ctx_st', 'uint8_t', 'fpos_t', 'X509', 'COMP_CTX',
++           'tm', 'N10pem_ctx_st4DOLLAR_17E', 'swblk_t',
++           'ASN1_TEMPLATE', '__darwin_pthread_t', 'fixpt_t',
++           'BIO_METHOD', 'ASN1_PRINTABLESTRING', 'EVP_ENCODE_CTX',
++           'dh_method', 'bio_f_buffer_ctx_struct', 'in_port_t',
++           'X509_SIG', '__darwin_ssize_t', '__darwin_sigset_t',
++           'wait', 'uint_fast16_t', 'N12asn1_type_st4DOLLAR_11E',
++           'uint_least8_t', 'pthread_rwlock_t', 'ASN1_IA5STRING',
++           'fsfilcnt_t', 'ucontext', '__uint64_t', 'timespec',
++           'x509_cinf_st', 'COMP_METHOD', 'MD5_CTX', 'buf_mem_st',
++           'ASN1_ENCODING_st', 'PBEPARAM', 'X509_NAME_ENTRY',
++           '__darwin_va_list', 'ucontext_t', 'lhash_st',
++           'N4wait3DOLLAR_4E', '__darwin_uuid_t',
++           '_ossl_old_des_ks_struct', 'id_t', 'ASN1_BIT_STRING',
++           'va_list', '__darwin_wchar_t', 'pthread_key_t',
++           'pkcs7_signer_info_st', 'ASN1_METHOD', 'DSA_SIG', 'DSA',
++           'UIT_NONE', 'pthread_t', '__darwin_useconds_t',
++           'uint_fast8_t', 'UI_STRING', 'DES_cblock',
++           '__darwin_mcontext64_t', 'rlim_t', 'PEM_Encode_Seal_st',
++           'SHAstate_st', 'u_quad_t', 'openssl_fptr',
++           '_opaque_pthread_rwlockattr_t',
++           'N18x509_attributes_st4DOLLAR_13E',
++           '__darwin_pthread_rwlock_t', 'daddr_t', 'ui_string_st',
++           'x509_file_st', 'X509_req_info_st', 'int_least64_t',
++           'evp_Encode_Ctx_st', 'X509_OBJECTS', 'CRYPTO_EX_DATA',
++           '__int8_t', 'AUTHORITY_KEYID_st', '_opaque_pthread_attr_t',
++           'sigstack', 'EVP_CIPHER_CTX', 'X509_extension_st', 'pid_t',
++           'RSA_METHOD', 'PEM_USER', 'pem_recip_st', 'env_md_ctx_st',
++           'rc5_key_st', 'ui_st', 'X509_PUBKEY', 'u_int8_t',
++           'ASN1_ITEM_st', 'pkcs7_recip_info_st', 'ssl2_state_st',
++           'off_t', 'N10ssl_ctx_st4DOLLAR_18E', 'crypto_ex_data_st',
++           'ui_method_st', '__darwin_pthread_rwlockattr_t',
++           'CRYPTO_EX_dup', '__darwin_ino_t', '__sFILE',
++           'OSUnknownByteOrder', 'BN_MONT_CTX', 'ASN1_NULL', 'time_t',
++           'CRYPTO_EX_new', 'asn1_type_st', 'CRYPTO_EX_DATA_FUNCS',
++           'user_time_t', 'BIGNUM', 'pthread_rwlockattr_t',
++           'ASN1_VALUE_st', 'DH_METHOD', '__darwin_off_t',
++           '_opaque_pthread_t', 'bn_blinding_st', 'RSA', 'ssize_t',
++           'mcontext64_t', 'user_long_t', 'fsblkcnt_t', 'cert_st',
++           '__darwin_pthread_condattr_t', 'X509_PKEY',
++           '__darwin_id_t', '__darwin_nl_item', 'SSL2_STATE', 'FILE',
++           'pthread_mutexattr_t', 'size_t',
++           '_ossl_old_des_key_schedule', 'pkcs7_issuer_and_serial_st',
++           'sigval', 'CRYPTO_MEM_LEAK_CB', 'X509_NAME', 'blkcnt_t',
++           'uint_least16_t', '__darwin_dev_t', 'evp_cipher_info_st',
++           'BN_BLINDING', 'ssl3_state_st', 'uint_least64_t',
++           'user_addr_t', 'DES_key_schedule', 'RIPEMD160_CTX',
++           'u_char', 'X509_algor_st', 'uid_t', 'sess_cert_st',
++           'u_int64_t', 'u_int16_t', 'sigset_t', '__darwin_ptrdiff_t',
++           'ASN1_CTX', 'STACK', '__int32_t', 'UI_METHOD',
++           'NETSCAPE_SPKI', 'UIT_PROMPT', 'st_CRYPTO_EX_DATA_IMPL',
++           'cast_key_st', 'X509_HASH_DIR_CTX', 'sigevent',
++           'user_ssize_t', 'clock_t', 'aes_key_st',
++           '__darwin_socklen_t', '__darwin_intptr_t', 'int_fast64_t',
++           'asn1_string_table_st', 'uint_fast32_t',
++           'ASN1_VISIBLESTRING', 'DSA_SIG_st', 'obj_name_st',
++           'X509_LOOKUP_METHOD', 'u_int32_t', 'EVP_CIPHER_INFO',
++           '__gnuc_va_list', 'AES_KEY', 'PKCS7_ISSUER_AND_SERIAL',
++           'BN_CTX', '__darwin_blkcnt_t', 'key_t', 'SHA_CTX',
++           'pkcs7_signed_st', 'SSL', 'N10pem_ctx_st4DOLLAR_16E',
++           'pthread_attr_t', 'EVP_MD', 'uint', 'ASN1_BOOLEAN',
++           'ino_t', '__darwin_clock_t', 'ASN1_OCTET_STRING',
++           'asn1_ctx_st', 'BIO_F_BUFFER_CTX', 'bn_mont_ctx_st',
++           'X509_REQ_INFO', 'PEM_CTX', 'sigvec',
++           '__darwin_pthread_mutexattr_t', 'x509_attributes_st',
++           'stack_t', '__darwin_mode_t', '__mbstate_t',
++           'asn1_object_st', 'ASN1_ENCODING', '__uint8_t',
++           'LHASH_NODE', 'PKCS7_SIGNER_INFO', 'asn1_method_st',
++           'stack_st', 'bio_info_cb', 'div_t', 'UIT_VERIFY',
++           'PBEPARAM_st', 'N4wait3DOLLAR_3E', 'quad_t', '__siginfo',
++           '__darwin_mbstate_t', 'rsa_st', 'ASN1_UNIVERSALSTRING',
++           'uint64_t', 'ssl_comp_st', 'X509_OBJECT', 'pthread_cond_t',
++           'DH', '__darwin_wctype_t', 'PKCS7_ENVELOPE', 'ASN1_TLC_st',
++           'sig_atomic_t', 'BIO', 'nlink_t', 'BUF_MEM', 'SSL3_RECORD',
++           'bio_method_st', 'timeval', 'UI_string_types', 'BIO_dummy',
++           'ssl_ctx_st', 'NETSCAPE_CERT_SEQUENCE',
++           'BIT_STRING_BITNAME_st', '__darwin_pthread_attr_t',
++           'int8_t', '__darwin_wint_t', 'OBJ_NAME',
++           'PKCS8_PRIV_KEY_INFO', 'PBE2PARAM_st',
++           'LHASH_DOALL_FN_TYPE', 'x509_st', 'X509_VAL', 'dev_t',
++           'ASN1_TEMPLATE_st', 'MD5state_st', '__uint16_t',
++           'LHASH_DOALL_ARG_FN_TYPE', 'mdc2_ctx_st', 'SSL3_STATE',
++           'ssl3_buffer_st', 'ASN1_ITEM_EXP',
++           '_opaque_pthread_condattr_t', 'mode_t', 'ASN1_VALUE',
++           'qaddr_t', '__darwin_gid_t', 'EVP_PKEY', 'CRYPTO_EX_free',
++           '_ossl_old_des_cblock', 'X509_INFO', 'asn1_string_st',
++           'intptr_t', 'UIT_INFO', 'int_fast8_t', 'sigaltstack',
++           'env_md_st', 'LHASH', '__darwin_ucontext_t',
++           'PKCS7_SIGN_ENVELOPE', '__darwin_mcontext_t', 'ct_rune_t',
++           'MD2_CTX', 'pthread_once_t', 'SSL3_BUFFER', 'fd_mask',
++           'ASN1_TYPE', 'PKCS7_SIGNED', 'ssl3_record_st', 'BF_KEY',
++           'MD4state_st', 'MD4_CTX', 'int16_t', 'SSL_CIPHER',
++           'rune_t', 'X509_TRUST', 'siginfo_t', 'X509_STORE',
++           '__sbuf', 'X509_STORE_CTX', '__darwin_blksize_t', 'ldiv_t',
++           'ASN1_TIME', 'SSL_METHOD', 'X509_LOOKUP',
++           'Netscape_spki_st', 'P_PID', 'sigaction', 'sig_t',
++           'hostent', 'x509_cert_aux_st', '_opaque_pthread_cond_t',
++           'segsz_t', 'ushort', '__darwin_ct_rune_t', 'fd_set',
++           'BN_RECP_CTX', 'x509_lookup_st', 'uint16_t', 'pkcs7_st',
++           'asn1_header_st', '__darwin_pthread_key_t',
++           'x509_trust_st', '__darwin_pthread_handler_rec', 'int32_t',
++           'X509_CRL_INFO', 'N11evp_pkey_st4DOLLAR_12E', 'MDC2_CTX',
++           'N23_ossl_old_des_ks_struct4DOLLAR_10E', 'ASN1_HEADER',
++           'X509_crl_info_st', 'LHASH_HASH_FN_TYPE',
++           '_opaque_pthread_mutexattr_t', 'ssl_st',
++           'N8pkcs7_st4DOLLAR_15E', 'evp_pkey_st',
++           'pkcs7_signedandenveloped_st', '__darwin_mach_port_t',
++           'EVP_PBE_KEYGEN', '_opaque_pthread_mutex_t',
++           'ASN1_UTCTIME', 'mcontext', 'crypto_ex_data_func_st',
++           'u_long', 'PBKDF2PARAM_st', 'rc4_key_st', 'DSA_METHOD',
++           'EVP_CIPHER', 'BIT_STRING_BITNAME', 'PKCS7_RECIP_INFO',
++           'ssl3_enc_method', 'X509_CERT_AUX', 'uintmax_t',
++           'int_fast16_t', 'RC5_32_KEY', 'ucontext64', 'ASN1_INTEGER',
++           'u_short', 'N14x509_object_st4DOLLAR_14E', 'mcontext64',
++           'X509_sig_st', 'ASN1_GENERALSTRING', 'PKCS7', '__sFILEX',
++           'X509_name_entry_st', 'ssl_session_st', 'caddr_t',
++           'bignum_st', 'X509_CINF', '__darwin_pthread_cond_t',
++           'ASN1_TLC', 'PKCS7_ENCRYPT', 'NETSCAPE_SPKAC',
++           'Netscape_spkac_st', 'idtype_t', 'UIT_ERROR',
++           'uint_fast64_t', 'in_addr_t', 'pthread_mutex_t',
++           '__int64_t', 'ASN1_BMPSTRING', 'uint32_t',
++           'PEM_ENCODE_SEAL_CTX', 'suseconds_t', 'ASN1_OBJECT',
++           'X509_val_st', 'private_key_st', 'CRYPTO_dynlock',
++           'X509_objects_st', 'CRYPTO_EX_DATA_IMPL',
++           'pthread_condattr_t', 'PKCS7_DIGEST', 'uint_least32_t',
++           'ASN1_STRING', '__uint32_t', 'P_PGID', 'rsa_meth_st',
++           'X509_crl_st', 'RC2_KEY', '__darwin_fsfilcnt_t',
++           'X509_revoked_st', 'PBE2PARAM', 'blksize_t',
++           'Netscape_certificate_sequence', 'ssl_cipher_st',
++           'bignum_ctx', 'register_t', 'ASN1_UTF8STRING',
++           'pkcs7_encrypted_st', 'RC4_KEY', '__darwin_ucontext64_t',
++           'N13ssl2_state_st4DOLLAR_19E', 'bn_recp_ctx_st',
++           'CAST_KEY', 'X509_ATTRIBUTE', '__darwin_suseconds_t',
++           '__sigaction', 'user_ulong_t', 'syscall_arg_t',
++           'evp_cipher_ctx_st', 'X509_ALGOR', 'mcontext_t',
++           'const_DES_cblock', '__darwin_fsblkcnt_t', 'dsa_st',
++           'int_least8_t', 'MD2state_st', 'X509_EXTENSION',
++           'GEN_SESSION_CB', 'int_least16_t', '__darwin_wctrans_t',
++           'PBKDF2PARAM', 'x509_lookup_method_st', 'pem_password_cb',
++           'X509_info_st', 'x509_store_st', '__darwin_natural_t',
++           'X509_pubkey_st', 'pkcs7_digest_st', '__darwin_size_t',
++           'ASN1_STRING_TABLE', 'OSLittleEndian', 'RIPEMD160state_st',
++           'pkcs7_enveloped_st', 'UI', 'ptrdiff_t', 'X509_REQ',
++           'CRYPTO_dynlock_value', 'X509_req_st', 'x509_store_ctx_st',
++           'N13ssl3_state_st4DOLLAR_20E', 'lhash_node_st',
++           '__darwin_pthread_mutex_t', 'LHASH_COMP_FN_TYPE',
++           '__darwin_rune_t', 'rlimit', '__darwin_pthread_once_t',
++           'OSBigEndian', 'uintptr_t', '__darwin_uid_t', 'u_int',
++           'ASN1_T61STRING', 'gid_t', 'ssl_method_st', 'ASN1_ITEM',
++           'ASN1_ENUMERATED', '_opaque_pthread_rwlock_t',
++           'pkcs8_priv_key_info_st', 'intmax_t', 'sigcontext',
++           'X509_CRL', 'rc2_key_st', 'engine_st', 'x509_object_st',
++           '_opaque_pthread_once_t', 'DES_ks', 'SSL_COMP',
++           'dsa_method', 'int64_t', 'bio_st', 'bf_key_st',
++           'ASN1_GENERALIZEDTIME', 'PKCS7_ENC_CONTENT',
++           '__darwin_pid_t', 'lldiv_t', 'comp_method_st',
++           'EVP_MD_CTX', 'evp_cipher_st', 'X509_name_st',
++           'x509_hash_dir_st', '__darwin_mach_port_name_t',
++           'useconds_t', 'user_size_t', 'SSL_SESSION', 'rusage',
++           'ssl_crock_st', 'int_least32_t', '__sigaction_u', 'dh_st',
++           'P_ALL', '__darwin_stack_t', 'N6DES_ks3DOLLAR_9E',
++           'comp_ctx_st', 'X509_CERT_FILE_CTX']
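
The generated bindings above repeat one pattern throughout: declare an empty
Structure subclass, assign _fields_ only after the class object exists (so a
struct can hold a POINTER to its own type), then assert the byte layout the
code generator observed. A minimal hand-written sketch of that pattern -- the
node_st name and its fields are hypothetical, and note that the literal sizes
in the generated file (76, 116, ...) pin the 32-bit Darwin ABI it was produced
on, so the sketch computes its expected sizes instead:

from ctypes import POINTER, Structure, alignment, c_void_p, sizeof

class node_st(Structure):
    pass

# fields are attached after the class statement, mirroring the generated
# code, so that the self-referential POINTER(node_st) can resolve
node_st._fields_ = [
    ('next', POINTER(node_st)),
    ('prev', POINTER(node_st)),
]

# layout checks in the style of the asserts above, but derived from the
# member types so they hold on any ABI rather than hard-coding one
assert sizeof(node_st) == 2 * sizeof(c_void_p), sizeof(node_st)
assert alignment(node_st) == alignment(c_void_p), alignment(node_st)
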
+diff -r 531f2e948299 refactor/tests/data/.svn/text-base/py2_test_grammar.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/text-base/py2_test_grammar.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,956 @@
++# Python 2's Lib/test/test_grammar.py (r66189)
++
++# Python test set -- part 1, grammar.
++# This just tests whether the parser accepts them all.
++
++# NOTE: When you run this test as a script from the command line, you
++# get warnings about certain hex/oct constants.  Since those are
++# issued by the parser, you can't suppress them by adding a
++# filterwarnings() call to this module.  Therefore, to shut up the
++# regression test, the filterwarnings() call has been added to
++# regrtest.py.
++
++from test.test_support import run_unittest, check_syntax_error
++import unittest
++import sys
++# testing import *
++from sys import *
++
++class TokenTests(unittest.TestCase):
++
++    def testBackslash(self):
++        # Backslash means line continuation:
++        x = 1 \
++        + 1
++        self.assertEquals(x, 2, 'backslash for line continuation')
++
++        # Backslash does not mean continuation in comments :\
++        x = 0
++        self.assertEquals(x, 0, 'backslash ending comment')
++
++    def testPlainIntegers(self):
++        self.assertEquals(0xff, 255)
++        self.assertEquals(0377, 255)
++        self.assertEquals(2147483647, 017777777777)
++        # "0x" is not a valid literal
++        self.assertRaises(SyntaxError, eval, "0x")
++        from sys import maxint
++        if maxint == 2147483647:
++            self.assertEquals(-2147483647-1, -020000000000)
++            # XXX -2147483648
++            self.assert_(037777777777 > 0)
++            self.assert_(0xffffffff > 0)
++            for s in '2147483648', '040000000000', '0x100000000':
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        elif maxint == 9223372036854775807:
++            self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
++            self.assert_(01777777777777777777777 > 0)
++            self.assert_(0xffffffffffffffff > 0)
++            for s in '9223372036854775808', '02000000000000000000000', \
++                     '0x10000000000000000':
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        else:
++            self.fail('Weird maxint value %r' % maxint)
++
++    def testLongIntegers(self):
++        x = 0L
++        x = 0l
++        x = 0xffffffffffffffffL
++        x = 0xffffffffffffffffl
++        x = 077777777777777777L
++        x = 077777777777777777l
++        x = 123456789012345678901234567890L
++        x = 123456789012345678901234567890l
++
++    def testFloats(self):
++        x = 3.14
++        x = 314.
++        x = 0.314
++        # XXX x = 000.314
++        x = .314
++        x = 3e14
++        x = 3E14
++        x = 3e-14
++        x = 3e+14
++        x = 3.e14
++        x = .3e14
++        x = 3.1e4
++
++    def testStringLiterals(self):
++        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
++        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
++        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
++        x = "doesn't \"shrink\" does it"
++        y = 'doesn\'t "shrink" does it'
++        self.assert_(len(x) == 24 and x == y)
++        x = "does \"shrink\" doesn't it"
++        y = 'does "shrink" doesn\'t it'
++        self.assert_(len(x) == 24 and x == y)
++        x = """
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++"""
++        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
++        self.assertEquals(x, y)
++        y = '''
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++'''
++        self.assertEquals(x, y)
++        y = "\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the 'lazy' dog.\n\
++"
++        self.assertEquals(x, y)
++        y = '\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the \'lazy\' dog.\n\
++'
++        self.assertEquals(x, y)
++
++
++class GrammarTests(unittest.TestCase):
++
++    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
++    # XXX can't test in a script -- this rule is only used when interactive
++
++    # file_input: (NEWLINE | stmt)* ENDMARKER
++    # Being tested at this very moment by this very module
++
++    # expr_input: testlist NEWLINE
++    # XXX Hard to test -- used only in calls to input()
++
++    def testEvalInput(self):
++        # testlist ENDMARKER
++        x = eval('1, 0 or 1')
++
++    def testFuncdef(self):
++        ### 'def' NAME parameters ':' suite
++        ### parameters: '(' [varargslist] ')'
++        ### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
++        ###            | ('**'|'*' '*') NAME)
++        ###            | fpdef ['=' test] (',' fpdef ['=' test])* [',']
++        ### fpdef: NAME | '(' fplist ')'
++        ### fplist: fpdef (',' fpdef)* [',']
++        ### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
++        ### argument: [test '='] test   # Really [keyword '='] test
++        def f1(): pass
++        f1()
++        f1(*())
++        f1(*(), **{})
++        def f2(one_argument): pass
++        def f3(two, arguments): pass
++        def f4(two, (compound, (argument, list))): pass
++        def f5((compound, first), two): pass
++        self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
++        self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
++        if sys.platform.startswith('java'):
++            self.assertEquals(f4.func_code.co_varnames,
++                   ('two', '(compound, (argument, list))', 'compound', 'argument',
++                                'list',))
++            self.assertEquals(f5.func_code.co_varnames,
++                   ('(compound, first)', 'two', 'compound', 'first'))
++        else:
++            self.assertEquals(f4.func_code.co_varnames,
++                  ('two', '.1', 'compound', 'argument',  'list'))
++            self.assertEquals(f5.func_code.co_varnames,
++                  ('.0', 'two', 'compound', 'first'))
++        def a1(one_arg,): pass
++        def a2(two, args,): pass
++        def v0(*rest): pass
++        def v1(a, *rest): pass
++        def v2(a, b, *rest): pass
++        def v3(a, (b, c), *rest): return a, b, c, rest
++
++        f1()
++        f2(1)
++        f2(1,)
++        f3(1, 2)
++        f3(1, 2,)
++        f4(1, (2, (3, 4)))
++        v0()
++        v0(1)
++        v0(1,)
++        v0(1,2)
++        v0(1,2,3,4,5,6,7,8,9,0)
++        v1(1)
++        v1(1,)
++        v1(1,2)
++        v1(1,2,3)
++        v1(1,2,3,4,5,6,7,8,9,0)
++        v2(1,2)
++        v2(1,2,3)
++        v2(1,2,3,4)
++        v2(1,2,3,4,5,6,7,8,9,0)
++        v3(1,(2,3))
++        v3(1,(2,3),4)
++        v3(1,(2,3),4,5,6,7,8,9,0)
++
++        # ceval unpacks the formal arguments into the first argcount names;
++        # thus, the names nested inside tuples must appear after these names.
++        if sys.platform.startswith('java'):
++            self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
++        else:
++            self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
++        self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
++        def d01(a=1): pass
++        d01()
++        d01(1)
++        d01(*(1,))
++        d01(**{'a':2})
++        def d11(a, b=1): pass
++        d11(1)
++        d11(1, 2)
++        d11(1, **{'b':2})
++        def d21(a, b, c=1): pass
++        d21(1, 2)
++        d21(1, 2, 3)
++        d21(*(1, 2, 3))
++        d21(1, *(2, 3))
++        d21(1, 2, *(3,))
++        d21(1, 2, **{'c':3})
++        def d02(a=1, b=2): pass
++        d02()
++        d02(1)
++        d02(1, 2)
++        d02(*(1, 2))
++        d02(1, *(2,))
++        d02(1, **{'b':2})
++        d02(**{'a': 1, 'b': 2})
++        def d12(a, b=1, c=2): pass
++        d12(1)
++        d12(1, 2)
++        d12(1, 2, 3)
++        def d22(a, b, c=1, d=2): pass
++        d22(1, 2)
++        d22(1, 2, 3)
++        d22(1, 2, 3, 4)
++        def d01v(a=1, *rest): pass
++        d01v()
++        d01v(1)
++        d01v(1, 2)
++        d01v(*(1, 2, 3, 4))
++        d01v(*(1,))
++        d01v(**{'a':2})
++        def d11v(a, b=1, *rest): pass
++        d11v(1)
++        d11v(1, 2)
++        d11v(1, 2, 3)
++        def d21v(a, b, c=1, *rest): pass
++        d21v(1, 2)
++        d21v(1, 2, 3)
++        d21v(1, 2, 3, 4)
++        d21v(*(1, 2, 3, 4))
++        d21v(1, 2, **{'c': 3})
++        def d02v(a=1, b=2, *rest): pass
++        d02v()
++        d02v(1)
++        d02v(1, 2)
++        d02v(1, 2, 3)
++        d02v(1, *(2, 3, 4))
++        d02v(**{'a': 1, 'b': 2})
++        def d12v(a, b=1, c=2, *rest): pass
++        d12v(1)
++        d12v(1, 2)
++        d12v(1, 2, 3)
++        d12v(1, 2, 3, 4)
++        d12v(*(1, 2, 3, 4))
++        d12v(1, 2, *(3, 4, 5))
++        d12v(1, *(2,), **{'c': 3})
++        def d22v(a, b, c=1, d=2, *rest): pass
++        d22v(1, 2)
++        d22v(1, 2, 3)
++        d22v(1, 2, 3, 4)
++        d22v(1, 2, 3, 4, 5)
++        d22v(*(1, 2, 3, 4))
++        d22v(1, 2, *(3, 4, 5))
++        d22v(1, *(2, 3), **{'d': 4})
++        def d31v((x)): pass
++        d31v(1)
++        def d32v((x,)): pass
++        d32v((1,))
++
++        # keyword arguments after *arglist
++        def f(*args, **kwargs):
++            return args, kwargs
++        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
++                                                    {'x':2, 'y':5}))
++        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
++        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
++
++        # Check ast errors in *args and *kwargs
++        check_syntax_error(self, "f(*g(1=2))")
++        check_syntax_error(self, "f(**g(1=2))")
++
++    def testLambdef(self):
++        ### lambdef: 'lambda' [varargslist] ':' test
++        l1 = lambda : 0
++        self.assertEquals(l1(), 0)
++        l2 = lambda : a[d] # XXX just testing the expression
++        l3 = lambda : [2 < x for x in [-1, 3, 0L]]
++        self.assertEquals(l3(), [0, 1, 0])
++        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
++        self.assertEquals(l4(), 1)
++        l5 = lambda x, y, z=2: x + y + z
++        self.assertEquals(l5(1, 2), 5)
++        self.assertEquals(l5(1, 2, 3), 6)
++        check_syntax_error(self, "lambda x: x = 2")
++        check_syntax_error(self, "lambda (None,): None")
++
++    ### stmt: simple_stmt | compound_stmt
++    # Tested below
++
++    def testSimpleStmt(self):
++        ### simple_stmt: small_stmt (';' small_stmt)* [';']
++        x = 1; pass; del x
++        def foo():
++            # verify statements that end with semi-colons
++            x = 1; pass; del x;
++        foo()
++
++    ### small_stmt: expr_stmt | print_stmt  | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
++    # Tested below
++
++    def testExprStmt(self):
++        # (exprlist '=')* exprlist
++        1
++        1, 2, 3
++        x = 1
++        x = 1, 2, 3
++        x = y = z = 1, 2, 3
++        x, y, z = 1, 2, 3
++        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
++
++        check_syntax_error(self, "x + 1 = 1")
++        check_syntax_error(self, "a + 1 = b + 2")
++
++    def testPrintStmt(self):
++        # 'print' (test ',')* [test]
++        import StringIO
++
++        # Can't test printing to real stdout without comparing output
++        # which is not available in unittest.
++        save_stdout = sys.stdout
++        sys.stdout = StringIO.StringIO()
++
++        print 1, 2, 3
++        print 1, 2, 3,
++        print
++        print 0 or 1, 0 or 1,
++        print 0 or 1
++
++        # 'print' '>>' test ','
++        print >> sys.stdout, 1, 2, 3
++        print >> sys.stdout, 1, 2, 3,
++        print >> sys.stdout
++        print >> sys.stdout, 0 or 1, 0 or 1,
++        print >> sys.stdout, 0 or 1
++
++        # test printing to an instance
++        class Gulp:
++            def write(self, msg): pass
++
++        gulp = Gulp()
++        print >> gulp, 1, 2, 3
++        print >> gulp, 1, 2, 3,
++        print >> gulp
++        print >> gulp, 0 or 1, 0 or 1,
++        print >> gulp, 0 or 1
++
++        # test print >> None
++        def driver():
++            oldstdout = sys.stdout
++            sys.stdout = Gulp()
++            try:
++                tellme(Gulp())
++                tellme()
++            finally:
++                sys.stdout = oldstdout
++
++        # we should see this once
++        def tellme(file=sys.stdout):
++            print >> file, 'hello world'
++
++        driver()
++
++        # we should not see this at all
++        def tellme(file=None):
++            print >> file, 'goodbye universe'
++
++        driver()
++
++        self.assertEqual(sys.stdout.getvalue(), '''\
++1 2 3
++1 2 3
++1 1 1
++1 2 3
++1 2 3
++1 1 1
++hello world
++''')
++        sys.stdout = save_stdout
++
++        # syntax errors
++        check_syntax_error(self, 'print ,')
++        check_syntax_error(self, 'print >> x,')
++
++    def testDelStmt(self):
++        # 'del' exprlist
++        abc = [1,2,3]
++        x, y, z = abc
++        xyz = x, y, z
++
++        del abc
++        del x, y, (z, xyz)
++
++    def testPassStmt(self):
++        # 'pass'
++        pass
++
++    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
++    # Tested below
++
++    def testBreakStmt(self):
++        # 'break'
++        while 1: break
++
++    def testContinueStmt(self):
++        # 'continue'
++        i = 1
++        while i: i = 0; continue
++
++        msg = ""
++        while not msg:
++            msg = "ok"
++            try:
++                continue
++                msg = "continue failed to continue inside try"
++            except:
++                msg = "continue inside try called except block"
++        if msg != "ok":
++            self.fail(msg)
++
++        msg = ""
++        while not msg:
++            msg = "finally block not called"
++            try:
++                continue
++            finally:
++                msg = "ok"
++        if msg != "ok":
++            self.fail(msg)
++
++    def test_break_continue_loop(self):
++        # This test warrants an explanation. It is a test specifically for SF bugs
++        # #463359 and #462937. The bug is that a 'break' statement executed or
++        # exception raised inside a try/except inside a loop, *after* a continue
++        # statement has been executed in that loop, will cause the wrong number of
++        # arguments to be popped off the stack and the instruction pointer reset to
++        # a very small number (usually 0.) Because of this, the following test
++        # *must* be written as a function, and the tracking vars *must* be function
++        # arguments with default values. Otherwise, the test will loop and loop.
++
++        def test_inner(extra_burning_oil = 1, count=0):
++            big_hippo = 2
++            while big_hippo:
++                count += 1
++                try:
++                    if extra_burning_oil and big_hippo == 1:
++                        extra_burning_oil -= 1
++                        break
++                    big_hippo -= 1
++                    continue
++                except:
++                    raise
++            if count > 2 or big_hippo <> 1:
++                self.fail("continue then break in try/except in loop broken!")
++        test_inner()
++
++    def testReturn(self):
++        # 'return' [testlist]
++        def g1(): return
++        def g2(): return 1
++        g1()
++        x = g2()
++        check_syntax_error(self, "class foo:return 1")
++
++    def testYield(self):
++        check_syntax_error(self, "class foo:yield 1")
++
++    def testRaise(self):
++        # 'raise' test [',' test]
++        try: raise RuntimeError, 'just testing'
++        except RuntimeError: pass
++        try: raise KeyboardInterrupt
++        except KeyboardInterrupt: pass
++
++    def testImport(self):
++        # 'import' dotted_as_names
++        import sys
++        import time, sys
++        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
++        from time import time
++        from time import (time)
++        # not testable inside a function, but already done at top of the module
++        # from sys import *
++        from sys import path, argv
++        from sys import (path, argv)
++        from sys import (path, argv,)
++
++    def testGlobal(self):
++        # 'global' NAME (',' NAME)*
++        global a
++        global a, b
++        global one, two, three, four, five, six, seven, eight, nine, ten
++
++    def testExec(self):
++        # 'exec' expr ['in' expr [',' expr]]
++        z = None
++        del z
++        exec 'z=1+1\n'
++        if z != 2: self.fail('exec \'z=1+1\'\\n')
++        del z
++        exec 'z=1+1'
++        if z != 2: self.fail('exec \'z=1+1\'')
++        z = None
++        del z
++        import types
++        if hasattr(types, "UnicodeType"):
++            exec r"""if 1:
++            exec u'z=1+1\n'
++            if z != 2: self.fail('exec u\'z=1+1\'\\n')
++            del z
++            exec u'z=1+1'
++            if z != 2: self.fail('exec u\'z=1+1\'')"""
++        g = {}
++        exec 'z = 1' in g
++        if g.has_key('__builtins__'): del g['__builtins__']
++        if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
++        g = {}
++        l = {}
++
++        import warnings
++        warnings.filterwarnings("ignore", "global statement", module="<string>")
++        exec 'global a; a = 1; b = 2' in g, l
++        if g.has_key('__builtins__'): del g['__builtins__']
++        if l.has_key('__builtins__'): del l['__builtins__']
++        if (g, l) != ({'a':1}, {'b':2}):
++            self.fail('exec ... in g (%s), l (%s)' %(g,l))
++
++    def testAssert(self):
++        # assert_stmt: 'assert' test [',' test]
++        assert 1
++        assert 1, 1
++        assert lambda x:x
++        assert 1, lambda x:x+1
++        try:
++            assert 0, "msg"
++        except AssertionError, e:
++            self.assertEquals(e.args[0], "msg")
++        else:
++            if __debug__:
++                self.fail("AssertionError not raised by assert 0")
++
++    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
++    # Tested below
++
++    def testIf(self):
++        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
++        if 1: pass
++        if 1: pass
++        else: pass
++        if 0: pass
++        elif 0: pass
++        if 0: pass
++        elif 0: pass
++        elif 0: pass
++        elif 0: pass
++        else: pass
++
++    def testWhile(self):
++        # 'while' test ':' suite ['else' ':' suite]
++        while 0: pass
++        while 0: pass
++        else: pass
++
++        # Issue1920: "while 0" is optimized away,
++        # ensure that the "else" clause is still present.
++        x = 0
++        while 0:
++            x = 1
++        else:
++            x = 2
++        self.assertEquals(x, 2)
++
++    def testFor(self):
++        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
++        for i in 1, 2, 3: pass
++        for i, j, k in (): pass
++        else: pass
++        class Squares:
++            def __init__(self, max):
++                self.max = max
++                self.sofar = []
++            def __len__(self): return len(self.sofar)
++            def __getitem__(self, i):
++                if not 0 <= i < self.max: raise IndexError
++                n = len(self.sofar)
++                while n <= i:
++                    self.sofar.append(n*n)
++                    n = n+1
++                return self.sofar[i]
++        n = 0
++        for x in Squares(10): n = n+x
++        if n != 285:
++            self.fail('for over growing sequence')
++
++        result = []
++        for x, in [(1,), (2,), (3,)]:
++            result.append(x)
++        self.assertEqual(result, [1, 2, 3])
++
++    def testTry(self):
++        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
++        ###         | 'try' ':' suite 'finally' ':' suite
++        ### except_clause: 'except' [expr [('as' | ',') expr]]
++        try:
++            1/0
++        except ZeroDivisionError:
++            pass
++        else:
++            pass
++        try: 1/0
++        except EOFError: pass
++        except TypeError as msg: pass
++        except RuntimeError, msg: pass
++        except: pass
++        else: pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError): pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError), msg: pass
++        try: pass
++        finally: pass
++
++    def testSuite(self):
++        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
++        if 1: pass
++        if 1:
++            pass
++        if 1:
++            #
++            #
++            #
++            pass
++            pass
++            #
++            pass
++            #
++
++    def testTest(self):
++        ### and_test ('or' and_test)*
++        ### and_test: not_test ('and' not_test)*
++        ### not_test: 'not' not_test | comparison
++        if not 1: pass
++        if 1 and 1: pass
++        if 1 or 1: pass
++        if not not not 1: pass
++        if not 1 and 1 and 1: pass
++        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
++
++    def testComparison(self):
++        ### comparison: expr (comp_op expr)*
++        ### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
++        if 1: pass
++        x = (1 == 1)
++        if 1 == 1: pass
++        if 1 != 1: pass
++        if 1 <> 1: pass
++        if 1 < 1: pass
++        if 1 > 1: pass
++        if 1 <= 1: pass
++        if 1 >= 1: pass
++        if 1 is 1: pass
++        if 1 is not 1: pass
++        if 1 in (): pass
++        if 1 not in (): pass
++        if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
++
++    def testBinaryMaskOps(self):
++        x = 1 & 1
++        x = 1 ^ 1
++        x = 1 | 1
++
++    def testShiftOps(self):
++        x = 1 << 1
++        x = 1 >> 1
++        x = 1 << 1 >> 1
++
++    def testAdditiveOps(self):
++        x = 1
++        x = 1 + 1
++        x = 1 - 1 - 1
++        x = 1 - 1 + 1 - 1 + 1
++
++    def testMultiplicativeOps(self):
++        x = 1 * 1
++        x = 1 / 1
++        x = 1 % 1
++        x = 1 / 1 * 1 % 1
++
++    def testUnaryOps(self):
++        x = +1
++        x = -1
++        x = ~1
++        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
++        x = -1*1/1 + 1*1 - ---1*1
++
++    def testSelectors(self):
++        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
++        ### subscript: expr | [expr] ':' [expr]
++
++        import sys, time
++        c = sys.path[0]
++        x = time.time()
++        x = sys.modules['time'].time()
++        a = '01234'
++        c = a[0]
++        c = a[-1]
++        s = a[0:5]
++        s = a[:5]
++        s = a[0:]
++        s = a[:]
++        s = a[-5:]
++        s = a[:-1]
++        s = a[-4:-3]
++        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
++        # The testing here is fairly incomplete.
++        # Test cases should include: commas with 1 and 2 colons
++        d = {}
++        d[1] = 1
++        d[1,] = 2
++        d[1,2] = 3
++        d[1,2,3] = 4
++        L = list(d)
++        L.sort()
++        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
++
++    def testAtoms(self):
++        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
++        ### dictmaker: test ':' test (',' test ':' test)* [',']
++
++        x = (1)
++        x = (1 or 2 or 3)
++        x = (1 or 2 or 3, 2, 3)
++
++        x = []
++        x = [1]
++        x = [1 or 2 or 3]
++        x = [1 or 2 or 3, 2, 3]
++        x = []
++
++        x = {}
++        x = {'one': 1}
++        x = {'one': 1,}
++        x = {'one' or 'two': 1 or 2}
++        x = {'one': 1, 'two': 2}
++        x = {'one': 1, 'two': 2,}
++        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
++
++        x = `x`
++        x = `1 or 2 or 3`
++        self.assertEqual(`1,2`, '(1, 2)')
++
++        x = x
++        x = 'x'
++        x = 123
++
++    ### exprlist: expr (',' expr)* [',']
++    ### testlist: test (',' test)* [',']
++    # These have been exercised enough above
++
++    def testClassdef(self):
++        # 'class' NAME ['(' [testlist] ')'] ':' suite
++        class B: pass
++        class B2(): pass
++        class C1(B): pass
++        class C2(B): pass
++        class D(C1, C2, B): pass
++        class C:
++            def meth1(self): pass
++            def meth2(self, arg): pass
++            def meth3(self, a1, a2): pass
++        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++        # decorators: decorator+
++        # decorated: decorators (classdef | funcdef)
++        def class_decorator(x):
++            x.decorated = True
++            return x
++        @class_decorator
++        class G:
++            pass
++        self.assertEqual(G.decorated, True)
++
++    def testListcomps(self):
++        # list comprehension tests
++        nums = [1, 2, 3, 4, 5]
++        strs = ["Apple", "Banana", "Coconut"]
++        spcs = ["  Apple", " Banana ", "Coco  nut  "]
++
++        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
++        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
++        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
++        self.assertEqual([(i, s) for i in nums for s in strs],
++                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
++                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
++                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
++                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
++                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
++
++        def test_in_func(l):
++            return [None < x < 3 for x in l if x > 2]
++
++        self.assertEqual(test_in_func(nums), [False, False, False])
++
++        def test_nested_front():
++            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
++                             [[1, 2], [3, 4], [5, 6]])
++
++        test_nested_front()
++
++        check_syntax_error(self, "[i, s for i in nums for s in strs]")
++        check_syntax_error(self, "[x if y]")
++
++        suppliers = [
++          (1, "Boeing"),
++          (2, "Ford"),
++          (3, "Macdonalds")
++        ]
++
++        parts = [
++          (10, "Airliner"),
++          (20, "Engine"),
++          (30, "Cheeseburger")
++        ]
++
++        suppart = [
++          (1, 10), (1, 20), (2, 20), (3, 30)
++        ]
++
++        x = [
++          (sname, pname)
++            for (sno, sname) in suppliers
++              for (pno, pname) in parts
++                for (sp_sno, sp_pno) in suppart
++                  if sno == sp_sno and pno == sp_pno
++        ]
++
++        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
++                             ('Macdonalds', 'Cheeseburger')])
++
++    def testGenexps(self):
++        # generator expression tests
++        g = ([x for x in range(10)] for x in range(1))
++        self.assertEqual(g.next(), [x for x in range(10)])
++        try:
++            g.next()
++            self.fail('should produce StopIteration exception')
++        except StopIteration:
++            pass
++
++        a = 1
++        try:
++            g = (a for d in a)
++            g.next()
++            self.fail('should produce TypeError')
++        except TypeError:
++            pass
++
++        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
++        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
++
++        a = [x for x in range(10)]
++        b = (x for x in (y for y in a))
++        self.assertEqual(sum(b), sum([x for x in range(10)]))
++
++        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
++        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
++        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
++        check_syntax_error(self, "foo(x for x in range(10), 100)")
++        check_syntax_error(self, "foo(100, x for x in range(10))")
++
++    def testComprehensionSpecials(self):
++        # test for outermost iterable precomputation
++        x = 10; g = (i for i in range(x)); x = 5
++        self.assertEqual(len(list(g)), 10)
++
++        # This should hold, since we're only precomputing the outermost iterable.
++        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
++        x = 5; t = True;
++        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
++
++        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
++        # even though it's silly. Make sure it works (ifelse broke this.)
++        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
++        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
++
++        # verify unpacking single element tuples in listcomp/genexp.
++        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
++        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
++
++    def testIfElseExpr(self):
++        # Test ifelse expressions in various cases
++        def _checkeval(msg, ret):
++            "helper to check that evaluation of expressions is done correctly"
++            print x
++            return ret
++
++        self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
++        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
++        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
++        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
++        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
++        self.assertEqual((5 and 6 if 0 else 1), 1)
++        self.assertEqual(((5 and 6) if 0 else 1), 1)
++        self.assertEqual((5 and (6 if 1 else 1)), 6)
++        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
++        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
++        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
++        self.assertEqual((not 5 if 1 else 1), False)
++        self.assertEqual((not 5 if 0 else 1), 1)
++        self.assertEqual((6 + 1 if 1 else 2), 7)
++        self.assertEqual((6 - 1 if 1 else 2), 5)
++        self.assertEqual((6 * 2 if 1 else 4), 12)
++        self.assertEqual((6 / 2 if 1 else 3), 3)
++        self.assertEqual((6 < 4 if 0 else 2), 2)
++
++
++def test_main():
++    run_unittest(TokenTests, GrammarTests)
++
++if __name__ == '__main__':
++    test_main()
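
Note the Python 2 iterator idiom in the file above: the generator's next() method is called directly (g.next()). The Python 3 grammar file that follows performs the same checks through the next() builtin, which is exactly the rewrite handled by the fix_next fixer shipped in this commit (refactor/fixes/from2/fix_next.py). Side by side, as a plain illustration:

    g = (x for x in range(3))
    g.next()   # Python 2 spelling: iterator method, removed in Python 3
    next(g)    # Python 3 spelling: the next() builtin, what fix_next emits
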
+diff -r 531f2e948299 refactor/tests/data/.svn/text-base/py3_test_grammar.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/.svn/text-base/py3_test_grammar.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,903 @@
++# Python test set -- part 1, grammar.
++# This just tests whether the parser accepts them all.
++
++# NOTE: When you run this test as a script from the command line, you
++# get warnings about certain hex/oct constants.  Since those are
++# issued by the parser, you can't suppress them by adding a
++# filterwarnings() call to this module.  Therefore, to shut up the
++# regression test, the filterwarnings() call has been added to
++# regrtest.py.
++
++from test.support import run_unittest, check_syntax_error
++import unittest
++import sys
++# testing import *
++from sys import *
++
++class TokenTests(unittest.TestCase):
++
++    def testBackslash(self):
++        # Backslash means line continuation:
++        x = 1 \
++        + 1
++        self.assertEquals(x, 2, 'backslash for line continuation')
++
++        # Backslash does not mean continuation in comments :\
++        x = 0
++        self.assertEquals(x, 0, 'backslash ending comment')
++
++    def testPlainIntegers(self):
++        self.assertEquals(type(000), type(0))
++        self.assertEquals(0xff, 255)
++        self.assertEquals(0o377, 255)
++        self.assertEquals(2147483647, 0o17777777777)
++        self.assertEquals(0b1001, 9)
++        # "0x" is not a valid literal
++        self.assertRaises(SyntaxError, eval, "0x")
++        from sys import maxsize
++        if maxsize == 2147483647:
++            self.assertEquals(-2147483647-1, -0o20000000000)
++            # XXX -2147483648
++            self.assert_(0o37777777777 > 0)
++            self.assert_(0xffffffff > 0)
++            self.assert_(0b1111111111111111111111111111111 > 0)
++            for s in ('2147483648', '0o40000000000', '0x100000000',
++                      '0b10000000000000000000000000000000'):
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        elif maxsize == 9223372036854775807:
++            self.assertEquals(-9223372036854775807-1, -0o1000000000000000000000)
++            self.assert_(0o1777777777777777777777 > 0)
++            self.assert_(0xffffffffffffffff > 0)
++            self.assert_(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
++            for s in '9223372036854775808', '0o2000000000000000000000', \
++                     '0x10000000000000000', \
++                     '0b100000000000000000000000000000000000000000000000000000000000000':
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        else:
++            self.fail('Weird maxsize value %r' % maxsize)
++
++    def testLongIntegers(self):
++        x = 0
++        x = 0xffffffffffffffff
++        x = 0Xffffffffffffffff
++        x = 0o77777777777777777
++        x = 0O77777777777777777
++        x = 123456789012345678901234567890
++        x = 0b100000000000000000000000000000000000000000000000000000000000000000000
++        x = 0B111111111111111111111111111111111111111111111111111111111111111111111
++
++    def testFloats(self):
++        x = 3.14
++        x = 314.
++        x = 0.314
++        # XXX x = 000.314
++        x = .314
++        x = 3e14
++        x = 3E14
++        x = 3e-14
++        x = 3e+14
++        x = 3.e14
++        x = .3e14
++        x = 3.1e4
++
++    def testStringLiterals(self):
++        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
++        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
++        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
++        x = "doesn't \"shrink\" does it"
++        y = 'doesn\'t "shrink" does it'
++        self.assert_(len(x) == 24 and x == y)
++        x = "does \"shrink\" doesn't it"
++        y = 'does "shrink" doesn\'t it'
++        self.assert_(len(x) == 24 and x == y)
++        x = """
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++"""
++        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
++        self.assertEquals(x, y)
++        y = '''
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++'''
++        self.assertEquals(x, y)
++        y = "\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the 'lazy' dog.\n\
++"
++        self.assertEquals(x, y)
++        y = '\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the \'lazy\' dog.\n\
++'
++        self.assertEquals(x, y)
++
++    def testEllipsis(self):
++        x = ...
++        self.assert_(x is Ellipsis)
++        self.assertRaises(SyntaxError, eval, ".. .")
++
++class GrammarTests(unittest.TestCase):
++
++    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
++    # XXX can't test in a script -- this rule is only used when interactive
++
++    # file_input: (NEWLINE | stmt)* ENDMARKER
++    # Being tested at this very moment by this very module
++
++    # expr_input: testlist NEWLINE
++    # XXX Hard to test -- used only in calls to input()
++
++    def testEvalInput(self):
++        # testlist ENDMARKER
++        x = eval('1, 0 or 1')
++
++    def testFuncdef(self):
++        ### [decorators] 'def' NAME parameters ['->' test] ':' suite
++        ### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++        ### decorators: decorator+
++        ### parameters: '(' [typedargslist] ')'
++        ### typedargslist: ((tfpdef ['=' test] ',')*
++        ###                ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
++        ###                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
++        ### tfpdef: NAME [':' test]
++        ### varargslist: ((vfpdef ['=' test] ',')*
++        ###              ('*' [vfpdef] (',' vfpdef ['=' test])*  [',' '**' vfpdef] | '**' vfpdef)
++        ###              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
++        ### vfpdef: NAME
++        def f1(): pass
++        f1()
++        f1(*())
++        f1(*(), **{})
++        def f2(one_argument): pass
++        def f3(two, arguments): pass
++        self.assertEquals(f2.__code__.co_varnames, ('one_argument',))
++        self.assertEquals(f3.__code__.co_varnames, ('two', 'arguments'))
++        def a1(one_arg,): pass
++        def a2(two, args,): pass
++        def v0(*rest): pass
++        def v1(a, *rest): pass
++        def v2(a, b, *rest): pass
++
++        f1()
++        f2(1)
++        f2(1,)
++        f3(1, 2)
++        f3(1, 2,)
++        v0()
++        v0(1)
++        v0(1,)
++        v0(1,2)
++        v0(1,2,3,4,5,6,7,8,9,0)
++        v1(1)
++        v1(1,)
++        v1(1,2)
++        v1(1,2,3)
++        v1(1,2,3,4,5,6,7,8,9,0)
++        v2(1,2)
++        v2(1,2,3)
++        v2(1,2,3,4)
++        v2(1,2,3,4,5,6,7,8,9,0)
++
++        def d01(a=1): pass
++        d01()
++        d01(1)
++        d01(*(1,))
++        d01(**{'a':2})
++        def d11(a, b=1): pass
++        d11(1)
++        d11(1, 2)
++        d11(1, **{'b':2})
++        def d21(a, b, c=1): pass
++        d21(1, 2)
++        d21(1, 2, 3)
++        d21(*(1, 2, 3))
++        d21(1, *(2, 3))
++        d21(1, 2, *(3,))
++        d21(1, 2, **{'c':3})
++        def d02(a=1, b=2): pass
++        d02()
++        d02(1)
++        d02(1, 2)
++        d02(*(1, 2))
++        d02(1, *(2,))
++        d02(1, **{'b':2})
++        d02(**{'a': 1, 'b': 2})
++        def d12(a, b=1, c=2): pass
++        d12(1)
++        d12(1, 2)
++        d12(1, 2, 3)
++        def d22(a, b, c=1, d=2): pass
++        d22(1, 2)
++        d22(1, 2, 3)
++        d22(1, 2, 3, 4)
++        def d01v(a=1, *rest): pass
++        d01v()
++        d01v(1)
++        d01v(1, 2)
++        d01v(*(1, 2, 3, 4))
++        d01v(*(1,))
++        d01v(**{'a':2})
++        def d11v(a, b=1, *rest): pass
++        d11v(1)
++        d11v(1, 2)
++        d11v(1, 2, 3)
++        def d21v(a, b, c=1, *rest): pass
++        d21v(1, 2)
++        d21v(1, 2, 3)
++        d21v(1, 2, 3, 4)
++        d21v(*(1, 2, 3, 4))
++        d21v(1, 2, **{'c': 3})
++        def d02v(a=1, b=2, *rest): pass
++        d02v()
++        d02v(1)
++        d02v(1, 2)
++        d02v(1, 2, 3)
++        d02v(1, *(2, 3, 4))
++        d02v(**{'a': 1, 'b': 2})
++        def d12v(a, b=1, c=2, *rest): pass
++        d12v(1)
++        d12v(1, 2)
++        d12v(1, 2, 3)
++        d12v(1, 2, 3, 4)
++        d12v(*(1, 2, 3, 4))
++        d12v(1, 2, *(3, 4, 5))
++        d12v(1, *(2,), **{'c': 3})
++        def d22v(a, b, c=1, d=2, *rest): pass
++        d22v(1, 2)
++        d22v(1, 2, 3)
++        d22v(1, 2, 3, 4)
++        d22v(1, 2, 3, 4, 5)
++        d22v(*(1, 2, 3, 4))
++        d22v(1, 2, *(3, 4, 5))
++        d22v(1, *(2, 3), **{'d': 4})
++
++        # keyword argument type tests
++        try:
++            str('x', **{b'foo':1 })
++        except TypeError:
++            pass
++        else:
++            self.fail('Bytes should not work as keyword argument names')
++        # keyword only argument tests
++        def pos0key1(*, key): return key
++        pos0key1(key=100)
++        def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
++        pos2key2(1, 2, k1=100)
++        pos2key2(1, 2, k1=100, k2=200)
++        pos2key2(1, 2, k2=100, k1=200)
++        def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
++        pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
++        pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
++
++        # keyword arguments after *arglist
++        def f(*args, **kwargs):
++            return args, kwargs
++        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
++                                                    {'x':2, 'y':5}))
++        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
++        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
++
++        # argument annotation tests
++        def f(x) -> list: pass
++        self.assertEquals(f.__annotations__, {'return': list})
++        def f(x:int): pass
++        self.assertEquals(f.__annotations__, {'x': int})
++        def f(*x:str): pass
++        self.assertEquals(f.__annotations__, {'x': str})
++        def f(**x:float): pass
++        self.assertEquals(f.__annotations__, {'x': float})
++        def f(x, y:1+2): pass
++        self.assertEquals(f.__annotations__, {'y': 3})
++        def f(a, b:1, c:2, d): pass
++        self.assertEquals(f.__annotations__, {'b': 1, 'c': 2})
++        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
++        self.assertEquals(f.__annotations__,
++                          {'b': 1, 'c': 2, 'e': 3, 'g': 6})
++        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
++              **k:11) -> 12: pass
++        self.assertEquals(f.__annotations__,
++                          {'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
++                           'k': 11, 'return': 12})
++        # Check for SF Bug #1697248 - mixing decorators and a return annotation
++        def null(x): return x
++        @null
++        def f(x) -> list: pass
++        self.assertEquals(f.__annotations__, {'return': list})
++
++        # test MAKE_CLOSURE with a variety of oparg's
++        closure = 1
++        def f(): return closure
++        def f(x=1): return closure
++        def f(*, k=1): return closure
++        def f() -> int: return closure
++
++        # Check ast errors in *args and *kwargs
++        check_syntax_error(self, "f(*g(1=2))")
++        check_syntax_error(self, "f(**g(1=2))")
++
++    def testLambdef(self):
++        ### lambdef: 'lambda' [varargslist] ':' test
++        l1 = lambda : 0
++        self.assertEquals(l1(), 0)
++        l2 = lambda : a[d] # XXX just testing the expression
++        l3 = lambda : [2 < x for x in [-1, 3, 0]]
++        self.assertEquals(l3(), [0, 1, 0])
++        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
++        self.assertEquals(l4(), 1)
++        l5 = lambda x, y, z=2: x + y + z
++        self.assertEquals(l5(1, 2), 5)
++        self.assertEquals(l5(1, 2, 3), 6)
++        check_syntax_error(self, "lambda x: x = 2")
++        check_syntax_error(self, "lambda (None,): None")
++        l6 = lambda x, y, *, k=20: x+y+k
++        self.assertEquals(l6(1,2), 1+2+20)
++        self.assertEquals(l6(1,2,k=10), 1+2+10)
++
++
++    ### stmt: simple_stmt | compound_stmt
++    # Tested below
++
++    def testSimpleStmt(self):
++        ### simple_stmt: small_stmt (';' small_stmt)* [';']
++        x = 1; pass; del x
++        def foo():
++            # verify statements that end with semicolons
++            x = 1; pass; del x;
++        foo()
++
++    ### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
++    # Tested below
++
++    def testExprStmt(self):
++        # (exprlist '=')* exprlist
++        1
++        1, 2, 3
++        x = 1
++        x = 1, 2, 3
++        x = y = z = 1, 2, 3
++        x, y, z = 1, 2, 3
++        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
++
++        check_syntax_error(self, "x + 1 = 1")
++        check_syntax_error(self, "a + 1 = b + 2")
++
++    def testDelStmt(self):
++        # 'del' exprlist
++        abc = [1,2,3]
++        x, y, z = abc
++        xyz = x, y, z
++
++        del abc
++        del x, y, (z, xyz)
++
++    def testPassStmt(self):
++        # 'pass'
++        pass
++
++    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
++    # Tested below
++
++    def testBreakStmt(self):
++        # 'break'
++        while 1: break
++
++    def testContinueStmt(self):
++        # 'continue'
++        i = 1
++        while i: i = 0; continue
++
++        msg = ""
++        while not msg:
++            msg = "ok"
++            try:
++                continue
++                msg = "continue failed to continue inside try"
++            except:
++                msg = "continue inside try called except block"
++        if msg != "ok":
++            self.fail(msg)
++
++        msg = ""
++        while not msg:
++            msg = "finally block not called"
++            try:
++                continue
++            finally:
++                msg = "ok"
++        if msg != "ok":
++            self.fail(msg)
++
++    def test_break_continue_loop(self):
++        # This test warrants an explanation. It is a test specifically for SF bugs
++        # #463359 and #462937. The bug is that a 'break' statement executed or
++        # exception raised inside a try/except inside a loop, *after* a continue
++        # statement has been executed in that loop, will cause the wrong number of
++        # arguments to be popped off the stack and the instruction pointer reset to
++        # a very small number (usually 0). Because of this, the following test
++        # *must* be written as a function, and the tracking vars *must* be function
++        # arguments with default values. Otherwise, the test will loop and loop.
++
++        def test_inner(extra_burning_oil = 1, count=0):
++            big_hippo = 2
++            while big_hippo:
++                count += 1
++                try:
++                    if extra_burning_oil and big_hippo == 1:
++                        extra_burning_oil -= 1
++                        break
++                    big_hippo -= 1
++                    continue
++                except:
++                    raise
++            if count > 2 or big_hippo != 1:
++                self.fail("continue then break in try/except in loop broken!")
++        test_inner()
++
++    def testReturn(self):
++        # 'return' [testlist]
++        def g1(): return
++        def g2(): return 1
++        g1()
++        x = g2()
++        check_syntax_error(self, "class foo:return 1")
++
++    def testYield(self):
++        check_syntax_error(self, "class foo:yield 1")
++
++    def testRaise(self):
++        # 'raise' test [',' test]
++        try: raise RuntimeError('just testing')
++        except RuntimeError: pass
++        try: raise KeyboardInterrupt
++        except KeyboardInterrupt: pass
++
++    def testImport(self):
++        # 'import' dotted_as_names
++        import sys
++        import time, sys
++        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
++        from time import time
++        from time import (time)
++        # not testable inside a function, but already done at top of the module
++        # from sys import *
++        from sys import path, argv
++        from sys import (path, argv)
++        from sys import (path, argv,)
++
++    def testGlobal(self):
++        # 'global' NAME (',' NAME)*
++        global a
++        global a, b
++        global one, two, three, four, five, six, seven, eight, nine, ten
++
++    def testNonlocal(self):
++        # 'nonlocal' NAME (',' NAME)*
++        x = 0
++        y = 0
++        def f():
++            nonlocal x
++            nonlocal x, y
++
++    def testAssert(self):
++        # assert_stmt: 'assert' test [',' test]
++        assert 1
++        assert 1, 1
++        assert lambda x:x
++        assert 1, lambda x:x+1
++        try:
++            assert 0, "msg"
++        except AssertionError as e:
++            self.assertEquals(e.args[0], "msg")
++        else:
++            if __debug__:
++                self.fail("AssertionError not raised by assert 0")
++
++    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
++    # Tested below
++
++    def testIf(self):
++        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
++        if 1: pass
++        if 1: pass
++        else: pass
++        if 0: pass
++        elif 0: pass
++        if 0: pass
++        elif 0: pass
++        elif 0: pass
++        elif 0: pass
++        else: pass
++
++    def testWhile(self):
++        # 'while' test ':' suite ['else' ':' suite]
++        while 0: pass
++        while 0: pass
++        else: pass
++
++        # Issue1920: "while 0" is optimized away,
++        # ensure that the "else" clause is still present.
++        x = 0
++        while 0:
++            x = 1
++        else:
++            x = 2
++        self.assertEquals(x, 2)
++
++    def testFor(self):
++        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
++        for i in 1, 2, 3: pass
++        for i, j, k in (): pass
++        else: pass
++        class Squares:
++            def __init__(self, max):
++                self.max = max
++                self.sofar = []
++            def __len__(self): return len(self.sofar)
++            def __getitem__(self, i):
++                if not 0 <= i < self.max: raise IndexError
++                n = len(self.sofar)
++                while n <= i:
++                    self.sofar.append(n*n)
++                    n = n+1
++                return self.sofar[i]
++        n = 0
++        for x in Squares(10): n = n+x
++        if n != 285:
++            self.fail('for over growing sequence')
++
++        result = []
++        for x, in [(1,), (2,), (3,)]:
++            result.append(x)
++        self.assertEqual(result, [1, 2, 3])
++
++    def testTry(self):
++        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
++        ###         | 'try' ':' suite 'finally' ':' suite
++        ### except_clause: 'except' [expr ['as' expr]]
++        try:
++            1/0
++        except ZeroDivisionError:
++            pass
++        else:
++            pass
++        try: 1/0
++        except EOFError: pass
++        except TypeError as msg: pass
++        except RuntimeError as msg: pass
++        except: pass
++        else: pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError): pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError) as msg: pass
++        try: pass
++        finally: pass
++
++    def testSuite(self):
++        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
++        if 1: pass
++        if 1:
++            pass
++        if 1:
++            #
++            #
++            #
++            pass
++            pass
++            #
++            pass
++            #
++
++    def testTest(self):
++        ### and_test ('or' and_test)*
++        ### and_test: not_test ('and' not_test)*
++        ### not_test: 'not' not_test | comparison
++        if not 1: pass
++        if 1 and 1: pass
++        if 1 or 1: pass
++        if not not not 1: pass
++        if not 1 and 1 and 1: pass
++        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
++
++    def testComparison(self):
++        ### comparison: expr (comp_op expr)*
++        ### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
++        if 1: pass
++        x = (1 == 1)
++        if 1 == 1: pass
++        if 1 != 1: pass
++        if 1 < 1: pass
++        if 1 > 1: pass
++        if 1 <= 1: pass
++        if 1 >= 1: pass
++        if 1 is 1: pass
++        if 1 is not 1: pass
++        if 1 in (): pass
++        if 1 not in (): pass
++        if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
++
++    def testBinaryMaskOps(self):
++        x = 1 & 1
++        x = 1 ^ 1
++        x = 1 | 1
++
++    def testShiftOps(self):
++        x = 1 << 1
++        x = 1 >> 1
++        x = 1 << 1 >> 1
++
++    def testAdditiveOps(self):
++        x = 1
++        x = 1 + 1
++        x = 1 - 1 - 1
++        x = 1 - 1 + 1 - 1 + 1
++
++    def testMultiplicativeOps(self):
++        x = 1 * 1
++        x = 1 / 1
++        x = 1 % 1
++        x = 1 / 1 * 1 % 1
++
++    def testUnaryOps(self):
++        x = +1
++        x = -1
++        x = ~1
++        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
++        x = -1*1/1 + 1*1 - ---1*1
++
++    def testSelectors(self):
++        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
++        ### subscript: expr | [expr] ':' [expr]
++
++        import sys, time
++        c = sys.path[0]
++        x = time.time()
++        x = sys.modules['time'].time()
++        a = '01234'
++        c = a[0]
++        c = a[-1]
++        s = a[0:5]
++        s = a[:5]
++        s = a[0:]
++        s = a[:]
++        s = a[-5:]
++        s = a[:-1]
++        s = a[-4:-3]
++        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
++        # The testing here is fairly incomplete.
++        # Test cases should include: commas with 1 and 2 colons
++        d = {}
++        d[1] = 1
++        d[1,] = 2
++        d[1,2] = 3
++        d[1,2,3] = 4
++        L = list(d)
++        L.sort(key=lambda x: x if isinstance(x, tuple) else ())
++        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
++
++    def testAtoms(self):
++        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
++        ### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
++
++        x = (1)
++        x = (1 or 2 or 3)
++        x = (1 or 2 or 3, 2, 3)
++
++        x = []
++        x = [1]
++        x = [1 or 2 or 3]
++        x = [1 or 2 or 3, 2, 3]
++        x = []
++
++        x = {}
++        x = {'one': 1}
++        x = {'one': 1,}
++        x = {'one' or 'two': 1 or 2}
++        x = {'one': 1, 'two': 2}
++        x = {'one': 1, 'two': 2,}
++        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
++
++        x = {'one'}
++        x = {'one', 1,}
++        x = {'one', 'two', 'three'}
++        x = {2, 3, 4,}
++
++        x = x
++        x = 'x'
++        x = 123
++
++    ### exprlist: expr (',' expr)* [',']
++    ### testlist: test (',' test)* [',']
++    # These have been exercised enough above
++
++    def testClassdef(self):
++        # 'class' NAME ['(' [testlist] ')'] ':' suite
++        class B: pass
++        class B2(): pass
++        class C1(B): pass
++        class C2(B): pass
++        class D(C1, C2, B): pass
++        class C:
++            def meth1(self): pass
++            def meth2(self, arg): pass
++            def meth3(self, a1, a2): pass
++
++        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++        # decorators: decorator+
++        # decorated: decorators (classdef | funcdef)
++        def class_decorator(x): return x
++        @class_decorator
++        class G: pass
++
++    def testDictcomps(self):
++        # dictorsetmaker: ( (test ':' test (comp_for |
++        #                                   (',' test ':' test)* [','])) |
++        #                   (test (comp_for | (',' test)* [','])) )
++        nums = [1, 2, 3]
++        self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
++
++    def testListcomps(self):
++        # list comprehension tests
++        nums = [1, 2, 3, 4, 5]
++        strs = ["Apple", "Banana", "Coconut"]
++        spcs = ["  Apple", " Banana ", "Coco  nut  "]
++
++        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
++        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
++        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
++        self.assertEqual([(i, s) for i in nums for s in strs],
++                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
++                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
++                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
++                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
++                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
++
++        def test_in_func(l):
++            return [0 < x < 3 for x in l if x > 2]
++
++        self.assertEqual(test_in_func(nums), [False, False, False])
++
++        def test_nested_front():
++            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
++                             [[1, 2], [3, 4], [5, 6]])
++
++        test_nested_front()
++
++        check_syntax_error(self, "[i, s for i in nums for s in strs]")
++        check_syntax_error(self, "[x if y]")
++
++        suppliers = [
++          (1, "Boeing"),
++          (2, "Ford"),
++          (3, "Macdonalds")
++        ]
++
++        parts = [
++          (10, "Airliner"),
++          (20, "Engine"),
++          (30, "Cheeseburger")
++        ]
++
++        suppart = [
++          (1, 10), (1, 20), (2, 20), (3, 30)
++        ]
++
++        x = [
++          (sname, pname)
++            for (sno, sname) in suppliers
++              for (pno, pname) in parts
++                for (sp_sno, sp_pno) in suppart
++                  if sno == sp_sno and pno == sp_pno
++        ]
++
++        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
++                             ('Macdonalds', 'Cheeseburger')])
++
++    def testGenexps(self):
++        # generator expression tests
++        g = ([x for x in range(10)] for x in range(1))
++        self.assertEqual(next(g), [x for x in range(10)])
++        try:
++            next(g)
++            self.fail('should produce StopIteration exception')
++        except StopIteration:
++            pass
++
++        a = 1
++        try:
++            g = (a for d in a)
++            next(g)
++            self.fail('should produce TypeError')
++        except TypeError:
++            pass
++
++        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
++        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
++
++        a = [x for x in range(10)]
++        b = (x for x in (y for y in a))
++        self.assertEqual(sum(b), sum([x for x in range(10)]))
++
++        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
++        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
++        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
++        check_syntax_error(self, "foo(x for x in range(10), 100)")
++        check_syntax_error(self, "foo(100, x for x in range(10))")
++
++    def testComprehensionSpecials(self):
++        # test for outermost iterable precomputation
++        x = 10; g = (i for i in range(x)); x = 5
++        self.assertEqual(len(list(g)), 10)
++
++        # This should hold, since we're only precomputing the outermost iterable.
++        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
++        x = 5; t = True;
++        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
++
++        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
++        # even though it's silly. Make sure it works (ifelse broke this.)
++        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
++        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
++
++        # verify unpacking single element tuples in listcomp/genexp.
++        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
++        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
++
++    def testIfElseExpr(self):
++        # Test ifelse expressions in various cases
++        def _checkeval(msg, ret):
++            "helper to check that evaluation of expressions is done correctly"
++            print(x)
++            return ret
++
++        # the next line is no longer allowed in Python 3
++        #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
++        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
++        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
++        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
++        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
++        self.assertEqual((5 and 6 if 0 else 1), 1)
++        self.assertEqual(((5 and 6) if 0 else 1), 1)
++        self.assertEqual((5 and (6 if 1 else 1)), 6)
++        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
++        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
++        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
++        self.assertEqual((not 5 if 1 else 1), False)
++        self.assertEqual((not 5 if 0 else 1), 1)
++        self.assertEqual((6 + 1 if 1 else 2), 7)
++        self.assertEqual((6 - 1 if 1 else 2), 5)
++        self.assertEqual((6 * 2 if 1 else 4), 12)
++        self.assertEqual((6 / 2 if 1 else 3), 3)
++        self.assertEqual((6 < 4 if 0 else 2), 2)
++
++
++def test_main():
++    run_unittest(TokenTests, GrammarTests)
++
++if __name__ == '__main__':
++    test_main()
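
The outermost-iterable behaviour exercised by testComprehensionSpecials above deserves a standalone illustration: only the first 'for' clause of a generator expression is evaluated at definition time, so later rebindings of the names it reads are ignored, while the inner clauses are evaluated lazily on each iteration. A minimal sketch in plain Python:

    x = 10
    g = (i for i in range(x))   # range(x) is evaluated right here, with x == 10
    x = 5                       # rebinding x afterwards does not affect g
    assert len(list(g)) == 10   # all ten values are still produced
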
+diff -r 531f2e948299 refactor/tests/data/README
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/README	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++In this directory:
++- py2_test_grammar.py -- test file that exercises most/all of Python 2.x's grammar.
++- py3_test_grammar.py -- test file that exercises most/all of Python 3.x's grammar.
++- infinite_recursion.py -- test file that causes refactor's faster recursive pattern matching
++  scheme to fail, but passes when refactor falls back to iterative pattern matching.
++- fixes/ -- for use by test_refactor.py
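
The fixers/ tree listed here is consumed by test_refactor.py. As a rough sketch of how such a custom fixer package can be driven, assuming the renamed refactor package keeps the RefactoringTool API of lib2to3.refactor (both the refactor.refactor module path and the call signatures below are assumptions, not confirmed by this diff):

    import sys
    sys.path.append("refactor/tests/data/fixers")   # make the myfixes package importable

    from refactor.refactor import RefactoringTool   # assumed mirror of lib2to3.refactor

    # Fixer names are dotted module names; this selects myfixes/fix_parrot.py.
    tool = RefactoringTool(["myfixes.fix_parrot"])
    # The source must end with a newline for the parser to accept it.
    tree = tool.refactor_string("def parrot():\n    pass\n", "<parrot_example>")
    print(str(tree))   # FixParrot renames parrot() to cheese()
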
+diff -r 531f2e948299 refactor/tests/data/fixers/.svn/text-base/bad_order.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/.svn/text-base/bad_order.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,5 @@
++from lib2to3.fixer_base import BaseFix
++
++class FixBadOrder(BaseFix):
++
++    order = "crazy"
+diff -r 531f2e948299 refactor/tests/data/fixers/.svn/text-base/no_fixer_cls.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/.svn/text-base/no_fixer_cls.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,1 @@
++# This is empty so trying to fetch the fixer class gives an AttributeError
+diff -r 531f2e948299 refactor/tests/data/fixers/.svn/text-base/parrot_example.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/.svn/text-base/parrot_example.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,2 @@
++def parrot():
++    pass
+diff -r 531f2e948299 refactor/tests/data/fixers/bad_order.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/bad_order.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,5 @@
++from refactor.fixer_base import BaseFix
++
++class FixBadOrder(BaseFix):
++
++    order = "crazy"
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/.svn/text-base/fix_explicit.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/.svn/text-base/fix_explicit.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++from lib2to3.fixer_base import BaseFix
++
++class FixExplicit(BaseFix):
++    explicit = True
++
++    def match(self): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/.svn/text-base/fix_first.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/.svn/text-base/fix_first.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++from lib2to3.fixer_base import BaseFix
++
++class FixFirst(BaseFix):
++    run_order = 1
++
++    def match(self, node): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/.svn/text-base/fix_last.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/.svn/text-base/fix_last.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,7 @@
++from lib2to3.fixer_base import BaseFix
++
++class FixLast(BaseFix):
++
++    run_order = 10
++
++    def match(self, node): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/.svn/text-base/fix_parrot.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/.svn/text-base/fix_parrot.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,13 @@
++from lib2to3.fixer_base import BaseFix
++from lib2to3.fixer_util import Name
++
++class FixParrot(BaseFix):
++    """
++    Change functions named 'parrot' to 'cheese'.
++    """
++
++    PATTERN = """funcdef < 'def' name='parrot' any* >"""
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("cheese", name.get_prefix()))
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/.svn/text-base/fix_preorder.py.svn-base
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/.svn/text-base/fix_preorder.py.svn-base	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++from lib2to3.fixer_base import BaseFix
++
++class FixPreorder(BaseFix):
++    order = "pre"
++
++    def match(self, node): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/fix_explicit.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/fix_explicit.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++from refactor.fixer_base import BaseFix
++
++class FixExplicit(BaseFix):
++    explicit = True
++
++    def match(self): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/fix_first.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/fix_first.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++from refactor.fixer_base import BaseFix
++
++class FixFirst(BaseFix):
++    run_order = 1
++
++    def match(self, node): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/fix_last.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/fix_last.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,7 @@
++from refactor.fixer_base import BaseFix
++
++class FixLast(BaseFix):
++
++    run_order = 10
++
++    def match(self, node): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/fix_parrot.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/fix_parrot.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,13 @@
++from refactor.fixer_base import BaseFix
++from refactor.fixer_util import Name
++
++class FixParrot(BaseFix):
++    """
++    Change functions named 'parrot' to 'cheese'.
++    """
++
++    PATTERN = """funcdef < 'def' name='parrot' any* >"""
++
++    def transform(self, node, results):
++        name = results["name"]
++        name.replace(Name("cheese", name.get_prefix()))
+diff -r 531f2e948299 refactor/tests/data/fixers/myfixes/fix_preorder.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/myfixes/fix_preorder.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,6 @@
++from refactor.fixer_base import BaseFix
++
++class FixPreorder(BaseFix):
++    order = "pre"
++
++    def match(self, node): return False
+diff -r 531f2e948299 refactor/tests/data/fixers/no_fixer_cls.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/no_fixer_cls.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,1 @@
++# This is empty so trying to fetch the fixer class gives an AttributeError
+diff -r 531f2e948299 refactor/tests/data/fixers/parrot_example.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/fixers/parrot_example.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,2 @@
++def parrot():
++    pass
+diff -r 531f2e948299 refactor/tests/data/infinite_recursion.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/infinite_recursion.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,2670 @@
++# This file is used to verify that refactor falls back to a slower, iterative
++# pattern matching scheme in the event that the faster recursive system fails
++# due to infinite recursion.
++from ctypes import *
++STRING = c_char_p
++
++
++OSUnknownByteOrder = 0
++UIT_PROMPT = 1
++P_PGID = 2
++P_PID = 1
++UIT_ERROR = 5
++UIT_INFO = 4
++UIT_NONE = 0
++P_ALL = 0
++UIT_VERIFY = 2
++OSBigEndian = 2
++UIT_BOOLEAN = 3
++OSLittleEndian = 1
++__darwin_nl_item = c_int
++__darwin_wctrans_t = c_int
++__darwin_wctype_t = c_ulong
++__int8_t = c_byte
++__uint8_t = c_ubyte
++__int16_t = c_short
++__uint16_t = c_ushort
++__int32_t = c_int
++__uint32_t = c_uint
++__int64_t = c_longlong
++__uint64_t = c_ulonglong
++__darwin_intptr_t = c_long
++__darwin_natural_t = c_uint
++__darwin_ct_rune_t = c_int
++class __mbstate_t(Union):
++    pass
++__mbstate_t._pack_ = 4
++__mbstate_t._fields_ = [
++    ('__mbstate8', c_char * 128),
++    ('_mbstateL', c_longlong),
++]
++assert sizeof(__mbstate_t) == 128, sizeof(__mbstate_t)
++assert alignment(__mbstate_t) == 4, alignment(__mbstate_t)
++__darwin_mbstate_t = __mbstate_t
++__darwin_ptrdiff_t = c_int
++__darwin_size_t = c_ulong
++__darwin_va_list = STRING
++__darwin_wchar_t = c_int
++__darwin_rune_t = __darwin_wchar_t
++__darwin_wint_t = c_int
++__darwin_clock_t = c_ulong
++__darwin_socklen_t = __uint32_t
++__darwin_ssize_t = c_long
++__darwin_time_t = c_long
++sig_atomic_t = c_int
++class sigcontext(Structure):
++    pass
++sigcontext._fields_ = [
++    ('sc_onstack', c_int),
++    ('sc_mask', c_int),
++    ('sc_eax', c_uint),
++    ('sc_ebx', c_uint),
++    ('sc_ecx', c_uint),
++    ('sc_edx', c_uint),
++    ('sc_edi', c_uint),
++    ('sc_esi', c_uint),
++    ('sc_ebp', c_uint),
++    ('sc_esp', c_uint),
++    ('sc_ss', c_uint),
++    ('sc_eflags', c_uint),
++    ('sc_eip', c_uint),
++    ('sc_cs', c_uint),
++    ('sc_ds', c_uint),
++    ('sc_es', c_uint),
++    ('sc_fs', c_uint),
++    ('sc_gs', c_uint),
++]
++assert sizeof(sigcontext) == 72, sizeof(sigcontext)
++assert alignment(sigcontext) == 4, alignment(sigcontext)
++u_int8_t = c_ubyte
++u_int16_t = c_ushort
++u_int32_t = c_uint
++u_int64_t = c_ulonglong
++int32_t = c_int
++register_t = int32_t
++user_addr_t = u_int64_t
++user_size_t = u_int64_t
++int64_t = c_longlong
++user_ssize_t = int64_t
++user_long_t = int64_t
++user_ulong_t = u_int64_t
++user_time_t = int64_t
++syscall_arg_t = u_int64_t
++
++# values for unnamed enumeration
++class aes_key_st(Structure):
++    pass
++aes_key_st._fields_ = [
++    ('rd_key', c_ulong * 60),
++    ('rounds', c_int),
++]
++assert sizeof(aes_key_st) == 244, sizeof(aes_key_st)
++assert alignment(aes_key_st) == 4, alignment(aes_key_st)
++AES_KEY = aes_key_st
++class asn1_ctx_st(Structure):
++    pass
++asn1_ctx_st._fields_ = [
++    ('p', POINTER(c_ubyte)),
++    ('eos', c_int),
++    ('error', c_int),
++    ('inf', c_int),
++    ('tag', c_int),
++    ('xclass', c_int),
++    ('slen', c_long),
++    ('max', POINTER(c_ubyte)),
++    ('q', POINTER(c_ubyte)),
++    ('pp', POINTER(POINTER(c_ubyte))),
++    ('line', c_int),
++]
++assert sizeof(asn1_ctx_st) == 44, sizeof(asn1_ctx_st)
++assert alignment(asn1_ctx_st) == 4, alignment(asn1_ctx_st)
++ASN1_CTX = asn1_ctx_st
++class asn1_object_st(Structure):
++    pass
++asn1_object_st._fields_ = [
++    ('sn', STRING),
++    ('ln', STRING),
++    ('nid', c_int),
++    ('length', c_int),
++    ('data', POINTER(c_ubyte)),
++    ('flags', c_int),
++]
++assert sizeof(asn1_object_st) == 24, sizeof(asn1_object_st)
++assert alignment(asn1_object_st) == 4, alignment(asn1_object_st)
++ASN1_OBJECT = asn1_object_st
++class asn1_string_st(Structure):
++    pass
++asn1_string_st._fields_ = [
++    ('length', c_int),
++    ('type', c_int),
++    ('data', POINTER(c_ubyte)),
++    ('flags', c_long),
++]
++assert sizeof(asn1_string_st) == 16, sizeof(asn1_string_st)
++assert alignment(asn1_string_st) == 4, alignment(asn1_string_st)
++ASN1_STRING = asn1_string_st
++class ASN1_ENCODING_st(Structure):
++    pass
++ASN1_ENCODING_st._fields_ = [
++    ('enc', POINTER(c_ubyte)),
++    ('len', c_long),
++    ('modified', c_int),
++]
++assert sizeof(ASN1_ENCODING_st) == 12, sizeof(ASN1_ENCODING_st)
++assert alignment(ASN1_ENCODING_st) == 4, alignment(ASN1_ENCODING_st)
++ASN1_ENCODING = ASN1_ENCODING_st
++class asn1_string_table_st(Structure):
++    pass
++asn1_string_table_st._fields_ = [
++    ('nid', c_int),
++    ('minsize', c_long),
++    ('maxsize', c_long),
++    ('mask', c_ulong),
++    ('flags', c_ulong),
++]
++assert sizeof(asn1_string_table_st) == 20, sizeof(asn1_string_table_st)
++assert alignment(asn1_string_table_st) == 4, alignment(asn1_string_table_st)
++ASN1_STRING_TABLE = asn1_string_table_st
++class ASN1_TEMPLATE_st(Structure):
++    pass
++ASN1_TEMPLATE_st._fields_ = [
++]
++ASN1_TEMPLATE = ASN1_TEMPLATE_st
++class ASN1_ITEM_st(Structure):
++    pass
++ASN1_ITEM = ASN1_ITEM_st
++ASN1_ITEM_st._fields_ = [
++]
++class ASN1_TLC_st(Structure):
++    pass
++ASN1_TLC = ASN1_TLC_st
++ASN1_TLC_st._fields_ = [
++]
++class ASN1_VALUE_st(Structure):
++    pass
++ASN1_VALUE_st._fields_ = [
++]
++ASN1_VALUE = ASN1_VALUE_st
++ASN1_ITEM_EXP = ASN1_ITEM
++class asn1_type_st(Structure):
++    pass
++class N12asn1_type_st4DOLLAR_11E(Union):
++    pass
++ASN1_BOOLEAN = c_int
++ASN1_INTEGER = asn1_string_st
++ASN1_ENUMERATED = asn1_string_st
++ASN1_BIT_STRING = asn1_string_st
++ASN1_OCTET_STRING = asn1_string_st
++ASN1_PRINTABLESTRING = asn1_string_st
++ASN1_T61STRING = asn1_string_st
++ASN1_IA5STRING = asn1_string_st
++ASN1_GENERALSTRING = asn1_string_st
++ASN1_BMPSTRING = asn1_string_st
++ASN1_UNIVERSALSTRING = asn1_string_st
++ASN1_UTCTIME = asn1_string_st
++ASN1_GENERALIZEDTIME = asn1_string_st
++ASN1_VISIBLESTRING = asn1_string_st
++ASN1_UTF8STRING = asn1_string_st
++N12asn1_type_st4DOLLAR_11E._fields_ = [
++    ('ptr', STRING),
++    ('boolean', ASN1_BOOLEAN),
++    ('asn1_string', POINTER(ASN1_STRING)),
++    ('object', POINTER(ASN1_OBJECT)),
++    ('integer', POINTER(ASN1_INTEGER)),
++    ('enumerated', POINTER(ASN1_ENUMERATED)),
++    ('bit_string', POINTER(ASN1_BIT_STRING)),
++    ('octet_string', POINTER(ASN1_OCTET_STRING)),
++    ('printablestring', POINTER(ASN1_PRINTABLESTRING)),
++    ('t61string', POINTER(ASN1_T61STRING)),
++    ('ia5string', POINTER(ASN1_IA5STRING)),
++    ('generalstring', POINTER(ASN1_GENERALSTRING)),
++    ('bmpstring', POINTER(ASN1_BMPSTRING)),
++    ('universalstring', POINTER(ASN1_UNIVERSALSTRING)),
++    ('utctime', POINTER(ASN1_UTCTIME)),
++    ('generalizedtime', POINTER(ASN1_GENERALIZEDTIME)),
++    ('visiblestring', POINTER(ASN1_VISIBLESTRING)),
++    ('utf8string', POINTER(ASN1_UTF8STRING)),
++    ('set', POINTER(ASN1_STRING)),
++    ('sequence', POINTER(ASN1_STRING)),
++]
++assert sizeof(N12asn1_type_st4DOLLAR_11E) == 4, sizeof(N12asn1_type_st4DOLLAR_11E)
++assert alignment(N12asn1_type_st4DOLLAR_11E) == 4, alignment(N12asn1_type_st4DOLLAR_11E)
++asn1_type_st._fields_ = [
++    ('type', c_int),
++    ('value', N12asn1_type_st4DOLLAR_11E),
++]
++assert sizeof(asn1_type_st) == 8, sizeof(asn1_type_st)
++assert alignment(asn1_type_st) == 4, alignment(asn1_type_st)
++ASN1_TYPE = asn1_type_st
++class asn1_method_st(Structure):
++    pass
++asn1_method_st._fields_ = [
++    ('i2d', CFUNCTYPE(c_int)),
++    ('d2i', CFUNCTYPE(STRING)),
++    ('create', CFUNCTYPE(STRING)),
++    ('destroy', CFUNCTYPE(None)),
++]
++assert sizeof(asn1_method_st) == 16, sizeof(asn1_method_st)
++assert alignment(asn1_method_st) == 4, alignment(asn1_method_st)
++ASN1_METHOD = asn1_method_st
++class asn1_header_st(Structure):
++    pass
++asn1_header_st._fields_ = [
++    ('header', POINTER(ASN1_OCTET_STRING)),
++    ('data', STRING),
++    ('meth', POINTER(ASN1_METHOD)),
++]
++assert sizeof(asn1_header_st) == 12, sizeof(asn1_header_st)
++assert alignment(asn1_header_st) == 4, alignment(asn1_header_st)
++ASN1_HEADER = asn1_header_st
++class BIT_STRING_BITNAME_st(Structure):
++    pass
++BIT_STRING_BITNAME_st._fields_ = [
++    ('bitnum', c_int),
++    ('lname', STRING),
++    ('sname', STRING),
++]
++assert sizeof(BIT_STRING_BITNAME_st) == 12, sizeof(BIT_STRING_BITNAME_st)
++assert alignment(BIT_STRING_BITNAME_st) == 4, alignment(BIT_STRING_BITNAME_st)
++BIT_STRING_BITNAME = BIT_STRING_BITNAME_st
++class bio_st(Structure):
++    pass
++BIO = bio_st
++bio_info_cb = CFUNCTYPE(None, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)
++class bio_method_st(Structure):
++    pass
++bio_method_st._fields_ = [
++    ('type', c_int),
++    ('name', STRING),
++    ('bwrite', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
++    ('bread', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
++    ('bputs', CFUNCTYPE(c_int, POINTER(BIO), STRING)),
++    ('bgets', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
++    ('ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, c_long, c_void_p)),
++    ('create', CFUNCTYPE(c_int, POINTER(BIO))),
++    ('destroy', CFUNCTYPE(c_int, POINTER(BIO))),
++    ('callback_ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, POINTER(bio_info_cb))),
++]
++assert sizeof(bio_method_st) == 40, sizeof(bio_method_st)
++assert alignment(bio_method_st) == 4, alignment(bio_method_st)
++BIO_METHOD = bio_method_st
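++# bio_method_st is a C-style vtable: each field is a CFUNCTYPE function
++# pointer whose first argument is the return type and whose remaining
++# arguments are the parameter types. A minimal sketch of declaring and
++# instantiating such a callback (hypothetical names, not part of the
++# generated bindings):
++#
++#     from ctypes import CFUNCTYPE, c_int, c_char_p
++#     WRITE_FN = CFUNCTYPE(c_int, c_char_p, c_int)   # int (*)(char *, int)
++#     def py_write(buf, n):
++#         return n                     # pretend all n bytes were written
++#     write_cb = WRITE_FN(py_write)    # callable from C; keep a reference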
++class crypto_ex_data_st(Structure):
++    pass
++class stack_st(Structure):
++    pass
++STACK = stack_st
++crypto_ex_data_st._fields_ = [
++    ('sk', POINTER(STACK)),
++    ('dummy', c_int),
++]
++assert sizeof(crypto_ex_data_st) == 8, sizeof(crypto_ex_data_st)
++assert alignment(crypto_ex_data_st) == 4, alignment(crypto_ex_data_st)
++CRYPTO_EX_DATA = crypto_ex_data_st
++bio_st._fields_ = [
++    ('method', POINTER(BIO_METHOD)),
++    ('callback', CFUNCTYPE(c_long, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)),
++    ('cb_arg', STRING),
++    ('init', c_int),
++    ('shutdown', c_int),
++    ('flags', c_int),
++    ('retry_reason', c_int),
++    ('num', c_int),
++    ('ptr', c_void_p),
++    ('next_bio', POINTER(bio_st)),
++    ('prev_bio', POINTER(bio_st)),
++    ('references', c_int),
++    ('num_read', c_ulong),
++    ('num_write', c_ulong),
++    ('ex_data', CRYPTO_EX_DATA),
++]
++assert sizeof(bio_st) == 64, sizeof(bio_st)
++assert alignment(bio_st) == 4, alignment(bio_st)
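++# bio_st was declared above with an empty body and only now given its
++# _fields_, because next_bio and prev_bio point back at bio_st itself;
++# assigning _fields_ after the class exists is how ctypes expresses
++# self-referential C structs. The same pattern in miniature (hypothetical
++# names, not part of the generated bindings):
++#
++#     from ctypes import Structure, POINTER, c_int
++#     class node_st(Structure):
++#         pass                          # forward declaration
++#     node_st._fields_ = [
++#         ('value', c_int),
++#         ('next', POINTER(node_st)),   # legal only once node_st exists
++#     ]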
++class bio_f_buffer_ctx_struct(Structure):
++    pass
++bio_f_buffer_ctx_struct._fields_ = [
++    ('ibuf_size', c_int),
++    ('obuf_size', c_int),
++    ('ibuf', STRING),
++    ('ibuf_len', c_int),
++    ('ibuf_off', c_int),
++    ('obuf', STRING),
++    ('obuf_len', c_int),
++    ('obuf_off', c_int),
++]
++assert sizeof(bio_f_buffer_ctx_struct) == 32, sizeof(bio_f_buffer_ctx_struct)
++assert alignment(bio_f_buffer_ctx_struct) == 4, alignment(bio_f_buffer_ctx_struct)
++BIO_F_BUFFER_CTX = bio_f_buffer_ctx_struct
++class hostent(Structure):
++    pass
++hostent._fields_ = [
++]
++class bf_key_st(Structure):
++    pass
++bf_key_st._fields_ = [
++    ('P', c_uint * 18),
++    ('S', c_uint * 1024),
++]
++assert sizeof(bf_key_st) == 4168, sizeof(bf_key_st)
++assert alignment(bf_key_st) == 4, alignment(bf_key_st)
++BF_KEY = bf_key_st
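++# 'c_uint * 18' and 'c_uint * 1024' are fixed-size ctypes array types,
++# mirroring Blowfish's P (16 rounds + 2) and S (4 * 256) key-schedule
++# arrays; (18 + 1024) * 4 bytes accounts for the asserted 4168. A sketch of
++# how such array types behave (hypothetical names, not part of the generated
++# bindings):
++#
++#     from ctypes import c_uint, sizeof
++#     Arr18 = c_uint * 18              # a distinct ctypes type
++#     a = Arr18(*range(18))            # instantiates; indexes like a sequence
++#     assert sizeof(Arr18) == 18 * sizeof(c_uint)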
++class bignum_st(Structure):
++    pass
++bignum_st._fields_ = [
++    ('d', POINTER(c_ulong)),
++    ('top', c_int),
++    ('dmax', c_int),
++    ('neg', c_int),
++    ('flags', c_int),
++]
++assert sizeof(bignum_st) == 20, sizeof(bignum_st)
++assert alignment(bignum_st) == 4, alignment(bignum_st)
++BIGNUM = bignum_st
++class bignum_ctx(Structure):
++    pass
++bignum_ctx._fields_ = [
++]
++BN_CTX = bignum_ctx
++class bn_blinding_st(Structure):
++    pass
++bn_blinding_st._fields_ = [
++    ('init', c_int),
++    ('A', POINTER(BIGNUM)),
++    ('Ai', POINTER(BIGNUM)),
++    ('mod', POINTER(BIGNUM)),
++    ('thread_id', c_ulong),
++]
++assert sizeof(bn_blinding_st) == 20, sizeof(bn_blinding_st)
++assert alignment(bn_blinding_st) == 4, alignment(bn_blinding_st)
++BN_BLINDING = bn_blinding_st
++class bn_mont_ctx_st(Structure):
++    pass
++bn_mont_ctx_st._fields_ = [
++    ('ri', c_int),
++    ('RR', BIGNUM),
++    ('N', BIGNUM),
++    ('Ni', BIGNUM),
++    ('n0', c_ulong),
++    ('flags', c_int),
++]
++assert sizeof(bn_mont_ctx_st) == 72, sizeof(bn_mont_ctx_st)
++assert alignment(bn_mont_ctx_st) == 4, alignment(bn_mont_ctx_st)
++BN_MONT_CTX = bn_mont_ctx_st
++class bn_recp_ctx_st(Structure):
++    pass
++bn_recp_ctx_st._fields_ = [
++    ('N', BIGNUM),
++    ('Nr', BIGNUM),
++    ('num_bits', c_int),
++    ('shift', c_int),
++    ('flags', c_int),
++]
++assert sizeof(bn_recp_ctx_st) == 52, sizeof(bn_recp_ctx_st)
++assert alignment(bn_recp_ctx_st) == 4, alignment(bn_recp_ctx_st)
++BN_RECP_CTX = bn_recp_ctx_st
++class buf_mem_st(Structure):
++    pass
++buf_mem_st._fields_ = [
++    ('length', c_int),
++    ('data', STRING),
++    ('max', c_int),
++]
++assert sizeof(buf_mem_st) == 12, sizeof(buf_mem_st)
++assert alignment(buf_mem_st) == 4, alignment(buf_mem_st)
++BUF_MEM = buf_mem_st
++class cast_key_st(Structure):
++    pass
++cast_key_st._fields_ = [
++    ('data', c_ulong * 32),
++    ('short_key', c_int),
++]
++assert sizeof(cast_key_st) == 132, sizeof(cast_key_st)
++assert alignment(cast_key_st) == 4, alignment(cast_key_st)
++CAST_KEY = cast_key_st
++class comp_method_st(Structure):
++    pass
++comp_method_st._fields_ = [
++    ('type', c_int),
++    ('name', STRING),
++    ('init', CFUNCTYPE(c_int)),
++    ('finish', CFUNCTYPE(None)),
++    ('compress', CFUNCTYPE(c_int)),
++    ('expand', CFUNCTYPE(c_int)),
++    ('ctrl', CFUNCTYPE(c_long)),
++    ('callback_ctrl', CFUNCTYPE(c_long)),
++]
++assert sizeof(comp_method_st) == 32, sizeof(comp_method_st)
++assert alignment(comp_method_st) == 4, alignment(comp_method_st)
++COMP_METHOD = comp_method_st
++class comp_ctx_st(Structure):
++    pass
++comp_ctx_st._fields_ = [
++    ('meth', POINTER(COMP_METHOD)),
++    ('compress_in', c_ulong),
++    ('compress_out', c_ulong),
++    ('expand_in', c_ulong),
++    ('expand_out', c_ulong),
++    ('ex_data', CRYPTO_EX_DATA),
++]
++assert sizeof(comp_ctx_st) == 28, sizeof(comp_ctx_st)
++assert alignment(comp_ctx_st) == 4, alignment(comp_ctx_st)
++COMP_CTX = comp_ctx_st
++class CRYPTO_dynlock_value(Structure):
++    pass
++CRYPTO_dynlock_value._fields_ = [
++]
++class CRYPTO_dynlock(Structure):
++    pass
++CRYPTO_dynlock._fields_ = [
++    ('references', c_int),
++    ('data', POINTER(CRYPTO_dynlock_value)),
++]
++assert sizeof(CRYPTO_dynlock) == 8, sizeof(CRYPTO_dynlock)
++assert alignment(CRYPTO_dynlock) == 4, alignment(CRYPTO_dynlock)
++BIO_dummy = bio_st
++CRYPTO_EX_new = CFUNCTYPE(c_int, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
++CRYPTO_EX_free = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
++CRYPTO_EX_dup = CFUNCTYPE(c_int, POINTER(CRYPTO_EX_DATA), POINTER(CRYPTO_EX_DATA), c_void_p, c_int, c_long, c_void_p)
++class crypto_ex_data_func_st(Structure):
++    pass
++crypto_ex_data_func_st._fields_ = [
++    ('argl', c_long),
++    ('argp', c_void_p),
++    ('new_func', POINTER(CRYPTO_EX_new)),
++    ('free_func', POINTER(CRYPTO_EX_free)),
++    ('dup_func', POINTER(CRYPTO_EX_dup)),
++]
++assert sizeof(crypto_ex_data_func_st) == 20, sizeof(crypto_ex_data_func_st)
++assert alignment(crypto_ex_data_func_st) == 4, alignment(crypto_ex_data_func_st)
++CRYPTO_EX_DATA_FUNCS = crypto_ex_data_func_st
++class st_CRYPTO_EX_DATA_IMPL(Structure):
++    pass
++CRYPTO_EX_DATA_IMPL = st_CRYPTO_EX_DATA_IMPL
++st_CRYPTO_EX_DATA_IMPL._fields_ = [
++]
++CRYPTO_MEM_LEAK_CB = CFUNCTYPE(c_void_p, c_ulong, STRING, c_int, c_int, c_void_p)
++DES_cblock = c_ubyte * 8
++const_DES_cblock = c_ubyte * 8
++class DES_ks(Structure):
++    pass
++class N6DES_ks3DOLLAR_9E(Union):
++    pass
++N6DES_ks3DOLLAR_9E._fields_ = [
++    ('cblock', DES_cblock),
++    ('deslong', c_ulong * 2),
++]
++assert sizeof(N6DES_ks3DOLLAR_9E) == 8, sizeof(N6DES_ks3DOLLAR_9E)
++assert alignment(N6DES_ks3DOLLAR_9E) == 4, alignment(N6DES_ks3DOLLAR_9E)
++DES_ks._fields_ = [
++    ('ks', N6DES_ks3DOLLAR_9E * 16),
++]
++assert sizeof(DES_ks) == 128, sizeof(DES_ks)
++assert alignment(DES_ks) == 4, alignment(DES_ks)
++DES_key_schedule = DES_ks
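++# DES_cblock is a bare array typedef (c_ubyte * 8), and DES_ks embeds the
++# anonymous 8-byte union sixteen times ('ks', N6DES_ks3DOLLAR_9E * 16),
++# giving the asserted 16 * 8 = 128 bytes. A sketch of reading both views of
++# such a union (hypothetical names, not part of the generated bindings):
++#
++#     from ctypes import Union, c_ubyte, c_ulong
++#     class block_u(Union):
++#         _fields_ = [('bytes', c_ubyte * 8), ('longs', c_ulong * 2)]
++#     b = block_u()
++#     b.longs[0] = 0x01020304          # write through one view...
++#     low_byte = b.bytes[0]            # ...read it back through the other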
++_ossl_old_des_cblock = c_ubyte * 8
++class _ossl_old_des_ks_struct(Structure):
++    pass
++class N23_ossl_old_des_ks_struct4DOLLAR_10E(Union):
++    pass
++N23_ossl_old_des_ks_struct4DOLLAR_10E._fields_ = [
++    ('_', _ossl_old_des_cblock),
++    ('pad', c_ulong * 2),
++]
++assert sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 8, sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E)
++assert alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 4, alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E)
++_ossl_old_des_ks_struct._fields_ = [
++    ('ks', N23_ossl_old_des_ks_struct4DOLLAR_10E),
++]
++assert sizeof(_ossl_old_des_ks_struct) == 8, sizeof(_ossl_old_des_ks_struct)
++assert alignment(_ossl_old_des_ks_struct) == 4, alignment(_ossl_old_des_ks_struct)
++_ossl_old_des_key_schedule = _ossl_old_des_ks_struct * 16
++class dh_st(Structure):
++    pass
++DH = dh_st
++class dh_method(Structure):
++    pass
++dh_method._fields_ = [
++    ('name', STRING),
++    ('generate_key', CFUNCTYPE(c_int, POINTER(DH))),
++    ('compute_key', CFUNCTYPE(c_int, POINTER(c_ubyte), POINTER(BIGNUM), POINTER(DH))),
++    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DH), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('init', CFUNCTYPE(c_int, POINTER(DH))),
++    ('finish', CFUNCTYPE(c_int, POINTER(DH))),
++    ('flags', c_int),
++    ('app_data', STRING),
++]
++assert sizeof(dh_method) == 32, sizeof(dh_method)
++assert alignment(dh_method) == 4, alignment(dh_method)
++DH_METHOD = dh_method
++class engine_st(Structure):
++    pass
++ENGINE = engine_st
++dh_st._fields_ = [
++    ('pad', c_int),
++    ('version', c_int),
++    ('p', POINTER(BIGNUM)),
++    ('g', POINTER(BIGNUM)),
++    ('length', c_long),
++    ('pub_key', POINTER(BIGNUM)),
++    ('priv_key', POINTER(BIGNUM)),
++    ('flags', c_int),
++    ('method_mont_p', STRING),
++    ('q', POINTER(BIGNUM)),
++    ('j', POINTER(BIGNUM)),
++    ('seed', POINTER(c_ubyte)),
++    ('seedlen', c_int),
++    ('counter', POINTER(BIGNUM)),
++    ('references', c_int),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('meth', POINTER(DH_METHOD)),
++    ('engine', POINTER(ENGINE)),
++]
++assert sizeof(dh_st) == 76, sizeof(dh_st)
++assert alignment(dh_st) == 4, alignment(dh_st)
++class dsa_st(Structure):
++    pass
++DSA = dsa_st
++class DSA_SIG_st(Structure):
++    pass
++DSA_SIG_st._fields_ = [
++    ('r', POINTER(BIGNUM)),
++    ('s', POINTER(BIGNUM)),
++]
++assert sizeof(DSA_SIG_st) == 8, sizeof(DSA_SIG_st)
++assert alignment(DSA_SIG_st) == 4, alignment(DSA_SIG_st)
++DSA_SIG = DSA_SIG_st
++class dsa_method(Structure):
++    pass
++dsa_method._fields_ = [
++    ('name', STRING),
++    ('dsa_do_sign', CFUNCTYPE(POINTER(DSA_SIG), POINTER(c_ubyte), c_int, POINTER(DSA))),
++    ('dsa_sign_setup', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BN_CTX), POINTER(POINTER(BIGNUM)), POINTER(POINTER(BIGNUM)))),
++    ('dsa_do_verify', CFUNCTYPE(c_int, POINTER(c_ubyte), c_int, POINTER(DSA_SIG), POINTER(DSA))),
++    ('dsa_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('init', CFUNCTYPE(c_int, POINTER(DSA))),
++    ('finish', CFUNCTYPE(c_int, POINTER(DSA))),
++    ('flags', c_int),
++    ('app_data', STRING),
++]
++assert sizeof(dsa_method) == 40, sizeof(dsa_method)
++assert alignment(dsa_method) == 4, alignment(dsa_method)
++DSA_METHOD = dsa_method
++dsa_st._fields_ = [
++    ('pad', c_int),
++    ('version', c_long),
++    ('write_params', c_int),
++    ('p', POINTER(BIGNUM)),
++    ('q', POINTER(BIGNUM)),
++    ('g', POINTER(BIGNUM)),
++    ('pub_key', POINTER(BIGNUM)),
++    ('priv_key', POINTER(BIGNUM)),
++    ('kinv', POINTER(BIGNUM)),
++    ('r', POINTER(BIGNUM)),
++    ('flags', c_int),
++    ('method_mont_p', STRING),
++    ('references', c_int),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('meth', POINTER(DSA_METHOD)),
++    ('engine', POINTER(ENGINE)),
++]
++assert sizeof(dsa_st) == 68, sizeof(dsa_st)
++assert alignment(dsa_st) == 4, alignment(dsa_st)
++class evp_pkey_st(Structure):
++    pass
++class N11evp_pkey_st4DOLLAR_12E(Union):
++    pass
++class rsa_st(Structure):
++    pass
++N11evp_pkey_st4DOLLAR_12E._fields_ = [
++    ('ptr', STRING),
++    ('rsa', POINTER(rsa_st)),
++    ('dsa', POINTER(dsa_st)),
++    ('dh', POINTER(dh_st)),
++]
++assert sizeof(N11evp_pkey_st4DOLLAR_12E) == 4, sizeof(N11evp_pkey_st4DOLLAR_12E)
++assert alignment(N11evp_pkey_st4DOLLAR_12E) == 4, alignment(N11evp_pkey_st4DOLLAR_12E)
++evp_pkey_st._fields_ = [
++    ('type', c_int),
++    ('save_type', c_int),
++    ('references', c_int),
++    ('pkey', N11evp_pkey_st4DOLLAR_12E),
++    ('save_parameters', c_int),
++    ('attributes', POINTER(STACK)),
++]
++assert sizeof(evp_pkey_st) == 24, sizeof(evp_pkey_st)
++assert alignment(evp_pkey_st) == 4, alignment(evp_pkey_st)
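++# rsa_st is declared just above solely so the evp_pkey_st union can hold
++# POINTER(rsa_st); its _fields_ are assigned much further down, after the
++# RSA_METHOD vtable that rsa_st itself references. ctypes permits
++# POINTER(T) to a Structure whose layout is not yet set, which is what lets
++# this topologically sorted, generator-style ordering work at import time.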
++class env_md_st(Structure):
++    pass
++class env_md_ctx_st(Structure):
++    pass
++EVP_MD_CTX = env_md_ctx_st
++env_md_st._fields_ = [
++    ('type', c_int),
++    ('pkey_type', c_int),
++    ('md_size', c_int),
++    ('flags', c_ulong),
++    ('init', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
++    ('update', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), c_void_p, c_ulong)),
++    ('final', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(c_ubyte))),
++    ('copy', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(EVP_MD_CTX))),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
++    ('sign', CFUNCTYPE(c_int)),
++    ('verify', CFUNCTYPE(c_int)),
++    ('required_pkey_type', c_int * 5),
++    ('block_size', c_int),
++    ('ctx_size', c_int),
++]
++assert sizeof(env_md_st) == 72, sizeof(env_md_st)
++assert alignment(env_md_st) == 4, alignment(env_md_st)
++EVP_MD = env_md_st
++env_md_ctx_st._fields_ = [
++    ('digest', POINTER(EVP_MD)),
++    ('engine', POINTER(ENGINE)),
++    ('flags', c_ulong),
++    ('md_data', c_void_p),
++]
++assert sizeof(env_md_ctx_st) == 16, sizeof(env_md_ctx_st)
++assert alignment(env_md_ctx_st) == 4, alignment(env_md_ctx_st)
++class evp_cipher_st(Structure):
++    pass
++class evp_cipher_ctx_st(Structure):
++    pass
++EVP_CIPHER_CTX = evp_cipher_ctx_st
++evp_cipher_st._fields_ = [
++    ('nid', c_int),
++    ('block_size', c_int),
++    ('key_len', c_int),
++    ('iv_len', c_int),
++    ('flags', c_ulong),
++    ('init', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_int)),
++    ('do_cipher', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_uint)),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX))),
++    ('ctx_size', c_int),
++    ('set_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
++    ('get_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
++    ('ctrl', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), c_int, c_int, c_void_p)),
++    ('app_data', c_void_p),
++]
++assert sizeof(evp_cipher_st) == 52, sizeof(evp_cipher_st)
++assert alignment(evp_cipher_st) == 4, alignment(evp_cipher_st)
++class evp_cipher_info_st(Structure):
++    pass
++EVP_CIPHER = evp_cipher_st
++evp_cipher_info_st._fields_ = [
++    ('cipher', POINTER(EVP_CIPHER)),
++    ('iv', c_ubyte * 16),
++]
++assert sizeof(evp_cipher_info_st) == 20, sizeof(evp_cipher_info_st)
++assert alignment(evp_cipher_info_st) == 4, alignment(evp_cipher_info_st)
++EVP_CIPHER_INFO = evp_cipher_info_st
++evp_cipher_ctx_st._fields_ = [
++    ('cipher', POINTER(EVP_CIPHER)),
++    ('engine', POINTER(ENGINE)),
++    ('encrypt', c_int),
++    ('buf_len', c_int),
++    ('oiv', c_ubyte * 16),
++    ('iv', c_ubyte * 16),
++    ('buf', c_ubyte * 32),
++    ('num', c_int),
++    ('app_data', c_void_p),
++    ('key_len', c_int),
++    ('flags', c_ulong),
++    ('cipher_data', c_void_p),
++    ('final_used', c_int),
++    ('block_mask', c_int),
++    ('final', c_ubyte * 32),
++]
++assert sizeof(evp_cipher_ctx_st) == 140, sizeof(evp_cipher_ctx_st)
++assert alignment(evp_cipher_ctx_st) == 4, alignment(evp_cipher_ctx_st)
++class evp_Encode_Ctx_st(Structure):
++    pass
++evp_Encode_Ctx_st._fields_ = [
++    ('num', c_int),
++    ('length', c_int),
++    ('enc_data', c_ubyte * 80),
++    ('line_num', c_int),
++    ('expect_nl', c_int),
++]
++assert sizeof(evp_Encode_Ctx_st) == 96, sizeof(evp_Encode_Ctx_st)
++assert alignment(evp_Encode_Ctx_st) == 4, alignment(evp_Encode_Ctx_st)
++EVP_ENCODE_CTX = evp_Encode_Ctx_st
++EVP_PBE_KEYGEN = CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), STRING, c_int, POINTER(ASN1_TYPE), POINTER(EVP_CIPHER), POINTER(EVP_MD), c_int)
++class lhash_node_st(Structure):
++    pass
++lhash_node_st._fields_ = [
++    ('data', c_void_p),
++    ('next', POINTER(lhash_node_st)),
++    ('hash', c_ulong),
++]
++assert sizeof(lhash_node_st) == 12, sizeof(lhash_node_st)
++assert alignment(lhash_node_st) == 4, alignment(lhash_node_st)
++LHASH_NODE = lhash_node_st
++LHASH_COMP_FN_TYPE = CFUNCTYPE(c_int, c_void_p, c_void_p)
++LHASH_HASH_FN_TYPE = CFUNCTYPE(c_ulong, c_void_p)
++LHASH_DOALL_FN_TYPE = CFUNCTYPE(None, c_void_p)
++LHASH_DOALL_ARG_FN_TYPE = CFUNCTYPE(None, c_void_p, c_void_p)
++class lhash_st(Structure):
++    pass
++lhash_st._fields_ = [
++    ('b', POINTER(POINTER(LHASH_NODE))),
++    ('comp', LHASH_COMP_FN_TYPE),
++    ('hash', LHASH_HASH_FN_TYPE),
++    ('num_nodes', c_uint),
++    ('num_alloc_nodes', c_uint),
++    ('p', c_uint),
++    ('pmax', c_uint),
++    ('up_load', c_ulong),
++    ('down_load', c_ulong),
++    ('num_items', c_ulong),
++    ('num_expands', c_ulong),
++    ('num_expand_reallocs', c_ulong),
++    ('num_contracts', c_ulong),
++    ('num_contract_reallocs', c_ulong),
++    ('num_hash_calls', c_ulong),
++    ('num_comp_calls', c_ulong),
++    ('num_insert', c_ulong),
++    ('num_replace', c_ulong),
++    ('num_delete', c_ulong),
++    ('num_no_delete', c_ulong),
++    ('num_retrieve', c_ulong),
++    ('num_retrieve_miss', c_ulong),
++    ('num_hash_comps', c_ulong),
++    ('error', c_int),
++]
++assert sizeof(lhash_st) == 96, sizeof(lhash_st)
++assert alignment(lhash_st) == 4, alignment(lhash_st)
++LHASH = lhash_st
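++# Sanity check on the asserted size: lhash_st has 24 members, and on this
++# 32-bit layout every one of them (pointer, function pointer, int, uint or
++# ulong) is 4 bytes, giving the expected 24 * 4 = 96.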
++class MD2state_st(Structure):
++    pass
++MD2state_st._fields_ = [
++    ('num', c_int),
++    ('data', c_ubyte * 16),
++    ('cksm', c_uint * 16),
++    ('state', c_uint * 16),
++]
++assert sizeof(MD2state_st) == 148, sizeof(MD2state_st)
++assert alignment(MD2state_st) == 4, alignment(MD2state_st)
++MD2_CTX = MD2state_st
++class MD4state_st(Structure):
++    pass
++MD4state_st._fields_ = [
++    ('A', c_uint),
++    ('B', c_uint),
++    ('C', c_uint),
++    ('D', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(MD4state_st) == 92, sizeof(MD4state_st)
++assert alignment(MD4state_st) == 4, alignment(MD4state_st)
++MD4_CTX = MD4state_st
++class MD5state_st(Structure):
++    pass
++MD5state_st._fields_ = [
++    ('A', c_uint),
++    ('B', c_uint),
++    ('C', c_uint),
++    ('D', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(MD5state_st) == 92, sizeof(MD5state_st)
++assert alignment(MD5state_st) == 4, alignment(MD5state_st)
++MD5_CTX = MD5state_st
++class mdc2_ctx_st(Structure):
++    pass
++mdc2_ctx_st._fields_ = [
++    ('num', c_int),
++    ('data', c_ubyte * 8),
++    ('h', DES_cblock),
++    ('hh', DES_cblock),
++    ('pad_type', c_int),
++]
++assert sizeof(mdc2_ctx_st) == 32, sizeof(mdc2_ctx_st)
++assert alignment(mdc2_ctx_st) == 4, alignment(mdc2_ctx_st)
++MDC2_CTX = mdc2_ctx_st
++class obj_name_st(Structure):
++    pass
++obj_name_st._fields_ = [
++    ('type', c_int),
++    ('alias', c_int),
++    ('name', STRING),
++    ('data', STRING),
++]
++assert sizeof(obj_name_st) == 16, sizeof(obj_name_st)
++assert alignment(obj_name_st) == 4, alignment(obj_name_st)
++OBJ_NAME = obj_name_st
++ASN1_TIME = asn1_string_st
++ASN1_NULL = c_int
++EVP_PKEY = evp_pkey_st
++class x509_st(Structure):
++    pass
++X509 = x509_st
++class X509_algor_st(Structure):
++    pass
++X509_ALGOR = X509_algor_st
++class X509_crl_st(Structure):
++    pass
++X509_CRL = X509_crl_st
++class X509_name_st(Structure):
++    pass
++X509_NAME = X509_name_st
++class x509_store_st(Structure):
++    pass
++X509_STORE = x509_store_st
++class x509_store_ctx_st(Structure):
++    pass
++X509_STORE_CTX = x509_store_ctx_st
++engine_st._fields_ = [
++]
++class PEM_Encode_Seal_st(Structure):
++    pass
++PEM_Encode_Seal_st._fields_ = [
++    ('encode', EVP_ENCODE_CTX),
++    ('md', EVP_MD_CTX),
++    ('cipher', EVP_CIPHER_CTX),
++]
++assert sizeof(PEM_Encode_Seal_st) == 252, sizeof(PEM_Encode_Seal_st)
++assert alignment(PEM_Encode_Seal_st) == 4, alignment(PEM_Encode_Seal_st)
++PEM_ENCODE_SEAL_CTX = PEM_Encode_Seal_st
++class pem_recip_st(Structure):
++    pass
++pem_recip_st._fields_ = [
++    ('name', STRING),
++    ('dn', POINTER(X509_NAME)),
++    ('cipher', c_int),
++    ('key_enc', c_int),
++]
++assert sizeof(pem_recip_st) == 16, sizeof(pem_recip_st)
++assert alignment(pem_recip_st) == 4, alignment(pem_recip_st)
++PEM_USER = pem_recip_st
++class pem_ctx_st(Structure):
++    pass
++class N10pem_ctx_st4DOLLAR_16E(Structure):
++    pass
++N10pem_ctx_st4DOLLAR_16E._fields_ = [
++    ('version', c_int),
++    ('mode', c_int),
++]
++assert sizeof(N10pem_ctx_st4DOLLAR_16E) == 8, sizeof(N10pem_ctx_st4DOLLAR_16E)
++assert alignment(N10pem_ctx_st4DOLLAR_16E) == 4, alignment(N10pem_ctx_st4DOLLAR_16E)
++class N10pem_ctx_st4DOLLAR_17E(Structure):
++    pass
++N10pem_ctx_st4DOLLAR_17E._fields_ = [
++    ('cipher', c_int),
++]
++assert sizeof(N10pem_ctx_st4DOLLAR_17E) == 4, sizeof(N10pem_ctx_st4DOLLAR_17E)
++assert alignment(N10pem_ctx_st4DOLLAR_17E) == 4, alignment(N10pem_ctx_st4DOLLAR_17E)
++pem_ctx_st._fields_ = [
++    ('type', c_int),
++    ('proc_type', N10pem_ctx_st4DOLLAR_16E),
++    ('domain', STRING),
++    ('DEK_info', N10pem_ctx_st4DOLLAR_17E),
++    ('originator', POINTER(PEM_USER)),
++    ('num_recipient', c_int),
++    ('recipient', POINTER(POINTER(PEM_USER))),
++    ('x509_chain', POINTER(STACK)),
++    ('md', POINTER(EVP_MD)),
++    ('md_enc', c_int),
++    ('md_len', c_int),
++    ('md_data', STRING),
++    ('dec', POINTER(EVP_CIPHER)),
++    ('key_len', c_int),
++    ('key', POINTER(c_ubyte)),
++    ('data_enc', c_int),
++    ('data_len', c_int),
++    ('data', POINTER(c_ubyte)),
++]
++assert sizeof(pem_ctx_st) == 76, sizeof(pem_ctx_st)
++assert alignment(pem_ctx_st) == 4, alignment(pem_ctx_st)
++PEM_CTX = pem_ctx_st
++pem_password_cb = CFUNCTYPE(c_int, STRING, c_int, c_int, c_void_p)
++class pkcs7_issuer_and_serial_st(Structure):
++    pass
++pkcs7_issuer_and_serial_st._fields_ = [
++    ('issuer', POINTER(X509_NAME)),
++    ('serial', POINTER(ASN1_INTEGER)),
++]
++assert sizeof(pkcs7_issuer_and_serial_st) == 8, sizeof(pkcs7_issuer_and_serial_st)
++assert alignment(pkcs7_issuer_and_serial_st) == 4, alignment(pkcs7_issuer_and_serial_st)
++PKCS7_ISSUER_AND_SERIAL = pkcs7_issuer_and_serial_st
++class pkcs7_signer_info_st(Structure):
++    pass
++pkcs7_signer_info_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
++    ('digest_alg', POINTER(X509_ALGOR)),
++    ('auth_attr', POINTER(STACK)),
++    ('digest_enc_alg', POINTER(X509_ALGOR)),
++    ('enc_digest', POINTER(ASN1_OCTET_STRING)),
++    ('unauth_attr', POINTER(STACK)),
++    ('pkey', POINTER(EVP_PKEY)),
++]
++assert sizeof(pkcs7_signer_info_st) == 32, sizeof(pkcs7_signer_info_st)
++assert alignment(pkcs7_signer_info_st) == 4, alignment(pkcs7_signer_info_st)
++PKCS7_SIGNER_INFO = pkcs7_signer_info_st
++class pkcs7_recip_info_st(Structure):
++    pass
++pkcs7_recip_info_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
++    ('key_enc_algor', POINTER(X509_ALGOR)),
++    ('enc_key', POINTER(ASN1_OCTET_STRING)),
++    ('cert', POINTER(X509)),
++]
++assert sizeof(pkcs7_recip_info_st) == 20, sizeof(pkcs7_recip_info_st)
++assert alignment(pkcs7_recip_info_st) == 4, alignment(pkcs7_recip_info_st)
++PKCS7_RECIP_INFO = pkcs7_recip_info_st
++class pkcs7_signed_st(Structure):
++    pass
++class pkcs7_st(Structure):
++    pass
++pkcs7_signed_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('md_algs', POINTER(STACK)),
++    ('cert', POINTER(STACK)),
++    ('crl', POINTER(STACK)),
++    ('signer_info', POINTER(STACK)),
++    ('contents', POINTER(pkcs7_st)),
++]
++assert sizeof(pkcs7_signed_st) == 24, sizeof(pkcs7_signed_st)
++assert alignment(pkcs7_signed_st) == 4, alignment(pkcs7_signed_st)
++PKCS7_SIGNED = pkcs7_signed_st
++class pkcs7_enc_content_st(Structure):
++    pass
++pkcs7_enc_content_st._fields_ = [
++    ('content_type', POINTER(ASN1_OBJECT)),
++    ('algorithm', POINTER(X509_ALGOR)),
++    ('enc_data', POINTER(ASN1_OCTET_STRING)),
++    ('cipher', POINTER(EVP_CIPHER)),
++]
++assert sizeof(pkcs7_enc_content_st) == 16, sizeof(pkcs7_enc_content_st)
++assert alignment(pkcs7_enc_content_st) == 4, alignment(pkcs7_enc_content_st)
++PKCS7_ENC_CONTENT = pkcs7_enc_content_st
++class pkcs7_enveloped_st(Structure):
++    pass
++pkcs7_enveloped_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('recipientinfo', POINTER(STACK)),
++    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
++]
++assert sizeof(pkcs7_enveloped_st) == 12, sizeof(pkcs7_enveloped_st)
++assert alignment(pkcs7_enveloped_st) == 4, alignment(pkcs7_enveloped_st)
++PKCS7_ENVELOPE = pkcs7_enveloped_st
++class pkcs7_signedandenveloped_st(Structure):
++    pass
++pkcs7_signedandenveloped_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('md_algs', POINTER(STACK)),
++    ('cert', POINTER(STACK)),
++    ('crl', POINTER(STACK)),
++    ('signer_info', POINTER(STACK)),
++    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
++    ('recipientinfo', POINTER(STACK)),
++]
++assert sizeof(pkcs7_signedandenveloped_st) == 28, sizeof(pkcs7_signedandenveloped_st)
++assert alignment(pkcs7_signedandenveloped_st) == 4, alignment(pkcs7_signedandenveloped_st)
++PKCS7_SIGN_ENVELOPE = pkcs7_signedandenveloped_st
++class pkcs7_digest_st(Structure):
++    pass
++pkcs7_digest_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('md', POINTER(X509_ALGOR)),
++    ('contents', POINTER(pkcs7_st)),
++    ('digest', POINTER(ASN1_OCTET_STRING)),
++]
++assert sizeof(pkcs7_digest_st) == 16, sizeof(pkcs7_digest_st)
++assert alignment(pkcs7_digest_st) == 4, alignment(pkcs7_digest_st)
++PKCS7_DIGEST = pkcs7_digest_st
++class pkcs7_encrypted_st(Structure):
++    pass
++pkcs7_encrypted_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
++]
++assert sizeof(pkcs7_encrypted_st) == 8, sizeof(pkcs7_encrypted_st)
++assert alignment(pkcs7_encrypted_st) == 4, alignment(pkcs7_encrypted_st)
++PKCS7_ENCRYPT = pkcs7_encrypted_st
++class N8pkcs7_st4DOLLAR_15E(Union):
++    pass
++N8pkcs7_st4DOLLAR_15E._fields_ = [
++    ('ptr', STRING),
++    ('data', POINTER(ASN1_OCTET_STRING)),
++    ('sign', POINTER(PKCS7_SIGNED)),
++    ('enveloped', POINTER(PKCS7_ENVELOPE)),
++    ('signed_and_enveloped', POINTER(PKCS7_SIGN_ENVELOPE)),
++    ('digest', POINTER(PKCS7_DIGEST)),
++    ('encrypted', POINTER(PKCS7_ENCRYPT)),
++    ('other', POINTER(ASN1_TYPE)),
++]
++assert sizeof(N8pkcs7_st4DOLLAR_15E) == 4, sizeof(N8pkcs7_st4DOLLAR_15E)
++assert alignment(N8pkcs7_st4DOLLAR_15E) == 4, alignment(N8pkcs7_st4DOLLAR_15E)
++pkcs7_st._fields_ = [
++    ('asn1', POINTER(c_ubyte)),
++    ('length', c_long),
++    ('state', c_int),
++    ('detached', c_int),
++    ('type', POINTER(ASN1_OBJECT)),
++    ('d', N8pkcs7_st4DOLLAR_15E),
++]
++assert sizeof(pkcs7_st) == 24, sizeof(pkcs7_st)
++assert alignment(pkcs7_st) == 4, alignment(pkcs7_st)
++PKCS7 = pkcs7_st
++class rc2_key_st(Structure):
++    pass
++rc2_key_st._fields_ = [
++    ('data', c_uint * 64),
++]
++assert sizeof(rc2_key_st) == 256, sizeof(rc2_key_st)
++assert alignment(rc2_key_st) == 4, alignment(rc2_key_st)
++RC2_KEY = rc2_key_st
++class rc4_key_st(Structure):
++    pass
++rc4_key_st._fields_ = [
++    ('x', c_ubyte),
++    ('y', c_ubyte),
++    ('data', c_ubyte * 256),
++]
++assert sizeof(rc4_key_st) == 258, sizeof(rc4_key_st)
++assert alignment(rc4_key_st) == 1, alignment(rc4_key_st)
++RC4_KEY = rc4_key_st
++class rc5_key_st(Structure):
++    pass
++rc5_key_st._fields_ = [
++    ('rounds', c_int),
++    ('data', c_ulong * 34),
++]
++assert sizeof(rc5_key_st) == 140, sizeof(rc5_key_st)
++assert alignment(rc5_key_st) == 4, alignment(rc5_key_st)
++RC5_32_KEY = rc5_key_st
++class RIPEMD160state_st(Structure):
++    pass
++RIPEMD160state_st._fields_ = [
++    ('A', c_uint),
++    ('B', c_uint),
++    ('C', c_uint),
++    ('D', c_uint),
++    ('E', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(RIPEMD160state_st) == 96, sizeof(RIPEMD160state_st)
++assert alignment(RIPEMD160state_st) == 4, alignment(RIPEMD160state_st)
++RIPEMD160_CTX = RIPEMD160state_st
++RSA = rsa_st
++class rsa_meth_st(Structure):
++    pass
++rsa_meth_st._fields_ = [
++    ('name', STRING),
++    ('rsa_pub_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_pub_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_priv_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_priv_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
++    ('rsa_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(RSA))),
++    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
++    ('init', CFUNCTYPE(c_int, POINTER(RSA))),
++    ('finish', CFUNCTYPE(c_int, POINTER(RSA))),
++    ('flags', c_int),
++    ('app_data', STRING),
++    ('rsa_sign', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), POINTER(c_uint), POINTER(RSA))),
++    ('rsa_verify', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), c_uint, POINTER(RSA))),
++]
++assert sizeof(rsa_meth_st) == 52, sizeof(rsa_meth_st)
++assert alignment(rsa_meth_st) == 4, alignment(rsa_meth_st)
++RSA_METHOD = rsa_meth_st
++rsa_st._fields_ = [
++    ('pad', c_int),
++    ('version', c_long),
++    ('meth', POINTER(RSA_METHOD)),
++    ('engine', POINTER(ENGINE)),
++    ('n', POINTER(BIGNUM)),
++    ('e', POINTER(BIGNUM)),
++    ('d', POINTER(BIGNUM)),
++    ('p', POINTER(BIGNUM)),
++    ('q', POINTER(BIGNUM)),
++    ('dmp1', POINTER(BIGNUM)),
++    ('dmq1', POINTER(BIGNUM)),
++    ('iqmp', POINTER(BIGNUM)),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('references', c_int),
++    ('flags', c_int),
++    ('_method_mod_n', POINTER(BN_MONT_CTX)),
++    ('_method_mod_p', POINTER(BN_MONT_CTX)),
++    ('_method_mod_q', POINTER(BN_MONT_CTX)),
++    ('bignum_data', STRING),
++    ('blinding', POINTER(BN_BLINDING)),
++]
++assert sizeof(rsa_st) == 84, sizeof(rsa_st)
++assert alignment(rsa_st) == 4, alignment(rsa_st)
++openssl_fptr = CFUNCTYPE(None)
++class SHAstate_st(Structure):
++    pass
++SHAstate_st._fields_ = [
++    ('h0', c_uint),
++    ('h1', c_uint),
++    ('h2', c_uint),
++    ('h3', c_uint),
++    ('h4', c_uint),
++    ('Nl', c_uint),
++    ('Nh', c_uint),
++    ('data', c_uint * 16),
++    ('num', c_int),
++]
++assert sizeof(SHAstate_st) == 96, sizeof(SHAstate_st)
++assert alignment(SHAstate_st) == 4, alignment(SHAstate_st)
++SHA_CTX = SHAstate_st
++class ssl_st(Structure):
++    pass
++ssl_crock_st = POINTER(ssl_st)
++class ssl_cipher_st(Structure):
++    pass
++ssl_cipher_st._fields_ = [
++    ('valid', c_int),
++    ('name', STRING),
++    ('id', c_ulong),
++    ('algorithms', c_ulong),
++    ('algo_strength', c_ulong),
++    ('algorithm2', c_ulong),
++    ('strength_bits', c_int),
++    ('alg_bits', c_int),
++    ('mask', c_ulong),
++    ('mask_strength', c_ulong),
++]
++assert sizeof(ssl_cipher_st) == 40, sizeof(ssl_cipher_st)
++assert alignment(ssl_cipher_st) == 4, alignment(ssl_cipher_st)
++SSL_CIPHER = ssl_cipher_st
++SSL = ssl_st
++class ssl_ctx_st(Structure):
++    pass
++SSL_CTX = ssl_ctx_st
++class ssl_method_st(Structure):
++    pass
++class ssl3_enc_method(Structure):
++    pass
++ssl_method_st._fields_ = [
++    ('version', c_int),
++    ('ssl_new', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_clear', CFUNCTYPE(None, POINTER(SSL))),
++    ('ssl_free', CFUNCTYPE(None, POINTER(SSL))),
++    ('ssl_accept', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_connect', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_read', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
++    ('ssl_peek', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
++    ('ssl_write', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
++    ('ssl_shutdown', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_renegotiate', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_renegotiate_check', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('ssl_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, c_long, c_void_p)),
++    ('ssl_ctx_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, c_long, c_void_p)),
++    ('get_cipher_by_char', CFUNCTYPE(POINTER(SSL_CIPHER), POINTER(c_ubyte))),
++    ('put_cipher_by_char', CFUNCTYPE(c_int, POINTER(SSL_CIPHER), POINTER(c_ubyte))),
++    ('ssl_pending', CFUNCTYPE(c_int, POINTER(SSL))),
++    ('num_ciphers', CFUNCTYPE(c_int)),
++    ('get_cipher', CFUNCTYPE(POINTER(SSL_CIPHER), c_uint)),
++    ('get_ssl_method', CFUNCTYPE(POINTER(ssl_method_st), c_int)),
++    ('get_timeout', CFUNCTYPE(c_long)),
++    ('ssl3_enc', POINTER(ssl3_enc_method)),
++    ('ssl_version', CFUNCTYPE(c_int)),
++    ('ssl_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, CFUNCTYPE(None))),
++    ('ssl_ctx_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, CFUNCTYPE(None))),
++]
++assert sizeof(ssl_method_st) == 100, sizeof(ssl_method_st)
++assert alignment(ssl_method_st) == 4, alignment(ssl_method_st)
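++# Note the self-reference inside this vtable: get_ssl_method returns
++# POINTER(ssl_method_st) from within ssl_method_st's own _fields_ list,
++# which works because the class object already exists by the time the list
++# is built and assigned.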
++ssl3_enc_method._fields_ = [
++]
++SSL_METHOD = ssl_method_st
++class ssl_session_st(Structure):
++    pass
++class sess_cert_st(Structure):
++    pass
++ssl_session_st._fields_ = [
++    ('ssl_version', c_int),
++    ('key_arg_length', c_uint),
++    ('key_arg', c_ubyte * 8),
++    ('master_key_length', c_int),
++    ('master_key', c_ubyte * 48),
++    ('session_id_length', c_uint),
++    ('session_id', c_ubyte * 32),
++    ('sid_ctx_length', c_uint),
++    ('sid_ctx', c_ubyte * 32),
++    ('not_resumable', c_int),
++    ('sess_cert', POINTER(sess_cert_st)),
++    ('peer', POINTER(X509)),
++    ('verify_result', c_long),
++    ('references', c_int),
++    ('timeout', c_long),
++    ('time', c_long),
++    ('compress_meth', c_int),
++    ('cipher', POINTER(SSL_CIPHER)),
++    ('cipher_id', c_ulong),
++    ('ciphers', POINTER(STACK)),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('prev', POINTER(ssl_session_st)),
++    ('next', POINTER(ssl_session_st)),
++]
++assert sizeof(ssl_session_st) == 200, sizeof(ssl_session_st)
++assert alignment(ssl_session_st) == 4, alignment(ssl_session_st)
++sess_cert_st._fields_ = [
++]
++SSL_SESSION = ssl_session_st
++GEN_SESSION_CB = CFUNCTYPE(c_int, POINTER(SSL), POINTER(c_ubyte), POINTER(c_uint))
++class ssl_comp_st(Structure):
++    pass
++ssl_comp_st._fields_ = [
++    ('id', c_int),
++    ('name', STRING),
++    ('method', POINTER(COMP_METHOD)),
++]
++assert sizeof(ssl_comp_st) == 12, sizeof(ssl_comp_st)
++assert alignment(ssl_comp_st) == 4, alignment(ssl_comp_st)
++SSL_COMP = ssl_comp_st
++class N10ssl_ctx_st4DOLLAR_18E(Structure):
++    pass
++N10ssl_ctx_st4DOLLAR_18E._fields_ = [
++    ('sess_connect', c_int),
++    ('sess_connect_renegotiate', c_int),
++    ('sess_connect_good', c_int),
++    ('sess_accept', c_int),
++    ('sess_accept_renegotiate', c_int),
++    ('sess_accept_good', c_int),
++    ('sess_miss', c_int),
++    ('sess_timeout', c_int),
++    ('sess_cache_full', c_int),
++    ('sess_hit', c_int),
++    ('sess_cb_hit', c_int),
++]
++assert sizeof(N10ssl_ctx_st4DOLLAR_18E) == 44, sizeof(N10ssl_ctx_st4DOLLAR_18E)
++assert alignment(N10ssl_ctx_st4DOLLAR_18E) == 4, alignment(N10ssl_ctx_st4DOLLAR_18E)
++class cert_st(Structure):
++    pass
++ssl_ctx_st._fields_ = [
++    ('method', POINTER(SSL_METHOD)),
++    ('cipher_list', POINTER(STACK)),
++    ('cipher_list_by_id', POINTER(STACK)),
++    ('cert_store', POINTER(x509_store_st)),
++    ('sessions', POINTER(lhash_st)),
++    ('session_cache_size', c_ulong),
++    ('session_cache_head', POINTER(ssl_session_st)),
++    ('session_cache_tail', POINTER(ssl_session_st)),
++    ('session_cache_mode', c_int),
++    ('session_timeout', c_long),
++    ('new_session_cb', CFUNCTYPE(c_int, POINTER(ssl_st), POINTER(SSL_SESSION))),
++    ('remove_session_cb', CFUNCTYPE(None, POINTER(ssl_ctx_st), POINTER(SSL_SESSION))),
++    ('get_session_cb', CFUNCTYPE(POINTER(SSL_SESSION), POINTER(ssl_st), POINTER(c_ubyte), c_int, POINTER(c_int))),
++    ('stats', N10ssl_ctx_st4DOLLAR_18E),
++    ('references', c_int),
++    ('app_verify_callback', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), c_void_p)),
++    ('app_verify_arg', c_void_p),
++    ('default_passwd_callback', POINTER(pem_password_cb)),
++    ('default_passwd_callback_userdata', c_void_p),
++    ('client_cert_cb', CFUNCTYPE(c_int, POINTER(SSL), POINTER(POINTER(X509)), POINTER(POINTER(EVP_PKEY)))),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('rsa_md5', POINTER(EVP_MD)),
++    ('md5', POINTER(EVP_MD)),
++    ('sha1', POINTER(EVP_MD)),
++    ('extra_certs', POINTER(STACK)),
++    ('comp_methods', POINTER(STACK)),
++    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
++    ('client_CA', POINTER(STACK)),
++    ('options', c_ulong),
++    ('mode', c_ulong),
++    ('max_cert_list', c_long),
++    ('cert', POINTER(cert_st)),
++    ('read_ahead', c_int),
++    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
++    ('msg_callback_arg', c_void_p),
++    ('verify_mode', c_int),
++    ('verify_depth', c_int),
++    ('sid_ctx_length', c_uint),
++    ('sid_ctx', c_ubyte * 32),
++    ('default_verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('generate_session_id', GEN_SESSION_CB),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('quiet_shutdown', c_int),
++]
++assert sizeof(ssl_ctx_st) == 248, sizeof(ssl_ctx_st)
++assert alignment(ssl_ctx_st) == 4, alignment(ssl_ctx_st)
++cert_st._fields_ = [
++]
++class ssl2_state_st(Structure):
++    pass
++class ssl3_state_st(Structure):
++    pass
++ssl_st._fields_ = [
++    ('version', c_int),
++    ('type', c_int),
++    ('method', POINTER(SSL_METHOD)),
++    ('rbio', POINTER(BIO)),
++    ('wbio', POINTER(BIO)),
++    ('bbio', POINTER(BIO)),
++    ('rwstate', c_int),
++    ('in_handshake', c_int),
++    ('handshake_func', CFUNCTYPE(c_int)),
++    ('server', c_int),
++    ('new_session', c_int),
++    ('quiet_shutdown', c_int),
++    ('shutdown', c_int),
++    ('state', c_int),
++    ('rstate', c_int),
++    ('init_buf', POINTER(BUF_MEM)),
++    ('init_msg', c_void_p),
++    ('init_num', c_int),
++    ('init_off', c_int),
++    ('packet', POINTER(c_ubyte)),
++    ('packet_length', c_uint),
++    ('s2', POINTER(ssl2_state_st)),
++    ('s3', POINTER(ssl3_state_st)),
++    ('read_ahead', c_int),
++    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
++    ('msg_callback_arg', c_void_p),
++    ('hit', c_int),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('cipher_list', POINTER(STACK)),
++    ('cipher_list_by_id', POINTER(STACK)),
++    ('enc_read_ctx', POINTER(EVP_CIPHER_CTX)),
++    ('read_hash', POINTER(EVP_MD)),
++    ('expand', POINTER(COMP_CTX)),
++    ('enc_write_ctx', POINTER(EVP_CIPHER_CTX)),
++    ('write_hash', POINTER(EVP_MD)),
++    ('compress', POINTER(COMP_CTX)),
++    ('cert', POINTER(cert_st)),
++    ('sid_ctx_length', c_uint),
++    ('sid_ctx', c_ubyte * 32),
++    ('session', POINTER(SSL_SESSION)),
++    ('generate_session_id', GEN_SESSION_CB),
++    ('verify_mode', c_int),
++    ('verify_depth', c_int),
++    ('verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
++    ('error', c_int),
++    ('error_code', c_int),
++    ('ctx', POINTER(SSL_CTX)),
++    ('debug', c_int),
++    ('verify_result', c_long),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('client_CA', POINTER(STACK)),
++    ('references', c_int),
++    ('options', c_ulong),
++    ('mode', c_ulong),
++    ('max_cert_list', c_long),
++    ('first_packet', c_int),
++    ('client_version', c_int),
++]
++assert sizeof(ssl_st) == 268, sizeof(ssl_st)
++assert alignment(ssl_st) == 4, alignment(ssl_st)
++class N13ssl2_state_st4DOLLAR_19E(Structure):
++    pass
++N13ssl2_state_st4DOLLAR_19E._fields_ = [
++    ('conn_id_length', c_uint),
++    ('cert_type', c_uint),
++    ('cert_length', c_uint),
++    ('csl', c_uint),
++    ('clear', c_uint),
++    ('enc', c_uint),
++    ('ccl', c_ubyte * 32),
++    ('cipher_spec_length', c_uint),
++    ('session_id_length', c_uint),
++    ('clen', c_uint),
++    ('rlen', c_uint),
++]
++assert sizeof(N13ssl2_state_st4DOLLAR_19E) == 72, sizeof(N13ssl2_state_st4DOLLAR_19E)
++assert alignment(N13ssl2_state_st4DOLLAR_19E) == 4, alignment(N13ssl2_state_st4DOLLAR_19E)
++ssl2_state_st._fields_ = [
++    ('three_byte_header', c_int),
++    ('clear_text', c_int),
++    ('escape', c_int),
++    ('ssl2_rollback', c_int),
++    ('wnum', c_uint),
++    ('wpend_tot', c_int),
++    ('wpend_buf', POINTER(c_ubyte)),
++    ('wpend_off', c_int),
++    ('wpend_len', c_int),
++    ('wpend_ret', c_int),
++    ('rbuf_left', c_int),
++    ('rbuf_offs', c_int),
++    ('rbuf', POINTER(c_ubyte)),
++    ('wbuf', POINTER(c_ubyte)),
++    ('write_ptr', POINTER(c_ubyte)),
++    ('padding', c_uint),
++    ('rlength', c_uint),
++    ('ract_data_length', c_int),
++    ('wlength', c_uint),
++    ('wact_data_length', c_int),
++    ('ract_data', POINTER(c_ubyte)),
++    ('wact_data', POINTER(c_ubyte)),
++    ('mac_data', POINTER(c_ubyte)),
++    ('read_key', POINTER(c_ubyte)),
++    ('write_key', POINTER(c_ubyte)),
++    ('challenge_length', c_uint),
++    ('challenge', c_ubyte * 32),
++    ('conn_id_length', c_uint),
++    ('conn_id', c_ubyte * 16),
++    ('key_material_length', c_uint),
++    ('key_material', c_ubyte * 48),
++    ('read_sequence', c_ulong),
++    ('write_sequence', c_ulong),
++    ('tmp', N13ssl2_state_st4DOLLAR_19E),
++]
++assert sizeof(ssl2_state_st) == 288, sizeof(ssl2_state_st)
++assert alignment(ssl2_state_st) == 4, alignment(ssl2_state_st)
++SSL2_STATE = ssl2_state_st
++class ssl3_record_st(Structure):
++    pass
++ssl3_record_st._fields_ = [
++    ('type', c_int),
++    ('length', c_uint),
++    ('off', c_uint),
++    ('data', POINTER(c_ubyte)),
++    ('input', POINTER(c_ubyte)),
++    ('comp', POINTER(c_ubyte)),
++]
++assert sizeof(ssl3_record_st) == 24, sizeof(ssl3_record_st)
++assert alignment(ssl3_record_st) == 4, alignment(ssl3_record_st)
++SSL3_RECORD = ssl3_record_st
++class ssl3_buffer_st(Structure):
++    pass
++size_t = __darwin_size_t
++ssl3_buffer_st._fields_ = [
++    ('buf', POINTER(c_ubyte)),
++    ('len', size_t),
++    ('offset', c_int),
++    ('left', c_int),
++]
++assert sizeof(ssl3_buffer_st) == 16, sizeof(ssl3_buffer_st)
++assert alignment(ssl3_buffer_st) == 4, alignment(ssl3_buffer_st)
++SSL3_BUFFER = ssl3_buffer_st
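++# The 'size_t = __darwin_size_t' alias above ties these bindings to the
++# Darwin headers they were generated from (__darwin_size_t is presumably
++# aliased earlier in this generated file); together with the pervasive
++# 4-byte pointer asserts it marks the whole dump as 32-bit Mac OS X
++# specific.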
++class N13ssl3_state_st4DOLLAR_20E(Structure):
++    pass
++N13ssl3_state_st4DOLLAR_20E._fields_ = [
++    ('cert_verify_md', c_ubyte * 72),
++    ('finish_md', c_ubyte * 72),
++    ('finish_md_len', c_int),
++    ('peer_finish_md', c_ubyte * 72),
++    ('peer_finish_md_len', c_int),
++    ('message_size', c_ulong),
++    ('message_type', c_int),
++    ('new_cipher', POINTER(SSL_CIPHER)),
++    ('dh', POINTER(DH)),
++    ('next_state', c_int),
++    ('reuse_message', c_int),
++    ('cert_req', c_int),
++    ('ctype_num', c_int),
++    ('ctype', c_char * 7),
++    ('ca_names', POINTER(STACK)),
++    ('use_rsa_tmp', c_int),
++    ('key_block_length', c_int),
++    ('key_block', POINTER(c_ubyte)),
++    ('new_sym_enc', POINTER(EVP_CIPHER)),
++    ('new_hash', POINTER(EVP_MD)),
++    ('new_compression', POINTER(SSL_COMP)),
++    ('cert_request', c_int),
++]
++assert sizeof(N13ssl3_state_st4DOLLAR_20E) == 296, sizeof(N13ssl3_state_st4DOLLAR_20E)
++assert alignment(N13ssl3_state_st4DOLLAR_20E) == 4, alignment(N13ssl3_state_st4DOLLAR_20E)
++ssl3_state_st._fields_ = [
++    ('flags', c_long),
++    ('delay_buf_pop_ret', c_int),
++    ('read_sequence', c_ubyte * 8),
++    ('read_mac_secret', c_ubyte * 36),
++    ('write_sequence', c_ubyte * 8),
++    ('write_mac_secret', c_ubyte * 36),
++    ('server_random', c_ubyte * 32),
++    ('client_random', c_ubyte * 32),
++    ('need_empty_fragments', c_int),
++    ('empty_fragment_done', c_int),
++    ('rbuf', SSL3_BUFFER),
++    ('wbuf', SSL3_BUFFER),
++    ('rrec', SSL3_RECORD),
++    ('wrec', SSL3_RECORD),
++    ('alert_fragment', c_ubyte * 2),
++    ('alert_fragment_len', c_uint),
++    ('handshake_fragment', c_ubyte * 4),
++    ('handshake_fragment_len', c_uint),
++    ('wnum', c_uint),
++    ('wpend_tot', c_int),
++    ('wpend_type', c_int),
++    ('wpend_ret', c_int),
++    ('wpend_buf', POINTER(c_ubyte)),
++    ('finish_dgst1', EVP_MD_CTX),
++    ('finish_dgst2', EVP_MD_CTX),
++    ('change_cipher_spec', c_int),
++    ('warn_alert', c_int),
++    ('fatal_alert', c_int),
++    ('alert_dispatch', c_int),
++    ('send_alert', c_ubyte * 2),
++    ('renegotiate', c_int),
++    ('total_renegotiations', c_int),
++    ('num_renegotiations', c_int),
++    ('in_read_app_data', c_int),
++    ('tmp', N13ssl3_state_st4DOLLAR_20E),
++]
++assert sizeof(ssl3_state_st) == 648, sizeof(ssl3_state_st)
++assert alignment(ssl3_state_st) == 4, alignment(ssl3_state_st)
++SSL3_STATE = ssl3_state_st
++stack_st._fields_ = [
++    ('num', c_int),
++    ('data', POINTER(STRING)),
++    ('sorted', c_int),
++    ('num_alloc', c_int),
++    ('comp', CFUNCTYPE(c_int, POINTER(STRING), POINTER(STRING))),
++]
++assert sizeof(stack_st) == 20, sizeof(stack_st)
++assert alignment(stack_st) == 4, alignment(stack_st)
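++# stack_st finally receives its layout here, well over a thousand lines
++# after the forward declaration that let every POINTER(STACK) field above be
++# written; nothing is re-declared, since POINTER(stack_st) was usable
++# immediately and assigning _fields_ now merely finalizes the pointee's
++# layout.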
++class ui_st(Structure):
++    pass
++ui_st._fields_ = [
++]
++UI = ui_st
++class ui_method_st(Structure):
++    pass
++ui_method_st._fields_ = [
++]
++UI_METHOD = ui_method_st
++class ui_string_st(Structure):
++    pass
++ui_string_st._fields_ = [
++]
++UI_STRING = ui_string_st
++
++# values for enumeration 'UI_string_types'
++UI_string_types = c_int # enum
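++# C enums have no dedicated ctypes representation, so the generator maps
++# UI_string_types to c_int; the individual enumerator values, where the
++# generator emits them, are plain integer constants.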
++class X509_objects_st(Structure):
++    pass
++X509_objects_st._fields_ = [
++    ('nid', c_int),
++    ('a2i', CFUNCTYPE(c_int)),
++    ('i2a', CFUNCTYPE(c_int)),
++]
++assert sizeof(X509_objects_st) == 12, sizeof(X509_objects_st)
++assert alignment(X509_objects_st) == 4, alignment(X509_objects_st)
++X509_OBJECTS = X509_objects_st
++X509_algor_st._fields_ = [
++    ('algorithm', POINTER(ASN1_OBJECT)),
++    ('parameter', POINTER(ASN1_TYPE)),
++]
++assert sizeof(X509_algor_st) == 8, sizeof(X509_algor_st)
++assert alignment(X509_algor_st) == 4, alignment(X509_algor_st)
++class X509_val_st(Structure):
++    pass
++X509_val_st._fields_ = [
++    ('notBefore', POINTER(ASN1_TIME)),
++    ('notAfter', POINTER(ASN1_TIME)),
++]
++assert sizeof(X509_val_st) == 8, sizeof(X509_val_st)
++assert alignment(X509_val_st) == 4, alignment(X509_val_st)
++X509_VAL = X509_val_st
++class X509_pubkey_st(Structure):
++    pass
++X509_pubkey_st._fields_ = [
++    ('algor', POINTER(X509_ALGOR)),
++    ('public_key', POINTER(ASN1_BIT_STRING)),
++    ('pkey', POINTER(EVP_PKEY)),
++]
++assert sizeof(X509_pubkey_st) == 12, sizeof(X509_pubkey_st)
++assert alignment(X509_pubkey_st) == 4, alignment(X509_pubkey_st)
++X509_PUBKEY = X509_pubkey_st
++class X509_sig_st(Structure):
++    pass
++X509_sig_st._fields_ = [
++    ('algor', POINTER(X509_ALGOR)),
++    ('digest', POINTER(ASN1_OCTET_STRING)),
++]
++assert sizeof(X509_sig_st) == 8, sizeof(X509_sig_st)
++assert alignment(X509_sig_st) == 4, alignment(X509_sig_st)
++X509_SIG = X509_sig_st
++class X509_name_entry_st(Structure):
++    pass
++X509_name_entry_st._fields_ = [
++    ('object', POINTER(ASN1_OBJECT)),
++    ('value', POINTER(ASN1_STRING)),
++    ('set', c_int),
++    ('size', c_int),
++]
++assert sizeof(X509_name_entry_st) == 16, sizeof(X509_name_entry_st)
++assert alignment(X509_name_entry_st) == 4, alignment(X509_name_entry_st)
++X509_NAME_ENTRY = X509_name_entry_st
++X509_name_st._fields_ = [
++    ('entries', POINTER(STACK)),
++    ('modified', c_int),
++    ('bytes', POINTER(BUF_MEM)),
++    ('hash', c_ulong),
++]
++assert sizeof(X509_name_st) == 16, sizeof(X509_name_st)
++assert alignment(X509_name_st) == 4, alignment(X509_name_st)
++class X509_extension_st(Structure):
++    pass
++X509_extension_st._fields_ = [
++    ('object', POINTER(ASN1_OBJECT)),
++    ('critical', ASN1_BOOLEAN),
++    ('value', POINTER(ASN1_OCTET_STRING)),
++]
++assert sizeof(X509_extension_st) == 12, sizeof(X509_extension_st)
++assert alignment(X509_extension_st) == 4, alignment(X509_extension_st)
++X509_EXTENSION = X509_extension_st
++class x509_attributes_st(Structure):
++    pass
++class N18x509_attributes_st4DOLLAR_13E(Union):
++    pass
++N18x509_attributes_st4DOLLAR_13E._fields_ = [
++    ('ptr', STRING),
++    ('set', POINTER(STACK)),
++    ('single', POINTER(ASN1_TYPE)),
++]
++assert sizeof(N18x509_attributes_st4DOLLAR_13E) == 4, sizeof(N18x509_attributes_st4DOLLAR_13E)
++assert alignment(N18x509_attributes_st4DOLLAR_13E) == 4, alignment(N18x509_attributes_st4DOLLAR_13E)
++x509_attributes_st._fields_ = [
++    ('object', POINTER(ASN1_OBJECT)),
++    ('single', c_int),
++    ('value', N18x509_attributes_st4DOLLAR_13E),
++]
++assert sizeof(x509_attributes_st) == 12, sizeof(x509_attributes_st)
++assert alignment(x509_attributes_st) == 4, alignment(x509_attributes_st)
++X509_ATTRIBUTE = x509_attributes_st
++class X509_req_info_st(Structure):
++    pass
++X509_req_info_st._fields_ = [
++    ('enc', ASN1_ENCODING),
++    ('version', POINTER(ASN1_INTEGER)),
++    ('subject', POINTER(X509_NAME)),
++    ('pubkey', POINTER(X509_PUBKEY)),
++    ('attributes', POINTER(STACK)),
++]
++assert sizeof(X509_req_info_st) == 28, sizeof(X509_req_info_st)
++assert alignment(X509_req_info_st) == 4, alignment(X509_req_info_st)
++X509_REQ_INFO = X509_req_info_st
++class X509_req_st(Structure):
++    pass
++X509_req_st._fields_ = [
++    ('req_info', POINTER(X509_REQ_INFO)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++    ('references', c_int),
++]
++assert sizeof(X509_req_st) == 16, sizeof(X509_req_st)
++assert alignment(X509_req_st) == 4, alignment(X509_req_st)
++X509_REQ = X509_req_st
++class x509_cinf_st(Structure):
++    pass
++x509_cinf_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('serialNumber', POINTER(ASN1_INTEGER)),
++    ('signature', POINTER(X509_ALGOR)),
++    ('issuer', POINTER(X509_NAME)),
++    ('validity', POINTER(X509_VAL)),
++    ('subject', POINTER(X509_NAME)),
++    ('key', POINTER(X509_PUBKEY)),
++    ('issuerUID', POINTER(ASN1_BIT_STRING)),
++    ('subjectUID', POINTER(ASN1_BIT_STRING)),
++    ('extensions', POINTER(STACK)),
++]
++assert sizeof(x509_cinf_st) == 40, sizeof(x509_cinf_st)
++assert alignment(x509_cinf_st) == 4, alignment(x509_cinf_st)
++X509_CINF = x509_cinf_st
++class x509_cert_aux_st(Structure):
++    pass
++x509_cert_aux_st._fields_ = [
++    ('trust', POINTER(STACK)),
++    ('reject', POINTER(STACK)),
++    ('alias', POINTER(ASN1_UTF8STRING)),
++    ('keyid', POINTER(ASN1_OCTET_STRING)),
++    ('other', POINTER(STACK)),
++]
++assert sizeof(x509_cert_aux_st) == 20, sizeof(x509_cert_aux_st)
++assert alignment(x509_cert_aux_st) == 4, alignment(x509_cert_aux_st)
++X509_CERT_AUX = x509_cert_aux_st
++class AUTHORITY_KEYID_st(Structure):
++    pass
++x509_st._fields_ = [
++    ('cert_info', POINTER(X509_CINF)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++    ('valid', c_int),
++    ('references', c_int),
++    ('name', STRING),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('ex_pathlen', c_long),
++    ('ex_flags', c_ulong),
++    ('ex_kusage', c_ulong),
++    ('ex_xkusage', c_ulong),
++    ('ex_nscert', c_ulong),
++    ('skid', POINTER(ASN1_OCTET_STRING)),
++    ('akid', POINTER(AUTHORITY_KEYID_st)),
++    ('sha1_hash', c_ubyte * 20),
++    ('aux', POINTER(X509_CERT_AUX)),
++]
++assert sizeof(x509_st) == 84, sizeof(x509_st)
++assert alignment(x509_st) == 4, alignment(x509_st)
++AUTHORITY_KEYID_st._fields_ = [
++]
++class x509_trust_st(Structure):
++    pass
++x509_trust_st._fields_ = [
++    ('trust', c_int),
++    ('flags', c_int),
++    ('check_trust', CFUNCTYPE(c_int, POINTER(x509_trust_st), POINTER(X509), c_int)),
++    ('name', STRING),
++    ('arg1', c_int),
++    ('arg2', c_void_p),
++]
++assert sizeof(x509_trust_st) == 24, sizeof(x509_trust_st)
++assert alignment(x509_trust_st) == 4, alignment(x509_trust_st)
++X509_TRUST = x509_trust_st
++class X509_revoked_st(Structure):
++    pass
++X509_revoked_st._fields_ = [
++    ('serialNumber', POINTER(ASN1_INTEGER)),
++    ('revocationDate', POINTER(ASN1_TIME)),
++    ('extensions', POINTER(STACK)),
++    ('sequence', c_int),
++]
++assert sizeof(X509_revoked_st) == 16, sizeof(X509_revoked_st)
++assert alignment(X509_revoked_st) == 4, alignment(X509_revoked_st)
++X509_REVOKED = X509_revoked_st
++class X509_crl_info_st(Structure):
++    pass
++X509_crl_info_st._fields_ = [
++    ('version', POINTER(ASN1_INTEGER)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('issuer', POINTER(X509_NAME)),
++    ('lastUpdate', POINTER(ASN1_TIME)),
++    ('nextUpdate', POINTER(ASN1_TIME)),
++    ('revoked', POINTER(STACK)),
++    ('extensions', POINTER(STACK)),
++    ('enc', ASN1_ENCODING),
++]
++assert sizeof(X509_crl_info_st) == 40, sizeof(X509_crl_info_st)
++assert alignment(X509_crl_info_st) == 4, alignment(X509_crl_info_st)
++X509_CRL_INFO = X509_crl_info_st
++X509_crl_st._fields_ = [
++    ('crl', POINTER(X509_CRL_INFO)),
++    ('sig_alg', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++    ('references', c_int),
++]
++assert sizeof(X509_crl_st) == 16, sizeof(X509_crl_st)
++assert alignment(X509_crl_st) == 4, alignment(X509_crl_st)
++class private_key_st(Structure):
++    pass
++private_key_st._fields_ = [
++    ('version', c_int),
++    ('enc_algor', POINTER(X509_ALGOR)),
++    ('enc_pkey', POINTER(ASN1_OCTET_STRING)),
++    ('dec_pkey', POINTER(EVP_PKEY)),
++    ('key_length', c_int),
++    ('key_data', STRING),
++    ('key_free', c_int),
++    ('cipher', EVP_CIPHER_INFO),
++    ('references', c_int),
++]
++assert sizeof(private_key_st) == 52, sizeof(private_key_st)
++assert alignment(private_key_st) == 4, alignment(private_key_st)
++X509_PKEY = private_key_st
++class X509_info_st(Structure):
++    pass
++X509_info_st._fields_ = [
++    ('x509', POINTER(X509)),
++    ('crl', POINTER(X509_CRL)),
++    ('x_pkey', POINTER(X509_PKEY)),
++    ('enc_cipher', EVP_CIPHER_INFO),
++    ('enc_len', c_int),
++    ('enc_data', STRING),
++    ('references', c_int),
++]
++assert sizeof(X509_info_st) == 44, sizeof(X509_info_st)
++assert alignment(X509_info_st) == 4, alignment(X509_info_st)
++X509_INFO = X509_info_st
++class Netscape_spkac_st(Structure):
++    pass
++Netscape_spkac_st._fields_ = [
++    ('pubkey', POINTER(X509_PUBKEY)),
++    ('challenge', POINTER(ASN1_IA5STRING)),
++]
++assert sizeof(Netscape_spkac_st) == 8, sizeof(Netscape_spkac_st)
++assert alignment(Netscape_spkac_st) == 4, alignment(Netscape_spkac_st)
++NETSCAPE_SPKAC = Netscape_spkac_st
++class Netscape_spki_st(Structure):
++    pass
++Netscape_spki_st._fields_ = [
++    ('spkac', POINTER(NETSCAPE_SPKAC)),
++    ('sig_algor', POINTER(X509_ALGOR)),
++    ('signature', POINTER(ASN1_BIT_STRING)),
++]
++assert sizeof(Netscape_spki_st) == 12, sizeof(Netscape_spki_st)
++assert alignment(Netscape_spki_st) == 4, alignment(Netscape_spki_st)
++NETSCAPE_SPKI = Netscape_spki_st
++class Netscape_certificate_sequence(Structure):
++    pass
++Netscape_certificate_sequence._fields_ = [
++    ('type', POINTER(ASN1_OBJECT)),
++    ('certs', POINTER(STACK)),
++]
++assert sizeof(Netscape_certificate_sequence) == 8, sizeof(Netscape_certificate_sequence)
++assert alignment(Netscape_certificate_sequence) == 4, alignment(Netscape_certificate_sequence)
++NETSCAPE_CERT_SEQUENCE = Netscape_certificate_sequence
++class PBEPARAM_st(Structure):
++    pass
++PBEPARAM_st._fields_ = [
++    ('salt', POINTER(ASN1_OCTET_STRING)),
++    ('iter', POINTER(ASN1_INTEGER)),
++]
++assert sizeof(PBEPARAM_st) == 8, sizeof(PBEPARAM_st)
++assert alignment(PBEPARAM_st) == 4, alignment(PBEPARAM_st)
++PBEPARAM = PBEPARAM_st
++class PBE2PARAM_st(Structure):
++    pass
++PBE2PARAM_st._fields_ = [
++    ('keyfunc', POINTER(X509_ALGOR)),
++    ('encryption', POINTER(X509_ALGOR)),
++]
++assert sizeof(PBE2PARAM_st) == 8, sizeof(PBE2PARAM_st)
++assert alignment(PBE2PARAM_st) == 4, alignment(PBE2PARAM_st)
++PBE2PARAM = PBE2PARAM_st
++class PBKDF2PARAM_st(Structure):
++    pass
++PBKDF2PARAM_st._fields_ = [
++    ('salt', POINTER(ASN1_TYPE)),
++    ('iter', POINTER(ASN1_INTEGER)),
++    ('keylength', POINTER(ASN1_INTEGER)),
++    ('prf', POINTER(X509_ALGOR)),
++]
++assert sizeof(PBKDF2PARAM_st) == 16, sizeof(PBKDF2PARAM_st)
++assert alignment(PBKDF2PARAM_st) == 4, alignment(PBKDF2PARAM_st)
++PBKDF2PARAM = PBKDF2PARAM_st
++class pkcs8_priv_key_info_st(Structure):
++    pass
++pkcs8_priv_key_info_st._fields_ = [
++    ('broken', c_int),
++    ('version', POINTER(ASN1_INTEGER)),
++    ('pkeyalg', POINTER(X509_ALGOR)),
++    ('pkey', POINTER(ASN1_TYPE)),
++    ('attributes', POINTER(STACK)),
++]
++assert sizeof(pkcs8_priv_key_info_st) == 20, sizeof(pkcs8_priv_key_info_st)
++assert alignment(pkcs8_priv_key_info_st) == 4, alignment(pkcs8_priv_key_info_st)
++PKCS8_PRIV_KEY_INFO = pkcs8_priv_key_info_st
++class x509_hash_dir_st(Structure):
++    pass
++x509_hash_dir_st._fields_ = [
++    ('num_dirs', c_int),
++    ('dirs', POINTER(STRING)),
++    ('dirs_type', POINTER(c_int)),
++    ('num_dirs_alloced', c_int),
++]
++assert sizeof(x509_hash_dir_st) == 16, sizeof(x509_hash_dir_st)
++assert alignment(x509_hash_dir_st) == 4, alignment(x509_hash_dir_st)
++X509_HASH_DIR_CTX = x509_hash_dir_st
++class x509_file_st(Structure):
++    pass
++x509_file_st._fields_ = [
++    ('num_paths', c_int),
++    ('num_alloced', c_int),
++    ('paths', POINTER(STRING)),
++    ('path_type', POINTER(c_int)),
++]
++assert sizeof(x509_file_st) == 16, sizeof(x509_file_st)
++assert alignment(x509_file_st) == 4, alignment(x509_file_st)
++X509_CERT_FILE_CTX = x509_file_st
++class x509_object_st(Structure):
++    pass
++class N14x509_object_st4DOLLAR_14E(Union):
++    pass
++N14x509_object_st4DOLLAR_14E._fields_ = [
++    ('ptr', STRING),
++    ('x509', POINTER(X509)),
++    ('crl', POINTER(X509_CRL)),
++    ('pkey', POINTER(EVP_PKEY)),
++]
++assert sizeof(N14x509_object_st4DOLLAR_14E) == 4, sizeof(N14x509_object_st4DOLLAR_14E)
++assert alignment(N14x509_object_st4DOLLAR_14E) == 4, alignment(N14x509_object_st4DOLLAR_14E)
++x509_object_st._fields_ = [
++    ('type', c_int),
++    ('data', N14x509_object_st4DOLLAR_14E),
++]
++assert sizeof(x509_object_st) == 8, sizeof(x509_object_st)
++assert alignment(x509_object_st) == 4, alignment(x509_object_st)
++X509_OBJECT = x509_object_st
++class x509_lookup_st(Structure):
++    pass
++X509_LOOKUP = x509_lookup_st
++class x509_lookup_method_st(Structure):
++    pass
++x509_lookup_method_st._fields_ = [
++    ('name', STRING),
++    ('new_item', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
++    ('free', CFUNCTYPE(None, POINTER(X509_LOOKUP))),
++    ('init', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
++    ('shutdown', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
++    ('ctrl', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_long, POINTER(STRING))),
++    ('get_by_subject', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(X509_OBJECT))),
++    ('get_by_issuer_serial', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(ASN1_INTEGER), POINTER(X509_OBJECT))),
++    ('get_by_fingerprint', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(c_ubyte), c_int, POINTER(X509_OBJECT))),
++    ('get_by_alias', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_int, POINTER(X509_OBJECT))),
++]
++assert sizeof(x509_lookup_method_st) == 40, sizeof(x509_lookup_method_st)
++assert alignment(x509_lookup_method_st) == 4, alignment(x509_lookup_method_st)
++X509_LOOKUP_METHOD = x509_lookup_method_st
++x509_store_st._fields_ = [
++    ('cache', c_int),
++    ('objs', POINTER(STACK)),
++    ('get_cert_methods', POINTER(STACK)),
++    ('flags', c_ulong),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
++    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
++    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
++    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
++    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('ex_data', CRYPTO_EX_DATA),
++    ('references', c_int),
++    ('depth', c_int),
++]
++assert sizeof(x509_store_st) == 76, sizeof(x509_store_st)
++assert alignment(x509_store_st) == 4, alignment(x509_store_st)
++x509_lookup_st._fields_ = [
++    ('init', c_int),
++    ('skip', c_int),
++    ('method', POINTER(X509_LOOKUP_METHOD)),
++    ('method_data', STRING),
++    ('store_ctx', POINTER(X509_STORE)),
++]
++assert sizeof(x509_lookup_st) == 20, sizeof(x509_lookup_st)
++assert alignment(x509_lookup_st) == 4, alignment(x509_lookup_st)
++time_t = __darwin_time_t
++x509_store_ctx_st._fields_ = [
++    ('ctx', POINTER(X509_STORE)),
++    ('current_method', c_int),
++    ('cert', POINTER(X509)),
++    ('untrusted', POINTER(STACK)),
++    ('purpose', c_int),
++    ('trust', c_int),
++    ('check_time', time_t),
++    ('flags', c_ulong),
++    ('other_ctx', c_void_p),
++    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
++    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
++    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
++    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
++    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
++    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
++    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
++    ('depth', c_int),
++    ('valid', c_int),
++    ('last_untrusted', c_int),
++    ('chain', POINTER(STACK)),
++    ('error_depth', c_int),
++    ('error', c_int),
++    ('current_cert', POINTER(X509)),
++    ('current_issuer', POINTER(X509)),
++    ('current_crl', POINTER(X509_CRL)),
++    ('ex_data', CRYPTO_EX_DATA),
++]
++assert sizeof(x509_store_ctx_st) == 116, sizeof(x509_store_ctx_st)
++assert alignment(x509_store_ctx_st) == 4, alignment(x509_store_ctx_st)
++va_list = __darwin_va_list
++__darwin_off_t = __int64_t
++fpos_t = __darwin_off_t
++class __sbuf(Structure):
++    pass
++__sbuf._fields_ = [
++    ('_base', POINTER(c_ubyte)),
++    ('_size', c_int),
++]
++assert sizeof(__sbuf) == 8, sizeof(__sbuf)
++assert alignment(__sbuf) == 4, alignment(__sbuf)
++class __sFILEX(Structure):
++    pass
++__sFILEX._fields_ = [
++]
++class __sFILE(Structure):
++    pass
++__sFILE._pack_ = 4
++__sFILE._fields_ = [
++    ('_p', POINTER(c_ubyte)),
++    ('_r', c_int),
++    ('_w', c_int),
++    ('_flags', c_short),
++    ('_file', c_short),
++    ('_bf', __sbuf),
++    ('_lbfsize', c_int),
++    ('_cookie', c_void_p),
++    ('_close', CFUNCTYPE(c_int, c_void_p)),
++    ('_read', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
++    ('_seek', CFUNCTYPE(fpos_t, c_void_p, c_longlong, c_int)),
++    ('_write', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
++    ('_ub', __sbuf),
++    ('_extra', POINTER(__sFILEX)),
++    ('_ur', c_int),
++    ('_ubuf', c_ubyte * 3),
++    ('_nbuf', c_ubyte * 1),
++    ('_lb', __sbuf),
++    ('_blksize', c_int),
++    ('_offset', fpos_t),
++]
++assert sizeof(__sFILE) == 88, sizeof(__sFILE)
++assert alignment(__sFILE) == 4, alignment(__sFILE)
++FILE = __sFILE
++ct_rune_t = __darwin_ct_rune_t
++rune_t = __darwin_rune_t
++class div_t(Structure):
++    pass
++div_t._fields_ = [
++    ('quot', c_int),
++    ('rem', c_int),
++]
++assert sizeof(div_t) == 8, sizeof(div_t)
++assert alignment(div_t) == 4, alignment(div_t)
++class ldiv_t(Structure):
++    pass
++ldiv_t._fields_ = [
++    ('quot', c_long),
++    ('rem', c_long),
++]
++assert sizeof(ldiv_t) == 8, sizeof(ldiv_t)
++assert alignment(ldiv_t) == 4, alignment(ldiv_t)
++class lldiv_t(Structure):
++    pass
++lldiv_t._pack_ = 4
++lldiv_t._fields_ = [
++    ('quot', c_longlong),
++    ('rem', c_longlong),
++]
++assert sizeof(lldiv_t) == 16, sizeof(lldiv_t)
++assert alignment(lldiv_t) == 4, alignment(lldiv_t)
++__darwin_dev_t = __int32_t
++dev_t = __darwin_dev_t
++__darwin_mode_t = __uint16_t
++mode_t = __darwin_mode_t
++class mcontext(Structure):
++    pass
++mcontext._fields_ = [
++]
++class mcontext64(Structure):
++    pass
++mcontext64._fields_ = [
++]
++class __darwin_pthread_handler_rec(Structure):
++    pass
++__darwin_pthread_handler_rec._fields_ = [
++    ('__routine', CFUNCTYPE(None, c_void_p)),
++    ('__arg', c_void_p),
++    ('__next', POINTER(__darwin_pthread_handler_rec)),
++]
++assert sizeof(__darwin_pthread_handler_rec) == 12, sizeof(__darwin_pthread_handler_rec)
++assert alignment(__darwin_pthread_handler_rec) == 4, alignment(__darwin_pthread_handler_rec)
++class _opaque_pthread_attr_t(Structure):
++    pass
++_opaque_pthread_attr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 36),
++]
++assert sizeof(_opaque_pthread_attr_t) == 40, sizeof(_opaque_pthread_attr_t)
++assert alignment(_opaque_pthread_attr_t) == 4, alignment(_opaque_pthread_attr_t)
++class _opaque_pthread_cond_t(Structure):
++    pass
++_opaque_pthread_cond_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 24),
++]
++assert sizeof(_opaque_pthread_cond_t) == 28, sizeof(_opaque_pthread_cond_t)
++assert alignment(_opaque_pthread_cond_t) == 4, alignment(_opaque_pthread_cond_t)
++class _opaque_pthread_condattr_t(Structure):
++    pass
++_opaque_pthread_condattr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 4),
++]
++assert sizeof(_opaque_pthread_condattr_t) == 8, sizeof(_opaque_pthread_condattr_t)
++assert alignment(_opaque_pthread_condattr_t) == 4, alignment(_opaque_pthread_condattr_t)
++class _opaque_pthread_mutex_t(Structure):
++    pass
++_opaque_pthread_mutex_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 40),
++]
++assert sizeof(_opaque_pthread_mutex_t) == 44, sizeof(_opaque_pthread_mutex_t)
++assert alignment(_opaque_pthread_mutex_t) == 4, alignment(_opaque_pthread_mutex_t)
++class _opaque_pthread_mutexattr_t(Structure):
++    pass
++_opaque_pthread_mutexattr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 8),
++]
++assert sizeof(_opaque_pthread_mutexattr_t) == 12, sizeof(_opaque_pthread_mutexattr_t)
++assert alignment(_opaque_pthread_mutexattr_t) == 4, alignment(_opaque_pthread_mutexattr_t)
++class _opaque_pthread_once_t(Structure):
++    pass
++_opaque_pthread_once_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 4),
++]
++assert sizeof(_opaque_pthread_once_t) == 8, sizeof(_opaque_pthread_once_t)
++assert alignment(_opaque_pthread_once_t) == 4, alignment(_opaque_pthread_once_t)
++class _opaque_pthread_rwlock_t(Structure):
++    pass
++_opaque_pthread_rwlock_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 124),
++]
++assert sizeof(_opaque_pthread_rwlock_t) == 128, sizeof(_opaque_pthread_rwlock_t)
++assert alignment(_opaque_pthread_rwlock_t) == 4, alignment(_opaque_pthread_rwlock_t)
++class _opaque_pthread_rwlockattr_t(Structure):
++    pass
++_opaque_pthread_rwlockattr_t._fields_ = [
++    ('__sig', c_long),
++    ('__opaque', c_char * 12),
++]
++assert sizeof(_opaque_pthread_rwlockattr_t) == 16, sizeof(_opaque_pthread_rwlockattr_t)
++assert alignment(_opaque_pthread_rwlockattr_t) == 4, alignment(_opaque_pthread_rwlockattr_t)
++class _opaque_pthread_t(Structure):
++    pass
++_opaque_pthread_t._fields_ = [
++    ('__sig', c_long),
++    ('__cleanup_stack', POINTER(__darwin_pthread_handler_rec)),
++    ('__opaque', c_char * 596),
++]
++assert sizeof(_opaque_pthread_t) == 604, sizeof(_opaque_pthread_t)
++assert alignment(_opaque_pthread_t) == 4, alignment(_opaque_pthread_t)
++__darwin_blkcnt_t = __int64_t
++__darwin_blksize_t = __int32_t
++__darwin_fsblkcnt_t = c_uint
++__darwin_fsfilcnt_t = c_uint
++__darwin_gid_t = __uint32_t
++__darwin_id_t = __uint32_t
++__darwin_ino_t = __uint32_t
++__darwin_mach_port_name_t = __darwin_natural_t
++__darwin_mach_port_t = __darwin_mach_port_name_t
++__darwin_mcontext_t = POINTER(mcontext)
++__darwin_mcontext64_t = POINTER(mcontext64)
++__darwin_pid_t = __int32_t
++__darwin_pthread_attr_t = _opaque_pthread_attr_t
++__darwin_pthread_cond_t = _opaque_pthread_cond_t
++__darwin_pthread_condattr_t = _opaque_pthread_condattr_t
++__darwin_pthread_key_t = c_ulong
++__darwin_pthread_mutex_t = _opaque_pthread_mutex_t
++__darwin_pthread_mutexattr_t = _opaque_pthread_mutexattr_t
++__darwin_pthread_once_t = _opaque_pthread_once_t
++__darwin_pthread_rwlock_t = _opaque_pthread_rwlock_t
++__darwin_pthread_rwlockattr_t = _opaque_pthread_rwlockattr_t
++__darwin_pthread_t = POINTER(_opaque_pthread_t)
++__darwin_sigset_t = __uint32_t
++__darwin_suseconds_t = __int32_t
++__darwin_uid_t = __uint32_t
++__darwin_useconds_t = __uint32_t
++__darwin_uuid_t = c_ubyte * 16
++class sigaltstack(Structure):
++    pass
++sigaltstack._fields_ = [
++    ('ss_sp', c_void_p),
++    ('ss_size', __darwin_size_t),
++    ('ss_flags', c_int),
++]
++assert sizeof(sigaltstack) == 12, sizeof(sigaltstack)
++assert alignment(sigaltstack) == 4, alignment(sigaltstack)
++__darwin_stack_t = sigaltstack
++class ucontext(Structure):
++    pass
++ucontext._fields_ = [
++    ('uc_onstack', c_int),
++    ('uc_sigmask', __darwin_sigset_t),
++    ('uc_stack', __darwin_stack_t),
++    ('uc_link', POINTER(ucontext)),
++    ('uc_mcsize', __darwin_size_t),
++    ('uc_mcontext', __darwin_mcontext_t),
++]
++assert sizeof(ucontext) == 32, sizeof(ucontext)
++assert alignment(ucontext) == 4, alignment(ucontext)
++__darwin_ucontext_t = ucontext
++class ucontext64(Structure):
++    pass
++ucontext64._fields_ = [
++    ('uc_onstack', c_int),
++    ('uc_sigmask', __darwin_sigset_t),
++    ('uc_stack', __darwin_stack_t),
++    ('uc_link', POINTER(ucontext64)),
++    ('uc_mcsize', __darwin_size_t),
++    ('uc_mcontext64', __darwin_mcontext64_t),
++]
++assert sizeof(ucontext64) == 32, sizeof(ucontext64)
++assert alignment(ucontext64) == 4, alignment(ucontext64)
++__darwin_ucontext64_t = ucontext64
++class timeval(Structure):
++    pass
++timeval._fields_ = [
++    ('tv_sec', __darwin_time_t),
++    ('tv_usec', __darwin_suseconds_t),
++]
++assert sizeof(timeval) == 8, sizeof(timeval)
++assert alignment(timeval) == 4, alignment(timeval)
++rlim_t = __int64_t
++class rusage(Structure):
++    pass
++rusage._fields_ = [
++    ('ru_utime', timeval),
++    ('ru_stime', timeval),
++    ('ru_maxrss', c_long),
++    ('ru_ixrss', c_long),
++    ('ru_idrss', c_long),
++    ('ru_isrss', c_long),
++    ('ru_minflt', c_long),
++    ('ru_majflt', c_long),
++    ('ru_nswap', c_long),
++    ('ru_inblock', c_long),
++    ('ru_oublock', c_long),
++    ('ru_msgsnd', c_long),
++    ('ru_msgrcv', c_long),
++    ('ru_nsignals', c_long),
++    ('ru_nvcsw', c_long),
++    ('ru_nivcsw', c_long),
++]
++assert sizeof(rusage) == 72, sizeof(rusage)
++assert alignment(rusage) == 4, alignment(rusage)
++class rlimit(Structure):
++    pass
++rlimit._pack_ = 4
++rlimit._fields_ = [
++    ('rlim_cur', rlim_t),
++    ('rlim_max', rlim_t),
++]
++assert sizeof(rlimit) == 16, sizeof(rlimit)
++assert alignment(rlimit) == 4, alignment(rlimit)
++mcontext_t = __darwin_mcontext_t
++mcontext64_t = __darwin_mcontext64_t
++pthread_attr_t = __darwin_pthread_attr_t
++sigset_t = __darwin_sigset_t
++ucontext_t = __darwin_ucontext_t
++ucontext64_t = __darwin_ucontext64_t
++uid_t = __darwin_uid_t
++class sigval(Union):
++    pass
++sigval._fields_ = [
++    ('sival_int', c_int),
++    ('sival_ptr', c_void_p),
++]
++assert sizeof(sigval) == 4, sizeof(sigval)
++assert alignment(sigval) == 4, alignment(sigval)
++class sigevent(Structure):
++    pass
++sigevent._fields_ = [
++    ('sigev_notify', c_int),
++    ('sigev_signo', c_int),
++    ('sigev_value', sigval),
++    ('sigev_notify_function', CFUNCTYPE(None, sigval)),
++    ('sigev_notify_attributes', POINTER(pthread_attr_t)),
++]
++assert sizeof(sigevent) == 20, sizeof(sigevent)
++assert alignment(sigevent) == 4, alignment(sigevent)
++class __siginfo(Structure):
++    pass
++pid_t = __darwin_pid_t
++__siginfo._fields_ = [
++    ('si_signo', c_int),
++    ('si_errno', c_int),
++    ('si_code', c_int),
++    ('si_pid', pid_t),
++    ('si_uid', uid_t),
++    ('si_status', c_int),
++    ('si_addr', c_void_p),
++    ('si_value', sigval),
++    ('si_band', c_long),
++    ('pad', c_ulong * 7),
++]
++assert sizeof(__siginfo) == 64, sizeof(__siginfo)
++assert alignment(__siginfo) == 4, alignment(__siginfo)
++siginfo_t = __siginfo
++class __sigaction_u(Union):
++    pass
++__sigaction_u._fields_ = [
++    ('__sa_handler', CFUNCTYPE(None, c_int)),
++    ('__sa_sigaction', CFUNCTYPE(None, c_int, POINTER(__siginfo), c_void_p)),
++]
++assert sizeof(__sigaction_u) == 4, sizeof(__sigaction_u)
++assert alignment(__sigaction_u) == 4, alignment(__sigaction_u)
++class __sigaction(Structure):
++    pass
++__sigaction._fields_ = [
++    ('__sigaction_u', __sigaction_u),
++    ('sa_tramp', CFUNCTYPE(None, c_void_p, c_int, c_int, POINTER(siginfo_t), c_void_p)),
++    ('sa_mask', sigset_t),
++    ('sa_flags', c_int),
++]
++assert sizeof(__sigaction) == 16, sizeof(__sigaction)
++assert alignment(__sigaction) == 4, alignment(__sigaction)
++class sigaction(Structure):
++    pass
++sigaction._fields_ = [
++    ('__sigaction_u', __sigaction_u),
++    ('sa_mask', sigset_t),
++    ('sa_flags', c_int),
++]
++assert sizeof(sigaction) == 12, sizeof(sigaction)
++assert alignment(sigaction) == 4, alignment(sigaction)
++sig_t = CFUNCTYPE(None, c_int)
++stack_t = __darwin_stack_t
++class sigvec(Structure):
++    pass
++sigvec._fields_ = [
++    ('sv_handler', CFUNCTYPE(None, c_int)),
++    ('sv_mask', c_int),
++    ('sv_flags', c_int),
++]
++assert sizeof(sigvec) == 12, sizeof(sigvec)
++assert alignment(sigvec) == 4, alignment(sigvec)
++class sigstack(Structure):
++    pass
++sigstack._fields_ = [
++    ('ss_sp', STRING),
++    ('ss_onstack', c_int),
++]
++assert sizeof(sigstack) == 8, sizeof(sigstack)
++assert alignment(sigstack) == 4, alignment(sigstack)
++u_char = c_ubyte
++u_short = c_ushort
++u_int = c_uint
++u_long = c_ulong
++ushort = c_ushort
++uint = c_uint
++u_quad_t = u_int64_t
++quad_t = int64_t
++qaddr_t = POINTER(quad_t)
++caddr_t = STRING
++daddr_t = int32_t
++fixpt_t = u_int32_t
++blkcnt_t = __darwin_blkcnt_t
++blksize_t = __darwin_blksize_t
++gid_t = __darwin_gid_t
++in_addr_t = __uint32_t
++in_port_t = __uint16_t
++ino_t = __darwin_ino_t
++key_t = __int32_t
++nlink_t = __uint16_t
++off_t = __darwin_off_t
++segsz_t = int32_t
++swblk_t = int32_t
++clock_t = __darwin_clock_t
++ssize_t = __darwin_ssize_t
++useconds_t = __darwin_useconds_t
++suseconds_t = __darwin_suseconds_t
++fd_mask = __int32_t
++class fd_set(Structure):
++    pass
++fd_set._fields_ = [
++    ('fds_bits', __int32_t * 32),
++]
++assert sizeof(fd_set) == 128, sizeof(fd_set)
++assert alignment(fd_set) == 4, alignment(fd_set)
++pthread_cond_t = __darwin_pthread_cond_t
++pthread_condattr_t = __darwin_pthread_condattr_t
++pthread_mutex_t = __darwin_pthread_mutex_t
++pthread_mutexattr_t = __darwin_pthread_mutexattr_t
++pthread_once_t = __darwin_pthread_once_t
++pthread_rwlock_t = __darwin_pthread_rwlock_t
++pthread_rwlockattr_t = __darwin_pthread_rwlockattr_t
++pthread_t = __darwin_pthread_t
++pthread_key_t = __darwin_pthread_key_t
++fsblkcnt_t = __darwin_fsblkcnt_t
++fsfilcnt_t = __darwin_fsfilcnt_t
++
++# values for enumeration 'idtype_t'
++idtype_t = c_int # enum
++id_t = __darwin_id_t
++class wait(Union):
++    pass
++class N4wait3DOLLAR_3E(Structure):
++    pass
++N4wait3DOLLAR_3E._fields_ = [
++    ('w_Termsig', c_uint, 7),
++    ('w_Coredump', c_uint, 1),
++    ('w_Retcode', c_uint, 8),
++    ('w_Filler', c_uint, 16),
++]
++assert sizeof(N4wait3DOLLAR_3E) == 4, sizeof(N4wait3DOLLAR_3E)
++assert alignment(N4wait3DOLLAR_3E) == 4, alignment(N4wait3DOLLAR_3E)
++class N4wait3DOLLAR_4E(Structure):
++    pass
++N4wait3DOLLAR_4E._fields_ = [
++    ('w_Stopval', c_uint, 8),
++    ('w_Stopsig', c_uint, 8),
++    ('w_Filler', c_uint, 16),
++]
++assert sizeof(N4wait3DOLLAR_4E) == 4, sizeof(N4wait3DOLLAR_4E)
++assert alignment(N4wait3DOLLAR_4E) == 4, alignment(N4wait3DOLLAR_4E)
++wait._fields_ = [
++    ('w_status', c_int),
++    ('w_T', N4wait3DOLLAR_3E),
++    ('w_S', N4wait3DOLLAR_4E),
++]
++assert sizeof(wait) == 4, sizeof(wait)
++assert alignment(wait) == 4, alignment(wait)
++class timespec(Structure):
++    pass
++timespec._fields_ = [
++    ('tv_sec', time_t),
++    ('tv_nsec', c_long),
++]
++assert sizeof(timespec) == 8, sizeof(timespec)
++assert alignment(timespec) == 4, alignment(timespec)
++class tm(Structure):
++    pass
++tm._fields_ = [
++    ('tm_sec', c_int),
++    ('tm_min', c_int),
++    ('tm_hour', c_int),
++    ('tm_mday', c_int),
++    ('tm_mon', c_int),
++    ('tm_year', c_int),
++    ('tm_wday', c_int),
++    ('tm_yday', c_int),
++    ('tm_isdst', c_int),
++    ('tm_gmtoff', c_long),
++    ('tm_zone', STRING),
++]
++assert sizeof(tm) == 44, sizeof(tm)
++assert alignment(tm) == 4, alignment(tm)
++__gnuc_va_list = STRING
++ptrdiff_t = c_int
++int8_t = c_byte
++int16_t = c_short
++uint8_t = c_ubyte
++uint16_t = c_ushort
++uint32_t = c_uint
++uint64_t = c_ulonglong
++int_least8_t = int8_t
++int_least16_t = int16_t
++int_least32_t = int32_t
++int_least64_t = int64_t
++uint_least8_t = uint8_t
++uint_least16_t = uint16_t
++uint_least32_t = uint32_t
++uint_least64_t = uint64_t
++int_fast8_t = int8_t
++int_fast16_t = int16_t
++int_fast32_t = int32_t
++int_fast64_t = int64_t
++uint_fast8_t = uint8_t
++uint_fast16_t = uint16_t
++uint_fast32_t = uint32_t
++uint_fast64_t = uint64_t
++intptr_t = c_long
++uintptr_t = c_ulong
++intmax_t = c_longlong
++uintmax_t = c_ulonglong
++__all__ = ['ENGINE', 'pkcs7_enc_content_st', '__int16_t',
++           'X509_REVOKED', 'SSL_CTX', 'UIT_BOOLEAN',
++           '__darwin_time_t', 'ucontext64_t', 'int_fast32_t',
++           'pem_ctx_st', 'uint8_t', 'fpos_t', 'X509', 'COMP_CTX',
++           'tm', 'N10pem_ctx_st4DOLLAR_17E', 'swblk_t',
++           'ASN1_TEMPLATE', '__darwin_pthread_t', 'fixpt_t',
++           'BIO_METHOD', 'ASN1_PRINTABLESTRING', 'EVP_ENCODE_CTX',
++           'dh_method', 'bio_f_buffer_ctx_struct', 'in_port_t',
++           'X509_SIG', '__darwin_ssize_t', '__darwin_sigset_t',
++           'wait', 'uint_fast16_t', 'N12asn1_type_st4DOLLAR_11E',
++           'uint_least8_t', 'pthread_rwlock_t', 'ASN1_IA5STRING',
++           'fsfilcnt_t', 'ucontext', '__uint64_t', 'timespec',
++           'x509_cinf_st', 'COMP_METHOD', 'MD5_CTX', 'buf_mem_st',
++           'ASN1_ENCODING_st', 'PBEPARAM', 'X509_NAME_ENTRY',
++           '__darwin_va_list', 'ucontext_t', 'lhash_st',
++           'N4wait3DOLLAR_4E', '__darwin_uuid_t',
++           '_ossl_old_des_ks_struct', 'id_t', 'ASN1_BIT_STRING',
++           'va_list', '__darwin_wchar_t', 'pthread_key_t',
++           'pkcs7_signer_info_st', 'ASN1_METHOD', 'DSA_SIG', 'DSA',
++           'UIT_NONE', 'pthread_t', '__darwin_useconds_t',
++           'uint_fast8_t', 'UI_STRING', 'DES_cblock',
++           '__darwin_mcontext64_t', 'rlim_t', 'PEM_Encode_Seal_st',
++           'SHAstate_st', 'u_quad_t', 'openssl_fptr',
++           '_opaque_pthread_rwlockattr_t',
++           'N18x509_attributes_st4DOLLAR_13E',
++           '__darwin_pthread_rwlock_t', 'daddr_t', 'ui_string_st',
++           'x509_file_st', 'X509_req_info_st', 'int_least64_t',
++           'evp_Encode_Ctx_st', 'X509_OBJECTS', 'CRYPTO_EX_DATA',
++           '__int8_t', 'AUTHORITY_KEYID_st', '_opaque_pthread_attr_t',
++           'sigstack', 'EVP_CIPHER_CTX', 'X509_extension_st', 'pid_t',
++           'RSA_METHOD', 'PEM_USER', 'pem_recip_st', 'env_md_ctx_st',
++           'rc5_key_st', 'ui_st', 'X509_PUBKEY', 'u_int8_t',
++           'ASN1_ITEM_st', 'pkcs7_recip_info_st', 'ssl2_state_st',
++           'off_t', 'N10ssl_ctx_st4DOLLAR_18E', 'crypto_ex_data_st',
++           'ui_method_st', '__darwin_pthread_rwlockattr_t',
++           'CRYPTO_EX_dup', '__darwin_ino_t', '__sFILE',
++           'OSUnknownByteOrder', 'BN_MONT_CTX', 'ASN1_NULL', 'time_t',
++           'CRYPTO_EX_new', 'asn1_type_st', 'CRYPTO_EX_DATA_FUNCS',
++           'user_time_t', 'BIGNUM', 'pthread_rwlockattr_t',
++           'ASN1_VALUE_st', 'DH_METHOD', '__darwin_off_t',
++           '_opaque_pthread_t', 'bn_blinding_st', 'RSA', 'ssize_t',
++           'mcontext64_t', 'user_long_t', 'fsblkcnt_t', 'cert_st',
++           '__darwin_pthread_condattr_t', 'X509_PKEY',
++           '__darwin_id_t', '__darwin_nl_item', 'SSL2_STATE', 'FILE',
++           'pthread_mutexattr_t', 'size_t',
++           '_ossl_old_des_key_schedule', 'pkcs7_issuer_and_serial_st',
++           'sigval', 'CRYPTO_MEM_LEAK_CB', 'X509_NAME', 'blkcnt_t',
++           'uint_least16_t', '__darwin_dev_t', 'evp_cipher_info_st',
++           'BN_BLINDING', 'ssl3_state_st', 'uint_least64_t',
++           'user_addr_t', 'DES_key_schedule', 'RIPEMD160_CTX',
++           'u_char', 'X509_algor_st', 'uid_t', 'sess_cert_st',
++           'u_int64_t', 'u_int16_t', 'sigset_t', '__darwin_ptrdiff_t',
++           'ASN1_CTX', 'STACK', '__int32_t', 'UI_METHOD',
++           'NETSCAPE_SPKI', 'UIT_PROMPT', 'st_CRYPTO_EX_DATA_IMPL',
++           'cast_key_st', 'X509_HASH_DIR_CTX', 'sigevent',
++           'user_ssize_t', 'clock_t', 'aes_key_st',
++           '__darwin_socklen_t', '__darwin_intptr_t', 'int_fast64_t',
++           'asn1_string_table_st', 'uint_fast32_t',
++           'ASN1_VISIBLESTRING', 'DSA_SIG_st', 'obj_name_st',
++           'X509_LOOKUP_METHOD', 'u_int32_t', 'EVP_CIPHER_INFO',
++           '__gnuc_va_list', 'AES_KEY', 'PKCS7_ISSUER_AND_SERIAL',
++           'BN_CTX', '__darwin_blkcnt_t', 'key_t', 'SHA_CTX',
++           'pkcs7_signed_st', 'SSL', 'N10pem_ctx_st4DOLLAR_16E',
++           'pthread_attr_t', 'EVP_MD', 'uint', 'ASN1_BOOLEAN',
++           'ino_t', '__darwin_clock_t', 'ASN1_OCTET_STRING',
++           'asn1_ctx_st', 'BIO_F_BUFFER_CTX', 'bn_mont_ctx_st',
++           'X509_REQ_INFO', 'PEM_CTX', 'sigvec',
++           '__darwin_pthread_mutexattr_t', 'x509_attributes_st',
++           'stack_t', '__darwin_mode_t', '__mbstate_t',
++           'asn1_object_st', 'ASN1_ENCODING', '__uint8_t',
++           'LHASH_NODE', 'PKCS7_SIGNER_INFO', 'asn1_method_st',
++           'stack_st', 'bio_info_cb', 'div_t', 'UIT_VERIFY',
++           'PBEPARAM_st', 'N4wait3DOLLAR_3E', 'quad_t', '__siginfo',
++           '__darwin_mbstate_t', 'rsa_st', 'ASN1_UNIVERSALSTRING',
++           'uint64_t', 'ssl_comp_st', 'X509_OBJECT', 'pthread_cond_t',
++           'DH', '__darwin_wctype_t', 'PKCS7_ENVELOPE', 'ASN1_TLC_st',
++           'sig_atomic_t', 'BIO', 'nlink_t', 'BUF_MEM', 'SSL3_RECORD',
++           'bio_method_st', 'timeval', 'UI_string_types', 'BIO_dummy',
++           'ssl_ctx_st', 'NETSCAPE_CERT_SEQUENCE',
++           'BIT_STRING_BITNAME_st', '__darwin_pthread_attr_t',
++           'int8_t', '__darwin_wint_t', 'OBJ_NAME',
++           'PKCS8_PRIV_KEY_INFO', 'PBE2PARAM_st',
++           'LHASH_DOALL_FN_TYPE', 'x509_st', 'X509_VAL', 'dev_t',
++           'ASN1_TEMPLATE_st', 'MD5state_st', '__uint16_t',
++           'LHASH_DOALL_ARG_FN_TYPE', 'mdc2_ctx_st', 'SSL3_STATE',
++           'ssl3_buffer_st', 'ASN1_ITEM_EXP',
++           '_opaque_pthread_condattr_t', 'mode_t', 'ASN1_VALUE',
++           'qaddr_t', '__darwin_gid_t', 'EVP_PKEY', 'CRYPTO_EX_free',
++           '_ossl_old_des_cblock', 'X509_INFO', 'asn1_string_st',
++           'intptr_t', 'UIT_INFO', 'int_fast8_t', 'sigaltstack',
++           'env_md_st', 'LHASH', '__darwin_ucontext_t',
++           'PKCS7_SIGN_ENVELOPE', '__darwin_mcontext_t', 'ct_rune_t',
++           'MD2_CTX', 'pthread_once_t', 'SSL3_BUFFER', 'fd_mask',
++           'ASN1_TYPE', 'PKCS7_SIGNED', 'ssl3_record_st', 'BF_KEY',
++           'MD4state_st', 'MD4_CTX', 'int16_t', 'SSL_CIPHER',
++           'rune_t', 'X509_TRUST', 'siginfo_t', 'X509_STORE',
++           '__sbuf', 'X509_STORE_CTX', '__darwin_blksize_t', 'ldiv_t',
++           'ASN1_TIME', 'SSL_METHOD', 'X509_LOOKUP',
++           'Netscape_spki_st', 'P_PID', 'sigaction', 'sig_t',
++           'hostent', 'x509_cert_aux_st', '_opaque_pthread_cond_t',
++           'segsz_t', 'ushort', '__darwin_ct_rune_t', 'fd_set',
++           'BN_RECP_CTX', 'x509_lookup_st', 'uint16_t', 'pkcs7_st',
++           'asn1_header_st', '__darwin_pthread_key_t',
++           'x509_trust_st', '__darwin_pthread_handler_rec', 'int32_t',
++           'X509_CRL_INFO', 'N11evp_pkey_st4DOLLAR_12E', 'MDC2_CTX',
++           'N23_ossl_old_des_ks_struct4DOLLAR_10E', 'ASN1_HEADER',
++           'X509_crl_info_st', 'LHASH_HASH_FN_TYPE',
++           '_opaque_pthread_mutexattr_t', 'ssl_st',
++           'N8pkcs7_st4DOLLAR_15E', 'evp_pkey_st',
++           'pkcs7_signedandenveloped_st', '__darwin_mach_port_t',
++           'EVP_PBE_KEYGEN', '_opaque_pthread_mutex_t',
++           'ASN1_UTCTIME', 'mcontext', 'crypto_ex_data_func_st',
++           'u_long', 'PBKDF2PARAM_st', 'rc4_key_st', 'DSA_METHOD',
++           'EVP_CIPHER', 'BIT_STRING_BITNAME', 'PKCS7_RECIP_INFO',
++           'ssl3_enc_method', 'X509_CERT_AUX', 'uintmax_t',
++           'int_fast16_t', 'RC5_32_KEY', 'ucontext64', 'ASN1_INTEGER',
++           'u_short', 'N14x509_object_st4DOLLAR_14E', 'mcontext64',
++           'X509_sig_st', 'ASN1_GENERALSTRING', 'PKCS7', '__sFILEX',
++           'X509_name_entry_st', 'ssl_session_st', 'caddr_t',
++           'bignum_st', 'X509_CINF', '__darwin_pthread_cond_t',
++           'ASN1_TLC', 'PKCS7_ENCRYPT', 'NETSCAPE_SPKAC',
++           'Netscape_spkac_st', 'idtype_t', 'UIT_ERROR',
++           'uint_fast64_t', 'in_addr_t', 'pthread_mutex_t',
++           '__int64_t', 'ASN1_BMPSTRING', 'uint32_t',
++           'PEM_ENCODE_SEAL_CTX', 'suseconds_t', 'ASN1_OBJECT',
++           'X509_val_st', 'private_key_st', 'CRYPTO_dynlock',
++           'X509_objects_st', 'CRYPTO_EX_DATA_IMPL',
++           'pthread_condattr_t', 'PKCS7_DIGEST', 'uint_least32_t',
++           'ASN1_STRING', '__uint32_t', 'P_PGID', 'rsa_meth_st',
++           'X509_crl_st', 'RC2_KEY', '__darwin_fsfilcnt_t',
++           'X509_revoked_st', 'PBE2PARAM', 'blksize_t',
++           'Netscape_certificate_sequence', 'ssl_cipher_st',
++           'bignum_ctx', 'register_t', 'ASN1_UTF8STRING',
++           'pkcs7_encrypted_st', 'RC4_KEY', '__darwin_ucontext64_t',
++           'N13ssl2_state_st4DOLLAR_19E', 'bn_recp_ctx_st',
++           'CAST_KEY', 'X509_ATTRIBUTE', '__darwin_suseconds_t',
++           '__sigaction', 'user_ulong_t', 'syscall_arg_t',
++           'evp_cipher_ctx_st', 'X509_ALGOR', 'mcontext_t',
++           'const_DES_cblock', '__darwin_fsblkcnt_t', 'dsa_st',
++           'int_least8_t', 'MD2state_st', 'X509_EXTENSION',
++           'GEN_SESSION_CB', 'int_least16_t', '__darwin_wctrans_t',
++           'PBKDF2PARAM', 'x509_lookup_method_st', 'pem_password_cb',
++           'X509_info_st', 'x509_store_st', '__darwin_natural_t',
++           'X509_pubkey_st', 'pkcs7_digest_st', '__darwin_size_t',
++           'ASN1_STRING_TABLE', 'OSLittleEndian', 'RIPEMD160state_st',
++           'pkcs7_enveloped_st', 'UI', 'ptrdiff_t', 'X509_REQ',
++           'CRYPTO_dynlock_value', 'X509_req_st', 'x509_store_ctx_st',
++           'N13ssl3_state_st4DOLLAR_20E', 'lhash_node_st',
++           '__darwin_pthread_mutex_t', 'LHASH_COMP_FN_TYPE',
++           '__darwin_rune_t', 'rlimit', '__darwin_pthread_once_t',
++           'OSBigEndian', 'uintptr_t', '__darwin_uid_t', 'u_int',
++           'ASN1_T61STRING', 'gid_t', 'ssl_method_st', 'ASN1_ITEM',
++           'ASN1_ENUMERATED', '_opaque_pthread_rwlock_t',
++           'pkcs8_priv_key_info_st', 'intmax_t', 'sigcontext',
++           'X509_CRL', 'rc2_key_st', 'engine_st', 'x509_object_st',
++           '_opaque_pthread_once_t', 'DES_ks', 'SSL_COMP',
++           'dsa_method', 'int64_t', 'bio_st', 'bf_key_st',
++           'ASN1_GENERALIZEDTIME', 'PKCS7_ENC_CONTENT',
++           '__darwin_pid_t', 'lldiv_t', 'comp_method_st',
++           'EVP_MD_CTX', 'evp_cipher_st', 'X509_name_st',
++           'x509_hash_dir_st', '__darwin_mach_port_name_t',
++           'useconds_t', 'user_size_t', 'SSL_SESSION', 'rusage',
++           'ssl_crock_st', 'int_least32_t', '__sigaction_u', 'dh_st',
++           'P_ALL', '__darwin_stack_t', 'N6DES_ks3DOLLAR_9E',
++           'comp_ctx_st', 'X509_CERT_FILE_CTX']
+diff -r 531f2e948299 refactor/tests/data/py2_test_grammar.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/py2_test_grammar.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,956 @@
++# Python 2's Lib/test/test_grammar.py (r66189)
++
++# Python test set -- part 1, grammar.
++# This just tests whether the parser accepts them all.
++
++# NOTE: When you run this test as a script from the command line, you
++# get warnings about certain hex/oct constants.  Since those are
++# issued by the parser, you can't suppress them by adding a
++# filterwarnings() call to this module.  Therefore, to shut up the
++# regression test, the filterwarnings() call has been added to
++# regrtest.py.
++
++from test.test_support import run_unittest, check_syntax_error
++import unittest
++import sys
++# testing import *
++from sys import *
++
++class TokenTests(unittest.TestCase):
++
++    def testBackslash(self):
++        # Backslash means line continuation:
++        x = 1 \
++        + 1
++        self.assertEquals(x, 2, 'backslash for line continuation')
++
++        # Backslash does not mean continuation in comments :\
++        x = 0
++        self.assertEquals(x, 0, 'backslash ending comment')
++
++    def testPlainIntegers(self):
++        self.assertEquals(0xff, 255)
++        self.assertEquals(0377, 255)
++        self.assertEquals(2147483647, 017777777777)
++        # "0x" is not a valid literal
++        self.assertRaises(SyntaxError, eval, "0x")
++        from sys import maxint
++        if maxint == 2147483647:
++            self.assertEquals(-2147483647-1, -020000000000)
++            # XXX -2147483648
++            self.assert_(037777777777 > 0)
++            self.assert_(0xffffffff > 0)
++            for s in '2147483648', '040000000000', '0x100000000':
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        elif maxint == 9223372036854775807:
++            self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
++            self.assert_(01777777777777777777777 > 0)
++            self.assert_(0xffffffffffffffff > 0)
++            for s in '9223372036854775808', '02000000000000000000000', \
++                     '0x10000000000000000':
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        else:
++            self.fail('Weird maxint value %r' % maxint)
++
++    def testLongIntegers(self):
++        x = 0L
++        x = 0l
++        x = 0xffffffffffffffffL
++        x = 0xffffffffffffffffl
++        x = 077777777777777777L
++        x = 077777777777777777l
++        x = 123456789012345678901234567890L
++        x = 123456789012345678901234567890l
++
++    def testFloats(self):
++        x = 3.14
++        x = 314.
++        x = 0.314
++        # XXX x = 000.314
++        x = .314
++        x = 3e14
++        x = 3E14
++        x = 3e-14
++        x = 3e+14
++        x = 3.e14
++        x = .3e14
++        x = 3.1e4
++
++    def testStringLiterals(self):
++        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
++        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
++        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
++        x = "doesn't \"shrink\" does it"
++        y = 'doesn\'t "shrink" does it'
++        self.assert_(len(x) == 24 and x == y)
++        x = "does \"shrink\" doesn't it"
++        y = 'does "shrink" doesn\'t it'
++        self.assert_(len(x) == 24 and x == y)
++        x = """
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++"""
++        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
++        self.assertEquals(x, y)
++        y = '''
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++'''
++        self.assertEquals(x, y)
++        y = "\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the 'lazy' dog.\n\
++"
++        self.assertEquals(x, y)
++        y = '\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the \'lazy\' dog.\n\
++'
++        self.assertEquals(x, y)
++
++
++class GrammarTests(unittest.TestCase):
++
++    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
++    # XXX can't test in a script -- this rule is only used when interactive
++
++    # file_input: (NEWLINE | stmt)* ENDMARKER
++    # Being tested at this very moment by this very module
++
++    # expr_input: testlist NEWLINE
++    # XXX Hard to test -- used only in calls to input()
++
++    def testEvalInput(self):
++        # testlist ENDMARKER
++        x = eval('1, 0 or 1')
++
++    def testFuncdef(self):
++        ### 'def' NAME parameters ':' suite
++        ### parameters: '(' [varargslist] ')'
++        ### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
++        ###            | ('**'|'*' '*') NAME)
++        ###            | fpdef ['=' test] (',' fpdef ['=' test])* [',']
++        ### fpdef: NAME | '(' fplist ')'
++        ### fplist: fpdef (',' fpdef)* [',']
++        ### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
++        ### argument: [test '='] test   # Really [keyword '='] test
++        def f1(): pass
++        f1()
++        f1(*())
++        f1(*(), **{})
++        def f2(one_argument): pass
++        def f3(two, arguments): pass
++        def f4(two, (compound, (argument, list))): pass
++        def f5((compound, first), two): pass
++        self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
++        self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
++        if sys.platform.startswith('java'):
++            self.assertEquals(f4.func_code.co_varnames,
++                   ('two', '(compound, (argument, list))', 'compound', 'argument',
++                                'list',))
++            self.assertEquals(f5.func_code.co_varnames,
++                   ('(compound, first)', 'two', 'compound', 'first'))
++        else:
++            self.assertEquals(f4.func_code.co_varnames,
++                  ('two', '.1', 'compound', 'argument',  'list'))
++            self.assertEquals(f5.func_code.co_varnames,
++                  ('.0', 'two', 'compound', 'first'))
++        def a1(one_arg,): pass
++        def a2(two, args,): pass
++        def v0(*rest): pass
++        def v1(a, *rest): pass
++        def v2(a, b, *rest): pass
++        def v3(a, (b, c), *rest): return a, b, c, rest
++
++        f1()
++        f2(1)
++        f2(1,)
++        f3(1, 2)
++        f3(1, 2,)
++        f4(1, (2, (3, 4)))
++        v0()
++        v0(1)
++        v0(1,)
++        v0(1,2)
++        v0(1,2,3,4,5,6,7,8,9,0)
++        v1(1)
++        v1(1,)
++        v1(1,2)
++        v1(1,2,3)
++        v1(1,2,3,4,5,6,7,8,9,0)
++        v2(1,2)
++        v2(1,2,3)
++        v2(1,2,3,4)
++        v2(1,2,3,4,5,6,7,8,9,0)
++        v3(1,(2,3))
++        v3(1,(2,3),4)
++        v3(1,(2,3),4,5,6,7,8,9,0)
++
++        # ceval unpacks the formal arguments into the first argcount names;
++        # thus, the names nested inside tuples must appear after these names.
++        if sys.platform.startswith('java'):
++            self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
++        else:
++            self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
++        self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
++        def d01(a=1): pass
++        d01()
++        d01(1)
++        d01(*(1,))
++        d01(**{'a':2})
++        def d11(a, b=1): pass
++        d11(1)
++        d11(1, 2)
++        d11(1, **{'b':2})
++        def d21(a, b, c=1): pass
++        d21(1, 2)
++        d21(1, 2, 3)
++        d21(*(1, 2, 3))
++        d21(1, *(2, 3))
++        d21(1, 2, *(3,))
++        d21(1, 2, **{'c':3})
++        def d02(a=1, b=2): pass
++        d02()
++        d02(1)
++        d02(1, 2)
++        d02(*(1, 2))
++        d02(1, *(2,))
++        d02(1, **{'b':2})
++        d02(**{'a': 1, 'b': 2})
++        def d12(a, b=1, c=2): pass
++        d12(1)
++        d12(1, 2)
++        d12(1, 2, 3)
++        def d22(a, b, c=1, d=2): pass
++        d22(1, 2)
++        d22(1, 2, 3)
++        d22(1, 2, 3, 4)
++        def d01v(a=1, *rest): pass
++        d01v()
++        d01v(1)
++        d01v(1, 2)
++        d01v(*(1, 2, 3, 4))
++        d01v(*(1,))
++        d01v(**{'a':2})
++        def d11v(a, b=1, *rest): pass
++        d11v(1)
++        d11v(1, 2)
++        d11v(1, 2, 3)
++        def d21v(a, b, c=1, *rest): pass
++        d21v(1, 2)
++        d21v(1, 2, 3)
++        d21v(1, 2, 3, 4)
++        d21v(*(1, 2, 3, 4))
++        d21v(1, 2, **{'c': 3})
++        def d02v(a=1, b=2, *rest): pass
++        d02v()
++        d02v(1)
++        d02v(1, 2)
++        d02v(1, 2, 3)
++        d02v(1, *(2, 3, 4))
++        d02v(**{'a': 1, 'b': 2})
++        def d12v(a, b=1, c=2, *rest): pass
++        d12v(1)
++        d12v(1, 2)
++        d12v(1, 2, 3)
++        d12v(1, 2, 3, 4)
++        d12v(*(1, 2, 3, 4))
++        d12v(1, 2, *(3, 4, 5))
++        d12v(1, *(2,), **{'c': 3})
++        def d22v(a, b, c=1, d=2, *rest): pass
++        d22v(1, 2)
++        d22v(1, 2, 3)
++        d22v(1, 2, 3, 4)
++        d22v(1, 2, 3, 4, 5)
++        d22v(*(1, 2, 3, 4))
++        d22v(1, 2, *(3, 4, 5))
++        d22v(1, *(2, 3), **{'d': 4})
++        def d31v((x)): pass
++        d31v(1)
++        def d32v((x,)): pass
++        d32v((1,))
++
++        # keyword arguments after *arglist
++        def f(*args, **kwargs):
++            return args, kwargs
++        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
++                                                    {'x':2, 'y':5}))
++        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
++        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
++
++        # Check ast errors in *args and *kwargs
++        check_syntax_error(self, "f(*g(1=2))")
++        check_syntax_error(self, "f(**g(1=2))")
++
++    def testLambdef(self):
++        ### lambdef: 'lambda' [varargslist] ':' test
++        l1 = lambda : 0
++        self.assertEquals(l1(), 0)
++        l2 = lambda : a[d] # XXX just testing the expression
++        l3 = lambda : [2 < x for x in [-1, 3, 0L]]
++        self.assertEquals(l3(), [0, 1, 0])
++        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
++        self.assertEquals(l4(), 1)
++        l5 = lambda x, y, z=2: x + y + z
++        self.assertEquals(l5(1, 2), 5)
++        self.assertEquals(l5(1, 2, 3), 6)
++        check_syntax_error(self, "lambda x: x = 2")
++        check_syntax_error(self, "lambda (None,): None")
++
++    ### stmt: simple_stmt | compound_stmt
++    # Tested below
++
++    def testSimpleStmt(self):
++        ### simple_stmt: small_stmt (';' small_stmt)* [';']
++        x = 1; pass; del x
++        def foo():
++            # verify statements that end with semicolons
++            x = 1; pass; del x;
++        foo()
++
++    ### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
++    # Tested below
++
++    def testExprStmt(self):
++        # (exprlist '=')* exprlist
++        1
++        1, 2, 3
++        x = 1
++        x = 1, 2, 3
++        x = y = z = 1, 2, 3
++        x, y, z = 1, 2, 3
++        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
++
++        check_syntax_error(self, "x + 1 = 1")
++        check_syntax_error(self, "a + 1 = b + 2")
++
++    def testPrintStmt(self):
++        # 'print' (test ',')* [test]
++        import StringIO
++
++        # Can't test printing to real stdout without comparing the output,
++        # which unittest doesn't support.
++        save_stdout = sys.stdout
++        sys.stdout = StringIO.StringIO()
++
++        print 1, 2, 3
++        print 1, 2, 3,
++        print
++        print 0 or 1, 0 or 1,
++        print 0 or 1
++
++        # 'print' '>>' test ','
++        print >> sys.stdout, 1, 2, 3
++        print >> sys.stdout, 1, 2, 3,
++        print >> sys.stdout
++        print >> sys.stdout, 0 or 1, 0 or 1,
++        print >> sys.stdout, 0 or 1
++
++        # test printing to an instance
++        class Gulp:
++            def write(self, msg): pass
++
++        gulp = Gulp()
++        print >> gulp, 1, 2, 3
++        print >> gulp, 1, 2, 3,
++        print >> gulp
++        print >> gulp, 0 or 1, 0 or 1,
++        print >> gulp, 0 or 1
++
++        # test print >> None
++        def driver():
++            oldstdout = sys.stdout
++            sys.stdout = Gulp()
++            try:
++                tellme(Gulp())
++                tellme()
++            finally:
++                sys.stdout = oldstdout
++
++        # we should see this once
++        def tellme(file=sys.stdout):
++            print >> file, 'hello world'
++
++        driver()
++
++        # we should not see this at all
++        def tellme(file=None):
++            print >> file, 'goodbye universe'
++
++        driver()
++
++        self.assertEqual(sys.stdout.getvalue(), '''\
++1 2 3
++1 2 3
++1 1 1
++1 2 3
++1 2 3
++1 1 1
++hello world
++''')
++        sys.stdout = save_stdout
++
++        # syntax errors
++        check_syntax_error(self, 'print ,')
++        check_syntax_error(self, 'print >> x,')
++
++    def testDelStmt(self):
++        # 'del' exprlist
++        abc = [1,2,3]
++        x, y, z = abc
++        xyz = x, y, z
++
++        del abc
++        del x, y, (z, xyz)
++
++    def testPassStmt(self):
++        # 'pass'
++        pass
++
++    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
++    # Tested below
++
++    def testBreakStmt(self):
++        # 'break'
++        while 1: break
++
++    def testContinueStmt(self):
++        # 'continue'
++        i = 1
++        while i: i = 0; continue
++
++        msg = ""
++        while not msg:
++            msg = "ok"
++            try:
++                continue
++                msg = "continue failed to continue inside try"
++            except:
++                msg = "continue inside try called except block"
++        if msg != "ok":
++            self.fail(msg)
++
++        msg = ""
++        while not msg:
++            msg = "finally block not called"
++            try:
++                continue
++            finally:
++                msg = "ok"
++        if msg != "ok":
++            self.fail(msg)
++
++    def test_break_continue_loop(self):
++        # This test warrants an explanation. It is a test specifically for SF bugs
++        # #463359 and #462937. The bug is that a 'break' statement executed or
++        # exception raised inside a try/except inside a loop, *after* a continue
++        # statement has been executed in that loop, will cause the wrong number of
++        # arguments to be popped off the stack and the instruction pointer reset to
++        # a very small number (usually 0). Because of this, the following test
++        # *must* be written as a function, and the tracking vars *must* be function
++        # arguments with default values. Otherwise, the test will loop and loop.
++
++        def test_inner(extra_burning_oil = 1, count=0):
++            big_hippo = 2
++            while big_hippo:
++                count += 1
++                try:
++                    if extra_burning_oil and big_hippo == 1:
++                        extra_burning_oil -= 1
++                        break
++                    big_hippo -= 1
++                    continue
++                except:
++                    raise
++            if count > 2 or big_hippo <> 1:
++                self.fail("continue then break in try/except in loop broken!")
++        test_inner()
++
++    def testReturn(self):
++        # 'return' [testlist]
++        def g1(): return
++        def g2(): return 1
++        g1()
++        x = g2()
++        check_syntax_error(self, "class foo:return 1")
++
++    def testYield(self):
++        check_syntax_error(self, "class foo:yield 1")
++
++    def testRaise(self):
++        # 'raise' test [',' test]
++        try: raise RuntimeError, 'just testing'
++        except RuntimeError: pass
++        try: raise KeyboardInterrupt
++        except KeyboardInterrupt: pass
++
++    def testImport(self):
++        # 'import' dotted_as_names
++        import sys
++        import time, sys
++        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
++        from time import time
++        from time import (time)
++        # not testable inside a function, but already done at top of the module
++        # from sys import *
++        from sys import path, argv
++        from sys import (path, argv)
++        from sys import (path, argv,)
++
++    def testGlobal(self):
++        # 'global' NAME (',' NAME)*
++        global a
++        global a, b
++        global one, two, three, four, five, six, seven, eight, nine, ten
++
++    def testExec(self):
++        # 'exec' expr ['in' expr [',' expr]]
++        z = None
++        del z
++        exec 'z=1+1\n'
++        if z != 2: self.fail('exec \'z=1+1\'\\n')
++        del z
++        exec 'z=1+1'
++        if z != 2: self.fail('exec \'z=1+1\'')
++        z = None
++        del z
++        import types
++        if hasattr(types, "UnicodeType"):
++            exec r"""if 1:
++            exec u'z=1+1\n'
++            if z != 2: self.fail('exec u\'z=1+1\'\\n')
++            del z
++            exec u'z=1+1'
++            if z != 2: self.fail('exec u\'z=1+1\'')"""
++        g = {}
++        exec 'z = 1' in g
++        if g.has_key('__builtins__'): del g['__builtins__']
++        if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
++        g = {}
++        l = {}
++
++        import warnings
++        warnings.filterwarnings("ignore", "global statement", module="<string>")
++        exec 'global a; a = 1; b = 2' in g, l
++        if g.has_key('__builtins__'): del g['__builtins__']
++        if l.has_key('__builtins__'): del l['__builtins__']
++        if (g, l) != ({'a':1}, {'b':2}):
++            self.fail('exec ... in g (%s), l (%s)' %(g,l))
++
++    def testAssert(self):
++        # assert_stmt: 'assert' test [',' test]
++        assert 1
++        assert 1, 1
++        assert lambda x:x
++        assert 1, lambda x:x+1
++        try:
++            assert 0, "msg"
++        except AssertionError, e:
++            self.assertEquals(e.args[0], "msg")
++        else:
++            if __debug__:
++                self.fail("AssertionError not raised by assert 0")
++
++    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
++    # Tested below
++
++    def testIf(self):
++        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
++        if 1: pass
++        if 1: pass
++        else: pass
++        if 0: pass
++        elif 0: pass
++        if 0: pass
++        elif 0: pass
++        elif 0: pass
++        elif 0: pass
++        else: pass
++
++    def testWhile(self):
++        # 'while' test ':' suite ['else' ':' suite]
++        while 0: pass
++        while 0: pass
++        else: pass
++
++        # Issue1920: "while 0" is optimized away,
++        # ensure that the "else" clause is still present.
++        x = 0
++        while 0:
++            x = 1
++        else:
++            x = 2
++        self.assertEquals(x, 2)
++
++    def testFor(self):
++        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
++        for i in 1, 2, 3: pass
++        for i, j, k in (): pass
++        else: pass
++        class Squares:
++            def __init__(self, max):
++                self.max = max
++                self.sofar = []
++            def __len__(self): return len(self.sofar)
++            def __getitem__(self, i):
++                if not 0 <= i < self.max: raise IndexError
++                n = len(self.sofar)
++                while n <= i:
++                    self.sofar.append(n*n)
++                    n = n+1
++                return self.sofar[i]
++        n = 0
++        for x in Squares(10): n = n+x
++        if n != 285:
++            self.fail('for over growing sequence')
++
++        result = []
++        for x, in [(1,), (2,), (3,)]:
++            result.append(x)
++        self.assertEqual(result, [1, 2, 3])
++
++    def testTry(self):
++        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
++        ###         | 'try' ':' suite 'finally' ':' suite
++        ### except_clause: 'except' [expr [('as' | ',') expr]]
++        try:
++            1/0
++        except ZeroDivisionError:
++            pass
++        else:
++            pass
++        try: 1/0
++        except EOFError: pass
++        except TypeError as msg: pass
++        except RuntimeError, msg: pass
++        except: pass
++        else: pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError): pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError), msg: pass
++        try: pass
++        finally: pass
++
++    def testSuite(self):
++        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
++        if 1: pass
++        if 1:
++            pass
++        if 1:
++            #
++            #
++            #
++            pass
++            pass
++            #
++            pass
++            #
++
++    def testTest(self):
++        ### and_test ('or' and_test)*
++        ### and_test: not_test ('and' not_test)*
++        ### not_test: 'not' not_test | comparison
++        if not 1: pass
++        if 1 and 1: pass
++        if 1 or 1: pass
++        if not not not 1: pass
++        if not 1 and 1 and 1: pass
++        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
++
++    def testComparison(self):
++        ### comparison: expr (comp_op expr)*
++        ### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
++        if 1: pass
++        x = (1 == 1)
++        if 1 == 1: pass
++        if 1 != 1: pass
++        if 1 <> 1: pass
++        if 1 < 1: pass
++        if 1 > 1: pass
++        if 1 <= 1: pass
++        if 1 >= 1: pass
++        if 1 is 1: pass
++        if 1 is not 1: pass
++        if 1 in (): pass
++        if 1 not in (): pass
++        if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
++
++    def testBinaryMaskOps(self):
++        x = 1 & 1
++        x = 1 ^ 1
++        x = 1 | 1
++
++    def testShiftOps(self):
++        x = 1 << 1
++        x = 1 >> 1
++        x = 1 << 1 >> 1
++
++    def testAdditiveOps(self):
++        x = 1
++        x = 1 + 1
++        x = 1 - 1 - 1
++        x = 1 - 1 + 1 - 1 + 1
++
++    def testMultiplicativeOps(self):
++        x = 1 * 1
++        x = 1 / 1
++        x = 1 % 1
++        x = 1 / 1 * 1 % 1
++
++    def testUnaryOps(self):
++        x = +1
++        x = -1
++        x = ~1
++        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
++        x = -1*1/1 + 1*1 - ---1*1
++
++    def testSelectors(self):
++        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
++        ### subscript: expr | [expr] ':' [expr]
++
++        import sys, time
++        c = sys.path[0]
++        x = time.time()
++        x = sys.modules['time'].time()
++        a = '01234'
++        c = a[0]
++        c = a[-1]
++        s = a[0:5]
++        s = a[:5]
++        s = a[0:]
++        s = a[:]
++        s = a[-5:]
++        s = a[:-1]
++        s = a[-4:-3]
++        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
++        # The testing here is fairly incomplete.
++        # Test cases should include: commas with 1 and 2 colons
++        d = {}
++        d[1] = 1
++        d[1,] = 2
++        d[1,2] = 3
++        d[1,2,3] = 4
++        L = list(d)
++        L.sort()
++        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
++
++    def testAtoms(self):
++        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
++        ### dictmaker: test ':' test (',' test ':' test)* [',']
++
++        x = (1)
++        x = (1 or 2 or 3)
++        x = (1 or 2 or 3, 2, 3)
++
++        x = []
++        x = [1]
++        x = [1 or 2 or 3]
++        x = [1 or 2 or 3, 2, 3]
++        x = []
++
++        x = {}
++        x = {'one': 1}
++        x = {'one': 1,}
++        x = {'one' or 'two': 1 or 2}
++        x = {'one': 1, 'two': 2}
++        x = {'one': 1, 'two': 2,}
++        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
++
++        x = `x`
++        x = `1 or 2 or 3`
++        self.assertEqual(`1,2`, '(1, 2)')
++
++        x = x
++        x = 'x'
++        x = 123
++
++    ### exprlist: expr (',' expr)* [',']
++    ### testlist: test (',' test)* [',']
++    # These have been exercised enough above
++
++    def testClassdef(self):
++        # 'class' NAME ['(' [testlist] ')'] ':' suite
++        class B: pass
++        class B2(): pass
++        class C1(B): pass
++        class C2(B): pass
++        class D(C1, C2, B): pass
++        class C:
++            def meth1(self): pass
++            def meth2(self, arg): pass
++            def meth3(self, a1, a2): pass
++        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++        # decorators: decorator+
++        # decorated: decorators (classdef | funcdef)
++        def class_decorator(x):
++            x.decorated = True
++            return x
++        @class_decorator
++        class G:
++            pass
++        self.assertEqual(G.decorated, True)
++
++    def testListcomps(self):
++        # list comprehension tests
++        nums = [1, 2, 3, 4, 5]
++        strs = ["Apple", "Banana", "Coconut"]
++        spcs = ["  Apple", " Banana ", "Coco  nut  "]
++
++        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
++        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
++        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
++        self.assertEqual([(i, s) for i in nums for s in strs],
++                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
++                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
++                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
++                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
++                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
++
++        def test_in_func(l):
++            return [None < x < 3 for x in l if x > 2]
++
++        self.assertEqual(test_in_func(nums), [False, False, False])
++
++        def test_nested_front():
++            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
++                             [[1, 2], [3, 4], [5, 6]])
++
++        test_nested_front()
++
++        check_syntax_error(self, "[i, s for i in nums for s in strs]")
++        check_syntax_error(self, "[x if y]")
++
++        suppliers = [
++          (1, "Boeing"),
++          (2, "Ford"),
++          (3, "Macdonalds")
++        ]
++
++        parts = [
++          (10, "Airliner"),
++          (20, "Engine"),
++          (30, "Cheeseburger")
++        ]
++
++        suppart = [
++          (1, 10), (1, 20), (2, 20), (3, 30)
++        ]
++
++        x = [
++          (sname, pname)
++            for (sno, sname) in suppliers
++              for (pno, pname) in parts
++                for (sp_sno, sp_pno) in suppart
++                  if sno == sp_sno and pno == sp_pno
++        ]
++
++        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
++                             ('Macdonalds', 'Cheeseburger')])
++
++    def testGenexps(self):
++        # generator expression tests
++        g = ([x for x in range(10)] for x in range(1))
++        self.assertEqual(g.next(), [x for x in range(10)])
++        try:
++            g.next()
++            self.fail('should produce StopIteration exception')
++        except StopIteration:
++            pass
++
++        a = 1
++        try:
++            g = (a for d in a)
++            g.next()
++            self.fail('should produce TypeError')
++        except TypeError:
++            pass
++
++        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
++        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
++
++        a = [x for x in range(10)]
++        b = (x for x in (y for y in a))
++        self.assertEqual(sum(b), sum([x for x in range(10)]))
++
++        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
++        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
++        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
++        check_syntax_error(self, "foo(x for x in range(10), 100)")
++        check_syntax_error(self, "foo(100, x for x in range(10))")
++
++    def testComprehensionSpecials(self):
++        # test for outermost iterable precomputation
++        x = 10; g = (i for i in range(x)); x = 5
++        self.assertEqual(len(list(g)), 10)
++
++        # This should hold, since we're only precomputing the outermost iterable.
++        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
++        x = 5; t = True;
++        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
++
++        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
++        # even though it's silly. Make sure it works (ifelse broke this.)
++        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
++        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
++
++        # verify unpacking single element tuples in listcomp/genexp.
++        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
++        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
++
++    def testIfElseExpr(self):
++        # Test ifelse expressions in various cases
++        def _checkeval(msg, ret):
++            "helper to check that evaluation of expressions is done correctly"
++            print x
++            return ret
++
++        self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
++        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
++        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
++        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
++        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
++        self.assertEqual((5 and 6 if 0 else 1), 1)
++        self.assertEqual(((5 and 6) if 0 else 1), 1)
++        self.assertEqual((5 and (6 if 1 else 1)), 6)
++        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
++        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
++        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
++        self.assertEqual((not 5 if 1 else 1), False)
++        self.assertEqual((not 5 if 0 else 1), 1)
++        self.assertEqual((6 + 1 if 1 else 2), 7)
++        self.assertEqual((6 - 1 if 1 else 2), 5)
++        self.assertEqual((6 * 2 if 1 else 4), 12)
++        self.assertEqual((6 / 2 if 1 else 3), 3)
++        self.assertEqual((6 < 4 if 0 else 2), 2)
++
++
++def test_main():
++    run_unittest(TokenTests, GrammarTests)
++
++if __name__ == '__main__':
++    test_main()
+diff -r 531f2e948299 refactor/tests/data/py3_test_grammar.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/data/py3_test_grammar.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,903 @@
++# Python test set -- part 1, grammar.
++# This just tests whether the parser accepts them all.
++
++# NOTE: When you run this test as a script from the command line, you
++# get warnings about certain hex/oct constants.  Since those are
++# issued by the parser, you can't suppress them by adding a
++# filterwarnings() call to this module.  Therefore, to shut up the
++# regression test, the filterwarnings() call has been added to
++# regrtest.py.
++
++from test.support import run_unittest, check_syntax_error
++import unittest
++import sys
++# testing import *
++from sys import *
++
++class TokenTests(unittest.TestCase):
++
++    def testBackslash(self):
++        # Backslash means line continuation:
++        x = 1 \
++        + 1
++        self.assertEquals(x, 2, 'backslash for line continuation')
++
++        # Backslash does not mean continuation in comments :\
++        x = 0
++        self.assertEquals(x, 0, 'backslash ending comment')
++
++    def testPlainIntegers(self):
++        self.assertEquals(type(000), type(0))
++        self.assertEquals(0xff, 255)
++        self.assertEquals(0o377, 255)
++        self.assertEquals(2147483647, 0o17777777777)
++        self.assertEquals(0b1001, 9)
++        # "0x" is not a valid literal
++        self.assertRaises(SyntaxError, eval, "0x")
++        from sys import maxsize
++        if maxsize == 2147483647:
++            self.assertEquals(-2147483647-1, -0o20000000000)
++            # XXX -2147483648
++            self.assert_(0o37777777777 > 0)
++            self.assert_(0xffffffff > 0)
++            self.assert_(0b1111111111111111111111111111111 > 0)
++            for s in ('2147483648', '0o40000000000', '0x100000000',
++                      '0b10000000000000000000000000000000'):
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        elif maxsize == 9223372036854775807:
++            self.assertEquals(-9223372036854775807-1, -0o1000000000000000000000)
++            self.assert_(0o1777777777777777777777 > 0)
++            self.assert_(0xffffffffffffffff > 0)
++            self.assert_(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
++            for s in '9223372036854775808', '0o2000000000000000000000', \
++                     '0x10000000000000000', \
++                     '0b100000000000000000000000000000000000000000000000000000000000000':
++                try:
++                    x = eval(s)
++                except OverflowError:
++                    self.fail("OverflowError on huge integer literal %r" % s)
++        else:
++            self.fail('Weird maxsize value %r' % maxsize)
++
++    def testLongIntegers(self):
++        x = 0
++        x = 0xffffffffffffffff
++        x = 0Xffffffffffffffff
++        x = 0o77777777777777777
++        x = 0O77777777777777777
++        x = 123456789012345678901234567890
++        x = 0b100000000000000000000000000000000000000000000000000000000000000000000
++        x = 0B111111111111111111111111111111111111111111111111111111111111111111111
++
++    def testFloats(self):
++        x = 3.14
++        x = 314.
++        x = 0.314
++        # XXX x = 000.314
++        x = .314
++        x = 3e14
++        x = 3E14
++        x = 3e-14
++        x = 3e+14
++        x = 3.e14
++        x = .3e14
++        x = 3.1e4
++
++    def testStringLiterals(self):
++        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
++        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
++        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
++        x = "doesn't \"shrink\" does it"
++        y = 'doesn\'t "shrink" does it'
++        self.assert_(len(x) == 24 and x == y)
++        x = "does \"shrink\" doesn't it"
++        y = 'does "shrink" doesn\'t it'
++        self.assert_(len(x) == 24 and x == y)
++        x = """
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++"""
++        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
++        self.assertEquals(x, y)
++        y = '''
++The "quick"
++brown fox
++jumps over
++the 'lazy' dog.
++'''
++        self.assertEquals(x, y)
++        y = "\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the 'lazy' dog.\n\
++"
++        self.assertEquals(x, y)
++        y = '\n\
++The \"quick\"\n\
++brown fox\n\
++jumps over\n\
++the \'lazy\' dog.\n\
++'
++        self.assertEquals(x, y)
++
++    def testEllipsis(self):
++        x = ...
++        self.assert_(x is Ellipsis)
++        self.assertRaises(SyntaxError, eval, ".. .")
++
++class GrammarTests(unittest.TestCase):
++
++    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
++    # XXX can't test in a script -- this rule is only used when interactive
++
++    # file_input: (NEWLINE | stmt)* ENDMARKER
++    # Being tested at this very moment by this very module
++
++    # expr_input: testlist NEWLINE
++    # XXX Hard to test -- used only in calls to input()
++
++    def testEvalInput(self):
++        # testlist ENDMARKER
++        x = eval('1, 0 or 1')
++
++    def testFuncdef(self):
++        ### [decorators] 'def' NAME parameters ['->' test] ':' suite
++        ### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++        ### decorators: decorator+
++        ### parameters: '(' [typedargslist] ')'
++        ### typedargslist: ((tfpdef ['=' test] ',')*
++        ###                ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
++        ###                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
++        ### tfpdef: NAME [':' test]
++        ### varargslist: ((vfpdef ['=' test] ',')*
++        ###              ('*' [vfpdef] (',' vfpdef ['=' test])*  [',' '**' vfpdef] | '**' vfpdef)
++        ###              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
++        ### vfpdef: NAME
++        def f1(): pass
++        f1()
++        f1(*())
++        f1(*(), **{})
++        def f2(one_argument): pass
++        def f3(two, arguments): pass
++        self.assertEquals(f2.__code__.co_varnames, ('one_argument',))
++        self.assertEquals(f3.__code__.co_varnames, ('two', 'arguments'))
++        def a1(one_arg,): pass
++        def a2(two, args,): pass
++        def v0(*rest): pass
++        def v1(a, *rest): pass
++        def v2(a, b, *rest): pass
++
++        f1()
++        f2(1)
++        f2(1,)
++        f3(1, 2)
++        f3(1, 2,)
++        v0()
++        v0(1)
++        v0(1,)
++        v0(1,2)
++        v0(1,2,3,4,5,6,7,8,9,0)
++        v1(1)
++        v1(1,)
++        v1(1,2)
++        v1(1,2,3)
++        v1(1,2,3,4,5,6,7,8,9,0)
++        v2(1,2)
++        v2(1,2,3)
++        v2(1,2,3,4)
++        v2(1,2,3,4,5,6,7,8,9,0)
++
++        def d01(a=1): pass
++        d01()
++        d01(1)
++        d01(*(1,))
++        d01(**{'a':2})
++        def d11(a, b=1): pass
++        d11(1)
++        d11(1, 2)
++        d11(1, **{'b':2})
++        def d21(a, b, c=1): pass
++        d21(1, 2)
++        d21(1, 2, 3)
++        d21(*(1, 2, 3))
++        d21(1, *(2, 3))
++        d21(1, 2, *(3,))
++        d21(1, 2, **{'c':3})
++        def d02(a=1, b=2): pass
++        d02()
++        d02(1)
++        d02(1, 2)
++        d02(*(1, 2))
++        d02(1, *(2,))
++        d02(1, **{'b':2})
++        d02(**{'a': 1, 'b': 2})
++        def d12(a, b=1, c=2): pass
++        d12(1)
++        d12(1, 2)
++        d12(1, 2, 3)
++        def d22(a, b, c=1, d=2): pass
++        d22(1, 2)
++        d22(1, 2, 3)
++        d22(1, 2, 3, 4)
++        def d01v(a=1, *rest): pass
++        d01v()
++        d01v(1)
++        d01v(1, 2)
++        d01v(*(1, 2, 3, 4))
++        d01v(*(1,))
++        d01v(**{'a':2})
++        def d11v(a, b=1, *rest): pass
++        d11v(1)
++        d11v(1, 2)
++        d11v(1, 2, 3)
++        def d21v(a, b, c=1, *rest): pass
++        d21v(1, 2)
++        d21v(1, 2, 3)
++        d21v(1, 2, 3, 4)
++        d21v(*(1, 2, 3, 4))
++        d21v(1, 2, **{'c': 3})
++        def d02v(a=1, b=2, *rest): pass
++        d02v()
++        d02v(1)
++        d02v(1, 2)
++        d02v(1, 2, 3)
++        d02v(1, *(2, 3, 4))
++        d02v(**{'a': 1, 'b': 2})
++        def d12v(a, b=1, c=2, *rest): pass
++        d12v(1)
++        d12v(1, 2)
++        d12v(1, 2, 3)
++        d12v(1, 2, 3, 4)
++        d12v(*(1, 2, 3, 4))
++        d12v(1, 2, *(3, 4, 5))
++        d12v(1, *(2,), **{'c': 3})
++        def d22v(a, b, c=1, d=2, *rest): pass
++        d22v(1, 2)
++        d22v(1, 2, 3)
++        d22v(1, 2, 3, 4)
++        d22v(1, 2, 3, 4, 5)
++        d22v(*(1, 2, 3, 4))
++        d22v(1, 2, *(3, 4, 5))
++        d22v(1, *(2, 3), **{'d': 4})
++
++        # keyword argument type tests
++        try:
++            str('x', **{b'foo':1 })
++        except TypeError:
++            pass
++        else:
++            self.fail('Bytes should not work as keyword argument names')
++        # keyword only argument tests
++        def pos0key1(*, key): return key
++        pos0key1(key=100)
++        def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
++        pos2key2(1, 2, k1=100)
++        pos2key2(1, 2, k1=100, k2=200)
++        pos2key2(1, 2, k2=100, k1=200)
++        def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
++        pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
++        pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
++
++        # keyword arguments after *arglist
++        def f(*args, **kwargs):
++            return args, kwargs
++        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
++                                                    {'x':2, 'y':5}))
++        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
++        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
++
++        # argument annotation tests
++        def f(x) -> list: pass
++        self.assertEquals(f.__annotations__, {'return': list})
++        def f(x:int): pass
++        self.assertEquals(f.__annotations__, {'x': int})
++        def f(*x:str): pass
++        self.assertEquals(f.__annotations__, {'x': str})
++        def f(**x:float): pass
++        self.assertEquals(f.__annotations__, {'x': float})
++        def f(x, y:1+2): pass
++        self.assertEquals(f.__annotations__, {'y': 3})
++        def f(a, b:1, c:2, d): pass
++        self.assertEquals(f.__annotations__, {'b': 1, 'c': 2})
++        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
++        self.assertEquals(f.__annotations__,
++                          {'b': 1, 'c': 2, 'e': 3, 'g': 6})
++        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
++              **k:11) -> 12: pass
++        self.assertEquals(f.__annotations__,
++                          {'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
++                           'k': 11, 'return': 12})
++        # Check for SF Bug #1697248 - mixing decorators and a return annotation
++        def null(x): return x
++        @null
++        def f(x) -> list: pass
++        self.assertEquals(f.__annotations__, {'return': list})
++
++        # test MAKE_CLOSURE with a variety of oparg's
++        closure = 1
++        def f(): return closure
++        def f(x=1): return closure
++        def f(*, k=1): return closure
++        def f() -> int: return closure
++
++        # Check ast errors in *args and *kwargs
++        check_syntax_error(self, "f(*g(1=2))")
++        check_syntax_error(self, "f(**g(1=2))")
++
++    def testLambdef(self):
++        ### lambdef: 'lambda' [varargslist] ':' test
++        l1 = lambda : 0
++        self.assertEquals(l1(), 0)
++        l2 = lambda : a[d] # XXX just testing the expression
++        l3 = lambda : [2 < x for x in [-1, 3, 0]]
++        self.assertEquals(l3(), [0, 1, 0])
++        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
++        self.assertEquals(l4(), 1)
++        l5 = lambda x, y, z=2: x + y + z
++        self.assertEquals(l5(1, 2), 5)
++        self.assertEquals(l5(1, 2, 3), 6)
++        check_syntax_error(self, "lambda x: x = 2")
++        check_syntax_error(self, "lambda (None,): None")
++        l6 = lambda x, y, *, k=20: x+y+k
++        self.assertEquals(l6(1,2), 1+2+20)
++        self.assertEquals(l6(1,2,k=10), 1+2+10)
++
++
++    ### stmt: simple_stmt | compound_stmt
++    # Tested below
++
++    def testSimpleStmt(self):
++        ### simple_stmt: small_stmt (';' small_stmt)* [';']
++        x = 1; pass; del x
++        def foo():
++            # verify statements that end with semicolons
++            x = 1; pass; del x;
++        foo()
++
++    ### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
++    # Tested below
++
++    def testExprStmt(self):
++        # (exprlist '=')* exprlist
++        1
++        1, 2, 3
++        x = 1
++        x = 1, 2, 3
++        x = y = z = 1, 2, 3
++        x, y, z = 1, 2, 3
++        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
++
++        check_syntax_error(self, "x + 1 = 1")
++        check_syntax_error(self, "a + 1 = b + 2")
++
++    def testDelStmt(self):
++        # 'del' exprlist
++        abc = [1,2,3]
++        x, y, z = abc
++        xyz = x, y, z
++
++        del abc
++        del x, y, (z, xyz)
++
++    def testPassStmt(self):
++        # 'pass'
++        pass
++
++    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
++    # Tested below
++
++    def testBreakStmt(self):
++        # 'break'
++        while 1: break
++
++    def testContinueStmt(self):
++        # 'continue'
++        i = 1
++        while i: i = 0; continue
++
++        msg = ""
++        while not msg:
++            msg = "ok"
++            try:
++                continue
++                msg = "continue failed to continue inside try"
++            except:
++                msg = "continue inside try called except block"
++        if msg != "ok":
++            self.fail(msg)
++
++        msg = ""
++        while not msg:
++            msg = "finally block not called"
++            try:
++                continue
++            finally:
++                msg = "ok"
++        if msg != "ok":
++            self.fail(msg)
++
++    def test_break_continue_loop(self):
++        # This test warrants an explanation. It is a test specifically for SF bugs
++        # #463359 and #462937. The bug is that a 'break' statement executed or
++        # exception raised inside a try/except inside a loop, *after* a continue
++        # statement has been executed in that loop, will cause the wrong number of
++        # arguments to be popped off the stack and the instruction pointer reset to
++        # a very small number (usually 0.) Because of this, the following test
++        # *must* written as a function, and the tracking vars *must* be function
++        # arguments with default values. Otherwise, the test will loop and loop.
++
++        def test_inner(extra_burning_oil = 1, count=0):
++            big_hippo = 2
++            while big_hippo:
++                count += 1
++                try:
++                    if extra_burning_oil and big_hippo == 1:
++                        extra_burning_oil -= 1
++                        break
++                    big_hippo -= 1
++                    continue
++                except:
++                    raise
++            if count > 2 or big_hippo != 1:
++                self.fail("continue then break in try/except in loop broken!")
++        test_inner()
++
++    def testReturn(self):
++        # 'return' [testlist]
++        def g1(): return
++        def g2(): return 1
++        g1()
++        x = g2()
++        check_syntax_error(self, "class foo:return 1")
++
++    def testYield(self):
++        check_syntax_error(self, "class foo:yield 1")
++
++    def testRaise(self):
++        # 'raise' test [',' test]
++        try: raise RuntimeError('just testing')
++        except RuntimeError: pass
++        try: raise KeyboardInterrupt
++        except KeyboardInterrupt: pass
++
++    def testImport(self):
++        # 'import' dotted_as_names
++        import sys
++        import time, sys
++        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
++        from time import time
++        from time import (time)
++        # not testable inside a function, but already done at top of the module
++        # from sys import *
++        from sys import path, argv
++        from sys import (path, argv)
++        from sys import (path, argv,)
++
++    def testGlobal(self):
++        # 'global' NAME (',' NAME)*
++        global a
++        global a, b
++        global one, two, three, four, five, six, seven, eight, nine, ten
++
++    def testNonlocal(self):
++        # 'nonlocal' NAME (',' NAME)*
++        x = 0
++        y = 0
++        def f():
++            nonlocal x
++            nonlocal x, y
++
++    def testAssert(self):
++        # assert_stmt: 'assert' test [',' test]
++        assert 1
++        assert 1, 1
++        assert lambda x:x
++        assert 1, lambda x:x+1
++        try:
++            assert 0, "msg"
++        except AssertionError as e:
++            self.assertEquals(e.args[0], "msg")
++        else:
++            if __debug__:
++                self.fail("AssertionError not raised by assert 0")
++
++    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
++    # Tested below
++
++    def testIf(self):
++        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
++        if 1: pass
++        if 1: pass
++        else: pass
++        if 0: pass
++        elif 0: pass
++        if 0: pass
++        elif 0: pass
++        elif 0: pass
++        elif 0: pass
++        else: pass
++
++    def testWhile(self):
++        # 'while' test ':' suite ['else' ':' suite]
++        while 0: pass
++        while 0: pass
++        else: pass
++
++        # Issue1920: "while 0" is optimized away,
++        # ensure that the "else" clause is still present.
++        x = 0
++        while 0:
++            x = 1
++        else:
++            x = 2
++        self.assertEquals(x, 2)
++
++    def testFor(self):
++        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
++        for i in 1, 2, 3: pass
++        for i, j, k in (): pass
++        else: pass
++        class Squares:
++            def __init__(self, max):
++                self.max = max
++                self.sofar = []
++            def __len__(self): return len(self.sofar)
++            def __getitem__(self, i):
++                if not 0 <= i < self.max: raise IndexError
++                n = len(self.sofar)
++                while n <= i:
++                    self.sofar.append(n*n)
++                    n = n+1
++                return self.sofar[i]
++        n = 0
++        for x in Squares(10): n = n+x
++        if n != 285:
++            self.fail('for over growing sequence')
++
++        result = []
++        for x, in [(1,), (2,), (3,)]:
++            result.append(x)
++        self.assertEqual(result, [1, 2, 3])
++
++    def testTry(self):
++        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
++        ###         | 'try' ':' suite 'finally' ':' suite
++        ### except_clause: 'except' [expr ['as' expr]]
++        try:
++            1/0
++        except ZeroDivisionError:
++            pass
++        else:
++            pass
++        try: 1/0
++        except EOFError: pass
++        except TypeError as msg: pass
++        except RuntimeError as msg: pass
++        except: pass
++        else: pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError): pass
++        try: 1/0
++        except (EOFError, TypeError, ZeroDivisionError) as msg: pass
++        try: pass
++        finally: pass
++
++    def testSuite(self):
++        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
++        if 1: pass
++        if 1:
++            pass
++        if 1:
++            #
++            #
++            #
++            pass
++            pass
++            #
++            pass
++            #
++
++    def testTest(self):
++        ### and_test ('or' and_test)*
++        ### and_test: not_test ('and' not_test)*
++        ### not_test: 'not' not_test | comparison
++        if not 1: pass
++        if 1 and 1: pass
++        if 1 or 1: pass
++        if not not not 1: pass
++        if not 1 and 1 and 1: pass
++        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
++
++    def testComparison(self):
++        ### comparison: expr (comp_op expr)*
++        ### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
++        if 1: pass
++        x = (1 == 1)
++        if 1 == 1: pass
++        if 1 != 1: pass
++        if 1 < 1: pass
++        if 1 > 1: pass
++        if 1 <= 1: pass
++        if 1 >= 1: pass
++        if 1 is 1: pass
++        if 1 is not 1: pass
++        if 1 in (): pass
++        if 1 not in (): pass
++        if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
++
++    def testBinaryMaskOps(self):
++        x = 1 & 1
++        x = 1 ^ 1
++        x = 1 | 1
++
++    def testShiftOps(self):
++        x = 1 << 1
++        x = 1 >> 1
++        x = 1 << 1 >> 1
++
++    def testAdditiveOps(self):
++        x = 1
++        x = 1 + 1
++        x = 1 - 1 - 1
++        x = 1 - 1 + 1 - 1 + 1
++
++    def testMultiplicativeOps(self):
++        x = 1 * 1
++        x = 1 / 1
++        x = 1 % 1
++        x = 1 / 1 * 1 % 1
++
++    def testUnaryOps(self):
++        x = +1
++        x = -1
++        x = ~1
++        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
++        x = -1*1/1 + 1*1 - ---1*1
++
++    def testSelectors(self):
++        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
++        ### subscript: expr | [expr] ':' [expr]
++
++        import sys, time
++        c = sys.path[0]
++        x = time.time()
++        x = sys.modules['time'].time()
++        a = '01234'
++        c = a[0]
++        c = a[-1]
++        s = a[0:5]
++        s = a[:5]
++        s = a[0:]
++        s = a[:]
++        s = a[-5:]
++        s = a[:-1]
++        s = a[-4:-3]
++        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
++        # The testing here is fairly incomplete.
++        # Test cases should include: commas with 1 and 2 colons
++        d = {}
++        d[1] = 1
++        d[1,] = 2
++        d[1,2] = 3
++        d[1,2,3] = 4
++        L = list(d)
++        L.sort(key=lambda x: x if isinstance(x, tuple) else ())
++        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
++
++    def testAtoms(self):
++        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
++        ### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
++
++        x = (1)
++        x = (1 or 2 or 3)
++        x = (1 or 2 or 3, 2, 3)
++
++        x = []
++        x = [1]
++        x = [1 or 2 or 3]
++        x = [1 or 2 or 3, 2, 3]
++        x = []
++
++        x = {}
++        x = {'one': 1}
++        x = {'one': 1,}
++        x = {'one' or 'two': 1 or 2}
++        x = {'one': 1, 'two': 2}
++        x = {'one': 1, 'two': 2,}
++        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
++
++        x = {'one'}
++        x = {'one', 1,}
++        x = {'one', 'two', 'three'}
++        x = {2, 3, 4,}
++
++        x = x
++        x = 'x'
++        x = 123
++
++    ### exprlist: expr (',' expr)* [',']
++    ### testlist: test (',' test)* [',']
++    # These have been exercised enough above
++
++    def testClassdef(self):
++        # 'class' NAME ['(' [testlist] ')'] ':' suite
++        class B: pass
++        class B2(): pass
++        class C1(B): pass
++        class C2(B): pass
++        class D(C1, C2, B): pass
++        class C:
++            def meth1(self): pass
++            def meth2(self, arg): pass
++            def meth3(self, a1, a2): pass
++
++        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
++        # decorators: decorator+
++        # decorated: decorators (classdef | funcdef)
++        def class_decorator(x): return x
++        @class_decorator
++        class G: pass
++
++    def testDictcomps(self):
++        # dictorsetmaker: ( (test ':' test (comp_for |
++        #                                   (',' test ':' test)* [','])) |
++        #                   (test (comp_for | (',' test)* [','])) )
++        nums = [1, 2, 3]
++        self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
++
++    def testListcomps(self):
++        # list comprehension tests
++        nums = [1, 2, 3, 4, 5]
++        strs = ["Apple", "Banana", "Coconut"]
++        spcs = ["  Apple", " Banana ", "Coco  nut  "]
++
++        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
++        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
++        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
++        self.assertEqual([(i, s) for i in nums for s in strs],
++                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
++                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
++                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
++                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
++                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
++                          (5, 'Banana'), (5, 'Coconut')])
++        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
++                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
++
++        def test_in_func(l):
++            return [0 < x < 3 for x in l if x > 2]
++
++        self.assertEqual(test_in_func(nums), [False, False, False])
++
++        def test_nested_front():
++            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
++                             [[1, 2], [3, 4], [5, 6]])
++
++        test_nested_front()
++
++        check_syntax_error(self, "[i, s for i in nums for s in strs]")
++        check_syntax_error(self, "[x if y]")
++
++        suppliers = [
++          (1, "Boeing"),
++          (2, "Ford"),
++          (3, "Macdonalds")
++        ]
++
++        parts = [
++          (10, "Airliner"),
++          (20, "Engine"),
++          (30, "Cheeseburger")
++        ]
++
++        suppart = [
++          (1, 10), (1, 20), (2, 20), (3, 30)
++        ]
++
++        x = [
++          (sname, pname)
++            for (sno, sname) in suppliers
++              for (pno, pname) in parts
++                for (sp_sno, sp_pno) in suppart
++                  if sno == sp_sno and pno == sp_pno
++        ]
++
++        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
++                             ('Macdonalds', 'Cheeseburger')])
++
++    def testGenexps(self):
++        # generator expression tests
++        g = ([x for x in range(10)] for x in range(1))
++        self.assertEqual(next(g), [x for x in range(10)])
++        try:
++            next(g)
++            self.fail('should produce StopIteration exception')
++        except StopIteration:
++            pass
++
++        a = 1
++        try:
++            g = (a for d in a)
++            next(g)
++            self.fail('should produce TypeError')
++        except TypeError:
++            pass
++
++        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
++        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
++
++        a = [x for x in range(10)]
++        b = (x for x in (y for y in a))
++        self.assertEqual(sum(b), sum([x for x in range(10)]))
++
++        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
++        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
++        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
++        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
++        check_syntax_error(self, "foo(x for x in range(10), 100)")
++        check_syntax_error(self, "foo(100, x for x in range(10))")
++
++    def testComprehensionSpecials(self):
++        # test for outermost iterable precomputation
++        x = 10; g = (i for i in range(x)); x = 5
++        self.assertEqual(len(list(g)), 10)
++
++        # This should hold, since we're only precomputing the outermost iterable.
++        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
++        x = 5; t = True;
++        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
++
++        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
++        # even though it's silly. Make sure it works (ifelse broke this.)
++        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
++        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
++
++        # verify unpacking single element tuples in listcomp/genexp.
++        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
++        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
++
++    def testIfElseExpr(self):
++        # Test ifelse expressions in various cases
++        def _checkeval(msg, ret):
++            "helper to check that evaluation of expressions is done correctly"
++            print(x)
++            return ret
++
++        # the next line is not allowed anymore
++        #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
++        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
++        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
++        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
++        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
++        self.assertEqual((5 and 6 if 0 else 1), 1)
++        self.assertEqual(((5 and 6) if 0 else 1), 1)
++        self.assertEqual((5 and (6 if 1 else 1)), 6)
++        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
++        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
++        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
++        self.assertEqual((not 5 if 1 else 1), False)
++        self.assertEqual((not 5 if 0 else 1), 1)
++        self.assertEqual((6 + 1 if 1 else 2), 7)
++        self.assertEqual((6 - 1 if 1 else 2), 5)
++        self.assertEqual((6 * 2 if 1 else 4), 12)
++        self.assertEqual((6 / 2 if 1 else 3), 3)
++        self.assertEqual((6 < 4 if 0 else 2), 2)
++
++
++def test_main():
++    run_unittest(TokenTests, GrammarTests)
++
++if __name__ == '__main__':
++    test_main()
+diff -r 531f2e948299 refactor/tests/pytree_idempotency.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/pytree_idempotency.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,92 @@
++#!/usr/bin/env python2.5
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Main program for testing the infrastructure."""
++
++__author__ = "Guido van Rossum <guido at python.org>"
++
++# Support imports (need to be imported first)
++from . import support
++
++# Python imports
++import os
++import sys
++import logging
++
++# Local imports
++from .. import pytree
++import pgen2
++from pgen2 import driver
++
++logging.basicConfig()
++
++def main():
++    gr = driver.load_grammar("Grammar.txt")
++    dr = driver.Driver(gr, convert=pytree.convert)
++
++    fn = "example.py"
++    tree = dr.parse_file(fn, debug=True)
++    if not diff(fn, tree):
++        print "No diffs."
++    if not sys.argv[1:]:
++        return # Pass a dummy argument to run the complete test suite below
++
++    problems = []
++
++    # Process every imported module
++    for name in sys.modules:
++        mod = sys.modules[name]
++        if mod is None or not hasattr(mod, "__file__"):
++            continue
++        fn = mod.__file__
++        if fn.endswith(".pyc"):
++            fn = fn[:-1]
++        if not fn.endswith(".py"):
++            continue
++        print >>sys.stderr, "Parsing", fn
++        tree = dr.parse_file(fn, debug=True)
++        if diff(fn, tree):
++            problems.append(fn)
++
++    # Process every single module on sys.path (but not in packages)
++    for dir in sys.path:
++        try:
++            names = os.listdir(dir)
++        except os.error:
++            continue
++        print >>sys.stderr, "Scanning", dir, "..."
++        for name in names:
++            if not name.endswith(".py"):
++                continue
++            print >>sys.stderr, "Parsing", name
++            fn = os.path.join(dir, name)
++            try:
++                tree = dr.parse_file(fn, debug=True)
++            except pgen2.parse.ParseError, err:
++                print "ParseError:", err
++            else:
++                if diff(fn, tree):
++                    problems.append(fn)
++
++    # Show summary of problem files
++    if not problems:
++        print "No problems.  Congratulations!"
++    else:
++        print "Problems in following files:"
++        for fn in problems:
++            print "***", fn
++
++def diff(fn, tree):
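++    # Serialize the tree to a scratch file literally named "@", shell out to
++    # diff(1) against the original source, and always remove the scratch file.
++    # A nonzero return value means the round trip was not idempotent.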
++    f = open("@", "w")
++    try:
++        f.write(str(tree))
++    finally:
++        f.close()
++    try:
++        return os.system("diff -u %s @" % fn)
++    finally:
++        os.remove("@")
++
++if __name__ == "__main__":
++    main()
+diff -r 531f2e948299 refactor/tests/support.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/support.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,78 @@
++"""Support code for test_*.py files"""
++# Original Author: Collin Winter
++
++# Python imports
++import unittest
++import sys
++import os
++import os.path
++import re
++from textwrap import dedent
++
++#sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
++
++# Local imports
++from .. import pytree
++from .. import refactor
++from ..pgen2 import driver
++
++test_pkg = "refactor.fixes"
++test_dir = os.path.dirname(__file__)
++proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
++grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
++grammar = driver.load_grammar(grammar_path)
++driver = driver.Driver(grammar, convert=pytree.convert)
++
++def parse_version(version_string):
++    """Returns a version tuple matching input version_string."""
++    if not version_string:
++        return ()
++
++    version_list = []
++    for token in version_string.split('.'):
++        try:
++            version_list.append(int(token))
++        except ValueError:
++            version_list.append(token)
++    return tuple(version_list)
++
++def parse_string(string):
++    return driver.parse_string(reformat(string), debug=True)
++
++# Python 2.3's TestSuite is not iter()-able
++if sys.version_info < (2, 4):
++    def TestSuite_iter(self):
++        return iter(self._tests)
++    unittest.TestSuite.__iter__ = TestSuite_iter
++
++def run_all_tests(test_mod=None, tests=None):
++    if tests is None:
++        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
++    unittest.TextTestRunner(verbosity=2).run(tests)
++
++def reformat(string):
++    return dedent(string) + "\n\n"
++
++def get_refactorer(fixers=None, options=None, pkg_name=None):
++    """
++    A convenience function for creating a RefactoringTool for tests.
++
++    fixers is a list of fixers for the RefactoringTool to use. By default
++    "refactor.fixes.*" is used. options is an optional dictionary of options to
++    be passed to the RefactoringTool.
++    """
++    pkg_name = pkg_name or test_pkg
++    if fixers is not None:
++        fixers = [pkg_name + ".fix_" + fix for fix in fixers]
++    else:
++        fixers = refactor.get_fixers_from_package(pkg_name)
++    options = options or {}
++    return refactor.RefactoringTool(fixers, options, explicit=True)
++
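++# For example, a fixer test can build a tool that runs only the print fixer
++# (a sketch using the defaults above; the options mirror those used in the tests):
++#     tool = get_refactorer(fixers=["print"], options={"print_function": False})
++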
++def all_project_files():
++    for dirpath, dirnames, filenames in os.walk(proj_dir):
++        for filename in filenames:
++            if filename.endswith(".py"):
++                yield os.path.join(dirpath, filename)
++
++TestCase = unittest.TestCase
+diff -r 531f2e948299 refactor/tests/test_all_fixers.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/test_all_fixers.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,35 @@
++#!/usr/bin/env python2.5
++"""Tests that run all fixer modules over an input stream.
++
++This has been broken out into its own test module because of its
++running time.
++"""
++# Author: Collin Winter
++
++# Testing imports
++try:
++    from . import support
++except ImportError:
++    import support
++
++# Python imports
++import unittest
++
++# Local imports
++from .. import pytree
++from .. import refactor
++
++class Test_all(support.TestCase):
++    def setUp(self):
++        options = {"print_function" : False}
++        self.refactor = support.get_refactorer(options=options)
++
++    def test_all_project_files(self):
++        for filepath in support.all_project_files():
++            print "Fixing %s..." % filepath
++            self.refactor.refactor_string(open(filepath).read(), filepath)
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
+diff -r 531f2e948299 refactor/tests/test_fixers.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/test_fixers.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,222 @@
++#!/usr/bin/env python2.5
++""" Test suite for the fixer modules """
++# Original Author: Collin Winter
++
++# Testing imports
++try:
++    from tests import support
++except ImportError:
++    import support
++
++# Python imports
++import os
++import unittest
++from itertools import chain
++from operator import itemgetter
++
++# Local imports
++from .. import pygram, pytree, refactor, fixer_util
++
++class FixerTestCase(support.TestCase):
++    old_version = (3, 0)
++    new_version = (2, 5)
++
++    def setUp(self, fix_list=None):
++        if fix_list is None:
++            fix_list = [self.fixer]
++        options = {"print_function" : False}
++        pkg_name = self.get_pkg_name()
++        self.refactor = support.get_refactorer(fix_list, options,
++                                               pkg_name=pkg_name)
++        self.fixer_log = []
++        self.filename = "<string>"
++
++        for fixer in chain(self.refactor.pre_order,
++                           self.refactor.post_order):
++            fixer.log = self.fixer_log
++
++    def _check(self, versions, ignore_warnings=False):
++        """Verifying a fix matches before and after version
++
++        versions is a dict mapping version tuples to sample code.
++
++        Example:
++            _check({(3, 0): 'print()',
++                    (2, 3): 'print'})
++            # The same dict applies for 3.x to 2.x and vice versa
++        """
++        before = self.price_is_right(versions, self.old_version)
++        after = self.price_is_right(versions, self.new_version)
++
++        # Quit now unless both before and after won the Price is Right.
++        if before is None or after is None:
++            return
++
++        before = support.reformat(before)
++        after = support.reformat(after)
++
++        tree = self.refactor.refactor_string(before, self.filename)
++        self.failUnlessEqual(after, str(tree))
++        if not ignore_warnings:
++            self.failUnlessEqual(self.fixer_log, [])
++        return tree
++
++
++    def price_is_right(self, versions, target_version):
++        """Return the closest version in versions without going over target
++        """
++        snippet = None
++        for version_key in sorted(versions.keys()):
++            if version_key > target_version:
++                break
++            snippet = versions[version_key]
++        return snippet
++
++    def check(self, up, down):
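++        # With the class defaults old_version=(3, 0) > new_version=(2, 5),
++        # the tool is translating downward, so the "down" samples apply;
++        # an upgrading subclass would use "up"; equal versions run both.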
++        if self.old_version > self.new_version:
++            self._check(down)
++        elif self.old_version < self.new_version:
++            self._check(up)
++        else:
++            self._check(down)
++            self._check(up)
++
++    def get_pkg_name(self):
++        if self.old_version >= (3, 0):
++            return 'refactor.fixes.from3'
++        else:
++            return 'refactor.fixes.from2'
++
++    def warns(self, before, after, message, unchanged=False):
++        tree = self._check(before, after)
++        self.failUnless(message in "".join(self.fixer_log))
++        if not unchanged:
++            self.failUnless(tree.was_changed)
++
++    def warns_unchanged(self, before, message):
++        self.warns(before, before, message, unchanged=True)
++
++    def unchanged(self, before, ignore_warnings=False):
++        self._check(before, before)
++        if not ignore_warnings:
++            self.failUnlessEqual(self.fixer_log, [])
++
++    def assert_runs_after(self, *names):
++        fixes = [self.fixer]
++        fixes.extend(names)
++        options = {"print_function" : False}
++        r = support.get_refactorer(fixes, options)
++        (pre, post) = r.get_fixers()
++        n = "fix_" + self.fixer
++        if post and post[-1].__class__.__module__.endswith(n):
++            # We're the last fixer to run
++            return
++        if pre and pre[-1].__class__.__module__.endswith(n) and not post:
++            # We're the last in pre and post is empty
++            return
++        self.fail("Fixer run order (%s) is incorrect; %s should be last."\
++               %(", ".join([x.__class__.__module__ for x in (pre+post)]), n))
++
++class Test_range(FixerTestCase):
++    fixer = "range"
++
++    def test_xrange(self):
++        up = {}
++        down = {
++            (2, 5): """x = xrange(0, 10, 2)""",
++            (3, 0): """x = range(0, 10, 2)""",
++            }
++        self.check(up, down)
++
++    def test_range(self):
++        up = {}
++        down = {
++            (2, 5): """x = list(xrange(0, 10, 2))""",
++            (3, 0): """x = list(range(0, 10, 2))""",
++            }
++        self.check(up, down)
++
++class Test_renames(FixerTestCase):
++    fixer = "renames"
++
++    def test_maxint(self):
++        up = {}
++        down = {
++            (2, 5): """sys.maxint""",
++            (2, 6): """sys.maxsize""",
++            }
++        self.check(up, down)
++
++class Test_print(FixerTestCase):
++    """
++    http://docs.python.org/3.0/whatsnew/3.0.html
++
++    Old: print "The answer is", 2*2
++    New: print("The answer is", 2*2)
++
++    Old: print x,           # Trailing comma suppresses newline
++    New: print(x, end=" ")  # Appends a space instead of a newline
++
++    Old: print              # Prints a newline
++    New: print()            # You must call the function!
++
++    Old: print >>sys.stderr, "fatal error"
++    New: print("fatal error", file=sys.stderr)
++
++    Old: print (x, y)       # prints repr((x, y))
++    New: print((x, y))      # Not the same as print(x, y)!
++    """
++
++    fixer = "print"
++
++    def test_func(self):
++        up = {}
++        down = {
++            (2, 5): """print""",
++            (3, 0): """print()""",
++            }
++        self.check(up, down)
++
++    def test_x(self):
++        up = {}
++        down = {
++            (2, 5): """print x""",
++            (3, 0): """print(x)""",
++            }
++        self.check(up, down)
++
++    def test_str(self):
++        up = {}
++        down = {
++            (2, 5): """print ''""",
++            (3, 0): """print('')""",
++            }
++        self.check(up, down)
++
++    def test_compound(self):
++        up = {}
++        down = {
++            (2, 5): """print "The answer is", 2*2""",
++            (3, 0): """print("The answer is", 2*2)""",
++            }
++        self.check(up, down)
++
++    def test_end(self):
++        up = {}
++        down = {
++            (2, 5): """print x, """,
++            (3, 0): """print(x, end=" ")""",
++            }
++        self.check(up, down)
++
++    def test_stderr(self):
++        up = {}
++        down = {
++            (2, 5): """print >>sys.stderr, 'fatal error'""",
++            (3, 0): """print('fatal error', file=sys.stderr)""",
++            }
++        self.check(up, down)
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
+diff -r 531f2e948299 refactor/tests/test_parser.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/test_parser.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,202 @@
++#!/usr/bin/env python2.5
++"""Test suite for refactor's parser and grammar files.
++
++This is the place to add tests for changes to refactor's grammar, such as those
++merging the grammars for Python 2 and 3. In addition to specific tests for
++parts of the grammar we've changed, we also make sure we can parse the
++test_grammar.py files from both Python 2 and Python 3.
++"""
++# Author: Collin Winter
++
++# Testing imports
++from . import support
++from .support import driver, test_dir
++
++# Python imports
++import os
++import os.path
++
++# Local imports
++from ..pgen2.parse import ParseError
++
++
++class GrammarTest(support.TestCase):
++    def validate(self, code):
++        support.parse_string(code)
++
++    def invalid_syntax(self, code):
++        try:
++            self.validate(code)
++        except ParseError:
++            pass
++        else:
++            raise AssertionError("Syntax shouldn't have been valid")
++
++
++class TestRaiseChanges(GrammarTest):
++    def test_2x_style_1(self):
++        self.validate("raise")
++
++    def test_2x_style_2(self):
++        self.validate("raise E, V")
++
++    def test_2x_style_3(self):
++        self.validate("raise E, V, T")
++
++    def test_2x_style_invalid_1(self):
++        self.invalid_syntax("raise E, V, T, Z")
++
++    def test_3x_style(self):
++        self.validate("raise E1 from E2")
++
++    def test_3x_style_invalid_1(self):
++        self.invalid_syntax("raise E, V from E1")
++
++    def test_3x_style_invalid_2(self):
++        self.invalid_syntax("raise E from E1, E2")
++
++    def test_3x_style_invalid_3(self):
++        self.invalid_syntax("raise from E1, E2")
++
++    def test_3x_style_invalid_4(self):
++        self.invalid_syntax("raise E from")
++
++
++# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
++class TestFunctionAnnotations(GrammarTest):
++    def test_1(self):
++        self.validate("""def f(x) -> list: pass""")
++
++    def test_2(self):
++        self.validate("""def f(x:int): pass""")
++
++    def test_3(self):
++        self.validate("""def f(*x:str): pass""")
++
++    def test_4(self):
++        self.validate("""def f(**x:float): pass""")
++
++    def test_5(self):
++        self.validate("""def f(x, y:1+2): pass""")
++
++    def test_6(self):
++        self.validate("""def f(a, (b:1, c:2, d)): pass""")
++
++    def test_7(self):
++        self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
++
++    def test_8(self):
++        s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
++                        *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
++        self.validate(s)
++
++
++class TestExcept(GrammarTest):
++    def test_new(self):
++        s = """
++            try:
++                x
++            except E as N:
++                y"""
++        self.validate(s)
++
++    def test_old(self):
++        s = """
++            try:
++                x
++            except E, N:
++                y"""
++        self.validate(s)
++
++
++# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
++class TestSetLiteral(GrammarTest):
++    def test_1(self):
++        self.validate("""x = {'one'}""")
++
++    def test_2(self):
++        self.validate("""x = {'one', 1,}""")
++
++    def test_3(self):
++        self.validate("""x = {'one', 'two', 'three'}""")
++
++    def test_4(self):
++        self.validate("""x = {2, 3, 4,}""")
++
++
++class TestNumericLiterals(GrammarTest):
++    def test_new_octal_notation(self):
++        self.validate("""0o7777777777777""")
++        self.invalid_syntax("""0o7324528887""")
++
++    def test_new_binary_notation(self):
++        self.validate("""0b101010""")
++        self.invalid_syntax("""0b0101021""")
++
++
++class TestClassDef(GrammarTest):
++    def test_new_syntax(self):
++        self.validate("class B(t=7): pass")
++        self.validate("class B(t, *args): pass")
++        self.validate("class B(t, **kwargs): pass")
++        self.validate("class B(t, *args, **kwargs): pass")
++        self.validate("class B(t, y=9, *args, **kwargs): pass")
++
++
++class TestParserIdempotency(support.TestCase):
++
++    """A cut-down version of pytree_idempotency.py."""
++
++    def test_all_project_files(self):
++        for filepath in support.all_project_files():
++            print "Parsing %s..." % filepath
++            tree = driver.parse_file(filepath, debug=True)
++            if diff(filepath, tree):
++                self.fail("Idempotency failed: %s" % filepath)
++
++
++class TestLiterals(GrammarTest):
++
++    def test_multiline_bytes_literals(self):
++        s = """
++            md5test(b"\xaa" * 80,
++                    (b"Test Using Larger Than Block-Size Key "
++                     b"and Larger Than One Block-Size Data"),
++                    "6f630fad67cda0ee1fb1f562db3aa53e")
++            """
++        self.validate(s)
++
++    def test_multiline_bytes_tripquote_literals(self):
++        s = '''
++            b"""
++            <?xml version="1.0" encoding="UTF-8"?>
++            <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
++            """
++            '''
++        self.validate(s)
++
++    def test_multiline_str_literals(self):
++        s = """
++            md5test("\xaa" * 80,
++                    ("Test Using Larger Than Block-Size Key "
++                     "and Larger Than One Block-Size Data"),
++                    "6f630fad67cda0ee1fb1f562db3aa53e")
++            """
++        self.validate(s)
++
++
++def diff(fn, tree):
++    f = open("@", "w")
++    try:
++        f.write(str(tree))
++    finally:
++        f.close()
++    try:
++        return os.system("diff -u %s @" % fn)
++    finally:
++        os.remove("@")
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
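
TestParserIdempotency above shells out to diff via the helper defined below it; the same round trip can be checked in-process. A minimal sketch using the stock lib2to3 names (this commit mirrors them under the refactor package):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)

    source = "def f(x):\n    return x\n"
    tree = d.parse_string(source)
    # The tree keeps every prefix (whitespace and comments), so its
    # string form reproduces the input exactly.
    assert str(tree) == source
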
+diff -r 531f2e948299 refactor/tests/test_pytree.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/test_pytree.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,458 @@
++#!/usr/bin/env python2.5
++# Copyright 2006 Google, Inc. All Rights Reserved.
++# Licensed to PSF under a Contributor Agreement.
++
++"""Unit tests for pytree.py.
++
++NOTE: Please *don't* add doc strings to individual test methods!
++In verbose mode, printing of the module, class and method name is much
++more helpful than printing of (the first line of) the docstring,
++especially when debugging a test.
++"""
++
++# Testing imports
++from . import support
++
++# Local imports (XXX should become a package)
++from .. import pytree
++
++try:
++    sorted
++except NameError:
++    def sorted(lst):
++        l = list(lst)
++        l.sort()
++        return l
++
++class TestNodes(support.TestCase):
++
++    """Unit tests for nodes (Base, Leaf, Node)."""
++
++    def testBaseCantConstruct(self):
++        if __debug__:
++            # Test that instantiating Base() raises an AssertionError
++            self.assertRaises(AssertionError, pytree.Base)
++
++    def testLeaf(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(l1.type, 100)
++        self.assertEqual(l1.value, "foo")
++
++    def testLeafRepr(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(repr(l1), "Leaf(100, 'foo')")
++
++    def testLeafStr(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(str(l1), "foo")
++        l2 = pytree.Leaf(100, "foo", context=(" ", (10, 1)))
++        self.assertEqual(str(l2), " foo")
++
++    def testLeafStrNumericValue(self):
++        # Make sure that the Leaf's value is stringified. Failing to
++        #  do this can cause a TypeError in certain situations.
++        l1 = pytree.Leaf(2, 5)
++        l1.set_prefix("foo_")
++        self.assertEqual(str(l1), "foo_5")
++
++    def testLeafEq(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "foo", context=(" ", (1, 0)))
++        self.assertEqual(l1, l2)
++        l3 = pytree.Leaf(101, "foo")
++        l4 = pytree.Leaf(100, "bar")
++        self.assertNotEqual(l1, l3)
++        self.assertNotEqual(l1, l4)
++
++    def testLeafPrefix(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(l1.get_prefix(), "")
++        self.failIf(l1.was_changed)
++        l1.set_prefix("  ##\n\n")
++        self.assertEqual(l1.get_prefix(), "  ##\n\n")
++        self.failUnless(l1.was_changed)
++
++    def testNode(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(200, "bar")
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(n1.type, 1000)
++        self.assertEqual(n1.children, [l1, l2])
++
++    def testNodeRepr(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(repr(n1),
++                         "Node(1000, [%s, %s])" % (repr(l1), repr(l2)))
++
++    def testNodeStr(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(str(n1), "foo bar")
++
++    def testNodePrefix(self):
++        l1 = pytree.Leaf(100, "foo")
++        self.assertEqual(l1.get_prefix(), "")
++        n1 = pytree.Node(1000, [l1])
++        self.assertEqual(n1.get_prefix(), "")
++        n1.set_prefix(" ")
++        self.assertEqual(n1.get_prefix(), " ")
++        self.assertEqual(l1.get_prefix(), " ")
++
++    def testGetSuffix(self):
++        l1 = pytree.Leaf(100, "foo", prefix="a")
++        l2 = pytree.Leaf(100, "bar", prefix="b")
++        n1 = pytree.Node(1000, [l1, l2])
++
++        self.assertEqual(l1.get_suffix(), l2.get_prefix())
++        self.assertEqual(l2.get_suffix(), "")
++        self.assertEqual(n1.get_suffix(), "")
++
++        l3 = pytree.Leaf(100, "bar", prefix="c")
++        n2 = pytree.Node(1000, [n1, l3])
++
++        self.assertEqual(n1.get_suffix(), l3.get_prefix())
++        self.assertEqual(l3.get_suffix(), "")
++        self.assertEqual(n2.get_suffix(), "")
++
++    def testNodeEq(self):
++        n1 = pytree.Node(1000, ())
++        n2 = pytree.Node(1000, [], context=(" ", (1, 0)))
++        self.assertEqual(n1, n2)
++        n3 = pytree.Node(1001, ())
++        self.assertNotEqual(n1, n3)
++
++    def testNodeEqRecursive(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1])
++        n2 = pytree.Node(1000, [l2])
++        self.assertEqual(n1, n2)
++        l3 = pytree.Leaf(100, "bar")
++        n3 = pytree.Node(1000, [l3])
++        self.assertNotEqual(n1, n3)
++
++    def testReplace(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "+")
++        l3 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2, l3])
++        self.assertEqual(n1.children, [l1, l2, l3])
++        self.failUnless(isinstance(n1.children, list))
++        self.failIf(n1.was_changed)
++        l2new = pytree.Leaf(100, "-")
++        l2.replace(l2new)
++        self.assertEqual(n1.children, [l1, l2new, l3])
++        self.failUnless(isinstance(n1.children, list))
++        self.failUnless(n1.was_changed)
++
++    def testReplaceWithList(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "+")
++        l3 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2, l3])
++
++        l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
++        self.assertEqual(str(n1), "foo**bar")
++        self.failUnless(isinstance(n1.children, list))
++
++    def testPostOrder(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(list(n1.post_order()), [l1, l2, n1])
++
++    def testPreOrder(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2])
++        self.assertEqual(list(n1.pre_order()), [n1, l1, l2])
++
++    def testChangedLeaf(self):
++        l1 = pytree.Leaf(100, "f")
++        self.failIf(l1.was_changed)
++
++        l1.changed()
++        self.failUnless(l1.was_changed)
++
++    def testChangedNode(self):
++        l1 = pytree.Leaf(100, "f")
++        n1 = pytree.Node(1000, [l1])
++        self.failIf(n1.was_changed)
++
++        n1.changed()
++        self.failUnless(n1.was_changed)
++
++    def testChangedRecursive(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "+")
++        l3 = pytree.Leaf(100, "bar")
++        n1 = pytree.Node(1000, [l1, l2, l3])
++        n2 = pytree.Node(1000, [n1])
++        self.failIf(l1.was_changed)
++        self.failIf(n1.was_changed)
++        self.failIf(n2.was_changed)
++
++        n1.changed()
++        self.failUnless(n1.was_changed)
++        self.failUnless(n2.was_changed)
++        self.failIf(l1.was_changed)
++
++    def testLeafConstructorPrefix(self):
++        for prefix in ("xyz_", ""):
++            l1 = pytree.Leaf(100, "self", prefix=prefix)
++            self.assertEqual(str(l1), prefix + "self")
++            self.assertEqual(l1.get_prefix(), prefix)
++
++    def testNodeConstructorPrefix(self):
++        for prefix in ("xyz_", ""):
++            l1 = pytree.Leaf(100, "self")
++            l2 = pytree.Leaf(100, "foo", prefix="_")
++            n1 = pytree.Node(1000, [l1, l2], prefix=prefix)
++            self.assertEqual(str(n1), prefix + "self_foo")
++            self.assertEqual(n1.get_prefix(), prefix)
++            self.assertEqual(l1.get_prefix(), prefix)
++            self.assertEqual(l2.get_prefix(), "_")
++
++    def testRemove(self):
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1, l2])
++        n2 = pytree.Node(1000, [n1])
++
++        self.assertEqual(n1.remove(), 0)
++        self.assertEqual(n2.children, [])
++        self.assertEqual(l1.parent, n1)
++        self.assertEqual(n1.parent, None)
++        self.assertEqual(n2.parent, None)
++        self.failIf(n1.was_changed)
++        self.failUnless(n2.was_changed)
++
++        self.assertEqual(l2.remove(), 1)
++        self.assertEqual(l1.remove(), 0)
++        self.assertEqual(n1.children, [])
++        self.assertEqual(l1.parent, None)
++        self.assertEqual(n1.parent, None)
++        self.assertEqual(n2.parent, None)
++        self.failUnless(n1.was_changed)
++        self.failUnless(n2.was_changed)
++
++    def testRemoveParentless(self):
++        n1 = pytree.Node(1000, [])
++        n1.remove()
++        self.assertEqual(n1.parent, None)
++
++        l1 = pytree.Leaf(100, "foo")
++        l1.remove()
++        self.assertEqual(l1.parent, None)
++
++    def testNodeSetChild(self):
++        l1 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1])
++
++        l2 = pytree.Leaf(100, "bar")
++        n1.set_child(0, l2)
++        self.assertEqual(l1.parent, None)
++        self.assertEqual(l2.parent, n1)
++        self.assertEqual(n1.children, [l2])
++
++        n2 = pytree.Node(1000, [l1])
++        n2.set_child(0, n1)
++        self.assertEqual(l1.parent, None)
++        self.assertEqual(n1.parent, n2)
++        self.assertEqual(n2.parent, None)
++        self.assertEqual(n2.children, [n1])
++
++        self.assertRaises(IndexError, n1.set_child, 4, l2)
++        # I don't care what it raises, so long as it's an exception
++        self.assertRaises(Exception, n1.set_child, 0, list)
++
++    def testNodeInsertChild(self):
++        l1 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1])
++
++        l2 = pytree.Leaf(100, "bar")
++        n1.insert_child(0, l2)
++        self.assertEqual(l2.parent, n1)
++        self.assertEqual(n1.children, [l2, l1])
++
++        l3 = pytree.Leaf(100, "abc")
++        n1.insert_child(2, l3)
++        self.assertEqual(n1.children, [l2, l1, l3])
++
++        # I don't care what it raises, so long as it's an exception
++        self.assertRaises(Exception, n1.insert_child, 0, list)
++
++    def testNodeAppendChild(self):
++        n1 = pytree.Node(1000, [])
++
++        l1 = pytree.Leaf(100, "foo")
++        n1.append_child(l1)
++        self.assertEqual(l1.parent, n1)
++        self.assertEqual(n1.children, [l1])
++
++        l2 = pytree.Leaf(100, "bar")
++        n1.append_child(l2)
++        self.assertEqual(l2.parent, n1)
++        self.assertEqual(n1.children, [l1, l2])
++
++        # I don't care what it raises, so long as it's an exception
++        self.assertRaises(Exception, n1.append_child, list)
++
++    def testNodeNextSibling(self):
++        n1 = pytree.Node(1000, [])
++        n2 = pytree.Node(1000, [])
++        p1 = pytree.Node(1000, [n1, n2])
++
++        self.failUnless(n1.next_sibling is n2)
++        self.assertEqual(n2.next_sibling, None)
++        self.assertEqual(p1.next_sibling, None)
++
++    def testLeafNextSibling(self):
++        l1 = pytree.Leaf(100, "a")
++        l2 = pytree.Leaf(100, "b")
++        p1 = pytree.Node(1000, [l1, l2])
++
++        self.failUnless(l1.next_sibling is l2)
++        self.assertEqual(l2.next_sibling, None)
++        self.assertEqual(p1.next_sibling, None)
++
++    def testNodePrevSibling(self):
++        n1 = pytree.Node(1000, [])
++        n2 = pytree.Node(1000, [])
++        p1 = pytree.Node(1000, [n1, n2])
++
++        self.failUnless(n2.prev_sibling is n1)
++        self.assertEqual(n1.prev_sibling, None)
++        self.assertEqual(p1.prev_sibling, None)
++
++    def testLeafPrevSibling(self):
++        l1 = pytree.Leaf(100, "a")
++        l2 = pytree.Leaf(100, "b")
++        p1 = pytree.Node(1000, [l1, l2])
++
++        self.failUnless(l2.prev_sibling is l1)
++        self.assertEqual(l1.prev_sibling, None)
++        self.assertEqual(p1.prev_sibling, None)
++
++
++class TestPatterns(support.TestCase):
++
++    """Unit tests for tree matching patterns."""
++
++    def testBasicPatterns(self):
++        # Build a tree
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        l3 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1, l2])
++        n2 = pytree.Node(1000, [l3])
++        root = pytree.Node(1000, [n1, n2])
++        # Build a pattern matching a leaf
++        pl = pytree.LeafPattern(100, "foo", name="pl")
++        r = {}
++        self.assertFalse(pl.match(root, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pl.match(n1, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pl.match(n2, results=r))
++        self.assertEqual(r, {})
++        self.assertTrue(pl.match(l1, results=r))
++        self.assertEqual(r, {"pl": l1})
++        r = {}
++        self.assertFalse(pl.match(l2, results=r))
++        self.assertEqual(r, {})
++        # Build a pattern matching a node
++        pn = pytree.NodePattern(1000, [pl], name="pn")
++        self.assertFalse(pn.match(root, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pn.match(n1, results=r))
++        self.assertEqual(r, {})
++        self.assertTrue(pn.match(n2, results=r))
++        self.assertEqual(r, {"pn": n2, "pl": l3})
++        r = {}
++        self.assertFalse(pn.match(l1, results=r))
++        self.assertEqual(r, {})
++        self.assertFalse(pn.match(l2, results=r))
++        self.assertEqual(r, {})
++
++    def testWildcardPatterns(self):
++        # Build a tree for testing
++        l1 = pytree.Leaf(100, "foo")
++        l2 = pytree.Leaf(100, "bar")
++        l3 = pytree.Leaf(100, "foo")
++        n1 = pytree.Node(1000, [l1, l2])
++        n2 = pytree.Node(1000, [l3])
++        root = pytree.Node(1000, [n1, n2])
++        # Build a pattern
++        pl = pytree.LeafPattern(100, "foo", name="pl")
++        pn = pytree.NodePattern(1000, [pl], name="pn")
++        pw = pytree.WildcardPattern([[pn], [pl, pl]], name="pw")
++        r = {}
++        self.assertFalse(pw.match_seq([root], r))
++        self.assertEqual(r, {})
++        self.assertFalse(pw.match_seq([n1], r))
++        self.assertEqual(r, {})
++        self.assertTrue(pw.match_seq([n2], r))
++        # These are easier to debug
++        self.assertEqual(sorted(r.keys()), ["pl", "pn", "pw"])
++        self.assertEqual(r["pl"], l1)
++        self.assertEqual(r["pn"], n2)
++        self.assertEqual(r["pw"], [n2])
++        # But this is equivalent
++        self.assertEqual(r, {"pl": l1, "pn": n2, "pw": [n2]})
++        r = {}
++        self.assertTrue(pw.match_seq([l1, l3], r))
++        self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
++        self.assert_(r["pl"] is l3)
++        r = {}
++
++    def testGenerateMatches(self):
++        la = pytree.Leaf(1, "a")
++        lb = pytree.Leaf(1, "b")
++        lc = pytree.Leaf(1, "c")
++        ld = pytree.Leaf(1, "d")
++        le = pytree.Leaf(1, "e")
++        lf = pytree.Leaf(1, "f")
++        leaves = [la, lb, lc, ld, le, lf]
++        root = pytree.Node(1000, leaves)
++        pa = pytree.LeafPattern(1, "a", "pa")
++        pb = pytree.LeafPattern(1, "b", "pb")
++        pc = pytree.LeafPattern(1, "c", "pc")
++        pd = pytree.LeafPattern(1, "d", "pd")
++        pe = pytree.LeafPattern(1, "e", "pe")
++        pf = pytree.LeafPattern(1, "f", "pf")
++        pw = pytree.WildcardPattern([[pa, pb, pc], [pd, pe],
++                                     [pa, pb], [pc, pd], [pe, pf]],
++                                    min=1, max=4, name="pw")
++        self.assertEqual([x[0] for x in pw.generate_matches(leaves)],
++                         [3, 5, 2, 4, 6])
++        pr = pytree.NodePattern(type=1000, content=[pw], name="pr")
++        matches = list(pytree.generate_matches([pr], [root]))
++        self.assertEqual(len(matches), 1)
++        c, r = matches[0]
++        self.assertEqual(c, 1)
++        self.assertEqual(str(r["pr"]), "abcdef")
++        self.assertEqual(r["pw"], [la, lb, lc, ld, le, lf])
++        for c in "abcdef":
++            self.assertEqual(r["p" + c], pytree.Leaf(1, c))
++
++    def testHasKeyExample(self):
++        pattern = pytree.NodePattern(331,
++                                     (pytree.LeafPattern(7),
++                                      pytree.WildcardPattern(name="args"),
++                                      pytree.LeafPattern(8)))
++        l1 = pytree.Leaf(7, "(")
++        l2 = pytree.Leaf(3, "x")
++        l3 = pytree.Leaf(8, ")")
++        node = pytree.Node(331, [l1, l2, l3])
++        r = {}
++        self.assert_(pattern.match(node, r))
++        self.assertEqual(r["args"], [l2])
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
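
For orientation on the pattern tests: Leaf and Node trees stringify back to source text, and named patterns bind the nodes they match into a results dict. A small sketch against stock lib2to3 (the type numbers are arbitrary, as in the tests above):

    from lib2to3 import pytree

    l1 = pytree.Leaf(100, "foo")
    l2 = pytree.Leaf(100, "bar", prefix=" ")
    n1 = pytree.Node(1000, [l1, l2])
    assert str(n1) == "foo bar"      # the prefix carries the space

    pat = pytree.NodePattern(1000,
                             [pytree.LeafPattern(100, "foo", name="hit"),
                              pytree.LeafPattern(100)],
                             name="node")
    results = {}
    assert pat.match(n1, results)
    assert results["hit"] is l1 and results["node"] is n1
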
+diff -r 531f2e948299 refactor/tests/test_refactor.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/test_refactor.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,168 @@
++"""
++Unit tests for refactor.py.
++"""
++
++import sys
++import os
++import operator
++import StringIO
++import tempfile
++import unittest
++
++from .. import refactor, pygram, fixer_base
++
++from . import support
++
++
++FIXER_DIR = os.path.join(os.path.dirname(__file__), "data/fixers")
++
++sys.path.append(FIXER_DIR)
++try:
++    _DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
++finally:
++    sys.path.pop()
++
++class TestRefactoringTool(unittest.TestCase):
++
++    def setUp(self):
++        sys.path.append(FIXER_DIR)
++
++    def tearDown(self):
++        sys.path.pop()
++
++    def check_instances(self, instances, classes):
++        for inst, cls in zip(instances, classes):
++            if not isinstance(inst, cls):
++                self.fail("%s are not instances of %s" % (instances, classes))
++
++    def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
++        return refactor.RefactoringTool(fixers, options, explicit)
++
++    def test_print_function_option(self):
++        gram = pygram.python_grammar
++        save = gram.keywords["print"]
++        try:
++            rt = self.rt({"print_function" : True})
++            self.assertRaises(KeyError, operator.itemgetter("print"),
++                              gram.keywords)
++        finally:
++            gram.keywords["print"] = save
++
++    def test_fixer_loading_helpers(self):
++        contents = ["explicit", "first", "last", "parrot", "preorder"]
++        non_prefixed = refactor.get_all_fix_names("myfixes")
++        prefixed = refactor.get_all_fix_names("myfixes", False)
++        full_names = refactor.get_fixers_from_package("myfixes")
++        self.assertEqual(prefixed, ["fix_" + name for name in contents])
++        self.assertEqual(non_prefixed, contents)
++        self.assertEqual(full_names,
++                         ["myfixes.fix_" + name for name in contents])
++
++    def test_get_headnode_dict(self):
++        class NoneFix(fixer_base.BaseFix):
++            PATTERN = None
++
++        class FileInputFix(fixer_base.BaseFix):
++            PATTERN = "file_input< any * >"
++
++        no_head = NoneFix({}, [])
++        with_head = FileInputFix({}, [])
++        d = refactor.get_headnode_dict([no_head, with_head])
++        expected = {None: [no_head],
++                    pygram.python_symbols.file_input : [with_head]}
++        self.assertEqual(d, expected)
++
++    def test_fixer_loading(self):
++        from myfixes.fix_first import FixFirst
++        from myfixes.fix_last import FixLast
++        from myfixes.fix_parrot import FixParrot
++        from myfixes.fix_preorder import FixPreorder
++
++        rt = self.rt()
++        pre, post = rt.get_fixers()
++
++        self.check_instances(pre, [FixPreorder])
++        self.check_instances(post, [FixFirst, FixParrot, FixLast])
++
++    def test_naughty_fixers(self):
++        self.assertRaises(ImportError, self.rt, fixers=["not_here"])
++        self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
++        self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
++
++    def test_refactor_string(self):
++        rt = self.rt()
++        input = "def parrot(): pass\n\n"
++        tree = rt.refactor_string(input, "<test>")
++        self.assertNotEqual(str(tree), input)
++
++        input = "def f(): pass\n\n"
++        tree = rt.refactor_string(input, "<test>")
++        self.assertEqual(str(tree), input)
++
++    def test_refactor_stdin(self):
++
++        class MyRT(refactor.RefactoringTool):
++
++            def print_output(self, lines):
++                diff_lines.extend(lines)
++
++        diff_lines = []
++        rt = MyRT(_DEFAULT_FIXERS)
++        save = sys.stdin
++        sys.stdin = StringIO.StringIO("def parrot(): pass\n\n")
++        try:
++            rt.refactor_stdin()
++        finally:
++            sys.stdin = save
++        expected = """--- <stdin> (original)
+++++ <stdin> (refactored)
++@@ -1,2 +1,2 @@
++-def parrot(): pass
+++def cheese(): pass""".splitlines()
++        self.assertEqual(diff_lines[:-1], expected)
++
++    def test_refactor_file(self):
++        test_file = os.path.join(FIXER_DIR, "parrot_example.py")
++        old_contents = open(test_file, "r").read()
++        rt = self.rt()
++
++        rt.refactor_file(test_file)
++        self.assertEqual(old_contents, open(test_file, "r").read())
++
++        rt.refactor_file(test_file, True)
++        try:
++            self.assertNotEqual(old_contents, open(test_file, "r").read())
++        finally:
++            open(test_file, "w").write(old_contents)
++
++    def test_refactor_docstring(self):
++        rt = self.rt()
++
++        def example():
++            """
++            >>> example()
++            42
++            """
++        out = rt.refactor_docstring(example.__doc__, "<test>")
++        self.assertEqual(out, example.__doc__)
++
++        def parrot():
++            """
++            >>> def parrot():
++            ...      return 43
++            """
++        out = rt.refactor_docstring(parrot.__doc__, "<test>")
++        self.assertNotEqual(out, parrot.__doc__)
++
++    def test_explicit(self):
++        from myfixes.fix_explicit import FixExplicit
++
++        rt = self.rt(fixers=["myfixes.fix_explicit"])
++        self.assertEqual(len(rt.post_order), 0)
++
++        rt = self.rt(explicit=["myfixes.fix_explicit"])
++        for fix in rt.post_order:
++            if isinstance(fix, FixExplicit):
++                break
++        else:
++            self.fail("explicit fixer not loaded")
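
The tests above drive RefactoringTool with fixture fixers from data/fixers; against stock lib2to3 the same entry points look like the sketch below (the renamed refactor package in this commit is expected to behave equivalently):

    from lib2to3.refactor import RefactoringTool, get_fixers_from_package

    fixers = get_fixers_from_package("lib2to3.fixes")
    rt = RefactoringTool(fixers)

    # refactor_string() expects the text to end with a newline.
    tree = rt.refactor_string("print 'hello'\n", "<example>")
    print str(tree)    # -> print('hello')
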
+diff -r 531f2e948299 refactor/tests/test_util.py
+--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
++++ b/refactor/tests/test_util.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -0,0 +1,559 @@
++#!/usr/bin/env python2.5
++""" Test suite for the code in fixes.util """
++# Author: Collin Winter
++
++# Testing imports
++from . import support
++
++# Python imports
++import os.path
++
++# Local imports
++from .. import pytree
++from .. import fixer_util
++from ..fixer_util import Attr, Name
++
++
++def parse(code, strip_levels=0):
++    # The topmost node is file_input, which we don't care about.
++    # The next-topmost node is a *_stmt node, which we also don't care about.
++    tree = support.parse_string(code)
++    for i in range(strip_levels):
++        tree = tree.children[0]
++    tree.parent = None
++    return tree
++
++class MacroTestCase(support.TestCase):
++    def assertStr(self, node, string):
++        if isinstance(node, (tuple, list)):
++            node = pytree.Node(fixer_util.syms.simple_stmt, node)
++        self.assertEqual(str(node), string)
++
++
++class Test_is_tuple(support.TestCase):
++    def is_tuple(self, string):
++        return fixer_util.is_tuple(parse(string, strip_levels=2))
++
++    def test_valid(self):
++        self.failUnless(self.is_tuple("(a, b)"))
++        self.failUnless(self.is_tuple("(a, (b, c))"))
++        self.failUnless(self.is_tuple("((a, (b, c)),)"))
++        self.failUnless(self.is_tuple("(a,)"))
++        self.failUnless(self.is_tuple("()"))
++
++    def test_invalid(self):
++        self.failIf(self.is_tuple("(a)"))
++        self.failIf(self.is_tuple("('foo') % (b, c)"))
++
++
++class Test_is_list(support.TestCase):
++    def is_list(self, string):
++        return fixer_util.is_list(parse(string, strip_levels=2))
++
++    def test_valid(self):
++        self.failUnless(self.is_list("[]"))
++        self.failUnless(self.is_list("[a]"))
++        self.failUnless(self.is_list("[a, b]"))
++        self.failUnless(self.is_list("[a, [b, c]]"))
++        self.failUnless(self.is_list("[[a, [b, c]],]"))
++
++    def test_invalid(self):
++        self.failIf(self.is_list("[]+[]"))
++
++
++class Test_Attr(MacroTestCase):
++    def test(self):
++        call = parse("foo()", strip_levels=2)
++
++        self.assertStr(Attr(Name("a"), Name("b")), "a.b")
++        self.assertStr(Attr(call, Name("b")), "foo().b")
++
++    def test_returns(self):
++        attr = Attr(Name("a"), Name("b"))
++        self.assertEqual(type(attr), list)
++
++
++class Test_Name(MacroTestCase):
++    def test(self):
++        self.assertStr(Name("a"), "a")
++        self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
++        self.assertStr(Name("a", prefix="b"), "ba")
++
++
++class Test_does_tree_import(support.TestCase):
++    def _find_bind_rec(self, name, node):
++        # Search a tree for a binding -- used to find the starting
++        # point for these tests.
++        c = fixer_util.find_binding(name, node)
++        if c: return c
++        for child in node.children:
++            c = self._find_bind_rec(name, child)
++            if c: return c
++
++    def does_tree_import(self, package, name, string):
++        node = parse(string)
++        # Find the binding of start -- that's what we'll go from
++        node = self._find_bind_rec('start', node)
++        return fixer_util.does_tree_import(package, name, node)
++
++    def try_with(self, string):
++        failing_tests = (("a", "a", "from a import b"),
++                         ("a.d", "a", "from a.d import b"),
++                         ("d.a", "a", "from d.a import b"),
++                         (None, "a", "import b"),
++                         (None, "a", "import b, c, d"))
++        for package, name, import_ in failing_tests:
++            n = self.does_tree_import(package, name, import_ + "\n" + string)
++            self.failIf(n)
++            n = self.does_tree_import(package, name, string + "\n" + import_)
++            self.failIf(n)
++
++        passing_tests = (("a", "a", "from a import a"),
++                         ("x", "a", "from x import a"),
++                         ("x", "a", "from x import b, c, a, d"),
++                         ("x.b", "a", "from x.b import a"),
++                         ("x.b", "a", "from x.b import b, c, a, d"),
++                         (None, "a", "import a"),
++                         (None, "a", "import b, c, a, d"))
++        for package, name, import_ in passing_tests:
++            n = self.does_tree_import(package, name, import_ + "\n" + string)
++            self.failUnless(n)
++            n = self.does_tree_import(package, name, string + "\n" + import_)
++            self.failUnless(n)
++
++    def test_in_function(self):
++        self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
++
++class Test_find_binding(support.TestCase):
++    def find_binding(self, name, string, package=None):
++        return fixer_util.find_binding(name, parse(string), package)
++
++    def test_simple_assignment(self):
++        self.failUnless(self.find_binding("a", "a = b"))
++        self.failUnless(self.find_binding("a", "a = [b, c, d]"))
++        self.failUnless(self.find_binding("a", "a = foo()"))
++        self.failUnless(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
++        self.failIf(self.find_binding("a", "foo = a"))
++        self.failIf(self.find_binding("a", "foo = (a, b, c)"))
++
++    def test_tuple_assignment(self):
++        self.failUnless(self.find_binding("a", "(a,) = b"))
++        self.failUnless(self.find_binding("a", "(a, b, c) = [b, c, d]"))
++        self.failUnless(self.find_binding("a", "(c, (d, a), b) = foo()"))
++        self.failUnless(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
++        self.failIf(self.find_binding("a", "(foo, b) = (b, a)"))
++        self.failIf(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
++
++    def test_list_assignment(self):
++        self.failUnless(self.find_binding("a", "[a] = b"))
++        self.failUnless(self.find_binding("a", "[a, b, c] = [b, c, d]"))
++        self.failUnless(self.find_binding("a", "[c, [d, a], b] = foo()"))
++        self.failUnless(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
++        self.failIf(self.find_binding("a", "[foo, b] = (b, a)"))
++        self.failIf(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
++
++    def test_invalid_assignments(self):
++        self.failIf(self.find_binding("a", "foo.a = 5"))
++        self.failIf(self.find_binding("a", "foo[a] = 5"))
++        self.failIf(self.find_binding("a", "foo(a) = 5"))
++        self.failIf(self.find_binding("a", "foo(a, b) = 5"))
++
++    def test_simple_import(self):
++        self.failUnless(self.find_binding("a", "import a"))
++        self.failUnless(self.find_binding("a", "import b, c, a, d"))
++        self.failIf(self.find_binding("a", "import b"))
++        self.failIf(self.find_binding("a", "import b, c, d"))
++
++    def test_from_import(self):
++        self.failUnless(self.find_binding("a", "from x import a"))
++        self.failUnless(self.find_binding("a", "from a import a"))
++        self.failUnless(self.find_binding("a", "from x import b, c, a, d"))
++        self.failUnless(self.find_binding("a", "from x.b import a"))
++        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d"))
++        self.failIf(self.find_binding("a", "from a import b"))
++        self.failIf(self.find_binding("a", "from a.d import b"))
++        self.failIf(self.find_binding("a", "from d.a import b"))
++
++    def test_import_as(self):
++        self.failUnless(self.find_binding("a", "import b as a"))
++        self.failUnless(self.find_binding("a", "import b as a, c, a as f, d"))
++        self.failIf(self.find_binding("a", "import a as f"))
++        self.failIf(self.find_binding("a", "import b, c as f, d as e"))
++
++    def test_from_import_as(self):
++        self.failUnless(self.find_binding("a", "from x import b as a"))
++        self.failUnless(self.find_binding("a", "from x import g as a, d as b"))
++        self.failUnless(self.find_binding("a", "from x.b import t as a"))
++        self.failUnless(self.find_binding("a", "from x.b import g as a, d"))
++        self.failIf(self.find_binding("a", "from a import b as t"))
++        self.failIf(self.find_binding("a", "from a.d import b as t"))
++        self.failIf(self.find_binding("a", "from d.a import b as t"))
++
++    def test_simple_import_with_package(self):
++        self.failUnless(self.find_binding("b", "import b"))
++        self.failUnless(self.find_binding("b", "import b, c, d"))
++        self.failIf(self.find_binding("b", "import b", "b"))
++        self.failIf(self.find_binding("b", "import b, c, d", "c"))
++
++    def test_from_import_with_package(self):
++        self.failUnless(self.find_binding("a", "from x import a", "x"))
++        self.failUnless(self.find_binding("a", "from a import a", "a"))
++        self.failUnless(self.find_binding("a", "from x import *", "x"))
++        self.failUnless(self.find_binding("a", "from x import b, c, a, d", "x"))
++        self.failUnless(self.find_binding("a", "from x.b import a", "x.b"))
++        self.failUnless(self.find_binding("a", "from x.b import *", "x.b"))
++        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
++        self.failIf(self.find_binding("a", "from a import b", "a"))
++        self.failIf(self.find_binding("a", "from a.d import b", "a.d"))
++        self.failIf(self.find_binding("a", "from d.a import b", "a.d"))
++        self.failIf(self.find_binding("a", "from x.y import *", "a.b"))
++
++    def test_import_as_with_package(self):
++        self.failIf(self.find_binding("a", "import b.c as a", "b.c"))
++        self.failIf(self.find_binding("a", "import a as f", "f"))
++        self.failIf(self.find_binding("a", "import a as f", "a"))
++
++    def test_from_import_as_with_package(self):
++        # Because it would take a lot of special-case code in the fixers
++        # to deal with from foo import bar as baz, we'll simply always
++        # fail if there is a "from ... import ... as ..."
++        self.failIf(self.find_binding("a", "from x import b as a", "x"))
++        self.failIf(self.find_binding("a", "from x import g as a, d as b", "x"))
++        self.failIf(self.find_binding("a", "from x.b import t as a", "x.b"))
++        self.failIf(self.find_binding("a", "from x.b import g as a, d", "x.b"))
++        self.failIf(self.find_binding("a", "from a import b as t", "a"))
++        self.failIf(self.find_binding("a", "from a import b as t", "b"))
++        self.failIf(self.find_binding("a", "from a import b as t", "t"))
++
++    def test_function_def(self):
++        self.failUnless(self.find_binding("a", "def a(): pass"))
++        self.failUnless(self.find_binding("a", "def a(b, c, d): pass"))
++        self.failUnless(self.find_binding("a", "def a(): b = 7"))
++        self.failIf(self.find_binding("a", "def d(b, (c, a), e): pass"))
++        self.failIf(self.find_binding("a", "def d(a=7): pass"))
++        self.failIf(self.find_binding("a", "def d(a): pass"))
++        self.failIf(self.find_binding("a", "def d(): a = 7"))
++
++        s = """
++            def d():
++                def a():
++                    pass"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_class_def(self):
++        self.failUnless(self.find_binding("a", "class a: pass"))
++        self.failUnless(self.find_binding("a", "class a(): pass"))
++        self.failUnless(self.find_binding("a", "class a(b): pass"))
++        self.failUnless(self.find_binding("a", "class a(b, c=8): pass"))
++        self.failIf(self.find_binding("a", "class d: pass"))
++        self.failIf(self.find_binding("a", "class d(a): pass"))
++        self.failIf(self.find_binding("a", "class d(b, a=7): pass"))
++        self.failIf(self.find_binding("a", "class d(b, *a): pass"))
++        self.failIf(self.find_binding("a", "class d(b, **a): pass"))
++        self.failIf(self.find_binding("a", "class d: a = 7"))
++
++        s = """
++            class d():
++                class a():
++                    pass"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_for(self):
++        self.failUnless(self.find_binding("a", "for a in r: pass"))
++        self.failUnless(self.find_binding("a", "for a, b in r: pass"))
++        self.failUnless(self.find_binding("a", "for (a, b) in r: pass"))
++        self.failUnless(self.find_binding("a", "for c, (a,) in r: pass"))
++        self.failUnless(self.find_binding("a", "for c, (a, b) in r: pass"))
++        self.failUnless(self.find_binding("a", "for c in r: a = c"))
++        self.failIf(self.find_binding("a", "for c in a: pass"))
++
++    def test_for_nested(self):
++        s = """
++            for b in r:
++                for a in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for a, c in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for (a, c) in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for (a,) in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c, (a, d) in b:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c in b:
++                    a = 7"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c in b:
++                    d = a"""
++        self.failIf(self.find_binding("a", s))
++
++        s = """
++            for b in r:
++                for c in a:
++                    d = 7"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_if(self):
++        self.failUnless(self.find_binding("a", "if b in r: a = c"))
++        self.failIf(self.find_binding("a", "if a in r: d = e"))
++
++    def test_if_nested(self):
++        s = """
++            if b in r:
++                if c in d:
++                    a = c"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            if b in r:
++                if c in d:
++                    c = a"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_while(self):
++        self.failUnless(self.find_binding("a", "while b in r: a = c"))
++        self.failIf(self.find_binding("a", "while a in r: d = e"))
++
++    def test_while_nested(self):
++        s = """
++            while b in r:
++                while c in d:
++                    a = c"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            while b in r:
++                while c in d:
++                    c = a"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except(self):
++        s = """
++            try:
++                a = 6
++            except:
++                b = 8"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except KeyError:
++                pass
++            except:
++                a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except_nested(self):
++        s = """
++            try:
++                try:
++                    a = 6
++                except:
++                    pass
++            except:
++                b = 8"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                try:
++                    a = 6
++                except:
++                    pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                try:
++                    pass
++                except:
++                    a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                try:
++                    b = 8
++                except KeyError:
++                    pass
++                except:
++                    a = 6
++            except:
++                pass"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                pass
++            except:
++                try:
++                    b = 8
++                except KeyError:
++                    pass
++                except:
++                    a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++        s = """
++            try:
++                try:
++                    b = 8
++                except:
++                    c = d
++            except:
++                try:
++                    b = 6
++                except:
++                    t = 8
++                except:
++                    o = y"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except_finally(self):
++        s = """
++            try:
++                c = 6
++            except:
++                b = 8
++            finally:
++                a = 9"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            except:
++                b = 9
++            finally:
++                b = 6"""
++        self.failIf(self.find_binding("a", s))
++
++    def test_try_except_finally_nested(self):
++        s = """
++            try:
++                c = 6
++            except:
++                b = 8
++            finally:
++                try:
++                    a = 9
++                except:
++                    b = 9
++                finally:
++                    c = 9"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                try:
++                    pass
++                finally:
++                    a = 6"""
++        self.failUnless(self.find_binding("a", s))
++
++        s = """
++            try:
++                b = 8
++            finally:
++                try:
++                    b = 6
++                finally:
++                    b = 7"""
++        self.failIf(self.find_binding("a", s))
++
++class Test_touch_import(support.TestCase):
++
++    def test_after_docstring(self):
++        node = parse('"""foo"""\nbar()')
++        fixer_util.touch_import(None, "foo", node)
++        self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
++
++    def test_after_imports(self):
++        node = parse('"""foo"""\nimport bar\nbar()')
++        fixer_util.touch_import(None, "foo", node)
++        self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
++
++    def test_beginning(self):
++        node = parse('bar()')
++        fixer_util.touch_import(None, "foo", node)
++        self.assertEqual(str(node), 'import foo\nbar()\n\n')
++
++    def test_from_import(self):
++        node = parse('bar()')
++        fixer_util.touch_import("cgi", "escape", node)
++        self.assertEqual(str(node), 'from cgi import escape\nbar()\n\n')
++
++    def test_name_import(self):
++        node = parse('bar()')
++        fixer_util.touch_import(None, "cgi", node)
++        self.assertEqual(str(node), 'import cgi\nbar()\n\n')
++
++
++if __name__ == "__main__":
++    import __main__
++    support.run_all_tests(__main__)
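
find_binding() and touch_import(), exercised above, can also be called directly on a parsed tree. A sketch with stock lib2to3 names:

    from lib2to3 import fixer_util, pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("import foo\nfoo.bar()\n")

    # find_binding() returns the node that binds the name, or None.
    assert fixer_util.find_binding("foo", tree)

    # touch_import() inserts "import os" after the existing imports.
    fixer_util.touch_import(None, "os", tree)
    print str(tree)
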
+diff -r 531f2e948299 scripts/.svn/entries
+--- a/scripts/.svn/entries	Mon Mar 30 20:02:09 2009 -0500
++++ b/scripts/.svn/entries	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,7 +1,7 @@
+ 9
+ 
+ dir
+-70785
++70822
+ http://svn.python.org/projects/sandbox/trunk/2to3/scripts
+ http://svn.python.org/projects
+ 
+diff -r 531f2e948299 scripts/find_pattern.py
+--- a/scripts/find_pattern.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/scripts/find_pattern.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -47,19 +47,24 @@
+ from StringIO import StringIO
+ 
+ # Local imports
+-from lib2to3 import pytree
+-from lib2to3.pgen2 import driver
+-from lib2to3.pygram import python_symbols, python_grammar
+-
+-driver = driver.Driver(python_grammar, convert=pytree.convert)
++from refactor import pytree
++from refactor import pgen2
++from refactor.pygram import python_symbols, python_grammar
+ 
+ def main(args):
+     parser = optparse.OptionParser(usage="find_pattern.py [options] [string]")
+     parser.add_option("-f", "--file", action="store",
+                       help="Read a code snippet from the specified file")
++    parser.add_option("-p", "--print-function", action="store_true",
++                      help="Modify the grammar so that print() is a function")
+ 
+     # Parse command line arguments
+     options, args = parser.parse_args(args)
++
++    if options.print_function:
++        del python_grammar.keywords["print"]
++
++    driver = pgen2.driver.Driver(python_grammar, convert=pytree.convert)
+     if options.file:
+         tree = driver.parse_file(options.file)
+     elif len(args) > 1:
+diff -r 531f2e948299 setup.py
+--- a/setup.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/setup.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -3,6 +3,13 @@
+ setup(
+    name="2to3",
+    packages=['lib2to3','lib2to3.fixes','lib2to3.pgen2'],
+-   package_data={'lib2to3':['Grammar.txt','PatternGrammar.txt']},
++   package_data={'lib2to3':['lib2to3/Grammar.txt','lib2to3/PatternGrammar.txt']},
+    scripts=["2to3"]
+ )
++
++setup(
++   name="refactor",
++   packages=['refactor','refactor.fixes','refactor.fixes.from2','refactor.fixes.from3','refactor.pgen2'],
++   package_data={'refactor':['Grammar.txt','PatternGrammar.txt']},
++   scripts=["3to2"]
++)
+diff -r 531f2e948299 test.py
+--- a/test.py	Mon Mar 30 20:02:09 2009 -0500
++++ b/test.py	Wed Apr 01 13:59:47 2009 -0500
+@@ -1,32 +1,59 @@
+ #!/usr/bin/env python2.5
+ 
+-"""Main test file for 2to3.
++"""Main test file for refactor (2to3 and back again).
+ 
+ Running "python test.py" will run all tests in tests/test_*.py.
+ """
+-# Author: Collin Winter
++# Original Author: Collin Winter
+ 
+ import unittest
+-from lib2to3 import tests
+-import lib2to3.tests.support
+-from sys import exit, argv
++from optparse import OptionParser, OptionGroup
++from sys import exit
+ 
+-if "-h" in argv or "--help" in argv or len(argv) > 2:
+-    print "Usage: %s [-h] [test suite[.test class]]" %(argv[0])
+-    print "default   : run all tests in lib2to3/tests/test_*.py"
+-    print "test suite: run tests in lib2to3/tests/<test suite>"
+-    print "test class : run tests in <test suite>.<test class>"
+-    exit(1)
+# Note: more imports happen below, driven by the parsed options.
+ 
+-if len(argv) == 2:
++usage = "usage: %prog [options] arg"
++usage += "\n\narg can be:\n"
++usage += "test suite: run tests in refactor/tests/<test suite>\n"
++usage += "test class: run tests in <test suite>.<test class>\n"
++usage += "(default: run all tests in refactor/tests/test_*.py)"
++
++parser = OptionParser(usage=usage)
++parser.add_option("--source",
++                  dest="source",
++                  help="source version of Python to refactor")
++parser.add_option("--target",
++                  dest="target",
++                  help="target version of Python")
++parser.add_option("--base",
++                  dest="base", default="refactor",
++                  help="base package, e.g. lib2to3 or refactor")
++
++(options, args) = parser.parse_args()
++
++# It's too late at night to figure out why __import__ is failing.
++exec "from %s import tests" % options.base
++exec "from %s.tests import support" % options.base
++exec "from %s.tests.test_fixers import FixerTestCase as Fixer" % options.base
++
++old_version = support.parse_version(options.source)
++new_version = support.parse_version(options.target)
++
++if old_version:
++    Fixer.old_version = old_version
++if new_version:
++    Fixer.new_version = new_version
++
++if len(args) > 0:
++    arg = args[0]
+     mod = tests
+-    for m in argv[1].split("."):
++    for m in arg.split("."):
+         mod = getattr(mod, m, None)
+         if not mod:
+             print "Error importing %s" %(m)
+             exit(1)
+ 
+-    if argv[1].find(".") == -1:
++    if arg.find(".") == -1:
+         # Just the module was specified, load all the tests
+         suite = unittest.TestLoader().loadTestsFromModule(mod)
+     else:

Added: sandbox/trunk/refactor_pkg/example.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/example.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+	# comment indented by tab
+
+"""Docstring.
+
+Here are some doctest examples:
+
+>>> print 42
+42
+
+ >>> d = {1: 1, 2: 2, 2: 2}
+ >>> d.keys().sort()
+ >>> print d
+ {1: 1, 2: 2}
+
+  >>> for i in d.keys():
+  ...     print i, d[i]
+
+And a tricky one:
+
+>>> class X(Structure):
+...     _fields_ = [("x", c_int), ("y", c_int), ("array", c_char_p * 5)]
+...
+>>> x = X()
+>>> print x._objects
+None
+>>>
+
+"""
+
+import sys
+
+def unicode_examples():
+    a = unicode(b)
+    a = u"xxx"
+    a = U"""xxx"""
+    a = ur'xxx'
+    a = UR'''xxx'''
+    a = Ur"xxx"
+    a = uR"""xxx"""
+    b = u"..." u'...'
+
+def ne_examples():
+    if x <> y:
+        pass
+    if x<>y:
+        pass
+    if x<>y<>z:
+        pass
+
+def has_key_examples():
+    #
+    x = d.has_key("x") or d.has_key("y")
+    #
+    x = a.b.c.d.has_key("x") ** 3
+    #
+    x = a.b.has_key(1 + 2).__repr__()
+    #
+    x = a.b.has_key(1 + 2).__repr__() ** -3 ** 4
+    #
+    x = a.has_key(f or g)
+    #
+    x = a + b.has_key(c)
+    #
+    x = a.has_key(lambda: 12)
+    #
+    x = a.has_key(a for a in b)
+    #
+    if not a.has_key(b): pass
+    #
+    if not a.has_key(b).__repr__(): pass
+    #
+    if not a.has_key(b) ** 2: pass
+
+def foo():
+	pass # body indented by tab
+
+def test_ws_comma():
+    yield 1,2 ,3
+    f(1,2 ,3)
+    `a ,b`
+    def f(a,b ,c): pass
+    { a:b,c:d , e : f }
+
+def apply_examples():
+    x = apply(f, g + h)
+    y = apply(f, g, h)
+    z = apply(fs[0], g or h, h or g)
+    # Hello
+    apply(f, (x, y) + t)
+    apply(f, args,)
+    apply(f, args, kwds,)
+    # Test that complex functions are parenthesized
+    x = apply(f+g, args)
+    x = apply(f*g, args)
+    x = apply(f**g, args)
+    # But dotted names etc. not
+    x = apply(f.g, args)
+    x = apply(f[x], args)
+    x = apply(f(), args)
+    # Extreme case
+    x = apply(a.b.c.d.e.f, args, kwds)
+    # XXX Comments in weird places still get lost
+    apply(   # foo
+          f, # bar
+          args)
+
+def bad_apply_examples():
+    # These should *not* be touched
+    apply()
+    apply(f)
+    apply(f,)
+    apply(f, args, kwds, extras)
+    apply(f, *args, **kwds)
+    apply(f, *args)
+    apply(func=f, args=args, kwds=kwds)
+    apply(f, args=args, kwds=kwds)
+    apply(f, args, kwds=kwds)
+
+def metaclass_examples():
+    class X:
+        __metaclass__ = Meta
+
+    class X(b1, b2):
+        bar = 23 # Comment on me!
+        __metaclass__ = Meta
+        spam = 27.23 # Laughable
+
+    class X:
+        __metaclass__ = Meta; x = 23; y = 34 # Yes, I can handle this, too.
+
+def intern_examples():
+    #
+    # These should be refactored:
+    #
+    x = intern(a)
+    #
+    y = intern("b" # test
+              )
+    #
+    z = intern(a+b+c.d,)
+    #
+    intern("y%s" % 5).replace("y", "")
+    #
+    # These not:
+    #
+    intern(a=1)
+    #
+    intern(f, g)
+    #
+    intern(*h)
+    #
+    intern(**i)
+
+def print_examples():
+    # plain vanilla
+    print 1, 1+1, 1+1+1
+    #
+    print 1, 2
+    #
+    print 1
+
+    print
+
+    # trailing commas
+    print 1, 2, 3,
+    #
+    print 1, 2,
+    #
+    print 1,
+    #
+    print
+
+    # >> stuff
+    print >>sys.stderr, 1, 2, 3    # no trailing comma
+    #
+    print >>sys.stderr, 1, 2,      # trailing comma
+    #
+    print >>sys.stderr, 1+1        # no trailing comma
+    #
+    print >>  sys.stderr           # spaces before sys.stderr
+
+def exec_examples():
+    #
+    exec code
+    #
+    exec code in ns
+    #
+    exec code in ns1, ns2
+    #
+    exec (a.b()) in ns
+    #
+    exec a.b() + c in ns
+    #
+    # These should not be touched:
+    #
+    exec(code)
+    #
+    exec (code)
+    #
+    exec(code, ns)
+    #
+    exec(code, ns1, ns2)
+
+def repr_examples():
+    x = `1 + 2`
+    #
+    y = `x`
+    #
+    z = `y`.__repr__()
+    #
+    x = `1, 2, 3`
+    #
+    x = `1 + `2``
+    #
+    x = `1, 2 + `3, 4``
+
+def except_examples():
+    try:
+        pass
+    except Exception, (f, e):
+        pass
+    except ImportError, e:
+        print e.args
+    #
+    try:
+        pass
+    except (RuntimeError, ImportError), e:
+        pass
+    #
+    try:
+        pass
+    except Exception, (a, b):
+        pass
+    #
+    try:
+        pass
+    except Exception, d[5]:
+        pass
+    #
+    try:
+        pass
+    except Exception, a.foo:
+        pass
+    #
+    try:
+        pass
+    except Exception, a().foo:
+        pass
+    #
+    # These should not be touched:
+    #
+    try:
+        pass
+    except:
+        pass
+    #
+    try:
+        pass
+    except Exception:
+        pass
+    #
+    try:
+        pass
+    except (Exception, SystemExit):
+        pass
+
+def raise_examples():
+    raise Exception, 5
+    #
+    raise Exception,5
+    #
+    raise Exception, (5, 6, 7)
+    #
+    # These should not be touched
+    #
+    raise Exception
+    #
+    raise Exception(5, 6)
+    #
+    # These should produce a warning
+    # TODO: convert "raise E, V, T" to
+    #  "e = E(V); e.__traceback__ = T; raise e;"
+    #
+    raise Exception, 5, 6
+    #
+    raise Exception,5,6
+    #
+    raise Exception, (5, 6, 7), 6
+
+def long_examples():
+    x = long(x)
+    y = isinstance(x, long)
+    z = type(x) in (int, long)
+    a = 12L
+    b = 0x12l
+    # unchanged:
+    a = 12
+    b = 0x12
+    c = 3.14
+
+def dict_examples():
+    #
+    # Plain method calls
+    #
+    print d.keys()
+    print d.items()
+    print d.values()
+    #
+    # Plain method calls in special contexts
+    #
+    print iter(e.keys())
+    for i in e.keys(): print i
+    [i for i in e.keys()]
+    (i for i in e.keys())
+    #
+    # Iterator method calls
+    #
+    print f.iterkeys()
+    print f.iteritems()
+    print f.itervalues()
+    #
+    # Iterator method calls in special contexts
+    #
+    print list(g.iterkeys())
+    print sorted(g.iterkeys())
+    print iter(g.iterkeys())
+    for i in g.iterkeys(): print i
+    [i for i in g.iterkeys()]
+    (i for i in g.iterkeys())
+    #
+    # Examples with a "tail"; these are never "special"
+    #
+    print h.iterkeys().next()
+    print h.keys()[0]
+    print list(h.iterkeys().next())
+    for x in h.keys()[0]: print x
+
+def dict_negative_examples():
+    #
+    # These should all remain unchanged:
+    #
+    print list(h.keys())
+    print sorted(h.keys())
+
+def xrange_examples():
+    for i in xrange(100): print i
+    for i in xrange(0, 100): print i
+    for i in xrange(0, 100, 10): print i
+
+def input_examples():
+    a = input()
+    b = input(str(a))
+
+def raw_input_examples():
+    a = raw_input()
+    b = raw_input(a.rstrip())
+
+def filter_examples():
+    filter(os.unlink, filenames)
+    filter(None, "whatever")
+    filter(lambda x: not x, range(4))
+
+def map_examples():
+    map(None, foo.bar)
+    map(None, foo.bar,)
+    map(None, foo, bar)
+    map(f, foo.bar)
+    map(lambda x: x+1, range(10))
+
+def basestring_examples():
+    if isinstance(x, basestring): pass
+
+def buffer_examples():
+    x = buffer(y)
+
+def sys_exc_examples():
+    print sys.exc_type, sys.exc_value, sys.exc_traceback
+
+class X:
+    def maximum(self):
+        return max(self.data.values())
+    def total(self):
+        return sum(self.data.values())
+
+
+# This is the last line.
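
For reference, the example file above is the raw material the fixers are
exercised against.  A minimal sketch of pushing such code through the
refactoring engine (shown with the stock lib2to3 spellings; the sandbox's
refactor package is assumed to expose the same names, e.g.
refactor.fixes.from2 in place of lib2to3.fixes):

    from lib2to3.refactor import RefactoringTool, get_fixers_from_package

    # Collect every default fixer and run one of the has_key examples.
    fixers = get_fixers_from_package("lib2to3.fixes")
    rt = RefactoringTool(fixers)
    tree = rt.refactor_string("x = a.has_key(b)\n", "<example>")
    print(str(tree))    # prints "x = b in a"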

Added: sandbox/trunk/refactor_pkg/lib2to3/Grammar.txt
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/Grammar.txt	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,155 @@
+# Grammar for Python
+
+# Note:  Changing the grammar specified in this file will most likely
+#        require corresponding changes in the parser module
+#        (../Modules/parsermodule.c).  If you can't make the changes to
+#        that module yourself, please co-ordinate the required changes
+#        with someone who can; ask around on python-dev for help.  Fred
+#        Drake <fdrake at acm.org> will probably be listening there.
+
+# NOTE WELL: You should also follow all the steps listed in PEP 306,
+# "How to Change Python's Grammar"
+
+# Commands for Kees Blom's railroad program
+#diagram:token NAME
+#diagram:token NUMBER
+#diagram:token STRING
+#diagram:token NEWLINE
+#diagram:token ENDMARKER
+#diagram:token INDENT
+#diagram:output\input python.bla
+#diagram:token DEDENT
+#diagram:output\textwidth 20.04cm\oddsidemargin  0.0cm\evensidemargin 0.0cm
+#diagram:rules
+
+# Start symbols for the grammar:
+#	file_input is a module or sequence of commands read from an input file;
+#	single_input is a single interactive statement;
+#	eval_input is the input for the eval() and input() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+file_input: (NEWLINE | stmt)* ENDMARKER
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef)
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+parameters: '(' [typedargslist] ')'
+typedargslist: ((tfpdef ['=' test] ',')*
+                ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname)
+                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+tname: NAME [':' test]
+tfpdef: tname | '(' tfplist ')'
+tfplist: tfpdef (',' tfpdef)* [',']
+varargslist: ((vfpdef ['=' test] ',')*
+              ('*' [vname] (',' vname ['=' test])*  [',' '**' vname] | '**' vname)
+              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+vname: NAME
+vfpdef: vname | '(' vfplist ')'
+vfplist: vfpdef (',' vfpdef)* [',']
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | print_stmt  | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | exec_stmt | assert_stmt)
+expr_stmt: testlist (augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist))*)
+augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal assignments, additional restrictions enforced by the interpreter
+print_stmt: 'print' ( [ test (',' test)* [','] ] |
+                      '>>' test [ (',' test)+ [','] ] )
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+import_from: ('from' ('.'* dotted_name | '.'+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
+exec_stmt: 'exec' expr ['in' test [',' test]]
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
+if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+	    ['else' ':' suite]
+	    ['finally' ':' suite] |
+	   'finally' ':' suite))
+with_stmt: 'with' test [ with_var ] ':' suite
+with_var: 'as' expr
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test [(',' | 'as') test]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+# Backward compatibility cruft to support:
+# [ x for x in lambda: True, lambda: False if x() ]
+# even while also allowing:
+# lambda x: 5 if x else 2
+# (But not a mix of the two)
+testlist_safe: old_test [(',' old_test)+ [',']]
+old_test: or_test | old_lambdef
+old_lambdef: 'lambda' [varargslist] ':' old_test
+
+test: or_test ['if' or_test 'else' test] | lambdef
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom trailer* ['**' factor]
+atom: ('(' [yield_expr|testlist_gexp] ')' |
+       '[' [listmaker] ']' |
+       '{' [dictsetmaker] '}' |
+       '`' testlist1 '`' |
+       NAME | NUMBER | STRING+ | '.' '.' '.')
+listmaker: test ( comp_for | (',' test)* [','] )
+testlist_gexp: test ( comp_for | (',' test)* [','] )
+lambdef: 'lambda' [varargslist] ':' test
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: expr (',' expr)* [',']
+testlist: test (',' test)* [',']
+dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
+                (test (comp_for | (',' test)* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: (argument ',')* (argument [',']
+                         |'*' test (',' argument)* [',' '**' test] 
+                         |'**' test)
+argument: test [comp_for] | test '=' test  # Really [keyword '='] test
+
+comp_iter: comp_for | comp_if
+comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
+comp_if: 'if' old_test [comp_iter]
+
+testlist1: test (',' test)*
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [testlist]
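
This grammar is not hand-compiled: pgen2 reads Grammar.txt at run time and
builds the parse tables from it.  A sketch of driving the resulting parser
(stock lib2to3 names; refactor.pgen2 and refactor.pygram are the assumed
sandbox equivalents):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    # python_grammar is the compiled form of the Grammar.txt above.
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("print >>sys.stderr, 'hi'\n")
    print(str(tree))    # round-trips the 2.x print_stmt unchanged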

Added: sandbox/trunk/refactor_pkg/lib2to3/PatternGrammar.txt
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/PatternGrammar.txt	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,28 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# A grammar to describe tree matching patterns.
+# Not shown here:
+# - 'TOKEN' stands for any token (leaf node)
+# - 'any' stands for any node (leaf or interior)
+# With 'any' we can still specify the sub-structure.
+
+# The start symbol is 'Matcher'.
+
+Matcher: Alternatives ENDMARKER
+
+Alternatives: Alternative ('|' Alternative)*
+
+Alternative: (Unit | NegatedUnit)+
+
+Unit: [NAME '='] ( STRING [Repeater]
+                 | NAME [Details] [Repeater]
+                 | '(' Alternatives ')' [Repeater]
+                 | '[' Alternatives ']'
+		 )
+
+NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
+
+Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
+
+Details: '<' Alternatives '>'
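
Fixer PATTERN strings are written in this grammar and compiled into matcher
objects by the pattern compiler.  A sketch (stock lib2to3 names; a
refactor.patcomp analogue is assumed in the sandbox):

    from lib2to3 import patcomp

    pc = patcomp.PatternCompiler()
    # Same shape as the PATTERN strings fixers declare, e.g. fix_parrot below.
    pat = pc.compile_pattern("funcdef< 'def' name='parrot' any* >")
    results = {}
    # pat.match(node, results) returns True and binds results['name']
    # when given the funcdef node of "def parrot(): pass".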

Added: sandbox/trunk/refactor_pkg/lib2to3/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1 @@
+from refactor import *

Added: sandbox/trunk/refactor_pkg/lib2to3/fixes/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/fixes/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,2 @@
+from refactor.fixes import from2
+from refactor.fixes.from2 import *

Added: sandbox/trunk/refactor_pkg/lib2to3/pgen2/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/pgen2/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1 @@
+from refactor.pgen2 import *
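
These one- and two-line __init__ files are pure compatibility shims: the
old lib2to3 package names stay importable but simply re-export the
relocated refactor package.  A sketch of what that buys (assumes both
packages are on sys.path):

    import lib2to3      # runs "from refactor import *"
    import refactor

    # The re-exported public names (modulo any __all__ filtering in refactor).
    overlap = [n for n in dir(refactor)
               if not n.startswith("_") and hasattr(lib2to3, n)]
    print(overlap)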

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,24 @@
+"""Make tests/ into a package. This allows us to "import tests" and
+have tests.all_tests be a TestSuite representing all test cases
+from all test_*.py files in tests/."""
+# Author: Collin Winter
+
+import os
+import os.path
+import unittest
+import types
+
+from . import support
+
+all_tests = unittest.TestSuite()
+
+tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
+tests = [t[0:-3] for t in os.listdir(tests_dir)
+                        if t.startswith('test_') and t.endswith('.py')]
+
+loader = unittest.TestLoader()
+
+for t in tests:
+    __import__("",globals(),locals(),[t],level=1)
+    mod = globals()[t]
+    all_tests.addTests(loader.loadTestsFromModule(mod))
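
With the suite assembled at import time, running every test is a one-liner
(illustrative; assumes the package is importable as laid out here):

    import unittest
    from lib2to3 import tests

    unittest.TextTestRunner(verbosity=2).run(tests.all_tests)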

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/README
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/README	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+In this directory:
+- py2_test_grammar.py -- test file that exercises most/all of Python 2.x's grammar.
+- py3_test_grammar.py -- test file that exercises most/all of Python 3.x's grammar.
+- infinite_recursion.py -- test file that causes lib2to3's faster recursive pattern matching
+  scheme to fail, but passes when lib2to3 falls back to iterative pattern matching.
+- fixes/ -- for use by test_refactor.py

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/bad_order.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/bad_order.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,5 @@
+from refactor.fixer_base import BaseFix
+
+class FixBadOrder(BaseFix):
+
+    order = "crazy"

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/__init__.py
==============================================================================

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_explicit.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_explicit.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+from refactor.fixer_base import BaseFix
+
+class FixExplicit(BaseFix):
+    explicit = True
+
+    def match(self): return False

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_first.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_first.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+from refactor.fixer_base import BaseFix
+
+class FixFirst(BaseFix):
+    run_order = 1
+
+    def match(self, node): return False

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_last.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_last.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,7 @@
+from refactor.fixer_base import BaseFix
+
+class FixLast(BaseFix):
+
+    run_order = 10
+
+    def match(self, node): return False

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_parrot.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_parrot.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,13 @@
+from refactor.fixer_base import BaseFix
+from refactor.fixer_util import Name
+
+class FixParrot(BaseFix):
+    """
+    Change functions named 'parrot' to 'cheese'.
+    """
+
+    PATTERN = """funcdef < 'def' name='parrot' any* >"""
+
+    def transform(self, node, results):
+        name = results["name"]
+        name.replace(Name("cheese", name.get_prefix()))

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_preorder.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/myfixes/fix_preorder.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+from refactor.fixer_base import BaseFix
+
+class FixPreorder(BaseFix):
+    order = "pre"
+
+    def match(self, node): return False

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/no_fixer_cls.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/no_fixer_cls.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1 @@
+# This is empty so trying to fetch the fixer class gives an AttributeError

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/parrot_example.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/fixers/parrot_example.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,2 @@
+def parrot():
+    pass
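
fix_parrot.py and parrot_example.py above are meant to meet in
test_refactor.py.  A sketch of the same round trip by hand (assumes the
myfixes package and the example file are reachable from the current
directory; stock lib2to3 spelling of the tool):

    from lib2to3.refactor import RefactoringTool

    rt = RefactoringTool(["myfixes.fix_parrot"])
    src = open("parrot_example.py").read()
    print(rt.refactor_string(src, "parrot_example.py"))
    # -> def cheese():
    #        pass

Note that explicit fixers such as fix_explicit.py above are skipped even
when listed this way, unless they are also named in the tool's explicit
argument.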

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/infinite_recursion.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/infinite_recursion.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,2669 @@
+# This file is used to verify that 2to3 falls back to a slower, iterative pattern matching
+# scheme in the event that the faster recursive system fails due to infinite recursion.
+from ctypes import *
+STRING = c_char_p
+
+
+OSUnknownByteOrder = 0
+UIT_PROMPT = 1
+P_PGID = 2
+P_PID = 1
+UIT_ERROR = 5
+UIT_INFO = 4
+UIT_NONE = 0
+P_ALL = 0
+UIT_VERIFY = 2
+OSBigEndian = 2
+UIT_BOOLEAN = 3
+OSLittleEndian = 1
+__darwin_nl_item = c_int
+__darwin_wctrans_t = c_int
+__darwin_wctype_t = c_ulong
+__int8_t = c_byte
+__uint8_t = c_ubyte
+__int16_t = c_short
+__uint16_t = c_ushort
+__int32_t = c_int
+__uint32_t = c_uint
+__int64_t = c_longlong
+__uint64_t = c_ulonglong
+__darwin_intptr_t = c_long
+__darwin_natural_t = c_uint
+__darwin_ct_rune_t = c_int
+class __mbstate_t(Union):
+    pass
+__mbstate_t._pack_ = 4
+__mbstate_t._fields_ = [
+    ('__mbstate8', c_char * 128),
+    ('_mbstateL', c_longlong),
+]
+assert sizeof(__mbstate_t) == 128, sizeof(__mbstate_t)
+assert alignment(__mbstate_t) == 4, alignment(__mbstate_t)
+__darwin_mbstate_t = __mbstate_t
+__darwin_ptrdiff_t = c_int
+__darwin_size_t = c_ulong
+__darwin_va_list = STRING
+__darwin_wchar_t = c_int
+__darwin_rune_t = __darwin_wchar_t
+__darwin_wint_t = c_int
+__darwin_clock_t = c_ulong
+__darwin_socklen_t = __uint32_t
+__darwin_ssize_t = c_long
+__darwin_time_t = c_long
+sig_atomic_t = c_int
+class sigcontext(Structure):
+    pass
+sigcontext._fields_ = [
+    ('sc_onstack', c_int),
+    ('sc_mask', c_int),
+    ('sc_eax', c_uint),
+    ('sc_ebx', c_uint),
+    ('sc_ecx', c_uint),
+    ('sc_edx', c_uint),
+    ('sc_edi', c_uint),
+    ('sc_esi', c_uint),
+    ('sc_ebp', c_uint),
+    ('sc_esp', c_uint),
+    ('sc_ss', c_uint),
+    ('sc_eflags', c_uint),
+    ('sc_eip', c_uint),
+    ('sc_cs', c_uint),
+    ('sc_ds', c_uint),
+    ('sc_es', c_uint),
+    ('sc_fs', c_uint),
+    ('sc_gs', c_uint),
+]
+assert sizeof(sigcontext) == 72, sizeof(sigcontext)
+assert alignment(sigcontext) == 4, alignment(sigcontext)
+u_int8_t = c_ubyte
+u_int16_t = c_ushort
+u_int32_t = c_uint
+u_int64_t = c_ulonglong
+int32_t = c_int
+register_t = int32_t
+user_addr_t = u_int64_t
+user_size_t = u_int64_t
+int64_t = c_longlong
+user_ssize_t = int64_t
+user_long_t = int64_t
+user_ulong_t = u_int64_t
+user_time_t = int64_t
+syscall_arg_t = u_int64_t
+
+# values for unnamed enumeration
+class aes_key_st(Structure):
+    pass
+aes_key_st._fields_ = [
+    ('rd_key', c_ulong * 60),
+    ('rounds', c_int),
+]
+assert sizeof(aes_key_st) == 244, sizeof(aes_key_st)
+assert alignment(aes_key_st) == 4, alignment(aes_key_st)
+AES_KEY = aes_key_st
+class asn1_ctx_st(Structure):
+    pass
+asn1_ctx_st._fields_ = [
+    ('p', POINTER(c_ubyte)),
+    ('eos', c_int),
+    ('error', c_int),
+    ('inf', c_int),
+    ('tag', c_int),
+    ('xclass', c_int),
+    ('slen', c_long),
+    ('max', POINTER(c_ubyte)),
+    ('q', POINTER(c_ubyte)),
+    ('pp', POINTER(POINTER(c_ubyte))),
+    ('line', c_int),
+]
+assert sizeof(asn1_ctx_st) == 44, sizeof(asn1_ctx_st)
+assert alignment(asn1_ctx_st) == 4, alignment(asn1_ctx_st)
+ASN1_CTX = asn1_ctx_st
+class asn1_object_st(Structure):
+    pass
+asn1_object_st._fields_ = [
+    ('sn', STRING),
+    ('ln', STRING),
+    ('nid', c_int),
+    ('length', c_int),
+    ('data', POINTER(c_ubyte)),
+    ('flags', c_int),
+]
+assert sizeof(asn1_object_st) == 24, sizeof(asn1_object_st)
+assert alignment(asn1_object_st) == 4, alignment(asn1_object_st)
+ASN1_OBJECT = asn1_object_st
+class asn1_string_st(Structure):
+    pass
+asn1_string_st._fields_ = [
+    ('length', c_int),
+    ('type', c_int),
+    ('data', POINTER(c_ubyte)),
+    ('flags', c_long),
+]
+assert sizeof(asn1_string_st) == 16, sizeof(asn1_string_st)
+assert alignment(asn1_string_st) == 4, alignment(asn1_string_st)
+ASN1_STRING = asn1_string_st
+class ASN1_ENCODING_st(Structure):
+    pass
+ASN1_ENCODING_st._fields_ = [
+    ('enc', POINTER(c_ubyte)),
+    ('len', c_long),
+    ('modified', c_int),
+]
+assert sizeof(ASN1_ENCODING_st) == 12, sizeof(ASN1_ENCODING_st)
+assert alignment(ASN1_ENCODING_st) == 4, alignment(ASN1_ENCODING_st)
+ASN1_ENCODING = ASN1_ENCODING_st
+class asn1_string_table_st(Structure):
+    pass
+asn1_string_table_st._fields_ = [
+    ('nid', c_int),
+    ('minsize', c_long),
+    ('maxsize', c_long),
+    ('mask', c_ulong),
+    ('flags', c_ulong),
+]
+assert sizeof(asn1_string_table_st) == 20, sizeof(asn1_string_table_st)
+assert alignment(asn1_string_table_st) == 4, alignment(asn1_string_table_st)
+ASN1_STRING_TABLE = asn1_string_table_st
+class ASN1_TEMPLATE_st(Structure):
+    pass
+ASN1_TEMPLATE_st._fields_ = [
+]
+ASN1_TEMPLATE = ASN1_TEMPLATE_st
+class ASN1_ITEM_st(Structure):
+    pass
+ASN1_ITEM = ASN1_ITEM_st
+ASN1_ITEM_st._fields_ = [
+]
+class ASN1_TLC_st(Structure):
+    pass
+ASN1_TLC = ASN1_TLC_st
+ASN1_TLC_st._fields_ = [
+]
+class ASN1_VALUE_st(Structure):
+    pass
+ASN1_VALUE_st._fields_ = [
+]
+ASN1_VALUE = ASN1_VALUE_st
+ASN1_ITEM_EXP = ASN1_ITEM
+class asn1_type_st(Structure):
+    pass
+class N12asn1_type_st4DOLLAR_11E(Union):
+    pass
+ASN1_BOOLEAN = c_int
+ASN1_INTEGER = asn1_string_st
+ASN1_ENUMERATED = asn1_string_st
+ASN1_BIT_STRING = asn1_string_st
+ASN1_OCTET_STRING = asn1_string_st
+ASN1_PRINTABLESTRING = asn1_string_st
+ASN1_T61STRING = asn1_string_st
+ASN1_IA5STRING = asn1_string_st
+ASN1_GENERALSTRING = asn1_string_st
+ASN1_BMPSTRING = asn1_string_st
+ASN1_UNIVERSALSTRING = asn1_string_st
+ASN1_UTCTIME = asn1_string_st
+ASN1_GENERALIZEDTIME = asn1_string_st
+ASN1_VISIBLESTRING = asn1_string_st
+ASN1_UTF8STRING = asn1_string_st
+N12asn1_type_st4DOLLAR_11E._fields_ = [
+    ('ptr', STRING),
+    ('boolean', ASN1_BOOLEAN),
+    ('asn1_string', POINTER(ASN1_STRING)),
+    ('object', POINTER(ASN1_OBJECT)),
+    ('integer', POINTER(ASN1_INTEGER)),
+    ('enumerated', POINTER(ASN1_ENUMERATED)),
+    ('bit_string', POINTER(ASN1_BIT_STRING)),
+    ('octet_string', POINTER(ASN1_OCTET_STRING)),
+    ('printablestring', POINTER(ASN1_PRINTABLESTRING)),
+    ('t61string', POINTER(ASN1_T61STRING)),
+    ('ia5string', POINTER(ASN1_IA5STRING)),
+    ('generalstring', POINTER(ASN1_GENERALSTRING)),
+    ('bmpstring', POINTER(ASN1_BMPSTRING)),
+    ('universalstring', POINTER(ASN1_UNIVERSALSTRING)),
+    ('utctime', POINTER(ASN1_UTCTIME)),
+    ('generalizedtime', POINTER(ASN1_GENERALIZEDTIME)),
+    ('visiblestring', POINTER(ASN1_VISIBLESTRING)),
+    ('utf8string', POINTER(ASN1_UTF8STRING)),
+    ('set', POINTER(ASN1_STRING)),
+    ('sequence', POINTER(ASN1_STRING)),
+]
+assert sizeof(N12asn1_type_st4DOLLAR_11E) == 4, sizeof(N12asn1_type_st4DOLLAR_11E)
+assert alignment(N12asn1_type_st4DOLLAR_11E) == 4, alignment(N12asn1_type_st4DOLLAR_11E)
+asn1_type_st._fields_ = [
+    ('type', c_int),
+    ('value', N12asn1_type_st4DOLLAR_11E),
+]
+assert sizeof(asn1_type_st) == 8, sizeof(asn1_type_st)
+assert alignment(asn1_type_st) == 4, alignment(asn1_type_st)
+ASN1_TYPE = asn1_type_st
+class asn1_method_st(Structure):
+    pass
+asn1_method_st._fields_ = [
+    ('i2d', CFUNCTYPE(c_int)),
+    ('d2i', CFUNCTYPE(STRING)),
+    ('create', CFUNCTYPE(STRING)),
+    ('destroy', CFUNCTYPE(None)),
+]
+assert sizeof(asn1_method_st) == 16, sizeof(asn1_method_st)
+assert alignment(asn1_method_st) == 4, alignment(asn1_method_st)
+ASN1_METHOD = asn1_method_st
+class asn1_header_st(Structure):
+    pass
+asn1_header_st._fields_ = [
+    ('header', POINTER(ASN1_OCTET_STRING)),
+    ('data', STRING),
+    ('meth', POINTER(ASN1_METHOD)),
+]
+assert sizeof(asn1_header_st) == 12, sizeof(asn1_header_st)
+assert alignment(asn1_header_st) == 4, alignment(asn1_header_st)
+ASN1_HEADER = asn1_header_st
+class BIT_STRING_BITNAME_st(Structure):
+    pass
+BIT_STRING_BITNAME_st._fields_ = [
+    ('bitnum', c_int),
+    ('lname', STRING),
+    ('sname', STRING),
+]
+assert sizeof(BIT_STRING_BITNAME_st) == 12, sizeof(BIT_STRING_BITNAME_st)
+assert alignment(BIT_STRING_BITNAME_st) == 4, alignment(BIT_STRING_BITNAME_st)
+BIT_STRING_BITNAME = BIT_STRING_BITNAME_st
+class bio_st(Structure):
+    pass
+BIO = bio_st
+bio_info_cb = CFUNCTYPE(None, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)
+class bio_method_st(Structure):
+    pass
+bio_method_st._fields_ = [
+    ('type', c_int),
+    ('name', STRING),
+    ('bwrite', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+    ('bread', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+    ('bputs', CFUNCTYPE(c_int, POINTER(BIO), STRING)),
+    ('bgets', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+    ('ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, c_long, c_void_p)),
+    ('create', CFUNCTYPE(c_int, POINTER(BIO))),
+    ('destroy', CFUNCTYPE(c_int, POINTER(BIO))),
+    ('callback_ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, POINTER(bio_info_cb))),
+]
+assert sizeof(bio_method_st) == 40, sizeof(bio_method_st)
+assert alignment(bio_method_st) == 4, alignment(bio_method_st)
+BIO_METHOD = bio_method_st
+class crypto_ex_data_st(Structure):
+    pass
+class stack_st(Structure):
+    pass
+STACK = stack_st
+crypto_ex_data_st._fields_ = [
+    ('sk', POINTER(STACK)),
+    ('dummy', c_int),
+]
+assert sizeof(crypto_ex_data_st) == 8, sizeof(crypto_ex_data_st)
+assert alignment(crypto_ex_data_st) == 4, alignment(crypto_ex_data_st)
+CRYPTO_EX_DATA = crypto_ex_data_st
+bio_st._fields_ = [
+    ('method', POINTER(BIO_METHOD)),
+    ('callback', CFUNCTYPE(c_long, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)),
+    ('cb_arg', STRING),
+    ('init', c_int),
+    ('shutdown', c_int),
+    ('flags', c_int),
+    ('retry_reason', c_int),
+    ('num', c_int),
+    ('ptr', c_void_p),
+    ('next_bio', POINTER(bio_st)),
+    ('prev_bio', POINTER(bio_st)),
+    ('references', c_int),
+    ('num_read', c_ulong),
+    ('num_write', c_ulong),
+    ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(bio_st) == 64, sizeof(bio_st)
+assert alignment(bio_st) == 4, alignment(bio_st)
+class bio_f_buffer_ctx_struct(Structure):
+    pass
+bio_f_buffer_ctx_struct._fields_ = [
+    ('ibuf_size', c_int),
+    ('obuf_size', c_int),
+    ('ibuf', STRING),
+    ('ibuf_len', c_int),
+    ('ibuf_off', c_int),
+    ('obuf', STRING),
+    ('obuf_len', c_int),
+    ('obuf_off', c_int),
+]
+assert sizeof(bio_f_buffer_ctx_struct) == 32, sizeof(bio_f_buffer_ctx_struct)
+assert alignment(bio_f_buffer_ctx_struct) == 4, alignment(bio_f_buffer_ctx_struct)
+BIO_F_BUFFER_CTX = bio_f_buffer_ctx_struct
+class hostent(Structure):
+    pass
+hostent._fields_ = [
+]
+class bf_key_st(Structure):
+    pass
+bf_key_st._fields_ = [
+    ('P', c_uint * 18),
+    ('S', c_uint * 1024),
+]
+assert sizeof(bf_key_st) == 4168, sizeof(bf_key_st)
+assert alignment(bf_key_st) == 4, alignment(bf_key_st)
+BF_KEY = bf_key_st
+class bignum_st(Structure):
+    pass
+bignum_st._fields_ = [
+    ('d', POINTER(c_ulong)),
+    ('top', c_int),
+    ('dmax', c_int),
+    ('neg', c_int),
+    ('flags', c_int),
+]
+assert sizeof(bignum_st) == 20, sizeof(bignum_st)
+assert alignment(bignum_st) == 4, alignment(bignum_st)
+BIGNUM = bignum_st
+class bignum_ctx(Structure):
+    pass
+bignum_ctx._fields_ = [
+]
+BN_CTX = bignum_ctx
+class bn_blinding_st(Structure):
+    pass
+bn_blinding_st._fields_ = [
+    ('init', c_int),
+    ('A', POINTER(BIGNUM)),
+    ('Ai', POINTER(BIGNUM)),
+    ('mod', POINTER(BIGNUM)),
+    ('thread_id', c_ulong),
+]
+assert sizeof(bn_blinding_st) == 20, sizeof(bn_blinding_st)
+assert alignment(bn_blinding_st) == 4, alignment(bn_blinding_st)
+BN_BLINDING = bn_blinding_st
+class bn_mont_ctx_st(Structure):
+    pass
+bn_mont_ctx_st._fields_ = [
+    ('ri', c_int),
+    ('RR', BIGNUM),
+    ('N', BIGNUM),
+    ('Ni', BIGNUM),
+    ('n0', c_ulong),
+    ('flags', c_int),
+]
+assert sizeof(bn_mont_ctx_st) == 72, sizeof(bn_mont_ctx_st)
+assert alignment(bn_mont_ctx_st) == 4, alignment(bn_mont_ctx_st)
+BN_MONT_CTX = bn_mont_ctx_st
+class bn_recp_ctx_st(Structure):
+    pass
+bn_recp_ctx_st._fields_ = [
+    ('N', BIGNUM),
+    ('Nr', BIGNUM),
+    ('num_bits', c_int),
+    ('shift', c_int),
+    ('flags', c_int),
+]
+assert sizeof(bn_recp_ctx_st) == 52, sizeof(bn_recp_ctx_st)
+assert alignment(bn_recp_ctx_st) == 4, alignment(bn_recp_ctx_st)
+BN_RECP_CTX = bn_recp_ctx_st
+class buf_mem_st(Structure):
+    pass
+buf_mem_st._fields_ = [
+    ('length', c_int),
+    ('data', STRING),
+    ('max', c_int),
+]
+assert sizeof(buf_mem_st) == 12, sizeof(buf_mem_st)
+assert alignment(buf_mem_st) == 4, alignment(buf_mem_st)
+BUF_MEM = buf_mem_st
+class cast_key_st(Structure):
+    pass
+cast_key_st._fields_ = [
+    ('data', c_ulong * 32),
+    ('short_key', c_int),
+]
+assert sizeof(cast_key_st) == 132, sizeof(cast_key_st)
+assert alignment(cast_key_st) == 4, alignment(cast_key_st)
+CAST_KEY = cast_key_st
+class comp_method_st(Structure):
+    pass
+comp_method_st._fields_ = [
+    ('type', c_int),
+    ('name', STRING),
+    ('init', CFUNCTYPE(c_int)),
+    ('finish', CFUNCTYPE(None)),
+    ('compress', CFUNCTYPE(c_int)),
+    ('expand', CFUNCTYPE(c_int)),
+    ('ctrl', CFUNCTYPE(c_long)),
+    ('callback_ctrl', CFUNCTYPE(c_long)),
+]
+assert sizeof(comp_method_st) == 32, sizeof(comp_method_st)
+assert alignment(comp_method_st) == 4, alignment(comp_method_st)
+COMP_METHOD = comp_method_st
+class comp_ctx_st(Structure):
+    pass
+comp_ctx_st._fields_ = [
+    ('meth', POINTER(COMP_METHOD)),
+    ('compress_in', c_ulong),
+    ('compress_out', c_ulong),
+    ('expand_in', c_ulong),
+    ('expand_out', c_ulong),
+    ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(comp_ctx_st) == 28, sizeof(comp_ctx_st)
+assert alignment(comp_ctx_st) == 4, alignment(comp_ctx_st)
+COMP_CTX = comp_ctx_st
+class CRYPTO_dynlock_value(Structure):
+    pass
+CRYPTO_dynlock_value._fields_ = [
+]
+class CRYPTO_dynlock(Structure):
+    pass
+CRYPTO_dynlock._fields_ = [
+    ('references', c_int),
+    ('data', POINTER(CRYPTO_dynlock_value)),
+]
+assert sizeof(CRYPTO_dynlock) == 8, sizeof(CRYPTO_dynlock)
+assert alignment(CRYPTO_dynlock) == 4, alignment(CRYPTO_dynlock)
+BIO_dummy = bio_st
+CRYPTO_EX_new = CFUNCTYPE(c_int, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
+CRYPTO_EX_free = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
+CRYPTO_EX_dup = CFUNCTYPE(c_int, POINTER(CRYPTO_EX_DATA), POINTER(CRYPTO_EX_DATA), c_void_p, c_int, c_long, c_void_p)
+class crypto_ex_data_func_st(Structure):
+    pass
+crypto_ex_data_func_st._fields_ = [
+    ('argl', c_long),
+    ('argp', c_void_p),
+    ('new_func', POINTER(CRYPTO_EX_new)),
+    ('free_func', POINTER(CRYPTO_EX_free)),
+    ('dup_func', POINTER(CRYPTO_EX_dup)),
+]
+assert sizeof(crypto_ex_data_func_st) == 20, sizeof(crypto_ex_data_func_st)
+assert alignment(crypto_ex_data_func_st) == 4, alignment(crypto_ex_data_func_st)
+CRYPTO_EX_DATA_FUNCS = crypto_ex_data_func_st
+class st_CRYPTO_EX_DATA_IMPL(Structure):
+    pass
+CRYPTO_EX_DATA_IMPL = st_CRYPTO_EX_DATA_IMPL
+st_CRYPTO_EX_DATA_IMPL._fields_ = [
+]
+CRYPTO_MEM_LEAK_CB = CFUNCTYPE(c_void_p, c_ulong, STRING, c_int, c_int, c_void_p)
+DES_cblock = c_ubyte * 8
+const_DES_cblock = c_ubyte * 8
+class DES_ks(Structure):
+    pass
+class N6DES_ks3DOLLAR_9E(Union):
+    pass
+N6DES_ks3DOLLAR_9E._fields_ = [
+    ('cblock', DES_cblock),
+    ('deslong', c_ulong * 2),
+]
+assert sizeof(N6DES_ks3DOLLAR_9E) == 8, sizeof(N6DES_ks3DOLLAR_9E)
+assert alignment(N6DES_ks3DOLLAR_9E) == 4, alignment(N6DES_ks3DOLLAR_9E)
+DES_ks._fields_ = [
+    ('ks', N6DES_ks3DOLLAR_9E * 16),
+]
+assert sizeof(DES_ks) == 128, sizeof(DES_ks)
+assert alignment(DES_ks) == 4, alignment(DES_ks)
+DES_key_schedule = DES_ks
+_ossl_old_des_cblock = c_ubyte * 8
+class _ossl_old_des_ks_struct(Structure):
+    pass
+class N23_ossl_old_des_ks_struct4DOLLAR_10E(Union):
+    pass
+N23_ossl_old_des_ks_struct4DOLLAR_10E._fields_ = [
+    ('_', _ossl_old_des_cblock),
+    ('pad', c_ulong * 2),
+]
+assert sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 8, sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E)
+assert alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 4, alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E)
+_ossl_old_des_ks_struct._fields_ = [
+    ('ks', N23_ossl_old_des_ks_struct4DOLLAR_10E),
+]
+assert sizeof(_ossl_old_des_ks_struct) == 8, sizeof(_ossl_old_des_ks_struct)
+assert alignment(_ossl_old_des_ks_struct) == 4, alignment(_ossl_old_des_ks_struct)
+_ossl_old_des_key_schedule = _ossl_old_des_ks_struct * 16
+class dh_st(Structure):
+    pass
+DH = dh_st
+class dh_method(Structure):
+    pass
+dh_method._fields_ = [
+    ('name', STRING),
+    ('generate_key', CFUNCTYPE(c_int, POINTER(DH))),
+    ('compute_key', CFUNCTYPE(c_int, POINTER(c_ubyte), POINTER(BIGNUM), POINTER(DH))),
+    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DH), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('init', CFUNCTYPE(c_int, POINTER(DH))),
+    ('finish', CFUNCTYPE(c_int, POINTER(DH))),
+    ('flags', c_int),
+    ('app_data', STRING),
+]
+assert sizeof(dh_method) == 32, sizeof(dh_method)
+assert alignment(dh_method) == 4, alignment(dh_method)
+DH_METHOD = dh_method
+class engine_st(Structure):
+    pass
+ENGINE = engine_st
+dh_st._fields_ = [
+    ('pad', c_int),
+    ('version', c_int),
+    ('p', POINTER(BIGNUM)),
+    ('g', POINTER(BIGNUM)),
+    ('length', c_long),
+    ('pub_key', POINTER(BIGNUM)),
+    ('priv_key', POINTER(BIGNUM)),
+    ('flags', c_int),
+    ('method_mont_p', STRING),
+    ('q', POINTER(BIGNUM)),
+    ('j', POINTER(BIGNUM)),
+    ('seed', POINTER(c_ubyte)),
+    ('seedlen', c_int),
+    ('counter', POINTER(BIGNUM)),
+    ('references', c_int),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('meth', POINTER(DH_METHOD)),
+    ('engine', POINTER(ENGINE)),
+]
+assert sizeof(dh_st) == 76, sizeof(dh_st)
+assert alignment(dh_st) == 4, alignment(dh_st)
+class dsa_st(Structure):
+    pass
+DSA = dsa_st
+class DSA_SIG_st(Structure):
+    pass
+DSA_SIG_st._fields_ = [
+    ('r', POINTER(BIGNUM)),
+    ('s', POINTER(BIGNUM)),
+]
+assert sizeof(DSA_SIG_st) == 8, sizeof(DSA_SIG_st)
+assert alignment(DSA_SIG_st) == 4, alignment(DSA_SIG_st)
+DSA_SIG = DSA_SIG_st
+class dsa_method(Structure):
+    pass
+dsa_method._fields_ = [
+    ('name', STRING),
+    ('dsa_do_sign', CFUNCTYPE(POINTER(DSA_SIG), POINTER(c_ubyte), c_int, POINTER(DSA))),
+    ('dsa_sign_setup', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BN_CTX), POINTER(POINTER(BIGNUM)), POINTER(POINTER(BIGNUM)))),
+    ('dsa_do_verify', CFUNCTYPE(c_int, POINTER(c_ubyte), c_int, POINTER(DSA_SIG), POINTER(DSA))),
+    ('dsa_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('init', CFUNCTYPE(c_int, POINTER(DSA))),
+    ('finish', CFUNCTYPE(c_int, POINTER(DSA))),
+    ('flags', c_int),
+    ('app_data', STRING),
+]
+assert sizeof(dsa_method) == 40, sizeof(dsa_method)
+assert alignment(dsa_method) == 4, alignment(dsa_method)
+DSA_METHOD = dsa_method
+dsa_st._fields_ = [
+    ('pad', c_int),
+    ('version', c_long),
+    ('write_params', c_int),
+    ('p', POINTER(BIGNUM)),
+    ('q', POINTER(BIGNUM)),
+    ('g', POINTER(BIGNUM)),
+    ('pub_key', POINTER(BIGNUM)),
+    ('priv_key', POINTER(BIGNUM)),
+    ('kinv', POINTER(BIGNUM)),
+    ('r', POINTER(BIGNUM)),
+    ('flags', c_int),
+    ('method_mont_p', STRING),
+    ('references', c_int),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('meth', POINTER(DSA_METHOD)),
+    ('engine', POINTER(ENGINE)),
+]
+assert sizeof(dsa_st) == 68, sizeof(dsa_st)
+assert alignment(dsa_st) == 4, alignment(dsa_st)
+class evp_pkey_st(Structure):
+    pass
+class N11evp_pkey_st4DOLLAR_12E(Union):
+    pass
+class rsa_st(Structure):
+    pass
+N11evp_pkey_st4DOLLAR_12E._fields_ = [
+    ('ptr', STRING),
+    ('rsa', POINTER(rsa_st)),
+    ('dsa', POINTER(dsa_st)),
+    ('dh', POINTER(dh_st)),
+]
+assert sizeof(N11evp_pkey_st4DOLLAR_12E) == 4, sizeof(N11evp_pkey_st4DOLLAR_12E)
+assert alignment(N11evp_pkey_st4DOLLAR_12E) == 4, alignment(N11evp_pkey_st4DOLLAR_12E)
+evp_pkey_st._fields_ = [
+    ('type', c_int),
+    ('save_type', c_int),
+    ('references', c_int),
+    ('pkey', N11evp_pkey_st4DOLLAR_12E),
+    ('save_parameters', c_int),
+    ('attributes', POINTER(STACK)),
+]
+assert sizeof(evp_pkey_st) == 24, sizeof(evp_pkey_st)
+assert alignment(evp_pkey_st) == 4, alignment(evp_pkey_st)
+class env_md_st(Structure):
+    pass
+class env_md_ctx_st(Structure):
+    pass
+EVP_MD_CTX = env_md_ctx_st
+env_md_st._fields_ = [
+    ('type', c_int),
+    ('pkey_type', c_int),
+    ('md_size', c_int),
+    ('flags', c_ulong),
+    ('init', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
+    ('update', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), c_void_p, c_ulong)),
+    ('final', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(c_ubyte))),
+    ('copy', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(EVP_MD_CTX))),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
+    ('sign', CFUNCTYPE(c_int)),
+    ('verify', CFUNCTYPE(c_int)),
+    ('required_pkey_type', c_int * 5),
+    ('block_size', c_int),
+    ('ctx_size', c_int),
+]
+assert sizeof(env_md_st) == 72, sizeof(env_md_st)
+assert alignment(env_md_st) == 4, alignment(env_md_st)
+EVP_MD = env_md_st
+env_md_ctx_st._fields_ = [
+    ('digest', POINTER(EVP_MD)),
+    ('engine', POINTER(ENGINE)),
+    ('flags', c_ulong),
+    ('md_data', c_void_p),
+]
+assert sizeof(env_md_ctx_st) == 16, sizeof(env_md_ctx_st)
+assert alignment(env_md_ctx_st) == 4, alignment(env_md_ctx_st)
+class evp_cipher_st(Structure):
+    pass
+class evp_cipher_ctx_st(Structure):
+    pass
+EVP_CIPHER_CTX = evp_cipher_ctx_st
+evp_cipher_st._fields_ = [
+    ('nid', c_int),
+    ('block_size', c_int),
+    ('key_len', c_int),
+    ('iv_len', c_int),
+    ('flags', c_ulong),
+    ('init', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_int)),
+    ('do_cipher', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_uint)),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX))),
+    ('ctx_size', c_int),
+    ('set_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
+    ('get_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
+    ('ctrl', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), c_int, c_int, c_void_p)),
+    ('app_data', c_void_p),
+]
+assert sizeof(evp_cipher_st) == 52, sizeof(evp_cipher_st)
+assert alignment(evp_cipher_st) == 4, alignment(evp_cipher_st)
+class evp_cipher_info_st(Structure):
+    pass
+EVP_CIPHER = evp_cipher_st
+evp_cipher_info_st._fields_ = [
+    ('cipher', POINTER(EVP_CIPHER)),
+    ('iv', c_ubyte * 16),
+]
+assert sizeof(evp_cipher_info_st) == 20, sizeof(evp_cipher_info_st)
+assert alignment(evp_cipher_info_st) == 4, alignment(evp_cipher_info_st)
+EVP_CIPHER_INFO = evp_cipher_info_st
+evp_cipher_ctx_st._fields_ = [
+    ('cipher', POINTER(EVP_CIPHER)),
+    ('engine', POINTER(ENGINE)),
+    ('encrypt', c_int),
+    ('buf_len', c_int),
+    ('oiv', c_ubyte * 16),
+    ('iv', c_ubyte * 16),
+    ('buf', c_ubyte * 32),
+    ('num', c_int),
+    ('app_data', c_void_p),
+    ('key_len', c_int),
+    ('flags', c_ulong),
+    ('cipher_data', c_void_p),
+    ('final_used', c_int),
+    ('block_mask', c_int),
+    ('final', c_ubyte * 32),
+]
+assert sizeof(evp_cipher_ctx_st) == 140, sizeof(evp_cipher_ctx_st)
+assert alignment(evp_cipher_ctx_st) == 4, alignment(evp_cipher_ctx_st)
+class evp_Encode_Ctx_st(Structure):
+    pass
+evp_Encode_Ctx_st._fields_ = [
+    ('num', c_int),
+    ('length', c_int),
+    ('enc_data', c_ubyte * 80),
+    ('line_num', c_int),
+    ('expect_nl', c_int),
+]
+assert sizeof(evp_Encode_Ctx_st) == 96, sizeof(evp_Encode_Ctx_st)
+assert alignment(evp_Encode_Ctx_st) == 4, alignment(evp_Encode_Ctx_st)
+EVP_ENCODE_CTX = evp_Encode_Ctx_st
+EVP_PBE_KEYGEN = CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), STRING, c_int, POINTER(ASN1_TYPE), POINTER(EVP_CIPHER), POINTER(EVP_MD), c_int)
+class lhash_node_st(Structure):
+    pass
+lhash_node_st._fields_ = [
+    ('data', c_void_p),
+    ('next', POINTER(lhash_node_st)),
+    ('hash', c_ulong),
+]
+assert sizeof(lhash_node_st) == 12, sizeof(lhash_node_st)
+assert alignment(lhash_node_st) == 4, alignment(lhash_node_st)
+LHASH_NODE = lhash_node_st
+LHASH_COMP_FN_TYPE = CFUNCTYPE(c_int, c_void_p, c_void_p)
+LHASH_HASH_FN_TYPE = CFUNCTYPE(c_ulong, c_void_p)
+LHASH_DOALL_FN_TYPE = CFUNCTYPE(None, c_void_p)
+LHASH_DOALL_ARG_FN_TYPE = CFUNCTYPE(None, c_void_p, c_void_p)
+class lhash_st(Structure):
+    pass
+lhash_st._fields_ = [
+    ('b', POINTER(POINTER(LHASH_NODE))),
+    ('comp', LHASH_COMP_FN_TYPE),
+    ('hash', LHASH_HASH_FN_TYPE),
+    ('num_nodes', c_uint),
+    ('num_alloc_nodes', c_uint),
+    ('p', c_uint),
+    ('pmax', c_uint),
+    ('up_load', c_ulong),
+    ('down_load', c_ulong),
+    ('num_items', c_ulong),
+    ('num_expands', c_ulong),
+    ('num_expand_reallocs', c_ulong),
+    ('num_contracts', c_ulong),
+    ('num_contract_reallocs', c_ulong),
+    ('num_hash_calls', c_ulong),
+    ('num_comp_calls', c_ulong),
+    ('num_insert', c_ulong),
+    ('num_replace', c_ulong),
+    ('num_delete', c_ulong),
+    ('num_no_delete', c_ulong),
+    ('num_retrieve', c_ulong),
+    ('num_retrieve_miss', c_ulong),
+    ('num_hash_comps', c_ulong),
+    ('error', c_int),
+]
+assert sizeof(lhash_st) == 96, sizeof(lhash_st)
+assert alignment(lhash_st) == 4, alignment(lhash_st)
+LHASH = lhash_st
+class MD2state_st(Structure):
+    pass
+MD2state_st._fields_ = [
+    ('num', c_int),
+    ('data', c_ubyte * 16),
+    ('cksm', c_uint * 16),
+    ('state', c_uint * 16),
+]
+assert sizeof(MD2state_st) == 148, sizeof(MD2state_st)
+assert alignment(MD2state_st) == 4, alignment(MD2state_st)
+MD2_CTX = MD2state_st
+class MD4state_st(Structure):
+    pass
+MD4state_st._fields_ = [
+    ('A', c_uint),
+    ('B', c_uint),
+    ('C', c_uint),
+    ('D', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(MD4state_st) == 92, sizeof(MD4state_st)
+assert alignment(MD4state_st) == 4, alignment(MD4state_st)
+MD4_CTX = MD4state_st
+class MD5state_st(Structure):
+    pass
+MD5state_st._fields_ = [
+    ('A', c_uint),
+    ('B', c_uint),
+    ('C', c_uint),
+    ('D', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(MD5state_st) == 92, sizeof(MD5state_st)
+assert alignment(MD5state_st) == 4, alignment(MD5state_st)
+MD5_CTX = MD5state_st
+class mdc2_ctx_st(Structure):
+    pass
+mdc2_ctx_st._fields_ = [
+    ('num', c_int),
+    ('data', c_ubyte * 8),
+    ('h', DES_cblock),
+    ('hh', DES_cblock),
+    ('pad_type', c_int),
+]
+assert sizeof(mdc2_ctx_st) == 32, sizeof(mdc2_ctx_st)
+assert alignment(mdc2_ctx_st) == 4, alignment(mdc2_ctx_st)
+MDC2_CTX = mdc2_ctx_st
+class obj_name_st(Structure):
+    pass
+obj_name_st._fields_ = [
+    ('type', c_int),
+    ('alias', c_int),
+    ('name', STRING),
+    ('data', STRING),
+]
+assert sizeof(obj_name_st) == 16, sizeof(obj_name_st)
+assert alignment(obj_name_st) == 4, alignment(obj_name_st)
+OBJ_NAME = obj_name_st
+ASN1_TIME = asn1_string_st
+ASN1_NULL = c_int
+EVP_PKEY = evp_pkey_st
+class x509_st(Structure):
+    pass
+X509 = x509_st
+class X509_algor_st(Structure):
+    pass
+X509_ALGOR = X509_algor_st
+class X509_crl_st(Structure):
+    pass
+X509_CRL = X509_crl_st
+class X509_name_st(Structure):
+    pass
+X509_NAME = X509_name_st
+class x509_store_st(Structure):
+    pass
+X509_STORE = x509_store_st
+class x509_store_ctx_st(Structure):
+    pass
+X509_STORE_CTX = x509_store_ctx_st
+engine_st._fields_ = [
+]
+class PEM_Encode_Seal_st(Structure):
+    pass
+PEM_Encode_Seal_st._fields_ = [
+    ('encode', EVP_ENCODE_CTX),
+    ('md', EVP_MD_CTX),
+    ('cipher', EVP_CIPHER_CTX),
+]
+assert sizeof(PEM_Encode_Seal_st) == 252, sizeof(PEM_Encode_Seal_st)
+assert alignment(PEM_Encode_Seal_st) == 4, alignment(PEM_Encode_Seal_st)
+PEM_ENCODE_SEAL_CTX = PEM_Encode_Seal_st
+class pem_recip_st(Structure):
+    pass
+pem_recip_st._fields_ = [
+    ('name', STRING),
+    ('dn', POINTER(X509_NAME)),
+    ('cipher', c_int),
+    ('key_enc', c_int),
+]
+assert sizeof(pem_recip_st) == 16, sizeof(pem_recip_st)
+assert alignment(pem_recip_st) == 4, alignment(pem_recip_st)
+PEM_USER = pem_recip_st
+class pem_ctx_st(Structure):
+    pass
+class N10pem_ctx_st4DOLLAR_16E(Structure):
+    pass
+N10pem_ctx_st4DOLLAR_16E._fields_ = [
+    ('version', c_int),
+    ('mode', c_int),
+]
+assert sizeof(N10pem_ctx_st4DOLLAR_16E) == 8, sizeof(N10pem_ctx_st4DOLLAR_16E)
+assert alignment(N10pem_ctx_st4DOLLAR_16E) == 4, alignment(N10pem_ctx_st4DOLLAR_16E)
+class N10pem_ctx_st4DOLLAR_17E(Structure):
+    pass
+N10pem_ctx_st4DOLLAR_17E._fields_ = [
+    ('cipher', c_int),
+]
+assert sizeof(N10pem_ctx_st4DOLLAR_17E) == 4, sizeof(N10pem_ctx_st4DOLLAR_17E)
+assert alignment(N10pem_ctx_st4DOLLAR_17E) == 4, alignment(N10pem_ctx_st4DOLLAR_17E)
+pem_ctx_st._fields_ = [
+    ('type', c_int),
+    ('proc_type', N10pem_ctx_st4DOLLAR_16E),
+    ('domain', STRING),
+    ('DEK_info', N10pem_ctx_st4DOLLAR_17E),
+    ('originator', POINTER(PEM_USER)),
+    ('num_recipient', c_int),
+    ('recipient', POINTER(POINTER(PEM_USER))),
+    ('x509_chain', POINTER(STACK)),
+    ('md', POINTER(EVP_MD)),
+    ('md_enc', c_int),
+    ('md_len', c_int),
+    ('md_data', STRING),
+    ('dec', POINTER(EVP_CIPHER)),
+    ('key_len', c_int),
+    ('key', POINTER(c_ubyte)),
+    ('data_enc', c_int),
+    ('data_len', c_int),
+    ('data', POINTER(c_ubyte)),
+]
+assert sizeof(pem_ctx_st) == 76, sizeof(pem_ctx_st)
+assert alignment(pem_ctx_st) == 4, alignment(pem_ctx_st)
+PEM_CTX = pem_ctx_st
+pem_password_cb = CFUNCTYPE(c_int, STRING, c_int, c_int, c_void_p)
+class pkcs7_issuer_and_serial_st(Structure):
+    pass
+pkcs7_issuer_and_serial_st._fields_ = [
+    ('issuer', POINTER(X509_NAME)),
+    ('serial', POINTER(ASN1_INTEGER)),
+]
+assert sizeof(pkcs7_issuer_and_serial_st) == 8, sizeof(pkcs7_issuer_and_serial_st)
+assert alignment(pkcs7_issuer_and_serial_st) == 4, alignment(pkcs7_issuer_and_serial_st)
+PKCS7_ISSUER_AND_SERIAL = pkcs7_issuer_and_serial_st
+class pkcs7_signer_info_st(Structure):
+    pass
+pkcs7_signer_info_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
+    ('digest_alg', POINTER(X509_ALGOR)),
+    ('auth_attr', POINTER(STACK)),
+    ('digest_enc_alg', POINTER(X509_ALGOR)),
+    ('enc_digest', POINTER(ASN1_OCTET_STRING)),
+    ('unauth_attr', POINTER(STACK)),
+    ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(pkcs7_signer_info_st) == 32, sizeof(pkcs7_signer_info_st)
+assert alignment(pkcs7_signer_info_st) == 4, alignment(pkcs7_signer_info_st)
+PKCS7_SIGNER_INFO = pkcs7_signer_info_st
+class pkcs7_recip_info_st(Structure):
+    pass
+pkcs7_recip_info_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
+    ('key_enc_algor', POINTER(X509_ALGOR)),
+    ('enc_key', POINTER(ASN1_OCTET_STRING)),
+    ('cert', POINTER(X509)),
+]
+assert sizeof(pkcs7_recip_info_st) == 20, sizeof(pkcs7_recip_info_st)
+assert alignment(pkcs7_recip_info_st) == 4, alignment(pkcs7_recip_info_st)
+PKCS7_RECIP_INFO = pkcs7_recip_info_st
+class pkcs7_signed_st(Structure):
+    pass
+class pkcs7_st(Structure):
+    pass
+pkcs7_signed_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('md_algs', POINTER(STACK)),
+    ('cert', POINTER(STACK)),
+    ('crl', POINTER(STACK)),
+    ('signer_info', POINTER(STACK)),
+    ('contents', POINTER(pkcs7_st)),
+]
+assert sizeof(pkcs7_signed_st) == 24, sizeof(pkcs7_signed_st)
+assert alignment(pkcs7_signed_st) == 4, alignment(pkcs7_signed_st)
+PKCS7_SIGNED = pkcs7_signed_st
+class pkcs7_enc_content_st(Structure):
+    pass
+pkcs7_enc_content_st._fields_ = [
+    ('content_type', POINTER(ASN1_OBJECT)),
+    ('algorithm', POINTER(X509_ALGOR)),
+    ('enc_data', POINTER(ASN1_OCTET_STRING)),
+    ('cipher', POINTER(EVP_CIPHER)),
+]
+assert sizeof(pkcs7_enc_content_st) == 16, sizeof(pkcs7_enc_content_st)
+assert alignment(pkcs7_enc_content_st) == 4, alignment(pkcs7_enc_content_st)
+PKCS7_ENC_CONTENT = pkcs7_enc_content_st
+class pkcs7_enveloped_st(Structure):
+    pass
+pkcs7_enveloped_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('recipientinfo', POINTER(STACK)),
+    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+]
+assert sizeof(pkcs7_enveloped_st) == 12, sizeof(pkcs7_enveloped_st)
+assert alignment(pkcs7_enveloped_st) == 4, alignment(pkcs7_enveloped_st)
+PKCS7_ENVELOPE = pkcs7_enveloped_st
+class pkcs7_signedandenveloped_st(Structure):
+    pass
+pkcs7_signedandenveloped_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('md_algs', POINTER(STACK)),
+    ('cert', POINTER(STACK)),
+    ('crl', POINTER(STACK)),
+    ('signer_info', POINTER(STACK)),
+    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+    ('recipientinfo', POINTER(STACK)),
+]
+assert sizeof(pkcs7_signedandenveloped_st) == 28, sizeof(pkcs7_signedandenveloped_st)
+assert alignment(pkcs7_signedandenveloped_st) == 4, alignment(pkcs7_signedandenveloped_st)
+PKCS7_SIGN_ENVELOPE = pkcs7_signedandenveloped_st
+class pkcs7_digest_st(Structure):
+    pass
+pkcs7_digest_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('md', POINTER(X509_ALGOR)),
+    ('contents', POINTER(pkcs7_st)),
+    ('digest', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(pkcs7_digest_st) == 16, sizeof(pkcs7_digest_st)
+assert alignment(pkcs7_digest_st) == 4, alignment(pkcs7_digest_st)
+PKCS7_DIGEST = pkcs7_digest_st
+class pkcs7_encrypted_st(Structure):
+    pass
+pkcs7_encrypted_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+]
+assert sizeof(pkcs7_encrypted_st) == 8, sizeof(pkcs7_encrypted_st)
+assert alignment(pkcs7_encrypted_st) == 4, alignment(pkcs7_encrypted_st)
+PKCS7_ENCRYPT = pkcs7_encrypted_st
+class N8pkcs7_st4DOLLAR_15E(Union):
+    pass
+N8pkcs7_st4DOLLAR_15E._fields_ = [
+    ('ptr', STRING),
+    ('data', POINTER(ASN1_OCTET_STRING)),
+    ('sign', POINTER(PKCS7_SIGNED)),
+    ('enveloped', POINTER(PKCS7_ENVELOPE)),
+    ('signed_and_enveloped', POINTER(PKCS7_SIGN_ENVELOPE)),
+    ('digest', POINTER(PKCS7_DIGEST)),
+    ('encrypted', POINTER(PKCS7_ENCRYPT)),
+    ('other', POINTER(ASN1_TYPE)),
+]
+assert sizeof(N8pkcs7_st4DOLLAR_15E) == 4, sizeof(N8pkcs7_st4DOLLAR_15E)
+assert alignment(N8pkcs7_st4DOLLAR_15E) == 4, alignment(N8pkcs7_st4DOLLAR_15E)
+pkcs7_st._fields_ = [
+    ('asn1', POINTER(c_ubyte)),
+    ('length', c_long),
+    ('state', c_int),
+    ('detached', c_int),
+    ('type', POINTER(ASN1_OBJECT)),
+    ('d', N8pkcs7_st4DOLLAR_15E),
+]
+assert sizeof(pkcs7_st) == 24, sizeof(pkcs7_st)
+assert alignment(pkcs7_st) == 4, alignment(pkcs7_st)
+PKCS7 = pkcs7_st
+class rc2_key_st(Structure):
+    pass
+rc2_key_st._fields_ = [
+    ('data', c_uint * 64),
+]
+assert sizeof(rc2_key_st) == 256, sizeof(rc2_key_st)
+assert alignment(rc2_key_st) == 4, alignment(rc2_key_st)
+RC2_KEY = rc2_key_st
+class rc4_key_st(Structure):
+    pass
+rc4_key_st._fields_ = [
+    ('x', c_ubyte),
+    ('y', c_ubyte),
+    ('data', c_ubyte * 256),
+]
+assert sizeof(rc4_key_st) == 258, sizeof(rc4_key_st)
+assert alignment(rc4_key_st) == 1, alignment(rc4_key_st)
+RC4_KEY = rc4_key_st
+class rc5_key_st(Structure):
+    pass
+rc5_key_st._fields_ = [
+    ('rounds', c_int),
+    ('data', c_ulong * 34),
+]
+assert sizeof(rc5_key_st) == 140, sizeof(rc5_key_st)
+assert alignment(rc5_key_st) == 4, alignment(rc5_key_st)
+RC5_32_KEY = rc5_key_st
+class RIPEMD160state_st(Structure):
+    pass
+RIPEMD160state_st._fields_ = [
+    ('A', c_uint),
+    ('B', c_uint),
+    ('C', c_uint),
+    ('D', c_uint),
+    ('E', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(RIPEMD160state_st) == 96, sizeof(RIPEMD160state_st)
+assert alignment(RIPEMD160state_st) == 4, alignment(RIPEMD160state_st)
+RIPEMD160_CTX = RIPEMD160state_st
+RSA = rsa_st
+class rsa_meth_st(Structure):
+    pass
+rsa_meth_st._fields_ = [
+    ('name', STRING),
+    ('rsa_pub_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_pub_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_priv_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_priv_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(RSA))),
+    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('init', CFUNCTYPE(c_int, POINTER(RSA))),
+    ('finish', CFUNCTYPE(c_int, POINTER(RSA))),
+    ('flags', c_int),
+    ('app_data', STRING),
+    ('rsa_sign', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), POINTER(c_uint), POINTER(RSA))),
+    ('rsa_verify', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), c_uint, POINTER(RSA))),
+]
+assert sizeof(rsa_meth_st) == 52, sizeof(rsa_meth_st)
+assert alignment(rsa_meth_st) == 4, alignment(rsa_meth_st)
+RSA_METHOD = rsa_meth_st
+rsa_st._fields_ = [
+    ('pad', c_int),
+    ('version', c_long),
+    ('meth', POINTER(RSA_METHOD)),
+    ('engine', POINTER(ENGINE)),
+    ('n', POINTER(BIGNUM)),
+    ('e', POINTER(BIGNUM)),
+    ('d', POINTER(BIGNUM)),
+    ('p', POINTER(BIGNUM)),
+    ('q', POINTER(BIGNUM)),
+    ('dmp1', POINTER(BIGNUM)),
+    ('dmq1', POINTER(BIGNUM)),
+    ('iqmp', POINTER(BIGNUM)),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('references', c_int),
+    ('flags', c_int),
+    ('_method_mod_n', POINTER(BN_MONT_CTX)),
+    ('_method_mod_p', POINTER(BN_MONT_CTX)),
+    ('_method_mod_q', POINTER(BN_MONT_CTX)),
+    ('bignum_data', STRING),
+    ('blinding', POINTER(BN_BLINDING)),
+]
+assert sizeof(rsa_st) == 84, sizeof(rsa_st)
+assert alignment(rsa_st) == 4, alignment(rsa_st)
+openssl_fptr = CFUNCTYPE(None)
+class SHAstate_st(Structure):
+    pass
+SHAstate_st._fields_ = [
+    ('h0', c_uint),
+    ('h1', c_uint),
+    ('h2', c_uint),
+    ('h3', c_uint),
+    ('h4', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(SHAstate_st) == 96, sizeof(SHAstate_st)
+assert alignment(SHAstate_st) == 4, alignment(SHAstate_st)
+SHA_CTX = SHAstate_st
+class ssl_st(Structure):
+    pass
+ssl_crock_st = POINTER(ssl_st)
+class ssl_cipher_st(Structure):
+    pass
+ssl_cipher_st._fields_ = [
+    ('valid', c_int),
+    ('name', STRING),
+    ('id', c_ulong),
+    ('algorithms', c_ulong),
+    ('algo_strength', c_ulong),
+    ('algorithm2', c_ulong),
+    ('strength_bits', c_int),
+    ('alg_bits', c_int),
+    ('mask', c_ulong),
+    ('mask_strength', c_ulong),
+]
+assert sizeof(ssl_cipher_st) == 40, sizeof(ssl_cipher_st)
+assert alignment(ssl_cipher_st) == 4, alignment(ssl_cipher_st)
+SSL_CIPHER = ssl_cipher_st
+SSL = ssl_st
+class ssl_ctx_st(Structure):
+    pass
+SSL_CTX = ssl_ctx_st
+class ssl_method_st(Structure):
+    pass
+class ssl3_enc_method(Structure):
+    pass
+ssl_method_st._fields_ = [
+    ('version', c_int),
+    ('ssl_new', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_clear', CFUNCTYPE(None, POINTER(SSL))),
+    ('ssl_free', CFUNCTYPE(None, POINTER(SSL))),
+    ('ssl_accept', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_connect', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_read', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+    ('ssl_peek', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+    ('ssl_write', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+    ('ssl_shutdown', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_renegotiate', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_renegotiate_check', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, c_long, c_void_p)),
+    ('ssl_ctx_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, c_long, c_void_p)),
+    ('get_cipher_by_char', CFUNCTYPE(POINTER(SSL_CIPHER), POINTER(c_ubyte))),
+    ('put_cipher_by_char', CFUNCTYPE(c_int, POINTER(SSL_CIPHER), POINTER(c_ubyte))),
+    ('ssl_pending', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('num_ciphers', CFUNCTYPE(c_int)),
+    ('get_cipher', CFUNCTYPE(POINTER(SSL_CIPHER), c_uint)),
+    ('get_ssl_method', CFUNCTYPE(POINTER(ssl_method_st), c_int)),
+    ('get_timeout', CFUNCTYPE(c_long)),
+    ('ssl3_enc', POINTER(ssl3_enc_method)),
+    ('ssl_version', CFUNCTYPE(c_int)),
+    ('ssl_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, CFUNCTYPE(None))),
+    ('ssl_ctx_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, CFUNCTYPE(None))),
+]
+assert sizeof(ssl_method_st) == 100, sizeof(ssl_method_st)
+assert alignment(ssl_method_st) == 4, alignment(ssl_method_st)
+ssl3_enc_method._fields_ = [
+]
+SSL_METHOD = ssl_method_st
+class ssl_session_st(Structure):
+    pass
+class sess_cert_st(Structure):
+    pass
+ssl_session_st._fields_ = [
+    ('ssl_version', c_int),
+    ('key_arg_length', c_uint),
+    ('key_arg', c_ubyte * 8),
+    ('master_key_length', c_int),
+    ('master_key', c_ubyte * 48),
+    ('session_id_length', c_uint),
+    ('session_id', c_ubyte * 32),
+    ('sid_ctx_length', c_uint),
+    ('sid_ctx', c_ubyte * 32),
+    ('not_resumable', c_int),
+    ('sess_cert', POINTER(sess_cert_st)),
+    ('peer', POINTER(X509)),
+    ('verify_result', c_long),
+    ('references', c_int),
+    ('timeout', c_long),
+    ('time', c_long),
+    ('compress_meth', c_int),
+    ('cipher', POINTER(SSL_CIPHER)),
+    ('cipher_id', c_ulong),
+    ('ciphers', POINTER(STACK)),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('prev', POINTER(ssl_session_st)),
+    ('next', POINTER(ssl_session_st)),
+]
+assert sizeof(ssl_session_st) == 200, sizeof(ssl_session_st)
+assert alignment(ssl_session_st) == 4, alignment(ssl_session_st)
+sess_cert_st._fields_ = [
+]
+SSL_SESSION = ssl_session_st
+GEN_SESSION_CB = CFUNCTYPE(c_int, POINTER(SSL), POINTER(c_ubyte), POINTER(c_uint))
+class ssl_comp_st(Structure):
+    pass
+ssl_comp_st._fields_ = [
+    ('id', c_int),
+    ('name', STRING),
+    ('method', POINTER(COMP_METHOD)),
+]
+assert sizeof(ssl_comp_st) == 12, sizeof(ssl_comp_st)
+assert alignment(ssl_comp_st) == 4, alignment(ssl_comp_st)
+SSL_COMP = ssl_comp_st
+class N10ssl_ctx_st4DOLLAR_18E(Structure):
+    pass
+N10ssl_ctx_st4DOLLAR_18E._fields_ = [
+    ('sess_connect', c_int),
+    ('sess_connect_renegotiate', c_int),
+    ('sess_connect_good', c_int),
+    ('sess_accept', c_int),
+    ('sess_accept_renegotiate', c_int),
+    ('sess_accept_good', c_int),
+    ('sess_miss', c_int),
+    ('sess_timeout', c_int),
+    ('sess_cache_full', c_int),
+    ('sess_hit', c_int),
+    ('sess_cb_hit', c_int),
+]
+assert sizeof(N10ssl_ctx_st4DOLLAR_18E) == 44, sizeof(N10ssl_ctx_st4DOLLAR_18E)
+assert alignment(N10ssl_ctx_st4DOLLAR_18E) == 4, alignment(N10ssl_ctx_st4DOLLAR_18E)
+class cert_st(Structure):
+    pass
+ssl_ctx_st._fields_ = [
+    ('method', POINTER(SSL_METHOD)),
+    ('cipher_list', POINTER(STACK)),
+    ('cipher_list_by_id', POINTER(STACK)),
+    ('cert_store', POINTER(x509_store_st)),
+    ('sessions', POINTER(lhash_st)),
+    ('session_cache_size', c_ulong),
+    ('session_cache_head', POINTER(ssl_session_st)),
+    ('session_cache_tail', POINTER(ssl_session_st)),
+    ('session_cache_mode', c_int),
+    ('session_timeout', c_long),
+    ('new_session_cb', CFUNCTYPE(c_int, POINTER(ssl_st), POINTER(SSL_SESSION))),
+    ('remove_session_cb', CFUNCTYPE(None, POINTER(ssl_ctx_st), POINTER(SSL_SESSION))),
+    ('get_session_cb', CFUNCTYPE(POINTER(SSL_SESSION), POINTER(ssl_st), POINTER(c_ubyte), c_int, POINTER(c_int))),
+    ('stats', N10ssl_ctx_st4DOLLAR_18E),
+    ('references', c_int),
+    ('app_verify_callback', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), c_void_p)),
+    ('app_verify_arg', c_void_p),
+    ('default_passwd_callback', POINTER(pem_password_cb)),
+    ('default_passwd_callback_userdata', c_void_p),
+    ('client_cert_cb', CFUNCTYPE(c_int, POINTER(SSL), POINTER(POINTER(X509)), POINTER(POINTER(EVP_PKEY)))),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('rsa_md5', POINTER(EVP_MD)),
+    ('md5', POINTER(EVP_MD)),
+    ('sha1', POINTER(EVP_MD)),
+    ('extra_certs', POINTER(STACK)),
+    ('comp_methods', POINTER(STACK)),
+    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
+    ('client_CA', POINTER(STACK)),
+    ('options', c_ulong),
+    ('mode', c_ulong),
+    ('max_cert_list', c_long),
+    ('cert', POINTER(cert_st)),
+    ('read_ahead', c_int),
+    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
+    ('msg_callback_arg', c_void_p),
+    ('verify_mode', c_int),
+    ('verify_depth', c_int),
+    ('sid_ctx_length', c_uint),
+    ('sid_ctx', c_ubyte * 32),
+    ('default_verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('generate_session_id', GEN_SESSION_CB),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('quiet_shutdown', c_int),
+]
+assert sizeof(ssl_ctx_st) == 248, sizeof(ssl_ctx_st)
+assert alignment(ssl_ctx_st) == 4, alignment(ssl_ctx_st)
+cert_st._fields_ = [
+]
+class ssl2_state_st(Structure):
+    pass
+class ssl3_state_st(Structure):
+    pass
+ssl_st._fields_ = [
+    ('version', c_int),
+    ('type', c_int),
+    ('method', POINTER(SSL_METHOD)),
+    ('rbio', POINTER(BIO)),
+    ('wbio', POINTER(BIO)),
+    ('bbio', POINTER(BIO)),
+    ('rwstate', c_int),
+    ('in_handshake', c_int),
+    ('handshake_func', CFUNCTYPE(c_int)),
+    ('server', c_int),
+    ('new_session', c_int),
+    ('quiet_shutdown', c_int),
+    ('shutdown', c_int),
+    ('state', c_int),
+    ('rstate', c_int),
+    ('init_buf', POINTER(BUF_MEM)),
+    ('init_msg', c_void_p),
+    ('init_num', c_int),
+    ('init_off', c_int),
+    ('packet', POINTER(c_ubyte)),
+    ('packet_length', c_uint),
+    ('s2', POINTER(ssl2_state_st)),
+    ('s3', POINTER(ssl3_state_st)),
+    ('read_ahead', c_int),
+    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
+    ('msg_callback_arg', c_void_p),
+    ('hit', c_int),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('cipher_list', POINTER(STACK)),
+    ('cipher_list_by_id', POINTER(STACK)),
+    ('enc_read_ctx', POINTER(EVP_CIPHER_CTX)),
+    ('read_hash', POINTER(EVP_MD)),
+    ('expand', POINTER(COMP_CTX)),
+    ('enc_write_ctx', POINTER(EVP_CIPHER_CTX)),
+    ('write_hash', POINTER(EVP_MD)),
+    ('compress', POINTER(COMP_CTX)),
+    ('cert', POINTER(cert_st)),
+    ('sid_ctx_length', c_uint),
+    ('sid_ctx', c_ubyte * 32),
+    ('session', POINTER(SSL_SESSION)),
+    ('generate_session_id', GEN_SESSION_CB),
+    ('verify_mode', c_int),
+    ('verify_depth', c_int),
+    ('verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
+    ('error', c_int),
+    ('error_code', c_int),
+    ('ctx', POINTER(SSL_CTX)),
+    ('debug', c_int),
+    ('verify_result', c_long),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('client_CA', POINTER(STACK)),
+    ('references', c_int),
+    ('options', c_ulong),
+    ('mode', c_ulong),
+    ('max_cert_list', c_long),
+    ('first_packet', c_int),
+    ('client_version', c_int),
+]
+assert sizeof(ssl_st) == 268, sizeof(ssl_st)
+assert alignment(ssl_st) == 4, alignment(ssl_st)
+class N13ssl2_state_st4DOLLAR_19E(Structure):
+    pass
+N13ssl2_state_st4DOLLAR_19E._fields_ = [
+    ('conn_id_length', c_uint),
+    ('cert_type', c_uint),
+    ('cert_length', c_uint),
+    ('csl', c_uint),
+    ('clear', c_uint),
+    ('enc', c_uint),
+    ('ccl', c_ubyte * 32),
+    ('cipher_spec_length', c_uint),
+    ('session_id_length', c_uint),
+    ('clen', c_uint),
+    ('rlen', c_uint),
+]
+assert sizeof(N13ssl2_state_st4DOLLAR_19E) == 72, sizeof(N13ssl2_state_st4DOLLAR_19E)
+assert alignment(N13ssl2_state_st4DOLLAR_19E) == 4, alignment(N13ssl2_state_st4DOLLAR_19E)
+ssl2_state_st._fields_ = [
+    ('three_byte_header', c_int),
+    ('clear_text', c_int),
+    ('escape', c_int),
+    ('ssl2_rollback', c_int),
+    ('wnum', c_uint),
+    ('wpend_tot', c_int),
+    ('wpend_buf', POINTER(c_ubyte)),
+    ('wpend_off', c_int),
+    ('wpend_len', c_int),
+    ('wpend_ret', c_int),
+    ('rbuf_left', c_int),
+    ('rbuf_offs', c_int),
+    ('rbuf', POINTER(c_ubyte)),
+    ('wbuf', POINTER(c_ubyte)),
+    ('write_ptr', POINTER(c_ubyte)),
+    ('padding', c_uint),
+    ('rlength', c_uint),
+    ('ract_data_length', c_int),
+    ('wlength', c_uint),
+    ('wact_data_length', c_int),
+    ('ract_data', POINTER(c_ubyte)),
+    ('wact_data', POINTER(c_ubyte)),
+    ('mac_data', POINTER(c_ubyte)),
+    ('read_key', POINTER(c_ubyte)),
+    ('write_key', POINTER(c_ubyte)),
+    ('challenge_length', c_uint),
+    ('challenge', c_ubyte * 32),
+    ('conn_id_length', c_uint),
+    ('conn_id', c_ubyte * 16),
+    ('key_material_length', c_uint),
+    ('key_material', c_ubyte * 48),
+    ('read_sequence', c_ulong),
+    ('write_sequence', c_ulong),
+    ('tmp', N13ssl2_state_st4DOLLAR_19E),
+]
+assert sizeof(ssl2_state_st) == 288, sizeof(ssl2_state_st)
+assert alignment(ssl2_state_st) == 4, alignment(ssl2_state_st)
+SSL2_STATE = ssl2_state_st
+class ssl3_record_st(Structure):
+    pass
+ssl3_record_st._fields_ = [
+    ('type', c_int),
+    ('length', c_uint),
+    ('off', c_uint),
+    ('data', POINTER(c_ubyte)),
+    ('input', POINTER(c_ubyte)),
+    ('comp', POINTER(c_ubyte)),
+]
+assert sizeof(ssl3_record_st) == 24, sizeof(ssl3_record_st)
+assert alignment(ssl3_record_st) == 4, alignment(ssl3_record_st)
+SSL3_RECORD = ssl3_record_st
+class ssl3_buffer_st(Structure):
+    pass
+size_t = __darwin_size_t
+ssl3_buffer_st._fields_ = [
+    ('buf', POINTER(c_ubyte)),
+    ('len', size_t),
+    ('offset', c_int),
+    ('left', c_int),
+]
+assert sizeof(ssl3_buffer_st) == 16, sizeof(ssl3_buffer_st)
+assert alignment(ssl3_buffer_st) == 4, alignment(ssl3_buffer_st)
+SSL3_BUFFER = ssl3_buffer_st
+class N13ssl3_state_st4DOLLAR_20E(Structure):
+    pass
+N13ssl3_state_st4DOLLAR_20E._fields_ = [
+    ('cert_verify_md', c_ubyte * 72),
+    ('finish_md', c_ubyte * 72),
+    ('finish_md_len', c_int),
+    ('peer_finish_md', c_ubyte * 72),
+    ('peer_finish_md_len', c_int),
+    ('message_size', c_ulong),
+    ('message_type', c_int),
+    ('new_cipher', POINTER(SSL_CIPHER)),
+    ('dh', POINTER(DH)),
+    ('next_state', c_int),
+    ('reuse_message', c_int),
+    ('cert_req', c_int),
+    ('ctype_num', c_int),
+    ('ctype', c_char * 7),
+    ('ca_names', POINTER(STACK)),
+    ('use_rsa_tmp', c_int),
+    ('key_block_length', c_int),
+    ('key_block', POINTER(c_ubyte)),
+    ('new_sym_enc', POINTER(EVP_CIPHER)),
+    ('new_hash', POINTER(EVP_MD)),
+    ('new_compression', POINTER(SSL_COMP)),
+    ('cert_request', c_int),
+]
+assert sizeof(N13ssl3_state_st4DOLLAR_20E) == 296, sizeof(N13ssl3_state_st4DOLLAR_20E)
+assert alignment(N13ssl3_state_st4DOLLAR_20E) == 4, alignment(N13ssl3_state_st4DOLLAR_20E)
+ssl3_state_st._fields_ = [
+    ('flags', c_long),
+    ('delay_buf_pop_ret', c_int),
+    ('read_sequence', c_ubyte * 8),
+    ('read_mac_secret', c_ubyte * 36),
+    ('write_sequence', c_ubyte * 8),
+    ('write_mac_secret', c_ubyte * 36),
+    ('server_random', c_ubyte * 32),
+    ('client_random', c_ubyte * 32),
+    ('need_empty_fragments', c_int),
+    ('empty_fragment_done', c_int),
+    ('rbuf', SSL3_BUFFER),
+    ('wbuf', SSL3_BUFFER),
+    ('rrec', SSL3_RECORD),
+    ('wrec', SSL3_RECORD),
+    ('alert_fragment', c_ubyte * 2),
+    ('alert_fragment_len', c_uint),
+    ('handshake_fragment', c_ubyte * 4),
+    ('handshake_fragment_len', c_uint),
+    ('wnum', c_uint),
+    ('wpend_tot', c_int),
+    ('wpend_type', c_int),
+    ('wpend_ret', c_int),
+    ('wpend_buf', POINTER(c_ubyte)),
+    ('finish_dgst1', EVP_MD_CTX),
+    ('finish_dgst2', EVP_MD_CTX),
+    ('change_cipher_spec', c_int),
+    ('warn_alert', c_int),
+    ('fatal_alert', c_int),
+    ('alert_dispatch', c_int),
+    ('send_alert', c_ubyte * 2),
+    ('renegotiate', c_int),
+    ('total_renegotiations', c_int),
+    ('num_renegotiations', c_int),
+    ('in_read_app_data', c_int),
+    ('tmp', N13ssl3_state_st4DOLLAR_20E),
+]
+assert sizeof(ssl3_state_st) == 648, sizeof(ssl3_state_st)
+assert alignment(ssl3_state_st) == 4, alignment(ssl3_state_st)
+SSL3_STATE = ssl3_state_st
+stack_st._fields_ = [
+    ('num', c_int),
+    ('data', POINTER(STRING)),
+    ('sorted', c_int),
+    ('num_alloc', c_int),
+    ('comp', CFUNCTYPE(c_int, POINTER(STRING), POINTER(STRING))),
+]
+assert sizeof(stack_st) == 20, sizeof(stack_st)
+assert alignment(stack_st) == 4, alignment(stack_st)
+class ui_st(Structure):
+    pass
+ui_st._fields_ = [
+]
+UI = ui_st
+class ui_method_st(Structure):
+    pass
+ui_method_st._fields_ = [
+]
+UI_METHOD = ui_method_st
+class ui_string_st(Structure):
+    pass
+ui_string_st._fields_ = [
+]
+UI_STRING = ui_string_st
+
+# values for enumeration 'UI_string_types'
+UI_string_types = c_int # enum
+class X509_objects_st(Structure):
+    pass
+X509_objects_st._fields_ = [
+    ('nid', c_int),
+    ('a2i', CFUNCTYPE(c_int)),
+    ('i2a', CFUNCTYPE(c_int)),
+]
+assert sizeof(X509_objects_st) == 12, sizeof(X509_objects_st)
+assert alignment(X509_objects_st) == 4, alignment(X509_objects_st)
+X509_OBJECTS = X509_objects_st
+X509_algor_st._fields_ = [
+    ('algorithm', POINTER(ASN1_OBJECT)),
+    ('parameter', POINTER(ASN1_TYPE)),
+]
+assert sizeof(X509_algor_st) == 8, sizeof(X509_algor_st)
+assert alignment(X509_algor_st) == 4, alignment(X509_algor_st)
+class X509_val_st(Structure):
+    pass
+X509_val_st._fields_ = [
+    ('notBefore', POINTER(ASN1_TIME)),
+    ('notAfter', POINTER(ASN1_TIME)),
+]
+assert sizeof(X509_val_st) == 8, sizeof(X509_val_st)
+assert alignment(X509_val_st) == 4, alignment(X509_val_st)
+X509_VAL = X509_val_st
+class X509_pubkey_st(Structure):
+    pass
+X509_pubkey_st._fields_ = [
+    ('algor', POINTER(X509_ALGOR)),
+    ('public_key', POINTER(ASN1_BIT_STRING)),
+    ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(X509_pubkey_st) == 12, sizeof(X509_pubkey_st)
+assert alignment(X509_pubkey_st) == 4, alignment(X509_pubkey_st)
+X509_PUBKEY = X509_pubkey_st
+class X509_sig_st(Structure):
+    pass
+X509_sig_st._fields_ = [
+    ('algor', POINTER(X509_ALGOR)),
+    ('digest', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(X509_sig_st) == 8, sizeof(X509_sig_st)
+assert alignment(X509_sig_st) == 4, alignment(X509_sig_st)
+X509_SIG = X509_sig_st
+class X509_name_entry_st(Structure):
+    pass
+X509_name_entry_st._fields_ = [
+    ('object', POINTER(ASN1_OBJECT)),
+    ('value', POINTER(ASN1_STRING)),
+    ('set', c_int),
+    ('size', c_int),
+]
+assert sizeof(X509_name_entry_st) == 16, sizeof(X509_name_entry_st)
+assert alignment(X509_name_entry_st) == 4, alignment(X509_name_entry_st)
+X509_NAME_ENTRY = X509_name_entry_st
+X509_name_st._fields_ = [
+    ('entries', POINTER(STACK)),
+    ('modified', c_int),
+    ('bytes', POINTER(BUF_MEM)),
+    ('hash', c_ulong),
+]
+assert sizeof(X509_name_st) == 16, sizeof(X509_name_st)
+assert alignment(X509_name_st) == 4, alignment(X509_name_st)
+class X509_extension_st(Structure):
+    pass
+X509_extension_st._fields_ = [
+    ('object', POINTER(ASN1_OBJECT)),
+    ('critical', ASN1_BOOLEAN),
+    ('value', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(X509_extension_st) == 12, sizeof(X509_extension_st)
+assert alignment(X509_extension_st) == 4, alignment(X509_extension_st)
+X509_EXTENSION = X509_extension_st
+class x509_attributes_st(Structure):
+    pass
+class N18x509_attributes_st4DOLLAR_13E(Union):
+    pass
+N18x509_attributes_st4DOLLAR_13E._fields_ = [
+    ('ptr', STRING),
+    ('set', POINTER(STACK)),
+    ('single', POINTER(ASN1_TYPE)),
+]
+assert sizeof(N18x509_attributes_st4DOLLAR_13E) == 4, sizeof(N18x509_attributes_st4DOLLAR_13E)
+assert alignment(N18x509_attributes_st4DOLLAR_13E) == 4, alignment(N18x509_attributes_st4DOLLAR_13E)
+x509_attributes_st._fields_ = [
+    ('object', POINTER(ASN1_OBJECT)),
+    ('single', c_int),
+    ('value', N18x509_attributes_st4DOLLAR_13E),
+]
+assert sizeof(x509_attributes_st) == 12, sizeof(x509_attributes_st)
+assert alignment(x509_attributes_st) == 4, alignment(x509_attributes_st)
+X509_ATTRIBUTE = x509_attributes_st
+class X509_req_info_st(Structure):
+    pass
+X509_req_info_st._fields_ = [
+    ('enc', ASN1_ENCODING),
+    ('version', POINTER(ASN1_INTEGER)),
+    ('subject', POINTER(X509_NAME)),
+    ('pubkey', POINTER(X509_PUBKEY)),
+    ('attributes', POINTER(STACK)),
+]
+assert sizeof(X509_req_info_st) == 28, sizeof(X509_req_info_st)
+assert alignment(X509_req_info_st) == 4, alignment(X509_req_info_st)
+X509_REQ_INFO = X509_req_info_st
+class X509_req_st(Structure):
+    pass
+X509_req_st._fields_ = [
+    ('req_info', POINTER(X509_REQ_INFO)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+    ('references', c_int),
+]
+assert sizeof(X509_req_st) == 16, sizeof(X509_req_st)
+assert alignment(X509_req_st) == 4, alignment(X509_req_st)
+X509_REQ = X509_req_st
+class x509_cinf_st(Structure):
+    pass
+x509_cinf_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('serialNumber', POINTER(ASN1_INTEGER)),
+    ('signature', POINTER(X509_ALGOR)),
+    ('issuer', POINTER(X509_NAME)),
+    ('validity', POINTER(X509_VAL)),
+    ('subject', POINTER(X509_NAME)),
+    ('key', POINTER(X509_PUBKEY)),
+    ('issuerUID', POINTER(ASN1_BIT_STRING)),
+    ('subjectUID', POINTER(ASN1_BIT_STRING)),
+    ('extensions', POINTER(STACK)),
+]
+assert sizeof(x509_cinf_st) == 40, sizeof(x509_cinf_st)
+assert alignment(x509_cinf_st) == 4, alignment(x509_cinf_st)
+X509_CINF = x509_cinf_st
+class x509_cert_aux_st(Structure):
+    pass
+x509_cert_aux_st._fields_ = [
+    ('trust', POINTER(STACK)),
+    ('reject', POINTER(STACK)),
+    ('alias', POINTER(ASN1_UTF8STRING)),
+    ('keyid', POINTER(ASN1_OCTET_STRING)),
+    ('other', POINTER(STACK)),
+]
+assert sizeof(x509_cert_aux_st) == 20, sizeof(x509_cert_aux_st)
+assert alignment(x509_cert_aux_st) == 4, alignment(x509_cert_aux_st)
+X509_CERT_AUX = x509_cert_aux_st
+class AUTHORITY_KEYID_st(Structure):
+    pass
+x509_st._fields_ = [
+    ('cert_info', POINTER(X509_CINF)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+    ('valid', c_int),
+    ('references', c_int),
+    ('name', STRING),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('ex_pathlen', c_long),
+    ('ex_flags', c_ulong),
+    ('ex_kusage', c_ulong),
+    ('ex_xkusage', c_ulong),
+    ('ex_nscert', c_ulong),
+    ('skid', POINTER(ASN1_OCTET_STRING)),
+    ('akid', POINTER(AUTHORITY_KEYID_st)),
+    ('sha1_hash', c_ubyte * 20),
+    ('aux', POINTER(X509_CERT_AUX)),
+]
+assert sizeof(x509_st) == 84, sizeof(x509_st)
+assert alignment(x509_st) == 4, alignment(x509_st)
+AUTHORITY_KEYID_st._fields_ = [
+]
+class x509_trust_st(Structure):
+    pass
+x509_trust_st._fields_ = [
+    ('trust', c_int),
+    ('flags', c_int),
+    ('check_trust', CFUNCTYPE(c_int, POINTER(x509_trust_st), POINTER(X509), c_int)),
+    ('name', STRING),
+    ('arg1', c_int),
+    ('arg2', c_void_p),
+]
+assert sizeof(x509_trust_st) == 24, sizeof(x509_trust_st)
+assert alignment(x509_trust_st) == 4, alignment(x509_trust_st)
+X509_TRUST = x509_trust_st
+class X509_revoked_st(Structure):
+    pass
+X509_revoked_st._fields_ = [
+    ('serialNumber', POINTER(ASN1_INTEGER)),
+    ('revocationDate', POINTER(ASN1_TIME)),
+    ('extensions', POINTER(STACK)),
+    ('sequence', c_int),
+]
+assert sizeof(X509_revoked_st) == 16, sizeof(X509_revoked_st)
+assert alignment(X509_revoked_st) == 4, alignment(X509_revoked_st)
+X509_REVOKED = X509_revoked_st
+class X509_crl_info_st(Structure):
+    pass
+X509_crl_info_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('issuer', POINTER(X509_NAME)),
+    ('lastUpdate', POINTER(ASN1_TIME)),
+    ('nextUpdate', POINTER(ASN1_TIME)),
+    ('revoked', POINTER(STACK)),
+    ('extensions', POINTER(STACK)),
+    ('enc', ASN1_ENCODING),
+]
+assert sizeof(X509_crl_info_st) == 40, sizeof(X509_crl_info_st)
+assert alignment(X509_crl_info_st) == 4, alignment(X509_crl_info_st)
+X509_CRL_INFO = X509_crl_info_st
+X509_crl_st._fields_ = [
+    ('crl', POINTER(X509_CRL_INFO)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+    ('references', c_int),
+]
+assert sizeof(X509_crl_st) == 16, sizeof(X509_crl_st)
+assert alignment(X509_crl_st) == 4, alignment(X509_crl_st)
+class private_key_st(Structure):
+    pass
+private_key_st._fields_ = [
+    ('version', c_int),
+    ('enc_algor', POINTER(X509_ALGOR)),
+    ('enc_pkey', POINTER(ASN1_OCTET_STRING)),
+    ('dec_pkey', POINTER(EVP_PKEY)),
+    ('key_length', c_int),
+    ('key_data', STRING),
+    ('key_free', c_int),
+    ('cipher', EVP_CIPHER_INFO),
+    ('references', c_int),
+]
+assert sizeof(private_key_st) == 52, sizeof(private_key_st)
+assert alignment(private_key_st) == 4, alignment(private_key_st)
+X509_PKEY = private_key_st
+class X509_info_st(Structure):
+    pass
+X509_info_st._fields_ = [
+    ('x509', POINTER(X509)),
+    ('crl', POINTER(X509_CRL)),
+    ('x_pkey', POINTER(X509_PKEY)),
+    ('enc_cipher', EVP_CIPHER_INFO),
+    ('enc_len', c_int),
+    ('enc_data', STRING),
+    ('references', c_int),
+]
+assert sizeof(X509_info_st) == 44, sizeof(X509_info_st)
+assert alignment(X509_info_st) == 4, alignment(X509_info_st)
+X509_INFO = X509_info_st
+class Netscape_spkac_st(Structure):
+    pass
+Netscape_spkac_st._fields_ = [
+    ('pubkey', POINTER(X509_PUBKEY)),
+    ('challenge', POINTER(ASN1_IA5STRING)),
+]
+assert sizeof(Netscape_spkac_st) == 8, sizeof(Netscape_spkac_st)
+assert alignment(Netscape_spkac_st) == 4, alignment(Netscape_spkac_st)
+NETSCAPE_SPKAC = Netscape_spkac_st
+class Netscape_spki_st(Structure):
+    pass
+Netscape_spki_st._fields_ = [
+    ('spkac', POINTER(NETSCAPE_SPKAC)),
+    ('sig_algor', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+]
+assert sizeof(Netscape_spki_st) == 12, sizeof(Netscape_spki_st)
+assert alignment(Netscape_spki_st) == 4, alignment(Netscape_spki_st)
+NETSCAPE_SPKI = Netscape_spki_st
+class Netscape_certificate_sequence(Structure):
+    pass
+Netscape_certificate_sequence._fields_ = [
+    ('type', POINTER(ASN1_OBJECT)),
+    ('certs', POINTER(STACK)),
+]
+assert sizeof(Netscape_certificate_sequence) == 8, sizeof(Netscape_certificate_sequence)
+assert alignment(Netscape_certificate_sequence) == 4, alignment(Netscape_certificate_sequence)
+NETSCAPE_CERT_SEQUENCE = Netscape_certificate_sequence
+class PBEPARAM_st(Structure):
+    pass
+PBEPARAM_st._fields_ = [
+    ('salt', POINTER(ASN1_OCTET_STRING)),
+    ('iter', POINTER(ASN1_INTEGER)),
+]
+assert sizeof(PBEPARAM_st) == 8, sizeof(PBEPARAM_st)
+assert alignment(PBEPARAM_st) == 4, alignment(PBEPARAM_st)
+PBEPARAM = PBEPARAM_st
+class PBE2PARAM_st(Structure):
+    pass
+PBE2PARAM_st._fields_ = [
+    ('keyfunc', POINTER(X509_ALGOR)),
+    ('encryption', POINTER(X509_ALGOR)),
+]
+assert sizeof(PBE2PARAM_st) == 8, sizeof(PBE2PARAM_st)
+assert alignment(PBE2PARAM_st) == 4, alignment(PBE2PARAM_st)
+PBE2PARAM = PBE2PARAM_st
+class PBKDF2PARAM_st(Structure):
+    pass
+PBKDF2PARAM_st._fields_ = [
+    ('salt', POINTER(ASN1_TYPE)),
+    ('iter', POINTER(ASN1_INTEGER)),
+    ('keylength', POINTER(ASN1_INTEGER)),
+    ('prf', POINTER(X509_ALGOR)),
+]
+assert sizeof(PBKDF2PARAM_st) == 16, sizeof(PBKDF2PARAM_st)
+assert alignment(PBKDF2PARAM_st) == 4, alignment(PBKDF2PARAM_st)
+PBKDF2PARAM = PBKDF2PARAM_st
+class pkcs8_priv_key_info_st(Structure):
+    pass
+pkcs8_priv_key_info_st._fields_ = [
+    ('broken', c_int),
+    ('version', POINTER(ASN1_INTEGER)),
+    ('pkeyalg', POINTER(X509_ALGOR)),
+    ('pkey', POINTER(ASN1_TYPE)),
+    ('attributes', POINTER(STACK)),
+]
+assert sizeof(pkcs8_priv_key_info_st) == 20, sizeof(pkcs8_priv_key_info_st)
+assert alignment(pkcs8_priv_key_info_st) == 4, alignment(pkcs8_priv_key_info_st)
+PKCS8_PRIV_KEY_INFO = pkcs8_priv_key_info_st
+class x509_hash_dir_st(Structure):
+    pass
+x509_hash_dir_st._fields_ = [
+    ('num_dirs', c_int),
+    ('dirs', POINTER(STRING)),
+    ('dirs_type', POINTER(c_int)),
+    ('num_dirs_alloced', c_int),
+]
+assert sizeof(x509_hash_dir_st) == 16, sizeof(x509_hash_dir_st)
+assert alignment(x509_hash_dir_st) == 4, alignment(x509_hash_dir_st)
+X509_HASH_DIR_CTX = x509_hash_dir_st
+class x509_file_st(Structure):
+    pass
+x509_file_st._fields_ = [
+    ('num_paths', c_int),
+    ('num_alloced', c_int),
+    ('paths', POINTER(STRING)),
+    ('path_type', POINTER(c_int)),
+]
+assert sizeof(x509_file_st) == 16, sizeof(x509_file_st)
+assert alignment(x509_file_st) == 4, alignment(x509_file_st)
+X509_CERT_FILE_CTX = x509_file_st
+class x509_object_st(Structure):
+    pass
+class N14x509_object_st4DOLLAR_14E(Union):
+    pass
+N14x509_object_st4DOLLAR_14E._fields_ = [
+    ('ptr', STRING),
+    ('x509', POINTER(X509)),
+    ('crl', POINTER(X509_CRL)),
+    ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(N14x509_object_st4DOLLAR_14E) == 4, sizeof(N14x509_object_st4DOLLAR_14E)
+assert alignment(N14x509_object_st4DOLLAR_14E) == 4, alignment(N14x509_object_st4DOLLAR_14E)
+x509_object_st._fields_ = [
+    ('type', c_int),
+    ('data', N14x509_object_st4DOLLAR_14E),
+]
+assert sizeof(x509_object_st) == 8, sizeof(x509_object_st)
+assert alignment(x509_object_st) == 4, alignment(x509_object_st)
+X509_OBJECT = x509_object_st
+class x509_lookup_st(Structure):
+    pass
+X509_LOOKUP = x509_lookup_st
+class x509_lookup_method_st(Structure):
+    pass
+x509_lookup_method_st._fields_ = [
+    ('name', STRING),
+    ('new_item', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+    ('free', CFUNCTYPE(None, POINTER(X509_LOOKUP))),
+    ('init', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+    ('shutdown', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+    ('ctrl', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_long, POINTER(STRING))),
+    ('get_by_subject', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(X509_OBJECT))),
+    ('get_by_issuer_serial', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(ASN1_INTEGER), POINTER(X509_OBJECT))),
+    ('get_by_fingerprint', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(c_ubyte), c_int, POINTER(X509_OBJECT))),
+    ('get_by_alias', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_int, POINTER(X509_OBJECT))),
+]
+assert sizeof(x509_lookup_method_st) == 40, sizeof(x509_lookup_method_st)
+assert alignment(x509_lookup_method_st) == 4, alignment(x509_lookup_method_st)
+X509_LOOKUP_METHOD = x509_lookup_method_st
+x509_store_st._fields_ = [
+    ('cache', c_int),
+    ('objs', POINTER(STACK)),
+    ('get_cert_methods', POINTER(STACK)),
+    ('flags', c_ulong),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
+    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
+    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
+    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
+    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('references', c_int),
+    ('depth', c_int),
+]
+assert sizeof(x509_store_st) == 76, sizeof(x509_store_st)
+assert alignment(x509_store_st) == 4, alignment(x509_store_st)
+x509_lookup_st._fields_ = [
+    ('init', c_int),
+    ('skip', c_int),
+    ('method', POINTER(X509_LOOKUP_METHOD)),
+    ('method_data', STRING),
+    ('store_ctx', POINTER(X509_STORE)),
+]
+assert sizeof(x509_lookup_st) == 20, sizeof(x509_lookup_st)
+assert alignment(x509_lookup_st) == 4, alignment(x509_lookup_st)
+time_t = __darwin_time_t
+x509_store_ctx_st._fields_ = [
+    ('ctx', POINTER(X509_STORE)),
+    ('current_method', c_int),
+    ('cert', POINTER(X509)),
+    ('untrusted', POINTER(STACK)),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('check_time', time_t),
+    ('flags', c_ulong),
+    ('other_ctx', c_void_p),
+    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
+    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
+    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
+    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
+    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('depth', c_int),
+    ('valid', c_int),
+    ('last_untrusted', c_int),
+    ('chain', POINTER(STACK)),
+    ('error_depth', c_int),
+    ('error', c_int),
+    ('current_cert', POINTER(X509)),
+    ('current_issuer', POINTER(X509)),
+    ('current_crl', POINTER(X509_CRL)),
+    ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(x509_store_ctx_st) == 116, sizeof(x509_store_ctx_st)
+assert alignment(x509_store_ctx_st) == 4, alignment(x509_store_ctx_st)
+va_list = __darwin_va_list
+__darwin_off_t = __int64_t
+fpos_t = __darwin_off_t
+class __sbuf(Structure):
+    pass
+__sbuf._fields_ = [
+    ('_base', POINTER(c_ubyte)),
+    ('_size', c_int),
+]
+assert sizeof(__sbuf) == 8, sizeof(__sbuf)
+assert alignment(__sbuf) == 4, alignment(__sbuf)
+class __sFILEX(Structure):
+    pass
+__sFILEX._fields_ = [
+]
+class __sFILE(Structure):
+    pass
+__sFILE._pack_ = 4
+__sFILE._fields_ = [
+    ('_p', POINTER(c_ubyte)),
+    ('_r', c_int),
+    ('_w', c_int),
+    ('_flags', c_short),
+    ('_file', c_short),
+    ('_bf', __sbuf),
+    ('_lbfsize', c_int),
+    ('_cookie', c_void_p),
+    ('_close', CFUNCTYPE(c_int, c_void_p)),
+    ('_read', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
+    ('_seek', CFUNCTYPE(fpos_t, c_void_p, c_longlong, c_int)),
+    ('_write', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
+    ('_ub', __sbuf),
+    ('_extra', POINTER(__sFILEX)),
+    ('_ur', c_int),
+    ('_ubuf', c_ubyte * 3),
+    ('_nbuf', c_ubyte * 1),
+    ('_lb', __sbuf),
+    ('_blksize', c_int),
+    ('_offset', fpos_t),
+]
+assert sizeof(__sFILE) == 88, sizeof(__sFILE)
+assert alignment(__sFILE) == 4, alignment(__sFILE)
+FILE = __sFILE
+ct_rune_t = __darwin_ct_rune_t
+rune_t = __darwin_rune_t
+class div_t(Structure):
+    pass
+div_t._fields_ = [
+    ('quot', c_int),
+    ('rem', c_int),
+]
+assert sizeof(div_t) == 8, sizeof(div_t)
+assert alignment(div_t) == 4, alignment(div_t)
+class ldiv_t(Structure):
+    pass
+ldiv_t._fields_ = [
+    ('quot', c_long),
+    ('rem', c_long),
+]
+assert sizeof(ldiv_t) == 8, sizeof(ldiv_t)
+assert alignment(ldiv_t) == 4, alignment(ldiv_t)
+class lldiv_t(Structure):
+    pass
+lldiv_t._pack_ = 4
+lldiv_t._fields_ = [
+    ('quot', c_longlong),
+    ('rem', c_longlong),
+]
+assert sizeof(lldiv_t) == 16, sizeof(lldiv_t)
+assert alignment(lldiv_t) == 4, alignment(lldiv_t)
+__darwin_dev_t = __int32_t
+dev_t = __darwin_dev_t
+__darwin_mode_t = __uint16_t
+mode_t = __darwin_mode_t
+class mcontext(Structure):
+    pass
+mcontext._fields_ = [
+]
+class mcontext64(Structure):
+    pass
+mcontext64._fields_ = [
+]
+class __darwin_pthread_handler_rec(Structure):
+    pass
+__darwin_pthread_handler_rec._fields_ = [
+    ('__routine', CFUNCTYPE(None, c_void_p)),
+    ('__arg', c_void_p),
+    ('__next', POINTER(__darwin_pthread_handler_rec)),
+]
+assert sizeof(__darwin_pthread_handler_rec) == 12, sizeof(__darwin_pthread_handler_rec)
+assert alignment(__darwin_pthread_handler_rec) == 4, alignment(__darwin_pthread_handler_rec)
+class _opaque_pthread_attr_t(Structure):
+    pass
+_opaque_pthread_attr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 36),
+]
+assert sizeof(_opaque_pthread_attr_t) == 40, sizeof(_opaque_pthread_attr_t)
+assert alignment(_opaque_pthread_attr_t) == 4, alignment(_opaque_pthread_attr_t)
+class _opaque_pthread_cond_t(Structure):
+    pass
+_opaque_pthread_cond_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 24),
+]
+assert sizeof(_opaque_pthread_cond_t) == 28, sizeof(_opaque_pthread_cond_t)
+assert alignment(_opaque_pthread_cond_t) == 4, alignment(_opaque_pthread_cond_t)
+class _opaque_pthread_condattr_t(Structure):
+    pass
+_opaque_pthread_condattr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 4),
+]
+assert sizeof(_opaque_pthread_condattr_t) == 8, sizeof(_opaque_pthread_condattr_t)
+assert alignment(_opaque_pthread_condattr_t) == 4, alignment(_opaque_pthread_condattr_t)
+class _opaque_pthread_mutex_t(Structure):
+    pass
+_opaque_pthread_mutex_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 40),
+]
+assert sizeof(_opaque_pthread_mutex_t) == 44, sizeof(_opaque_pthread_mutex_t)
+assert alignment(_opaque_pthread_mutex_t) == 4, alignment(_opaque_pthread_mutex_t)
+class _opaque_pthread_mutexattr_t(Structure):
+    pass
+_opaque_pthread_mutexattr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 8),
+]
+assert sizeof(_opaque_pthread_mutexattr_t) == 12, sizeof(_opaque_pthread_mutexattr_t)
+assert alignment(_opaque_pthread_mutexattr_t) == 4, alignment(_opaque_pthread_mutexattr_t)
+class _opaque_pthread_once_t(Structure):
+    pass
+_opaque_pthread_once_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 4),
+]
+assert sizeof(_opaque_pthread_once_t) == 8, sizeof(_opaque_pthread_once_t)
+assert alignment(_opaque_pthread_once_t) == 4, alignment(_opaque_pthread_once_t)
+class _opaque_pthread_rwlock_t(Structure):
+    pass
+_opaque_pthread_rwlock_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 124),
+]
+assert sizeof(_opaque_pthread_rwlock_t) == 128, sizeof(_opaque_pthread_rwlock_t)
+assert alignment(_opaque_pthread_rwlock_t) == 4, alignment(_opaque_pthread_rwlock_t)
+class _opaque_pthread_rwlockattr_t(Structure):
+    pass
+_opaque_pthread_rwlockattr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 12),
+]
+assert sizeof(_opaque_pthread_rwlockattr_t) == 16, sizeof(_opaque_pthread_rwlockattr_t)
+assert alignment(_opaque_pthread_rwlockattr_t) == 4, alignment(_opaque_pthread_rwlockattr_t)
+class _opaque_pthread_t(Structure):
+    pass
+_opaque_pthread_t._fields_ = [
+    ('__sig', c_long),
+    ('__cleanup_stack', POINTER(__darwin_pthread_handler_rec)),
+    ('__opaque', c_char * 596),
+]
+assert sizeof(_opaque_pthread_t) == 604, sizeof(_opaque_pthread_t)
+assert alignment(_opaque_pthread_t) == 4, alignment(_opaque_pthread_t)
+__darwin_blkcnt_t = __int64_t
+__darwin_blksize_t = __int32_t
+__darwin_fsblkcnt_t = c_uint
+__darwin_fsfilcnt_t = c_uint
+__darwin_gid_t = __uint32_t
+__darwin_id_t = __uint32_t
+__darwin_ino_t = __uint32_t
+__darwin_mach_port_name_t = __darwin_natural_t
+__darwin_mach_port_t = __darwin_mach_port_name_t
+__darwin_mcontext_t = POINTER(mcontext)
+__darwin_mcontext64_t = POINTER(mcontext64)
+__darwin_pid_t = __int32_t
+__darwin_pthread_attr_t = _opaque_pthread_attr_t
+__darwin_pthread_cond_t = _opaque_pthread_cond_t
+__darwin_pthread_condattr_t = _opaque_pthread_condattr_t
+__darwin_pthread_key_t = c_ulong
+__darwin_pthread_mutex_t = _opaque_pthread_mutex_t
+__darwin_pthread_mutexattr_t = _opaque_pthread_mutexattr_t
+__darwin_pthread_once_t = _opaque_pthread_once_t
+__darwin_pthread_rwlock_t = _opaque_pthread_rwlock_t
+__darwin_pthread_rwlockattr_t = _opaque_pthread_rwlockattr_t
+__darwin_pthread_t = POINTER(_opaque_pthread_t)
+__darwin_sigset_t = __uint32_t
+__darwin_suseconds_t = __int32_t
+__darwin_uid_t = __uint32_t
+__darwin_useconds_t = __uint32_t
+__darwin_uuid_t = c_ubyte * 16
+class sigaltstack(Structure):
+    pass
+sigaltstack._fields_ = [
+    ('ss_sp', c_void_p),
+    ('ss_size', __darwin_size_t),
+    ('ss_flags', c_int),
+]
+assert sizeof(sigaltstack) == 12, sizeof(sigaltstack)
+assert alignment(sigaltstack) == 4, alignment(sigaltstack)
+__darwin_stack_t = sigaltstack
+class ucontext(Structure):
+    pass
+ucontext._fields_ = [
+    ('uc_onstack', c_int),
+    ('uc_sigmask', __darwin_sigset_t),
+    ('uc_stack', __darwin_stack_t),
+    ('uc_link', POINTER(ucontext)),
+    ('uc_mcsize', __darwin_size_t),
+    ('uc_mcontext', __darwin_mcontext_t),
+]
+assert sizeof(ucontext) == 32, sizeof(ucontext)
+assert alignment(ucontext) == 4, alignment(ucontext)
+__darwin_ucontext_t = ucontext
+class ucontext64(Structure):
+    pass
+ucontext64._fields_ = [
+    ('uc_onstack', c_int),
+    ('uc_sigmask', __darwin_sigset_t),
+    ('uc_stack', __darwin_stack_t),
+    ('uc_link', POINTER(ucontext64)),
+    ('uc_mcsize', __darwin_size_t),
+    ('uc_mcontext64', __darwin_mcontext64_t),
+]
+assert sizeof(ucontext64) == 32, sizeof(ucontext64)
+assert alignment(ucontext64) == 4, alignment(ucontext64)
+__darwin_ucontext64_t = ucontext64
+class timeval(Structure):
+    pass
+timeval._fields_ = [
+    ('tv_sec', __darwin_time_t),
+    ('tv_usec', __darwin_suseconds_t),
+]
+assert sizeof(timeval) == 8, sizeof(timeval)
+assert alignment(timeval) == 4, alignment(timeval)
+rlim_t = __int64_t
+class rusage(Structure):
+    pass
+rusage._fields_ = [
+    ('ru_utime', timeval),
+    ('ru_stime', timeval),
+    ('ru_maxrss', c_long),
+    ('ru_ixrss', c_long),
+    ('ru_idrss', c_long),
+    ('ru_isrss', c_long),
+    ('ru_minflt', c_long),
+    ('ru_majflt', c_long),
+    ('ru_nswap', c_long),
+    ('ru_inblock', c_long),
+    ('ru_oublock', c_long),
+    ('ru_msgsnd', c_long),
+    ('ru_msgrcv', c_long),
+    ('ru_nsignals', c_long),
+    ('ru_nvcsw', c_long),
+    ('ru_nivcsw', c_long),
+]
+assert sizeof(rusage) == 72, sizeof(rusage)
+assert alignment(rusage) == 4, alignment(rusage)
+class rlimit(Structure):
+    pass
+rlimit._pack_ = 4
+rlimit._fields_ = [
+    ('rlim_cur', rlim_t),
+    ('rlim_max', rlim_t),
+]
+assert sizeof(rlimit) == 16, sizeof(rlimit)
+assert alignment(rlimit) == 4, alignment(rlimit)
+mcontext_t = __darwin_mcontext_t
+mcontext64_t = __darwin_mcontext64_t
+pthread_attr_t = __darwin_pthread_attr_t
+sigset_t = __darwin_sigset_t
+ucontext_t = __darwin_ucontext_t
+ucontext64_t = __darwin_ucontext64_t
+uid_t = __darwin_uid_t
+class sigval(Union):
+    pass
+sigval._fields_ = [
+    ('sival_int', c_int),
+    ('sival_ptr', c_void_p),
+]
+assert sizeof(sigval) == 4, sizeof(sigval)
+assert alignment(sigval) == 4, alignment(sigval)
+class sigevent(Structure):
+    pass
+sigevent._fields_ = [
+    ('sigev_notify', c_int),
+    ('sigev_signo', c_int),
+    ('sigev_value', sigval),
+    ('sigev_notify_function', CFUNCTYPE(None, sigval)),
+    ('sigev_notify_attributes', POINTER(pthread_attr_t)),
+]
+assert sizeof(sigevent) == 20, sizeof(sigevent)
+assert alignment(sigevent) == 4, alignment(sigevent)
+class __siginfo(Structure):
+    pass
+pid_t = __darwin_pid_t
+__siginfo._fields_ = [
+    ('si_signo', c_int),
+    ('si_errno', c_int),
+    ('si_code', c_int),
+    ('si_pid', pid_t),
+    ('si_uid', uid_t),
+    ('si_status', c_int),
+    ('si_addr', c_void_p),
+    ('si_value', sigval),
+    ('si_band', c_long),
+    ('pad', c_ulong * 7),
+]
+assert sizeof(__siginfo) == 64, sizeof(__siginfo)
+assert alignment(__siginfo) == 4, alignment(__siginfo)
+siginfo_t = __siginfo
+class __sigaction_u(Union):
+    pass
+__sigaction_u._fields_ = [
+    ('__sa_handler', CFUNCTYPE(None, c_int)),
+    ('__sa_sigaction', CFUNCTYPE(None, c_int, POINTER(__siginfo), c_void_p)),
+]
+assert sizeof(__sigaction_u) == 4, sizeof(__sigaction_u)
+assert alignment(__sigaction_u) == 4, alignment(__sigaction_u)
+class __sigaction(Structure):
+    pass
+__sigaction._fields_ = [
+    ('__sigaction_u', __sigaction_u),
+    ('sa_tramp', CFUNCTYPE(None, c_void_p, c_int, c_int, POINTER(siginfo_t), c_void_p)),
+    ('sa_mask', sigset_t),
+    ('sa_flags', c_int),
+]
+assert sizeof(__sigaction) == 16, sizeof(__sigaction)
+assert alignment(__sigaction) == 4, alignment(__sigaction)
+class sigaction(Structure):
+    pass
+sigaction._fields_ = [
+    ('__sigaction_u', __sigaction_u),
+    ('sa_mask', sigset_t),
+    ('sa_flags', c_int),
+]
+assert sizeof(sigaction) == 12, sizeof(sigaction)
+assert alignment(sigaction) == 4, alignment(sigaction)
+sig_t = CFUNCTYPE(None, c_int)
+stack_t = __darwin_stack_t
+class sigvec(Structure):
+    pass
+sigvec._fields_ = [
+    ('sv_handler', CFUNCTYPE(None, c_int)),
+    ('sv_mask', c_int),
+    ('sv_flags', c_int),
+]
+assert sizeof(sigvec) == 12, sizeof(sigvec)
+assert alignment(sigvec) == 4, alignment(sigvec)
+class sigstack(Structure):
+    pass
+sigstack._fields_ = [
+    ('ss_sp', STRING),
+    ('ss_onstack', c_int),
+]
+assert sizeof(sigstack) == 8, sizeof(sigstack)
+assert alignment(sigstack) == 4, alignment(sigstack)
+u_char = c_ubyte
+u_short = c_ushort
+u_int = c_uint
+u_long = c_ulong
+ushort = c_ushort
+uint = c_uint
+u_quad_t = u_int64_t
+quad_t = int64_t
+qaddr_t = POINTER(quad_t)
+caddr_t = STRING
+daddr_t = int32_t
+fixpt_t = u_int32_t
+blkcnt_t = __darwin_blkcnt_t
+blksize_t = __darwin_blksize_t
+gid_t = __darwin_gid_t
+in_addr_t = __uint32_t
+in_port_t = __uint16_t
+ino_t = __darwin_ino_t
+key_t = __int32_t
+nlink_t = __uint16_t
+off_t = __darwin_off_t
+segsz_t = int32_t
+swblk_t = int32_t
+clock_t = __darwin_clock_t
+ssize_t = __darwin_ssize_t
+useconds_t = __darwin_useconds_t
+suseconds_t = __darwin_suseconds_t
+fd_mask = __int32_t
+class fd_set(Structure):
+    pass
+fd_set._fields_ = [
+    ('fds_bits', __int32_t * 32),
+]
+assert sizeof(fd_set) == 128, sizeof(fd_set)
+assert alignment(fd_set) == 4, alignment(fd_set)
+pthread_cond_t = __darwin_pthread_cond_t
+pthread_condattr_t = __darwin_pthread_condattr_t
+pthread_mutex_t = __darwin_pthread_mutex_t
+pthread_mutexattr_t = __darwin_pthread_mutexattr_t
+pthread_once_t = __darwin_pthread_once_t
+pthread_rwlock_t = __darwin_pthread_rwlock_t
+pthread_rwlockattr_t = __darwin_pthread_rwlockattr_t
+pthread_t = __darwin_pthread_t
+pthread_key_t = __darwin_pthread_key_t
+fsblkcnt_t = __darwin_fsblkcnt_t
+fsfilcnt_t = __darwin_fsfilcnt_t
+
+# values for enumeration 'idtype_t'
+idtype_t = c_int # enum
+id_t = __darwin_id_t
+class wait(Union):
+    pass
+class N4wait3DOLLAR_3E(Structure):
+    pass
+N4wait3DOLLAR_3E._fields_ = [
+    ('w_Termsig', c_uint, 7),
+    ('w_Coredump', c_uint, 1),
+    ('w_Retcode', c_uint, 8),
+    ('w_Filler', c_uint, 16),
+]
+assert sizeof(N4wait3DOLLAR_3E) == 4, sizeof(N4wait3DOLLAR_3E)
+assert alignment(N4wait3DOLLAR_3E) == 4, alignment(N4wait3DOLLAR_3E)
+class N4wait3DOLLAR_4E(Structure):
+    pass
+N4wait3DOLLAR_4E._fields_ = [
+    ('w_Stopval', c_uint, 8),
+    ('w_Stopsig', c_uint, 8),
+    ('w_Filler', c_uint, 16),
+]
+assert sizeof(N4wait3DOLLAR_4E) == 4, sizeof(N4wait3DOLLAR_4E)
+assert alignment(N4wait3DOLLAR_4E) == 4, alignment(N4wait3DOLLAR_4E)
+wait._fields_ = [
+    ('w_status', c_int),
+    ('w_T', N4wait3DOLLAR_3E),
+    ('w_S', N4wait3DOLLAR_4E),
+]
+assert sizeof(wait) == 4, sizeof(wait)
+assert alignment(wait) == 4, alignment(wait)
+class timespec(Structure):
+    pass
+timespec._fields_ = [
+    ('tv_sec', time_t),
+    ('tv_nsec', c_long),
+]
+assert sizeof(timespec) == 8, sizeof(timespec)
+assert alignment(timespec) == 4, alignment(timespec)
+class tm(Structure):
+    pass
+tm._fields_ = [
+    ('tm_sec', c_int),
+    ('tm_min', c_int),
+    ('tm_hour', c_int),
+    ('tm_mday', c_int),
+    ('tm_mon', c_int),
+    ('tm_year', c_int),
+    ('tm_wday', c_int),
+    ('tm_yday', c_int),
+    ('tm_isdst', c_int),
+    ('tm_gmtoff', c_long),
+    ('tm_zone', STRING),
+]
+assert sizeof(tm) == 44, sizeof(tm)
+assert alignment(tm) == 4, alignment(tm)
+__gnuc_va_list = STRING
+ptrdiff_t = c_int
+int8_t = c_byte
+int16_t = c_short
+uint8_t = c_ubyte
+uint16_t = c_ushort
+uint32_t = c_uint
+uint64_t = c_ulonglong
+int_least8_t = int8_t
+int_least16_t = int16_t
+int_least32_t = int32_t
+int_least64_t = int64_t
+uint_least8_t = uint8_t
+uint_least16_t = uint16_t
+uint_least32_t = uint32_t
+uint_least64_t = uint64_t
+int_fast8_t = int8_t
+int_fast16_t = int16_t
+int_fast32_t = int32_t
+int_fast64_t = int64_t
+uint_fast8_t = uint8_t
+uint_fast16_t = uint16_t
+uint_fast32_t = uint32_t
+uint_fast64_t = uint64_t
+intptr_t = c_long
+uintptr_t = c_ulong
+intmax_t = c_longlong
+uintmax_t = c_ulonglong
+__all__ = ['ENGINE', 'pkcs7_enc_content_st', '__int16_t',
+           'X509_REVOKED', 'SSL_CTX', 'UIT_BOOLEAN',
+           '__darwin_time_t', 'ucontext64_t', 'int_fast32_t',
+           'pem_ctx_st', 'uint8_t', 'fpos_t', 'X509', 'COMP_CTX',
+           'tm', 'N10pem_ctx_st4DOLLAR_17E', 'swblk_t',
+           'ASN1_TEMPLATE', '__darwin_pthread_t', 'fixpt_t',
+           'BIO_METHOD', 'ASN1_PRINTABLESTRING', 'EVP_ENCODE_CTX',
+           'dh_method', 'bio_f_buffer_ctx_struct', 'in_port_t',
+           'X509_SIG', '__darwin_ssize_t', '__darwin_sigset_t',
+           'wait', 'uint_fast16_t', 'N12asn1_type_st4DOLLAR_11E',
+           'uint_least8_t', 'pthread_rwlock_t', 'ASN1_IA5STRING',
+           'fsfilcnt_t', 'ucontext', '__uint64_t', 'timespec',
+           'x509_cinf_st', 'COMP_METHOD', 'MD5_CTX', 'buf_mem_st',
+           'ASN1_ENCODING_st', 'PBEPARAM', 'X509_NAME_ENTRY',
+           '__darwin_va_list', 'ucontext_t', 'lhash_st',
+           'N4wait3DOLLAR_4E', '__darwin_uuid_t',
+           '_ossl_old_des_ks_struct', 'id_t', 'ASN1_BIT_STRING',
+           'va_list', '__darwin_wchar_t', 'pthread_key_t',
+           'pkcs7_signer_info_st', 'ASN1_METHOD', 'DSA_SIG', 'DSA',
+           'UIT_NONE', 'pthread_t', '__darwin_useconds_t',
+           'uint_fast8_t', 'UI_STRING', 'DES_cblock',
+           '__darwin_mcontext64_t', 'rlim_t', 'PEM_Encode_Seal_st',
+           'SHAstate_st', 'u_quad_t', 'openssl_fptr',
+           '_opaque_pthread_rwlockattr_t',
+           'N18x509_attributes_st4DOLLAR_13E',
+           '__darwin_pthread_rwlock_t', 'daddr_t', 'ui_string_st',
+           'x509_file_st', 'X509_req_info_st', 'int_least64_t',
+           'evp_Encode_Ctx_st', 'X509_OBJECTS', 'CRYPTO_EX_DATA',
+           '__int8_t', 'AUTHORITY_KEYID_st', '_opaque_pthread_attr_t',
+           'sigstack', 'EVP_CIPHER_CTX', 'X509_extension_st', 'pid_t',
+           'RSA_METHOD', 'PEM_USER', 'pem_recip_st', 'env_md_ctx_st',
+           'rc5_key_st', 'ui_st', 'X509_PUBKEY', 'u_int8_t',
+           'ASN1_ITEM_st', 'pkcs7_recip_info_st', 'ssl2_state_st',
+           'off_t', 'N10ssl_ctx_st4DOLLAR_18E', 'crypto_ex_data_st',
+           'ui_method_st', '__darwin_pthread_rwlockattr_t',
+           'CRYPTO_EX_dup', '__darwin_ino_t', '__sFILE',
+           'OSUnknownByteOrder', 'BN_MONT_CTX', 'ASN1_NULL', 'time_t',
+           'CRYPTO_EX_new', 'asn1_type_st', 'CRYPTO_EX_DATA_FUNCS',
+           'user_time_t', 'BIGNUM', 'pthread_rwlockattr_t',
+           'ASN1_VALUE_st', 'DH_METHOD', '__darwin_off_t',
+           '_opaque_pthread_t', 'bn_blinding_st', 'RSA', 'ssize_t',
+           'mcontext64_t', 'user_long_t', 'fsblkcnt_t', 'cert_st',
+           '__darwin_pthread_condattr_t', 'X509_PKEY',
+           '__darwin_id_t', '__darwin_nl_item', 'SSL2_STATE', 'FILE',
+           'pthread_mutexattr_t', 'size_t',
+           '_ossl_old_des_key_schedule', 'pkcs7_issuer_and_serial_st',
+           'sigval', 'CRYPTO_MEM_LEAK_CB', 'X509_NAME', 'blkcnt_t',
+           'uint_least16_t', '__darwin_dev_t', 'evp_cipher_info_st',
+           'BN_BLINDING', 'ssl3_state_st', 'uint_least64_t',
+           'user_addr_t', 'DES_key_schedule', 'RIPEMD160_CTX',
+           'u_char', 'X509_algor_st', 'uid_t', 'sess_cert_st',
+           'u_int64_t', 'u_int16_t', 'sigset_t', '__darwin_ptrdiff_t',
+           'ASN1_CTX', 'STACK', '__int32_t', 'UI_METHOD',
+           'NETSCAPE_SPKI', 'UIT_PROMPT', 'st_CRYPTO_EX_DATA_IMPL',
+           'cast_key_st', 'X509_HASH_DIR_CTX', 'sigevent',
+           'user_ssize_t', 'clock_t', 'aes_key_st',
+           '__darwin_socklen_t', '__darwin_intptr_t', 'int_fast64_t',
+           'asn1_string_table_st', 'uint_fast32_t',
+           'ASN1_VISIBLESTRING', 'DSA_SIG_st', 'obj_name_st',
+           'X509_LOOKUP_METHOD', 'u_int32_t', 'EVP_CIPHER_INFO',
+           '__gnuc_va_list', 'AES_KEY', 'PKCS7_ISSUER_AND_SERIAL',
+           'BN_CTX', '__darwin_blkcnt_t', 'key_t', 'SHA_CTX',
+           'pkcs7_signed_st', 'SSL', 'N10pem_ctx_st4DOLLAR_16E',
+           'pthread_attr_t', 'EVP_MD', 'uint', 'ASN1_BOOLEAN',
+           'ino_t', '__darwin_clock_t', 'ASN1_OCTET_STRING',
+           'asn1_ctx_st', 'BIO_F_BUFFER_CTX', 'bn_mont_ctx_st',
+           'X509_REQ_INFO', 'PEM_CTX', 'sigvec',
+           '__darwin_pthread_mutexattr_t', 'x509_attributes_st',
+           'stack_t', '__darwin_mode_t', '__mbstate_t',
+           'asn1_object_st', 'ASN1_ENCODING', '__uint8_t',
+           'LHASH_NODE', 'PKCS7_SIGNER_INFO', 'asn1_method_st',
+           'stack_st', 'bio_info_cb', 'div_t', 'UIT_VERIFY',
+           'PBEPARAM_st', 'N4wait3DOLLAR_3E', 'quad_t', '__siginfo',
+           '__darwin_mbstate_t', 'rsa_st', 'ASN1_UNIVERSALSTRING',
+           'uint64_t', 'ssl_comp_st', 'X509_OBJECT', 'pthread_cond_t',
+           'DH', '__darwin_wctype_t', 'PKCS7_ENVELOPE', 'ASN1_TLC_st',
+           'sig_atomic_t', 'BIO', 'nlink_t', 'BUF_MEM', 'SSL3_RECORD',
+           'bio_method_st', 'timeval', 'UI_string_types', 'BIO_dummy',
+           'ssl_ctx_st', 'NETSCAPE_CERT_SEQUENCE',
+           'BIT_STRING_BITNAME_st', '__darwin_pthread_attr_t',
+           'int8_t', '__darwin_wint_t', 'OBJ_NAME',
+           'PKCS8_PRIV_KEY_INFO', 'PBE2PARAM_st',
+           'LHASH_DOALL_FN_TYPE', 'x509_st', 'X509_VAL', 'dev_t',
+           'ASN1_TEMPLATE_st', 'MD5state_st', '__uint16_t',
+           'LHASH_DOALL_ARG_FN_TYPE', 'mdc2_ctx_st', 'SSL3_STATE',
+           'ssl3_buffer_st', 'ASN1_ITEM_EXP',
+           '_opaque_pthread_condattr_t', 'mode_t', 'ASN1_VALUE',
+           'qaddr_t', '__darwin_gid_t', 'EVP_PKEY', 'CRYPTO_EX_free',
+           '_ossl_old_des_cblock', 'X509_INFO', 'asn1_string_st',
+           'intptr_t', 'UIT_INFO', 'int_fast8_t', 'sigaltstack',
+           'env_md_st', 'LHASH', '__darwin_ucontext_t',
+           'PKCS7_SIGN_ENVELOPE', '__darwin_mcontext_t', 'ct_rune_t',
+           'MD2_CTX', 'pthread_once_t', 'SSL3_BUFFER', 'fd_mask',
+           'ASN1_TYPE', 'PKCS7_SIGNED', 'ssl3_record_st', 'BF_KEY',
+           'MD4state_st', 'MD4_CTX', 'int16_t', 'SSL_CIPHER',
+           'rune_t', 'X509_TRUST', 'siginfo_t', 'X509_STORE',
+           '__sbuf', 'X509_STORE_CTX', '__darwin_blksize_t', 'ldiv_t',
+           'ASN1_TIME', 'SSL_METHOD', 'X509_LOOKUP',
+           'Netscape_spki_st', 'P_PID', 'sigaction', 'sig_t',
+           'hostent', 'x509_cert_aux_st', '_opaque_pthread_cond_t',
+           'segsz_t', 'ushort', '__darwin_ct_rune_t', 'fd_set',
+           'BN_RECP_CTX', 'x509_lookup_st', 'uint16_t', 'pkcs7_st',
+           'asn1_header_st', '__darwin_pthread_key_t',
+           'x509_trust_st', '__darwin_pthread_handler_rec', 'int32_t',
+           'X509_CRL_INFO', 'N11evp_pkey_st4DOLLAR_12E', 'MDC2_CTX',
+           'N23_ossl_old_des_ks_struct4DOLLAR_10E', 'ASN1_HEADER',
+           'X509_crl_info_st', 'LHASH_HASH_FN_TYPE',
+           '_opaque_pthread_mutexattr_t', 'ssl_st',
+           'N8pkcs7_st4DOLLAR_15E', 'evp_pkey_st',
+           'pkcs7_signedandenveloped_st', '__darwin_mach_port_t',
+           'EVP_PBE_KEYGEN', '_opaque_pthread_mutex_t',
+           'ASN1_UTCTIME', 'mcontext', 'crypto_ex_data_func_st',
+           'u_long', 'PBKDF2PARAM_st', 'rc4_key_st', 'DSA_METHOD',
+           'EVP_CIPHER', 'BIT_STRING_BITNAME', 'PKCS7_RECIP_INFO',
+           'ssl3_enc_method', 'X509_CERT_AUX', 'uintmax_t',
+           'int_fast16_t', 'RC5_32_KEY', 'ucontext64', 'ASN1_INTEGER',
+           'u_short', 'N14x509_object_st4DOLLAR_14E', 'mcontext64',
+           'X509_sig_st', 'ASN1_GENERALSTRING', 'PKCS7', '__sFILEX',
+           'X509_name_entry_st', 'ssl_session_st', 'caddr_t',
+           'bignum_st', 'X509_CINF', '__darwin_pthread_cond_t',
+           'ASN1_TLC', 'PKCS7_ENCRYPT', 'NETSCAPE_SPKAC',
+           'Netscape_spkac_st', 'idtype_t', 'UIT_ERROR',
+           'uint_fast64_t', 'in_addr_t', 'pthread_mutex_t',
+           '__int64_t', 'ASN1_BMPSTRING', 'uint32_t',
+           'PEM_ENCODE_SEAL_CTX', 'suseconds_t', 'ASN1_OBJECT',
+           'X509_val_st', 'private_key_st', 'CRYPTO_dynlock',
+           'X509_objects_st', 'CRYPTO_EX_DATA_IMPL',
+           'pthread_condattr_t', 'PKCS7_DIGEST', 'uint_least32_t',
+           'ASN1_STRING', '__uint32_t', 'P_PGID', 'rsa_meth_st',
+           'X509_crl_st', 'RC2_KEY', '__darwin_fsfilcnt_t',
+           'X509_revoked_st', 'PBE2PARAM', 'blksize_t',
+           'Netscape_certificate_sequence', 'ssl_cipher_st',
+           'bignum_ctx', 'register_t', 'ASN1_UTF8STRING',
+           'pkcs7_encrypted_st', 'RC4_KEY', '__darwin_ucontext64_t',
+           'N13ssl2_state_st4DOLLAR_19E', 'bn_recp_ctx_st',
+           'CAST_KEY', 'X509_ATTRIBUTE', '__darwin_suseconds_t',
+           '__sigaction', 'user_ulong_t', 'syscall_arg_t',
+           'evp_cipher_ctx_st', 'X509_ALGOR', 'mcontext_t',
+           'const_DES_cblock', '__darwin_fsblkcnt_t', 'dsa_st',
+           'int_least8_t', 'MD2state_st', 'X509_EXTENSION',
+           'GEN_SESSION_CB', 'int_least16_t', '__darwin_wctrans_t',
+           'PBKDF2PARAM', 'x509_lookup_method_st', 'pem_password_cb',
+           'X509_info_st', 'x509_store_st', '__darwin_natural_t',
+           'X509_pubkey_st', 'pkcs7_digest_st', '__darwin_size_t',
+           'ASN1_STRING_TABLE', 'OSLittleEndian', 'RIPEMD160state_st',
+           'pkcs7_enveloped_st', 'UI', 'ptrdiff_t', 'X509_REQ',
+           'CRYPTO_dynlock_value', 'X509_req_st', 'x509_store_ctx_st',
+           'N13ssl3_state_st4DOLLAR_20E', 'lhash_node_st',
+           '__darwin_pthread_mutex_t', 'LHASH_COMP_FN_TYPE',
+           '__darwin_rune_t', 'rlimit', '__darwin_pthread_once_t',
+           'OSBigEndian', 'uintptr_t', '__darwin_uid_t', 'u_int',
+           'ASN1_T61STRING', 'gid_t', 'ssl_method_st', 'ASN1_ITEM',
+           'ASN1_ENUMERATED', '_opaque_pthread_rwlock_t',
+           'pkcs8_priv_key_info_st', 'intmax_t', 'sigcontext',
+           'X509_CRL', 'rc2_key_st', 'engine_st', 'x509_object_st',
+           '_opaque_pthread_once_t', 'DES_ks', 'SSL_COMP',
+           'dsa_method', 'int64_t', 'bio_st', 'bf_key_st',
+           'ASN1_GENERALIZEDTIME', 'PKCS7_ENC_CONTENT',
+           '__darwin_pid_t', 'lldiv_t', 'comp_method_st',
+           'EVP_MD_CTX', 'evp_cipher_st', 'X509_name_st',
+           'x509_hash_dir_st', '__darwin_mach_port_name_t',
+           'useconds_t', 'user_size_t', 'SSL_SESSION', 'rusage',
+           'ssl_crock_st', 'int_least32_t', '__sigaction_u', 'dh_st',
+           'P_ALL', '__darwin_stack_t', 'N6DES_ks3DOLLAR_9E',
+           'comp_ctx_st', 'X509_CERT_FILE_CTX']
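
The module above is evidently machine-generated ctypes bindings (OpenSSL plus Darwin libc types): each struct is forward-declared as an empty Structure subclass and its _fields_ list is assigned afterwards, which is the standard ctypes idiom for self-referential types, and the hard-coded sizeof/alignment asserts pin the 32-bit ABI the file was generated against. A minimal sketch of that pattern, with node_st as a hypothetical name:

    from ctypes import Structure, POINTER, c_int, sizeof, alignment

    class node_st(Structure):
        pass  # body deferred so the type can refer to itself

    node_st._fields_ = [
        ('value', c_int),
        ('next', POINTER(node_st)),  # the self-reference resolves now
    ]

    # The generated file hard-codes exact results here (e.g. == 8 and == 4
    # on its 32-bit build); these weaker checks hold on any platform.
    assert sizeof(node_st) >= sizeof(POINTER(node_st)), sizeof(node_st)
    assert alignment(node_st) == alignment(POINTER(node_st))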

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/py2_test_grammar.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/py2_test_grammar.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,956 @@
+# Python 2's Lib/test/test_grammar.py (r66189)
+
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+# NOTE: When you run this test as a script from the command line, you
+# get warnings about certain hex/oct constants.  Since those are
+# issued by the parser, you can't suppress them by adding a
+# filterwarnings() call to this module.  Therefore, to shut up the
+# regression test, the filterwarnings() call has been added to
+# regrtest.py.
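
# As a sketch, the regrtest.py suppression described above is a single
# warnings-filter call; the message pattern and warning category below
# are illustrative assumptions, not the literal call from regrtest.py:
import warnings
warnings.filterwarnings("ignore", message=r"hex/oct constants",
                        category=FutureWarning)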
+
+from test.test_support import run_unittest, check_syntax_error
+import unittest
+import sys
+# testing import *
+from sys import *
+
+class TokenTests(unittest.TestCase):
+
+    def testBackslash(self):
+        # Backslash means line continuation:
+        x = 1 \
+        + 1
+        self.assertEquals(x, 2, 'backslash for line continuation')
+
+        # Backslash does not mean continuation in comments :\
+        x = 0
+        self.assertEquals(x, 0, 'backslash ending comment')
+
+    def testPlainIntegers(self):
+        self.assertEquals(0xff, 255)
+        self.assertEquals(0377, 255)
+        self.assertEquals(2147483647, 017777777777)
+        # "0x" is not a valid literal
+        self.assertRaises(SyntaxError, eval, "0x")
+        from sys import maxint
+        if maxint == 2147483647:
+            self.assertEquals(-2147483647-1, -020000000000)
+            # XXX -2147483648
+            self.assert_(037777777777 > 0)
+            self.assert_(0xffffffff > 0)
+            for s in '2147483648', '040000000000', '0x100000000':
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        elif maxint == 9223372036854775807:
+            self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
+            self.assert_(01777777777777777777777 > 0)
+            self.assert_(0xffffffffffffffff > 0)
+            for s in '9223372036854775808', '02000000000000000000000', \
+                     '0x10000000000000000':
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        else:
+            self.fail('Weird maxint value %r' % maxint)
+
+    def testLongIntegers(self):
+        x = 0L
+        x = 0l
+        x = 0xffffffffffffffffL
+        x = 0xffffffffffffffffl
+        x = 077777777777777777L
+        x = 077777777777777777l
+        x = 123456789012345678901234567890L
+        x = 123456789012345678901234567890l
+
+    def testFloats(self):
+        x = 3.14
+        x = 314.
+        x = 0.314
+        # XXX x = 000.314
+        x = .314
+        x = 3e14
+        x = 3E14
+        x = 3e-14
+        x = 3e+14
+        x = 3.e14
+        x = .3e14
+        x = 3.1e4
+
+    def testStringLiterals(self):
+        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
+        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
+        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
+        x = "doesn't \"shrink\" does it"
+        y = 'doesn\'t "shrink" does it'
+        self.assert_(len(x) == 24 and x == y)
+        x = "does \"shrink\" doesn't it"
+        y = 'does "shrink" doesn\'t it'
+        self.assert_(len(x) == 24 and x == y)
+        x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+        self.assertEquals(x, y)
+        y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''
+        self.assertEquals(x, y)
+        y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"
+        self.assertEquals(x, y)
+        y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'
+        self.assertEquals(x, y)
+
+
+class GrammarTests(unittest.TestCase):
+
+    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+    # XXX can't test in a script -- this rule is only used when interactive
+
+    # file_input: (NEWLINE | stmt)* ENDMARKER
+    # Being tested at this very moment by this very module
+
+    # expr_input: testlist NEWLINE
+    # XXX Hard to test -- used only in calls to input()
+
+    def testEvalInput(self):
+        # testlist ENDMARKER
+        x = eval('1, 0 or 1')
+
+    def testFuncdef(self):
+        ### 'def' NAME parameters ':' suite
+        ### parameters: '(' [varargslist] ')'
+        ### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
+        ###            | ('**'|'*' '*') NAME)
+        ###            | fpdef ['=' test] (',' fpdef ['=' test])* [',']
+        ### fpdef: NAME | '(' fplist ')'
+        ### fplist: fpdef (',' fpdef)* [',']
+    ### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
+        ### argument: [test '='] test   # Really [keyword '='] test
+        def f1(): pass
+        f1()
+        f1(*())
+        f1(*(), **{})
+        def f2(one_argument): pass
+        def f3(two, arguments): pass
+        def f4(two, (compound, (argument, list))): pass
+        def f5((compound, first), two): pass
+        self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
+        self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
+        if sys.platform.startswith('java'):
+            self.assertEquals(f4.func_code.co_varnames,
+                   ('two', '(compound, (argument, list))', 'compound', 'argument',
+                                'list',))
+            self.assertEquals(f5.func_code.co_varnames,
+                   ('(compound, first)', 'two', 'compound', 'first'))
+        else:
+            self.assertEquals(f4.func_code.co_varnames,
+                  ('two', '.1', 'compound', 'argument',  'list'))
+            self.assertEquals(f5.func_code.co_varnames,
+                  ('.0', 'two', 'compound', 'first'))
+        def a1(one_arg,): pass
+        def a2(two, args,): pass
+        def v0(*rest): pass
+        def v1(a, *rest): pass
+        def v2(a, b, *rest): pass
+        def v3(a, (b, c), *rest): return a, b, c, rest
+
+        f1()
+        f2(1)
+        f2(1,)
+        f3(1, 2)
+        f3(1, 2,)
+        f4(1, (2, (3, 4)))
+        v0()
+        v0(1)
+        v0(1,)
+        v0(1,2)
+        v0(1,2,3,4,5,6,7,8,9,0)
+        v1(1)
+        v1(1,)
+        v1(1,2)
+        v1(1,2,3)
+        v1(1,2,3,4,5,6,7,8,9,0)
+        v2(1,2)
+        v2(1,2,3)
+        v2(1,2,3,4)
+        v2(1,2,3,4,5,6,7,8,9,0)
+        v3(1,(2,3))
+        v3(1,(2,3),4)
+        v3(1,(2,3),4,5,6,7,8,9,0)
+
+        # ceval unpacks the formal arguments into the first argcount names;
+        # thus, the names nested inside tuples must appear after these names.
+        if sys.platform.startswith('java'):
+            self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
+        else:
+            self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
+        self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
+        def d01(a=1): pass
+        d01()
+        d01(1)
+        d01(*(1,))
+        d01(**{'a':2})
+        def d11(a, b=1): pass
+        d11(1)
+        d11(1, 2)
+        d11(1, **{'b':2})
+        def d21(a, b, c=1): pass
+        d21(1, 2)
+        d21(1, 2, 3)
+        d21(*(1, 2, 3))
+        d21(1, *(2, 3))
+        d21(1, 2, *(3,))
+        d21(1, 2, **{'c':3})
+        def d02(a=1, b=2): pass
+        d02()
+        d02(1)
+        d02(1, 2)
+        d02(*(1, 2))
+        d02(1, *(2,))
+        d02(1, **{'b':2})
+        d02(**{'a': 1, 'b': 2})
+        def d12(a, b=1, c=2): pass
+        d12(1)
+        d12(1, 2)
+        d12(1, 2, 3)
+        def d22(a, b, c=1, d=2): pass
+        d22(1, 2)
+        d22(1, 2, 3)
+        d22(1, 2, 3, 4)
+        def d01v(a=1, *rest): pass
+        d01v()
+        d01v(1)
+        d01v(1, 2)
+        d01v(*(1, 2, 3, 4))
+        d01v(*(1,))
+        d01v(**{'a':2})
+        def d11v(a, b=1, *rest): pass
+        d11v(1)
+        d11v(1, 2)
+        d11v(1, 2, 3)
+        def d21v(a, b, c=1, *rest): pass
+        d21v(1, 2)
+        d21v(1, 2, 3)
+        d21v(1, 2, 3, 4)
+        d21v(*(1, 2, 3, 4))
+        d21v(1, 2, **{'c': 3})
+        def d02v(a=1, b=2, *rest): pass
+        d02v()
+        d02v(1)
+        d02v(1, 2)
+        d02v(1, 2, 3)
+        d02v(1, *(2, 3, 4))
+        d02v(**{'a': 1, 'b': 2})
+        def d12v(a, b=1, c=2, *rest): pass
+        d12v(1)
+        d12v(1, 2)
+        d12v(1, 2, 3)
+        d12v(1, 2, 3, 4)
+        d12v(*(1, 2, 3, 4))
+        d12v(1, 2, *(3, 4, 5))
+        d12v(1, *(2,), **{'c': 3})
+        def d22v(a, b, c=1, d=2, *rest): pass
+        d22v(1, 2)
+        d22v(1, 2, 3)
+        d22v(1, 2, 3, 4)
+        d22v(1, 2, 3, 4, 5)
+        d22v(*(1, 2, 3, 4))
+        d22v(1, 2, *(3, 4, 5))
+        d22v(1, *(2, 3), **{'d': 4})
+        def d31v((x)): pass
+        d31v(1)
+        def d32v((x,)): pass
+        d32v((1,))
+
+        # keyword arguments after *arglist
+        def f(*args, **kwargs):
+            return args, kwargs
+        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
+                                                    {'x':2, 'y':5}))
+        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
+        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
+
+        # Check ast errors in *args and *kwargs
+        check_syntax_error(self, "f(*g(1=2))")
+        check_syntax_error(self, "f(**g(1=2))")
+
+    def testLambdef(self):
+        ### lambdef: 'lambda' [varargslist] ':' test
+        l1 = lambda : 0
+        self.assertEquals(l1(), 0)
+        l2 = lambda : a[d] # XXX just testing the expression
+        l3 = lambda : [2 < x for x in [-1, 3, 0L]]
+        self.assertEquals(l3(), [0, 1, 0])
+        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
+        self.assertEquals(l4(), 1)
+        l5 = lambda x, y, z=2: x + y + z
+        self.assertEquals(l5(1, 2), 5)
+        self.assertEquals(l5(1, 2, 3), 6)
+        check_syntax_error(self, "lambda x: x = 2")
+        check_syntax_error(self, "lambda (None,): None")
+
+    ### stmt: simple_stmt | compound_stmt
+    # Tested below
+
+    def testSimpleStmt(self):
+        ### simple_stmt: small_stmt (';' small_stmt)* [';']
+        x = 1; pass; del x
+        def foo():
+            # verify statements that end with semicolons
+            x = 1; pass; del x;
+        foo()
+
+    ### small_stmt: expr_stmt | print_stmt  | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
+    # Tested below
+
+    def testExprStmt(self):
+        # (exprlist '=')* exprlist
+        1
+        1, 2, 3
+        x = 1
+        x = 1, 2, 3
+        x = y = z = 1, 2, 3
+        x, y, z = 1, 2, 3
+        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+
+        check_syntax_error(self, "x + 1 = 1")
+        check_syntax_error(self, "a + 1 = b + 2")
+
+    def testPrintStmt(self):
+        # 'print' (test ',')* [test]
+        import StringIO
+
+        # Can't test printing to the real stdout without capturing and
+        # comparing its output, which unittest doesn't provide here.
+        save_stdout = sys.stdout
+        sys.stdout = StringIO.StringIO()
+
+        print 1, 2, 3
+        print 1, 2, 3,
+        print
+        print 0 or 1, 0 or 1,
+        print 0 or 1
+
+        # 'print' '>>' test ','
+        print >> sys.stdout, 1, 2, 3
+        print >> sys.stdout, 1, 2, 3,
+        print >> sys.stdout
+        print >> sys.stdout, 0 or 1, 0 or 1,
+        print >> sys.stdout, 0 or 1
+
+        # test printing to an instance
+        class Gulp:
+            def write(self, msg): pass
+
+        gulp = Gulp()
+        print >> gulp, 1, 2, 3
+        print >> gulp, 1, 2, 3,
+        print >> gulp
+        print >> gulp, 0 or 1, 0 or 1,
+        print >> gulp, 0 or 1
+
+        # test print >> None
+        def driver():
+            oldstdout = sys.stdout
+            sys.stdout = Gulp()
+            try:
+                tellme(Gulp())
+                tellme()
+            finally:
+                sys.stdout = oldstdout
+
+        # we should see this once
+        def tellme(file=sys.stdout):
+            print >> file, 'hello world'
+
+        driver()
+
+        # we should not see this at all
+        def tellme(file=None):
+            print >> file, 'goodbye universe'
+
+        driver()
+
+        self.assertEqual(sys.stdout.getvalue(), '''\
+1 2 3
+1 2 3
+1 1 1
+1 2 3
+1 2 3
+1 1 1
+hello world
+''')
+        sys.stdout = save_stdout
+
+        # syntax errors
+        check_syntax_error(self, 'print ,')
+        check_syntax_error(self, 'print >> x,')
+
+    def testDelStmt(self):
+        # 'del' exprlist
+        abc = [1,2,3]
+        x, y, z = abc
+        xyz = x, y, z
+
+        del abc
+        del x, y, (z, xyz)
+
+    def testPassStmt(self):
+        # 'pass'
+        pass
+
+    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
+    # Tested below
+
+    def testBreakStmt(self):
+        # 'break'
+        while 1: break
+
+    def testContinueStmt(self):
+        # 'continue'
+        i = 1
+        while i: i = 0; continue
+
+        msg = ""
+        while not msg:
+            msg = "ok"
+            try:
+                continue
+                msg = "continue failed to continue inside try"
+            except:
+                msg = "continue inside try called except block"
+        if msg != "ok":
+            self.fail(msg)
+
+        msg = ""
+        while not msg:
+            msg = "finally block not called"
+            try:
+                continue
+            finally:
+                msg = "ok"
+        if msg != "ok":
+            self.fail(msg)
+
+    def test_break_continue_loop(self):
+        # This test warrants an explanation. It is a test specifically for SF bugs
+        # #463359 and #462937. The bug is that a 'break' statement executed or
+        # exception raised inside a try/except inside a loop, *after* a continue
+        # statement has been executed in that loop, will cause the wrong number of
+        # arguments to be popped off the stack and the instruction pointer reset to
+        # a very small number (usually 0).  Because of this, the following test
+        # *must* be written as a function, and the tracking vars *must* be function
+        # arguments with default values. Otherwise, the test will loop and loop.
+
+        def test_inner(extra_burning_oil = 1, count=0):
+            big_hippo = 2
+            while big_hippo:
+                count += 1
+                try:
+                    if extra_burning_oil and big_hippo == 1:
+                        extra_burning_oil -= 1
+                        break
+                    big_hippo -= 1
+                    continue
+                except:
+                    raise
+            if count > 2 or big_hippo <> 1:
+                self.fail("continue then break in try/except in loop broken!")
+        test_inner()
+
+    def testReturn(self):
+        # 'return' [testlist]
+        def g1(): return
+        def g2(): return 1
+        g1()
+        x = g2()
+        check_syntax_error(self, "class foo:return 1")
+
+    def testYield(self):
+        check_syntax_error(self, "class foo:yield 1")
+
+    def testRaise(self):
+        # 'raise' test [',' test]
+        try: raise RuntimeError, 'just testing'
+        except RuntimeError: pass
+        try: raise KeyboardInterrupt
+        except KeyboardInterrupt: pass
+
+    def testImport(self):
+        # 'import' dotted_as_names
+        import sys
+        import time, sys
+        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
+        from time import time
+        from time import (time)
+        # not testable inside a function, but already done at top of the module
+        # from sys import *
+        from sys import path, argv
+        from sys import (path, argv)
+        from sys import (path, argv,)
+
+    def testGlobal(self):
+        # 'global' NAME (',' NAME)*
+        global a
+        global a, b
+        global one, two, three, four, five, six, seven, eight, nine, ten
+
+    def testExec(self):
+        # 'exec' expr ['in' expr [',' expr]]
+        z = None
+        del z
+        exec 'z=1+1\n'
+        if z != 2: self.fail('exec \'z=1+1\'\\n')
+        del z
+        exec 'z=1+1'
+        if z != 2: self.fail('exec \'z=1+1\'')
+        z = None
+        del z
+        import types
+        if hasattr(types, "UnicodeType"):
+            exec r"""if 1:
+            exec u'z=1+1\n'
+            if z != 2: self.fail('exec u\'z=1+1\'\\n')
+            del z
+            exec u'z=1+1'
+            if z != 2: self.fail('exec u\'z=1+1\'')"""
+        g = {}
+        exec 'z = 1' in g
+        if g.has_key('__builtins__'): del g['__builtins__']
+        if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
+        g = {}
+        l = {}
+
+        import warnings
+        warnings.filterwarnings("ignore", "global statement", module="<string>")
+        exec 'global a; a = 1; b = 2' in g, l
+        if g.has_key('__builtins__'): del g['__builtins__']
+        if l.has_key('__builtins__'): del l['__builtins__']
+        if (g, l) != ({'a':1}, {'b':2}):
+            self.fail('exec ... in g (%s), l (%s)' %(g,l))
+
+    def testAssert(self):
+        # assert_stmt: 'assert' test [',' test]
+        assert 1
+        assert 1, 1
+        assert lambda x:x
+        assert 1, lambda x:x+1
+        try:
+            assert 0, "msg"
+        except AssertionError, e:
+            self.assertEquals(e.args[0], "msg")
+        else:
+            if __debug__:
+                self.fail("AssertionError not raised by assert 0")
+
+    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+    # Tested below
+
+    def testIf(self):
+        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+        if 1: pass
+        if 1: pass
+        else: pass
+        if 0: pass
+        elif 0: pass
+        if 0: pass
+        elif 0: pass
+        elif 0: pass
+        elif 0: pass
+        else: pass
+
+    def testWhile(self):
+        # 'while' test ':' suite ['else' ':' suite]
+        while 0: pass
+        while 0: pass
+        else: pass
+
+        # Issue1920: "while 0" is optimized away,
+        # ensure that the "else" clause is still present.
+        x = 0
+        while 0:
+            x = 1
+        else:
+            x = 2
+        self.assertEquals(x, 2)
+
+    def testFor(self):
+        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+        for i in 1, 2, 3: pass
+        for i, j, k in (): pass
+        else: pass
+        class Squares:
+            def __init__(self, max):
+                self.max = max
+                self.sofar = []
+            def __len__(self): return len(self.sofar)
+            def __getitem__(self, i):
+                if not 0 <= i < self.max: raise IndexError
+                n = len(self.sofar)
+                while n <= i:
+                    self.sofar.append(n*n)
+                    n = n+1
+                return self.sofar[i]
+        n = 0
+        for x in Squares(10): n = n+x
+        if n != 285:
+            self.fail('for over growing sequence')
+
+        result = []
+        for x, in [(1,), (2,), (3,)]:
+            result.append(x)
+        self.assertEqual(result, [1, 2, 3])
+
+    def testTry(self):
+        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+        ###         | 'try' ':' suite 'finally' ':' suite
+        ### except_clause: 'except' [expr [('as' | ',') expr]]
+        try:
+            1/0
+        except ZeroDivisionError:
+            pass
+        else:
+            pass
+        try: 1/0
+        except EOFError: pass
+        except TypeError as msg: pass
+        except RuntimeError, msg: pass
+        except: pass
+        else: pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError): pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError), msg: pass
+        try: pass
+        finally: pass
+
+    def testSuite(self):
+        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+        if 1: pass
+        if 1:
+            pass
+        if 1:
+            #
+            #
+            #
+            pass
+            pass
+            #
+            pass
+            #
+
+    def testTest(self):
+        ### and_test ('or' and_test)*
+        ### and_test: not_test ('and' not_test)*
+        ### not_test: 'not' not_test | comparison
+        if not 1: pass
+        if 1 and 1: pass
+        if 1 or 1: pass
+        if not not not 1: pass
+        if not 1 and 1 and 1: pass
+        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+    def testComparison(self):
+        ### comparison: expr (comp_op expr)*
+        ### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+        if 1: pass
+        x = (1 == 1)
+        if 1 == 1: pass
+        if 1 != 1: pass
+        if 1 <> 1: pass
+        if 1 < 1: pass
+        if 1 > 1: pass
+        if 1 <= 1: pass
+        if 1 >= 1: pass
+        if 1 is 1: pass
+        if 1 is not 1: pass
+        if 1 in (): pass
+        if 1 not in (): pass
+        if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+    def testBinaryMaskOps(self):
+        x = 1 & 1
+        x = 1 ^ 1
+        x = 1 | 1
+
+    def testShiftOps(self):
+        x = 1 << 1
+        x = 1 >> 1
+        x = 1 << 1 >> 1
+
+    def testAdditiveOps(self):
+        x = 1
+        x = 1 + 1
+        x = 1 - 1 - 1
+        x = 1 - 1 + 1 - 1 + 1
+
+    def testMultiplicativeOps(self):
+        x = 1 * 1
+        x = 1 / 1
+        x = 1 % 1
+        x = 1 / 1 * 1 % 1
+
+    def testUnaryOps(self):
+        x = +1
+        x = -1
+        x = ~1
+        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+        x = -1*1/1 + 1*1 - ---1*1
+
+    def testSelectors(self):
+        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+        ### subscript: expr | [expr] ':' [expr]
+
+        import sys, time
+        c = sys.path[0]
+        x = time.time()
+        x = sys.modules['time'].time()
+        a = '01234'
+        c = a[0]
+        c = a[-1]
+        s = a[0:5]
+        s = a[:5]
+        s = a[0:]
+        s = a[:]
+        s = a[-5:]
+        s = a[:-1]
+        s = a[-4:-3]
+        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
+        # The testing here is fairly incomplete.
+        # Test cases should include: commas with 1 and 2 colons
+        d = {}
+        d[1] = 1
+        d[1,] = 2
+        d[1,2] = 3
+        d[1,2,3] = 4
+        L = list(d)
+        L.sort()
+        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
+
+    def testAtoms(self):
+        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
+        ### dictmaker: test ':' test (',' test ':' test)* [',']
+
+        x = (1)
+        x = (1 or 2 or 3)
+        x = (1 or 2 or 3, 2, 3)
+
+        x = []
+        x = [1]
+        x = [1 or 2 or 3]
+        x = [1 or 2 or 3, 2, 3]
+        x = []
+
+        x = {}
+        x = {'one': 1}
+        x = {'one': 1,}
+        x = {'one' or 'two': 1 or 2}
+        x = {'one': 1, 'two': 2}
+        x = {'one': 1, 'two': 2,}
+        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+        x = `x`
+        x = `1 or 2 or 3`
+        self.assertEqual(`1,2`, '(1, 2)')
+
+        x = x
+        x = 'x'
+        x = 123
+
+    ### exprlist: expr (',' expr)* [',']
+    ### testlist: test (',' test)* [',']
+    # These have been exercised enough above
+
+    def testClassdef(self):
+        # 'class' NAME ['(' [testlist] ')'] ':' suite
+        class B: pass
+        class B2(): pass
+        class C1(B): pass
+        class C2(B): pass
+        class D(C1, C2, B): pass
+        class C:
+            def meth1(self): pass
+            def meth2(self, arg): pass
+            def meth3(self, a1, a2): pass
+        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+        # decorators: decorator+
+        # decorated: decorators (classdef | funcdef)
+        def class_decorator(x):
+            x.decorated = True
+            return x
+        @class_decorator
+        class G:
+            pass
+        self.assertEqual(G.decorated, True)
+
+    def testListcomps(self):
+        # list comprehension tests
+        nums = [1, 2, 3, 4, 5]
+        strs = ["Apple", "Banana", "Coconut"]
+        spcs = ["  Apple", " Banana ", "Coco  nut  "]
+
+        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
+        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
+        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
+        self.assertEqual([(i, s) for i in nums for s in strs],
+                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
+                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
+                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
+                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
+                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
+
+        def test_in_func(l):
+            return [None < x < 3 for x in l if x > 2]
+
+        self.assertEqual(test_in_func(nums), [False, False, False])
+
+        def test_nested_front():
+            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
+                             [[1, 2], [3, 4], [5, 6]])
+
+        test_nested_front()
+
+        check_syntax_error(self, "[i, s for i in nums for s in strs]")
+        check_syntax_error(self, "[x if y]")
+
+        suppliers = [
+          (1, "Boeing"),
+          (2, "Ford"),
+          (3, "Macdonalds")
+        ]
+
+        parts = [
+          (10, "Airliner"),
+          (20, "Engine"),
+          (30, "Cheeseburger")
+        ]
+
+        suppart = [
+          (1, 10), (1, 20), (2, 20), (3, 30)
+        ]
+
+        x = [
+          (sname, pname)
+            for (sno, sname) in suppliers
+              for (pno, pname) in parts
+                for (sp_sno, sp_pno) in suppart
+                  if sno == sp_sno and pno == sp_pno
+        ]
+
+        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
+                             ('Macdonalds', 'Cheeseburger')])
+
+    def testGenexps(self):
+        # generator expression tests
+        g = ([x for x in range(10)] for x in range(1))
+        self.assertEqual(g.next(), [x for x in range(10)])
+        try:
+            g.next()
+            self.fail('should produce StopIteration exception')
+        except StopIteration:
+            pass
+
+        a = 1
+        try:
+            g = (a for d in a)
+            g.next()
+            self.fail('should produce TypeError')
+        except TypeError:
+            pass
+
+        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
+        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
+
+        a = [x for x in range(10)]
+        b = (x for x in (y for y in a))
+        self.assertEqual(sum(b), sum([x for x in range(10)]))
+
+        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
+        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
+        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
+        check_syntax_error(self, "foo(x for x in range(10), 100)")
+        check_syntax_error(self, "foo(100, x for x in range(10))")
+
+    def testComprehensionSpecials(self):
+        # test for outermost iterable precomputation
+        x = 10; g = (i for i in range(x)); x = 5
+        self.assertEqual(len(list(g)), 10)
+
+        # This should hold, since we're only precomputing the outermost iterable.
+        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
+        x = 5; t = True;
+        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
+
+        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
+        # even though it's silly. Make sure it works (ifelse broke this.)
+        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
+        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
+
+        # verify unpacking single element tuples in listcomp/genexp.
+        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
+        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
+
+    def testIfElseExpr(self):
+        # Test ifelse expressions in various cases
+        def _checkeval(msg, ret):
+            "helper to check that evaluation of expressions is done correctly"
+            print x
+            return ret
+
+        self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
+        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
+        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
+        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
+        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
+        self.assertEqual((5 and 6 if 0 else 1), 1)
+        self.assertEqual(((5 and 6) if 0 else 1), 1)
+        self.assertEqual((5 and (6 if 1 else 1)), 6)
+        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
+        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
+        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
+        self.assertEqual((not 5 if 1 else 1), False)
+        self.assertEqual((not 5 if 0 else 1), 1)
+        self.assertEqual((6 + 1 if 1 else 2), 7)
+        self.assertEqual((6 - 1 if 1 else 2), 5)
+        self.assertEqual((6 * 2 if 1 else 4), 12)
+        self.assertEqual((6 / 2 if 1 else 3), 3)
+        self.assertEqual((6 < 4 if 0 else 2), 2)
+
+
+def test_main():
+    run_unittest(TokenTests, GrammarTests)
+
+if __name__ == '__main__':
+    test_main()

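For reference (not part of this diff): the grammar files above are data for the parser tests, not modules the suite imports; a file passes when lib2to3's pgen2 driver parses it without error.  A minimal sketch of that check, assuming the package layout added in this commit and mirroring the driver setup in lib2to3/tests/support.py:

    # Parse a grammar data file with the pgen2 driver; any construct the
    # parser rejects raises ParseError, which is surfaced as a failure.
    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver
    from lib2to3.pgen2.parse import ParseError

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    try:
        tree = d.parse_file("lib2to3/tests/data/py2_test_grammar.py")
    except ParseError as err:
        raise SystemExit("grammar test file rejected: %s" % err)
    print(tree.__class__.__name__)  # a pytree node on success
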
Added: sandbox/trunk/refactor_pkg/lib2to3/tests/data/py3_test_grammar.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/data/py3_test_grammar.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,903 @@
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+# NOTE: When you run this test as a script from the command line, you
+# get warnings about certain hex/oct constants.  Since those are
+# issued by the parser, you can't suppress them by adding a
+# filterwarnings() call to this module.  Therefore, to shut up the
+# regression test, the filterwarnings() call has been added to
+# regrtest.py.
+
+from test.support import run_unittest, check_syntax_error
+import unittest
+import sys
+# testing import *
+from sys import *
+
+class TokenTests(unittest.TestCase):
+
+    def testBackslash(self):
+        # Backslash means line continuation:
+        x = 1 \
+        + 1
+        self.assertEquals(x, 2, 'backslash for line continuation')
+
+        # Backslash does not mean continuation in comments :\
+        x = 0
+        self.assertEquals(x, 0, 'backslash ending comment')
+
+    def testPlainIntegers(self):
+        self.assertEquals(type(000), type(0))
+        self.assertEquals(0xff, 255)
+        self.assertEquals(0o377, 255)
+        self.assertEquals(2147483647, 0o17777777777)
+        self.assertEquals(0b1001, 9)
+        # "0x" is not a valid literal
+        self.assertRaises(SyntaxError, eval, "0x")
+        from sys import maxsize
+        if maxsize == 2147483647:
+            self.assertEquals(-2147483647-1, -0o20000000000)
+            # XXX -2147483648
+            self.assert_(0o37777777777 > 0)
+            self.assert_(0xffffffff > 0)
+            self.assert_(0b1111111111111111111111111111111 > 0)
+            for s in ('2147483648', '0o40000000000', '0x100000000',
+                      '0b10000000000000000000000000000000'):
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        elif maxsize == 9223372036854775807:
+            self.assertEquals(-9223372036854775807-1, -0o1000000000000000000000)
+            self.assert_(0o1777777777777777777777 > 0)
+            self.assert_(0xffffffffffffffff > 0)
+            self.assert_(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
+            for s in '9223372036854775808', '0o2000000000000000000000', \
+                     '0x10000000000000000', \
+                     '0b100000000000000000000000000000000000000000000000000000000000000':
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        else:
+            self.fail('Weird maxsize value %r' % maxsize)
+
+    def testLongIntegers(self):
+        x = 0
+        x = 0xffffffffffffffff
+        x = 0Xffffffffffffffff
+        x = 0o77777777777777777
+        x = 0O77777777777777777
+        x = 123456789012345678901234567890
+        x = 0b100000000000000000000000000000000000000000000000000000000000000000000
+        x = 0B111111111111111111111111111111111111111111111111111111111111111111111
+
+    def testFloats(self):
+        x = 3.14
+        x = 314.
+        x = 0.314
+        # XXX x = 000.314
+        x = .314
+        x = 3e14
+        x = 3E14
+        x = 3e-14
+        x = 3e+14
+        x = 3.e14
+        x = .3e14
+        x = 3.1e4
+
+    def testStringLiterals(self):
+        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
+        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
+        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
+        x = "doesn't \"shrink\" does it"
+        y = 'doesn\'t "shrink" does it'
+        self.assert_(len(x) == 24 and x == y)
+        x = "does \"shrink\" doesn't it"
+        y = 'does "shrink" doesn\'t it'
+        self.assert_(len(x) == 24 and x == y)
+        x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+        self.assertEquals(x, y)
+        y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''
+        self.assertEquals(x, y)
+        y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"
+        self.assertEquals(x, y)
+        y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'
+        self.assertEquals(x, y)
+
+    def testEllipsis(self):
+        x = ...
+        self.assert_(x is Ellipsis)
+        self.assertRaises(SyntaxError, eval, ".. .")
+
+class GrammarTests(unittest.TestCase):
+
+    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+    # XXX can't test in a script -- this rule is only used when interactive
+
+    # file_input: (NEWLINE | stmt)* ENDMARKER
+    # Being tested at this very moment by this very module
+
+    # expr_input: testlist NEWLINE
+    # XXX Hard to test -- used only in calls to input()
+
+    def testEvalInput(self):
+        # testlist ENDMARKER
+        x = eval('1, 0 or 1')
+
+    def testFuncdef(self):
+        ### [decorators] 'def' NAME parameters ['->' test] ':' suite
+        ### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+        ### decorators: decorator+
+        ### parameters: '(' [typedargslist] ')'
+        ### typedargslist: ((tfpdef ['=' test] ',')*
+        ###                ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
+        ###                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+        ### tfpdef: NAME [':' test]
+        ### varargslist: ((vfpdef ['=' test] ',')*
+        ###              ('*' [vfpdef] (',' vfpdef ['=' test])*  [',' '**' vfpdef] | '**' vfpdef)
+        ###              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+        ### vfpdef: NAME
+        def f1(): pass
+        f1()
+        f1(*())
+        f1(*(), **{})
+        def f2(one_argument): pass
+        def f3(two, arguments): pass
+        self.assertEquals(f2.__code__.co_varnames, ('one_argument',))
+        self.assertEquals(f3.__code__.co_varnames, ('two', 'arguments'))
+        def a1(one_arg,): pass
+        def a2(two, args,): pass
+        def v0(*rest): pass
+        def v1(a, *rest): pass
+        def v2(a, b, *rest): pass
+
+        f1()
+        f2(1)
+        f2(1,)
+        f3(1, 2)
+        f3(1, 2,)
+        v0()
+        v0(1)
+        v0(1,)
+        v0(1,2)
+        v0(1,2,3,4,5,6,7,8,9,0)
+        v1(1)
+        v1(1,)
+        v1(1,2)
+        v1(1,2,3)
+        v1(1,2,3,4,5,6,7,8,9,0)
+        v2(1,2)
+        v2(1,2,3)
+        v2(1,2,3,4)
+        v2(1,2,3,4,5,6,7,8,9,0)
+
+        def d01(a=1): pass
+        d01()
+        d01(1)
+        d01(*(1,))
+        d01(**{'a':2})
+        def d11(a, b=1): pass
+        d11(1)
+        d11(1, 2)
+        d11(1, **{'b':2})
+        def d21(a, b, c=1): pass
+        d21(1, 2)
+        d21(1, 2, 3)
+        d21(*(1, 2, 3))
+        d21(1, *(2, 3))
+        d21(1, 2, *(3,))
+        d21(1, 2, **{'c':3})
+        def d02(a=1, b=2): pass
+        d02()
+        d02(1)
+        d02(1, 2)
+        d02(*(1, 2))
+        d02(1, *(2,))
+        d02(1, **{'b':2})
+        d02(**{'a': 1, 'b': 2})
+        def d12(a, b=1, c=2): pass
+        d12(1)
+        d12(1, 2)
+        d12(1, 2, 3)
+        def d22(a, b, c=1, d=2): pass
+        d22(1, 2)
+        d22(1, 2, 3)
+        d22(1, 2, 3, 4)
+        def d01v(a=1, *rest): pass
+        d01v()
+        d01v(1)
+        d01v(1, 2)
+        d01v(*(1, 2, 3, 4))
+        d01v(*(1,))
+        d01v(**{'a':2})
+        def d11v(a, b=1, *rest): pass
+        d11v(1)
+        d11v(1, 2)
+        d11v(1, 2, 3)
+        def d21v(a, b, c=1, *rest): pass
+        d21v(1, 2)
+        d21v(1, 2, 3)
+        d21v(1, 2, 3, 4)
+        d21v(*(1, 2, 3, 4))
+        d21v(1, 2, **{'c': 3})
+        def d02v(a=1, b=2, *rest): pass
+        d02v()
+        d02v(1)
+        d02v(1, 2)
+        d02v(1, 2, 3)
+        d02v(1, *(2, 3, 4))
+        d02v(**{'a': 1, 'b': 2})
+        def d12v(a, b=1, c=2, *rest): pass
+        d12v(1)
+        d12v(1, 2)
+        d12v(1, 2, 3)
+        d12v(1, 2, 3, 4)
+        d12v(*(1, 2, 3, 4))
+        d12v(1, 2, *(3, 4, 5))
+        d12v(1, *(2,), **{'c': 3})
+        def d22v(a, b, c=1, d=2, *rest): pass
+        d22v(1, 2)
+        d22v(1, 2, 3)
+        d22v(1, 2, 3, 4)
+        d22v(1, 2, 3, 4, 5)
+        d22v(*(1, 2, 3, 4))
+        d22v(1, 2, *(3, 4, 5))
+        d22v(1, *(2, 3), **{'d': 4})
+
+        # keyword argument type tests
+        try:
+            str('x', **{b'foo':1 })
+        except TypeError:
+            pass
+        else:
+            self.fail('Bytes should not work as keyword argument names')
+        # keyword only argument tests
+        def pos0key1(*, key): return key
+        pos0key1(key=100)
+        def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
+        pos2key2(1, 2, k1=100)
+        pos2key2(1, 2, k1=100, k2=200)
+        pos2key2(1, 2, k2=100, k1=200)
+        def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
+        pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
+        pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
+
+        # keyword arguments after *arglist
+        def f(*args, **kwargs):
+            return args, kwargs
+        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
+                                                    {'x':2, 'y':5}))
+        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
+        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
+
+        # argument annotation tests
+        def f(x) -> list: pass
+        self.assertEquals(f.__annotations__, {'return': list})
+        def f(x:int): pass
+        self.assertEquals(f.__annotations__, {'x': int})
+        def f(*x:str): pass
+        self.assertEquals(f.__annotations__, {'x': str})
+        def f(**x:float): pass
+        self.assertEquals(f.__annotations__, {'x': float})
+        def f(x, y:1+2): pass
+        self.assertEquals(f.__annotations__, {'y': 3})
+        def f(a, b:1, c:2, d): pass
+        self.assertEquals(f.__annotations__, {'b': 1, 'c': 2})
+        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
+        self.assertEquals(f.__annotations__,
+                          {'b': 1, 'c': 2, 'e': 3, 'g': 6})
+        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
+              **k:11) -> 12: pass
+        self.assertEquals(f.__annotations__,
+                          {'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
+                           'k': 11, 'return': 12})
+        # Check for SF Bug #1697248 - mixing decorators and a return annotation
+        def null(x): return x
+        @null
+        def f(x) -> list: pass
+        self.assertEquals(f.__annotations__, {'return': list})
+
+        # test MAKE_CLOSURE with a variety of oparg's
+        closure = 1
+        def f(): return closure
+        def f(x=1): return closure
+        def f(*, k=1): return closure
+        def f() -> int: return closure
+
+        # Check ast errors in *args and *kwargs
+        check_syntax_error(self, "f(*g(1=2))")
+        check_syntax_error(self, "f(**g(1=2))")
+
+    def testLambdef(self):
+        ### lambdef: 'lambda' [varargslist] ':' test
+        l1 = lambda : 0
+        self.assertEquals(l1(), 0)
+        l2 = lambda : a[d] # XXX just testing the expression
+        l3 = lambda : [2 < x for x in [-1, 3, 0]]
+        self.assertEquals(l3(), [0, 1, 0])
+        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
+        self.assertEquals(l4(), 1)
+        l5 = lambda x, y, z=2: x + y + z
+        self.assertEquals(l5(1, 2), 5)
+        self.assertEquals(l5(1, 2, 3), 6)
+        check_syntax_error(self, "lambda x: x = 2")
+        check_syntax_error(self, "lambda (None,): None")
+        l6 = lambda x, y, *, k=20: x+y+k
+        self.assertEquals(l6(1,2), 1+2+20)
+        self.assertEquals(l6(1,2,k=10), 1+2+10)
+
+
+    ### stmt: simple_stmt | compound_stmt
+    # Tested below
+
+    def testSimpleStmt(self):
+        ### simple_stmt: small_stmt (';' small_stmt)* [';']
+        x = 1; pass; del x
+        def foo():
+            # verify statements that end with semicolons
+            x = 1; pass; del x;
+        foo()
+
+    ### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
+    # Tested below
+
+    def testExprStmt(self):
+        # (exprlist '=')* exprlist
+        1
+        1, 2, 3
+        x = 1
+        x = 1, 2, 3
+        x = y = z = 1, 2, 3
+        x, y, z = 1, 2, 3
+        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+
+        check_syntax_error(self, "x + 1 = 1")
+        check_syntax_error(self, "a + 1 = b + 2")
+
+    def testDelStmt(self):
+        # 'del' exprlist
+        abc = [1,2,3]
+        x, y, z = abc
+        xyz = x, y, z
+
+        del abc
+        del x, y, (z, xyz)
+
+    def testPassStmt(self):
+        # 'pass'
+        pass
+
+    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
+    # Tested below
+
+    def testBreakStmt(self):
+        # 'break'
+        while 1: break
+
+    def testContinueStmt(self):
+        # 'continue'
+        i = 1
+        while i: i = 0; continue
+
+        msg = ""
+        while not msg:
+            msg = "ok"
+            try:
+                continue
+                msg = "continue failed to continue inside try"
+            except:
+                msg = "continue inside try called except block"
+        if msg != "ok":
+            self.fail(msg)
+
+        msg = ""
+        while not msg:
+            msg = "finally block not called"
+            try:
+                continue
+            finally:
+                msg = "ok"
+        if msg != "ok":
+            self.fail(msg)
+
+    def test_break_continue_loop(self):
+        # This test warrants an explanation. It is a test specifically for SF bugs
+        # #463359 and #462937. The bug is that a 'break' statement executed or
+        # exception raised inside a try/except inside a loop, *after* a continue
+        # statement has been executed in that loop, will cause the wrong number of
+        # arguments to be popped off the stack and the instruction pointer reset to
+        # a very small number (usually 0).  Because of this, the following test
+        # *must* be written as a function, and the tracking vars *must* be function
+        # arguments with default values. Otherwise, the test will loop and loop.
+
+        def test_inner(extra_burning_oil = 1, count=0):
+            big_hippo = 2
+            while big_hippo:
+                count += 1
+                try:
+                    if extra_burning_oil and big_hippo == 1:
+                        extra_burning_oil -= 1
+                        break
+                    big_hippo -= 1
+                    continue
+                except:
+                    raise
+            if count > 2 or big_hippo != 1:
+                self.fail("continue then break in try/except in loop broken!")
+        test_inner()
+
+    def testReturn(self):
+        # 'return' [testlist]
+        def g1(): return
+        def g2(): return 1
+        g1()
+        x = g2()
+        check_syntax_error(self, "class foo:return 1")
+
+    def testYield(self):
+        check_syntax_error(self, "class foo:yield 1")
+
+    def testRaise(self):
+        # 'raise' test [',' test]
+        try: raise RuntimeError('just testing')
+        except RuntimeError: pass
+        try: raise KeyboardInterrupt
+        except KeyboardInterrupt: pass
+
+    def testImport(self):
+        # 'import' dotted_as_names
+        import sys
+        import time, sys
+        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
+        from time import time
+        from time import (time)
+        # not testable inside a function, but already done at top of the module
+        # from sys import *
+        from sys import path, argv
+        from sys import (path, argv)
+        from sys import (path, argv,)
+
+    def testGlobal(self):
+        # 'global' NAME (',' NAME)*
+        global a
+        global a, b
+        global one, two, three, four, five, six, seven, eight, nine, ten
+
+    def testNonlocal(self):
+        # 'nonlocal' NAME (',' NAME)*
+        x = 0
+        y = 0
+        def f():
+            nonlocal x
+            nonlocal x, y
+
+    def testAssert(self):
+        # assert_stmt: 'assert' test [',' test]
+        assert 1
+        assert 1, 1
+        assert lambda x:x
+        assert 1, lambda x:x+1
+        try:
+            assert 0, "msg"
+        except AssertionError as e:
+            self.assertEquals(e.args[0], "msg")
+        else:
+            if __debug__:
+                self.fail("AssertionError not raised by assert 0")
+
+    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+    # Tested below
+
+    def testIf(self):
+        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+        if 1: pass
+        if 1: pass
+        else: pass
+        if 0: pass
+        elif 0: pass
+        if 0: pass
+        elif 0: pass
+        elif 0: pass
+        elif 0: pass
+        else: pass
+
+    def testWhile(self):
+        # 'while' test ':' suite ['else' ':' suite]
+        while 0: pass
+        while 0: pass
+        else: pass
+
+        # Issue1920: "while 0" is optimized away,
+        # ensure that the "else" clause is still present.
+        x = 0
+        while 0:
+            x = 1
+        else:
+            x = 2
+        self.assertEquals(x, 2)
+
+    def testFor(self):
+        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+        for i in 1, 2, 3: pass
+        for i, j, k in (): pass
+        else: pass
+        class Squares:
+            def __init__(self, max):
+                self.max = max
+                self.sofar = []
+            def __len__(self): return len(self.sofar)
+            def __getitem__(self, i):
+                if not 0 <= i < self.max: raise IndexError
+                n = len(self.sofar)
+                while n <= i:
+                    self.sofar.append(n*n)
+                    n = n+1
+                return self.sofar[i]
+        n = 0
+        for x in Squares(10): n = n+x
+        if n != 285:
+            self.fail('for over growing sequence')
+
+        result = []
+        for x, in [(1,), (2,), (3,)]:
+            result.append(x)
+        self.assertEqual(result, [1, 2, 3])
+
+    def testTry(self):
+        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+        ###         | 'try' ':' suite 'finally' ':' suite
+        ### except_clause: 'except' [expr ['as' expr]]
+        try:
+            1/0
+        except ZeroDivisionError:
+            pass
+        else:
+            pass
+        try: 1/0
+        except EOFError: pass
+        except TypeError as msg: pass
+        except RuntimeError as msg: pass
+        except: pass
+        else: pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError): pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError) as msg: pass
+        try: pass
+        finally: pass
+
+    def testSuite(self):
+        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+        if 1: pass
+        if 1:
+            pass
+        if 1:
+            #
+            #
+            #
+            pass
+            pass
+            #
+            pass
+            #
+
+    def testTest(self):
+        ### and_test ('or' and_test)*
+        ### and_test: not_test ('and' not_test)*
+        ### not_test: 'not' not_test | comparison
+        if not 1: pass
+        if 1 and 1: pass
+        if 1 or 1: pass
+        if not not not 1: pass
+        if not 1 and 1 and 1: pass
+        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+    def testComparison(self):
+        ### comparison: expr (comp_op expr)*
+        ### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+        if 1: pass
+        x = (1 == 1)
+        if 1 == 1: pass
+        if 1 != 1: pass
+        if 1 < 1: pass
+        if 1 > 1: pass
+        if 1 <= 1: pass
+        if 1 >= 1: pass
+        if 1 is 1: pass
+        if 1 is not 1: pass
+        if 1 in (): pass
+        if 1 not in (): pass
+        if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+    def testBinaryMaskOps(self):
+        x = 1 & 1
+        x = 1 ^ 1
+        x = 1 | 1
+
+    def testShiftOps(self):
+        x = 1 << 1
+        x = 1 >> 1
+        x = 1 << 1 >> 1
+
+    def testAdditiveOps(self):
+        x = 1
+        x = 1 + 1
+        x = 1 - 1 - 1
+        x = 1 - 1 + 1 - 1 + 1
+
+    def testMultiplicativeOps(self):
+        x = 1 * 1
+        x = 1 / 1
+        x = 1 % 1
+        x = 1 / 1 * 1 % 1
+
+    def testUnaryOps(self):
+        x = +1
+        x = -1
+        x = ~1
+        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+        x = -1*1/1 + 1*1 - ---1*1
+
+    def testSelectors(self):
+        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+        ### subscript: expr | [expr] ':' [expr]
+
+        import sys, time
+        c = sys.path[0]
+        x = time.time()
+        x = sys.modules['time'].time()
+        a = '01234'
+        c = a[0]
+        c = a[-1]
+        s = a[0:5]
+        s = a[:5]
+        s = a[0:]
+        s = a[:]
+        s = a[-5:]
+        s = a[:-1]
+        s = a[-4:-3]
+        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
+        # The testing here is fairly incomplete.
+        # Test cases should include: commas with 1 and 2 colons
+        d = {}
+        d[1] = 1
+        d[1,] = 2
+        d[1,2] = 3
+        d[1,2,3] = 4
+        L = list(d)
+        L.sort(key=lambda x: x if isinstance(x, tuple) else ())
+        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
+
+    def testAtoms(self):
+        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
+        ### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
+
+        x = (1)
+        x = (1 or 2 or 3)
+        x = (1 or 2 or 3, 2, 3)
+
+        x = []
+        x = [1]
+        x = [1 or 2 or 3]
+        x = [1 or 2 or 3, 2, 3]
+        x = []
+
+        x = {}
+        x = {'one': 1}
+        x = {'one': 1,}
+        x = {'one' or 'two': 1 or 2}
+        x = {'one': 1, 'two': 2}
+        x = {'one': 1, 'two': 2,}
+        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+        x = {'one'}
+        x = {'one', 1,}
+        x = {'one', 'two', 'three'}
+        x = {2, 3, 4,}
+
+        x = x
+        x = 'x'
+        x = 123
+
+    ### exprlist: expr (',' expr)* [',']
+    ### testlist: test (',' test)* [',']
+    # These have been exercised enough above
+
+    def testClassdef(self):
+        # 'class' NAME ['(' [testlist] ')'] ':' suite
+        class B: pass
+        class B2(): pass
+        class C1(B): pass
+        class C2(B): pass
+        class D(C1, C2, B): pass
+        class C:
+            def meth1(self): pass
+            def meth2(self, arg): pass
+            def meth3(self, a1, a2): pass
+
+        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+        # decorators: decorator+
+        # decorated: decorators (classdef | funcdef)
+        def class_decorator(x): return x
+        @class_decorator
+        class G: pass
+
+    def testDictcomps(self):
+        # dictorsetmaker: ( (test ':' test (comp_for |
+        #                                   (',' test ':' test)* [','])) |
+        #                   (test (comp_for | (',' test)* [','])) )
+        nums = [1, 2, 3]
+        self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
+
+    def testListcomps(self):
+        # list comprehension tests
+        nums = [1, 2, 3, 4, 5]
+        strs = ["Apple", "Banana", "Coconut"]
+        spcs = ["  Apple", " Banana ", "Coco  nut  "]
+
+        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
+        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
+        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
+        self.assertEqual([(i, s) for i in nums for s in strs],
+                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
+                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
+                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
+                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
+                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
+
+        def test_in_func(l):
+            return [0 < x < 3 for x in l if x > 2]
+
+        self.assertEqual(test_in_func(nums), [False, False, False])
+
+        def test_nested_front():
+            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
+                             [[1, 2], [3, 4], [5, 6]])
+
+        test_nested_front()
+
+        check_syntax_error(self, "[i, s for i in nums for s in strs]")
+        check_syntax_error(self, "[x if y]")
+
+        suppliers = [
+          (1, "Boeing"),
+          (2, "Ford"),
+          (3, "Macdonalds")
+        ]
+
+        parts = [
+          (10, "Airliner"),
+          (20, "Engine"),
+          (30, "Cheeseburger")
+        ]
+
+        suppart = [
+          (1, 10), (1, 20), (2, 20), (3, 30)
+        ]
+
+        x = [
+          (sname, pname)
+            for (sno, sname) in suppliers
+              for (pno, pname) in parts
+                for (sp_sno, sp_pno) in suppart
+                  if sno == sp_sno and pno == sp_pno
+        ]
+
+        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
+                             ('Macdonalds', 'Cheeseburger')])
+
+    def testGenexps(self):
+        # generator expression tests
+        g = ([x for x in range(10)] for x in range(1))
+        self.assertEqual(next(g), [x for x in range(10)])
+        try:
+            next(g)
+            self.fail('should produce StopIteration exception')
+        except StopIteration:
+            pass
+
+        a = 1
+        try:
+            g = (a for d in a)
+            next(g)
+            self.fail('should produce TypeError')
+        except TypeError:
+            pass
+
+        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
+        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
+
+        a = [x for x in range(10)]
+        b = (x for x in (y for y in a))
+        self.assertEqual(sum(b), sum([x for x in range(10)]))
+
+        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
+        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
+        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
+        check_syntax_error(self, "foo(x for x in range(10), 100)")
+        check_syntax_error(self, "foo(100, x for x in range(10))")
+
+    def testComprehensionSpecials(self):
+        # test for outermost iterable precomputation
+        x = 10; g = (i for i in range(x)); x = 5
+        self.assertEqual(len(list(g)), 10)
+
+        # This should hold, since we're only precomputing the outermost iterable.
+        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
+        x = 5; t = True;
+        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
+
+        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
+        # even though it's silly. Make sure it works (ifelse broke this.)
+        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
+        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
+
+        # verify unpacking single element tuples in listcomp/genexp.
+        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
+        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
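+        # Illustrative extra check (an assumption, not original test data):
+        # rebinding the source name after creating a genexp has no effect,
+        # since only the outermost iterable is captured at definition time.
+        items = [1, 2, 3]; g = (i for i in items); items = []
+        self.assertEqual(list(g), [1, 2, 3])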
+
+    def testIfElseExpr(self):
+        # Test ifelse expressions in various cases
+        def _checkeval(msg, ret):
+            "helper to check that evaluation of expressions is done correctly"
+            print(msg)
+            return ret
+
+        # the next line is not allowed anymore
+        #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
+        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
+        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
+        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
+        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
+        self.assertEqual((5 and 6 if 0 else 1), 1)
+        self.assertEqual(((5 and 6) if 0 else 1), 1)
+        self.assertEqual((5 and (6 if 1 else 1)), 6)
+        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
+        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
+        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
+        self.assertEqual((not 5 if 1 else 1), False)
+        self.assertEqual((not 5 if 0 else 1), 1)
+        self.assertEqual((6 + 1 if 1 else 2), 7)
+        self.assertEqual((6 - 1 if 1 else 2), 5)
+        self.assertEqual((6 * 2 if 1 else 4), 12)
+        self.assertEqual((6 / 2 if 1 else 3), 3)
+        self.assertEqual((6 < 4 if 0 else 2), 2)
+
+
+def test_main():
+    run_unittest(TokenTests, GrammarTests)
+
+if __name__ == '__main__':
+    test_main()

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/pytree_idempotency.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/pytree_idempotency.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,92 @@
+#!/usr/bin/env python2.5
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Main program for testing the infrastructure."""
+
+__author__ = "Guido van Rossum <guido at python.org>"
+
+# Support imports (need to be imported first)
+from . import support
+
+# Python imports
+import os
+import sys
+import logging
+
+# Local imports
+from .. import pytree
+from .. import pgen2
+from ..pgen2 import driver
+
+logging.basicConfig()
+
+def main():
+    gr = driver.load_grammar("Grammar.txt")
+    dr = driver.Driver(gr, convert=pytree.convert)
+
+    fn = "example.py"
+    tree = dr.parse_file(fn, debug=True)
+    if not diff(fn, tree):
+        print "No diffs."
+    if not sys.argv[1:]:
+        return # Pass a dummy argument to run the complete test suite below
+
+    problems = []
+
+    # Process every imported module
+    for name in sys.modules:
+        mod = sys.modules[name]
+        if mod is None or not hasattr(mod, "__file__"):
+            continue
+        fn = mod.__file__
+        if fn.endswith(".pyc"):
+            fn = fn[:-1]
+        if not fn.endswith(".py"):
+            continue
+        print >>sys.stderr, "Parsing", fn
+        tree = dr.parse_file(fn, debug=True)
+        if diff(fn, tree):
+            problems.append(fn)
+
+    # Process every single module on sys.path (but not in packages)
+    for dir in sys.path:
+        try:
+            names = os.listdir(dir)
+        except os.error:
+            continue
+        print >>sys.stderr, "Scanning", dir, "..."
+        for name in names:
+            if not name.endswith(".py"):
+                continue
+            print >>sys.stderr, "Parsing", name
+            fn = os.path.join(dir, name)
+            try:
+                tree = dr.parse_file(fn, debug=True)
+            except pgen2.parse.ParseError, err:
+                print "ParseError:", err
+            else:
+                if diff(fn, tree):
+                    problems.append(fn)
+
+    # Show summary of problem files
+    if not problems:
+        print "No problems.  Congratulations!"
+    else:
+        print "Problems in following files:"
+        for fn in problems:
+            print "***", fn
+
+def diff(fn, tree):
+    f = open("@", "w")
+    try:
+        f.write(str(tree))
+    finally:
+        f.close()
+    try:
+        return os.system("diff -u %s @" % fn)
+    finally:
+        os.remove("@")
+
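+# Comment-only sketch of the property this script checks (illustrative,
+# not part of the original file):
+#
+#     tree = dr.parse_file(fn)        # source -> pytree
+#     str(tree) == open(fn).read()    # pytree -> source round-trips exactly
+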
+if __name__ == "__main__":
+    main()

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/support.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/support.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,78 @@
+"""Support code for test_*.py files"""
+# Original Author: Collin Winter
+
+# Python imports
+import unittest
+import sys
+import os
+import os.path
+import re
+from textwrap import dedent
+
+#sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+# Local imports
+from .. import pytree
+from .. import refactor
+from ..pgen2 import driver
+
+test_pkg = "refactor.fixes"
+test_dir = os.path.dirname(__file__)
+proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
+grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
+grammar = driver.load_grammar(grammar_path)
+driver = driver.Driver(grammar, convert=pytree.convert)
+
+def parse_version(version_string):
+    """Returns a version tuple matching input version_string."""
+    if not version_string:
+        return ()
+
+    version_list = []
+    for token in version_string.split('.'):
+        try:
+            version_list.append(int(token))
+        except ValueError:
+            version_list.append(token)
+    return tuple(version_list)
+
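+# For example, parse_version("2.5.1") == (2, 5, 1); non-numeric tokens are
+# kept as strings, so parse_version("2.6a1") == (2, "6a1").
+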
+def parse_string(string):
+    return driver.parse_string(reformat(string), debug=True)
+
+# Python 2.3's TestSuite is not iter()-able
+if sys.version_info < (2, 4):
+    def TestSuite_iter(self):
+        return iter(self._tests)
+    unittest.TestSuite.__iter__ = TestSuite_iter
+
+def run_all_tests(test_mod=None, tests=None):
+    if tests is None:
+        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
+    unittest.TextTestRunner(verbosity=2).run(tests)
+
+def reformat(string):
+    return dedent(string) + "\n\n"
+
+def get_refactorer(fixers=None, options=None, pkg_name=None):
+    """
+    A convenience function for creating a RefactoringTool for tests.
+
+    fixers is a list of fixers for the RefactoringTool to use. By default
+    "refactor.fixes.*" is used. options is an optional dictionary of options to
+    be passed to the RefactoringTool.
+    """
+    pkg_name = pkg_name or test_pkg
+    if fixers is not None:
+        fixers = [pkg_name + ".fix_" + fix for fix in fixers]
+    else:
+        fixers = refactor.get_fixers_from_package(pkg_name)
+    options = options or {}
+    return refactor.RefactoringTool(fixers, options, explicit=True)
+
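+# Illustrative usage (assumes a "ne" fixer exists under the named package):
+#
+#     rt = get_refactorer(fixers=["ne"], pkg_name="refactor.fixes.from2")
+#     tree = rt.refactor_string("x <> y\n", "<example>")
+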
+def all_project_files():
+    for dirpath, dirnames, filenames in os.walk(proj_dir):
+        for filename in filenames:
+            if filename.endswith(".py"):
+                yield os.path.join(dirpath, filename)
+
+TestCase = unittest.TestCase

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/test_all_fixers.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/test_all_fixers.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,35 @@
+#!/usr/bin/env python2.5
+"""Tests that run all fixer modules over an input stream.
+
+This has been broken out into its own test module because of its
+running time.
+"""
+# Author: Collin Winter
+
+# Testing imports
+try:
+    from . import support
+except ImportError:
+    import support
+
+# Python imports
+import unittest
+
+# Local imports
+from .. import pytree
+from .. import refactor
+
+class Test_all(support.TestCase):
+    def setUp(self):
+        options = {"print_function" : False}
+        self.refactor = support.get_refactorer(options=options)
+
+    def test_all_project_files(self):
+        for filepath in support.all_project_files():
+            print "Fixing %s..." % filepath
+            self.refactor.refactor_string(open(filepath).read(), filepath)
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/test_fixers.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/test_fixers.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,4033 @@
+#!/usr/bin/env python2.5
+""" Test suite for the fixer modules """
+# Author: Collin Winter
+
+# Testing imports
+try:
+    from tests import support
+except ImportError:
+    import support
+
+# Python imports
+import os
+import unittest
+from itertools import chain
+from operator import itemgetter
+
+# Local imports
+from lib2to3 import pygram, pytree, refactor, fixer_util
+
+
+class FixerTestCase(support.TestCase):
+    def setUp(self, fix_list=None):
+        if fix_list is None:
+            fix_list = [self.fixer]
+        options = {"print_function" : False}
+        self.refactor = support.get_refactorer(fix_list, options, "refactor.fixes.from2")
+        self.fixer_log = []
+        self.filename = "<string>"
+
+        for fixer in chain(self.refactor.pre_order,
+                           self.refactor.post_order):
+            fixer.log = self.fixer_log
+
+    def _check(self, before, after):
+        before = support.reformat(before)
+        after = support.reformat(after)
+        tree = self.refactor.refactor_string(before, self.filename)
+        self.failUnlessEqual(after, str(tree))
+        return tree
+
+    def check(self, before, after, ignore_warnings=False):
+        tree = self._check(before, after)
+        self.failUnless(tree.was_changed)
+        if not ignore_warnings:
+            self.failUnlessEqual(self.fixer_log, [])
+
+    def warns(self, before, after, message, unchanged=False):
+        tree = self._check(before, after)
+        self.failUnless(message in "".join(self.fixer_log))
+        if not unchanged:
+            self.failUnless(tree.was_changed)
+
+    def warns_unchanged(self, before, message):
+        self.warns(before, before, message, unchanged=True)
+
+    def unchanged(self, before, ignore_warnings=False):
+        self._check(before, before)
+        if not ignore_warnings:
+            self.failUnlessEqual(self.fixer_log, [])
+
+    def assert_runs_after(self, *names):
+        fixes = [self.fixer]
+        fixes.extend(names)
+        options = {"print_function" : False}
+        r = support.get_refactorer(fixes, options)
+        (pre, post) = r.get_fixers()
+        n = "fix_" + self.fixer
+        if post and post[-1].__class__.__module__.endswith(n):
+            # We're the last fixer to run
+            return
+        if pre and pre[-1].__class__.__module__.endswith(n) and not post:
+            # We're the last in pre and post is empty
+            return
+        self.fail("Fixer run order (%s) is incorrect; %s should be last."\
+               %(", ".join([x.__class__.__module__ for x in (pre+post)]), n))
+
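+# Each concrete test class below names a fixer module suffix; setUp() expands
+# it to "refactor.fixes.from2.fix_<name>" and builds a single-fixer
+# RefactoringTool, so check(before, after) exercises that one fixer in
+# isolation.
+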
+class Test_ne(FixerTestCase):
+    fixer = "ne"
+
+    def test_basic(self):
+        b = """if x <> y:
+            pass"""
+
+        a = """if x != y:
+            pass"""
+        self.check(b, a)
+
+    def test_no_spaces(self):
+        b = """if x<>y:
+            pass"""
+
+        a = """if x!=y:
+            pass"""
+        self.check(b, a)
+
+    def test_chained(self):
+        b = """if x<>y<>z:
+            pass"""
+
+        a = """if x!=y!=z:
+            pass"""
+        self.check(b, a)
+
+class Test_has_key(FixerTestCase):
+    fixer = "has_key"
+
+    def test_1(self):
+        b = """x = d.has_key("x") or d.has_key("y")"""
+        a = """x = "x" in d or "y" in d"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """x = a.b.c.d.has_key("x") ** 3"""
+        a = """x = ("x" in a.b.c.d) ** 3"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """x = a.b.has_key(1 + 2).__repr__()"""
+        a = """x = (1 + 2 in a.b).__repr__()"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """x = a.b.has_key(1 + 2).__repr__() ** -3 ** 4"""
+        a = """x = (1 + 2 in a.b).__repr__() ** -3 ** 4"""
+        self.check(b, a)
+
+    def test_5(self):
+        b = """x = a.has_key(f or g)"""
+        a = """x = (f or g) in a"""
+        self.check(b, a)
+
+    def test_6(self):
+        b = """x = a + b.has_key(c)"""
+        a = """x = a + (c in b)"""
+        self.check(b, a)
+
+    def test_7(self):
+        b = """x = a.has_key(lambda: 12)"""
+        a = """x = (lambda: 12) in a"""
+        self.check(b, a)
+
+    def test_8(self):
+        b = """x = a.has_key(a for a in b)"""
+        a = """x = (a for a in b) in a"""
+        self.check(b, a)
+
+    def test_9(self):
+        b = """if not a.has_key(b): pass"""
+        a = """if b not in a: pass"""
+        self.check(b, a)
+
+    def test_10(self):
+        b = """if not a.has_key(b).__repr__(): pass"""
+        a = """if not (b in a).__repr__(): pass"""
+        self.check(b, a)
+
+    def test_11(self):
+        b = """if not a.has_key(b) ** 2: pass"""
+        a = """if not (b in a) ** 2: pass"""
+        self.check(b, a)
+
+class Test_apply(FixerTestCase):
+    fixer = "apply"
+
+    def test_1(self):
+        b = """x = apply(f, g + h)"""
+        a = """x = f(*g + h)"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """y = apply(f, g, h)"""
+        a = """y = f(*g, **h)"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """z = apply(fs[0], g or h, h or g)"""
+        a = """z = fs[0](*g or h, **h or g)"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """apply(f, (x, y) + t)"""
+        a = """f(*(x, y) + t)"""
+        self.check(b, a)
+
+    def test_5(self):
+        b = """apply(f, args,)"""
+        a = """f(*args)"""
+        self.check(b, a)
+
+    def test_6(self):
+        b = """apply(f, args, kwds,)"""
+        a = """f(*args, **kwds)"""
+        self.check(b, a)
+
+    # Test that complex functions are parenthesized
+
+    def test_complex_1(self):
+        b = """x = apply(f+g, args)"""
+        a = """x = (f+g)(*args)"""
+        self.check(b, a)
+
+    def test_complex_2(self):
+        b = """x = apply(f*g, args)"""
+        a = """x = (f*g)(*args)"""
+        self.check(b, a)
+
+    def test_complex_3(self):
+        b = """x = apply(f**g, args)"""
+        a = """x = (f**g)(*args)"""
+        self.check(b, a)
+
+    # But dotted names etc. not
+
+    def test_dotted_name(self):
+        b = """x = apply(f.g, args)"""
+        a = """x = f.g(*args)"""
+        self.check(b, a)
+
+    def test_subscript(self):
+        b = """x = apply(f[x], args)"""
+        a = """x = f[x](*args)"""
+        self.check(b, a)
+
+    def test_call(self):
+        b = """x = apply(f(), args)"""
+        a = """x = f()(*args)"""
+        self.check(b, a)
+
+    # Extreme case
+    def test_extreme(self):
+        b = """x = apply(a.b.c.d.e.f, args, kwds)"""
+        a = """x = a.b.c.d.e.f(*args, **kwds)"""
+        self.check(b, a)
+
+    # XXX Comments in weird places still get lost
+    def test_weird_comments(self):
+        b = """apply(   # foo
+          f, # bar
+          args)"""
+        a = """f(*args)"""
+        self.check(b, a)
+
+    # These should *not* be touched
+
+    def test_unchanged_1(self):
+        s = """apply()"""
+        self.unchanged(s)
+
+    def test_unchanged_2(self):
+        s = """apply(f)"""
+        self.unchanged(s)
+
+    def test_unchanged_3(self):
+        s = """apply(f,)"""
+        self.unchanged(s)
+
+    def test_unchanged_4(self):
+        s = """apply(f, args, kwds, extras)"""
+        self.unchanged(s)
+
+    def test_unchanged_5(self):
+        s = """apply(f, *args, **kwds)"""
+        self.unchanged(s)
+
+    def test_unchanged_6(self):
+        s = """apply(f, *args)"""
+        self.unchanged(s)
+
+    def test_unchanged_7(self):
+        s = """apply(func=f, args=args, kwds=kwds)"""
+        self.unchanged(s)
+
+    def test_unchanged_8(self):
+        s = """apply(f, args=args, kwds=kwds)"""
+        self.unchanged(s)
+
+    def test_unchanged_9(self):
+        s = """apply(f, args, kwds=kwds)"""
+        self.unchanged(s)
+
+    def test_space_1(self):
+        a = """apply(  f,  args,   kwds)"""
+        b = """f(*args, **kwds)"""
+        self.check(a, b)
+
+    def test_space_2(self):
+        a = """apply(  f  ,args,kwds   )"""
+        b = """f(*args, **kwds)"""
+        self.check(a, b)
+
+class Test_intern(FixerTestCase):
+    fixer = "intern"
+
+    def test_prefix_preservation(self):
+        b = """x =   intern(  a  )"""
+        a = """import sys\nx =   sys.intern(  a  )"""
+        self.check(b, a)
+
+        b = """y = intern("b" # test
+              )"""
+        a = """import sys\ny = sys.intern("b" # test
+              )"""
+        self.check(b, a)
+
+        b = """z = intern(a+b+c.d,   )"""
+        a = """import sys\nz = sys.intern(a+b+c.d,   )"""
+        self.check(b, a)
+
+    def test(self):
+        b = """x = intern(a)"""
+        a = """import sys\nx = sys.intern(a)"""
+        self.check(b, a)
+
+        b = """z = intern(a+b+c.d,)"""
+        a = """import sys\nz = sys.intern(a+b+c.d,)"""
+        self.check(b, a)
+
+        b = """intern("y%s" % 5).replace("y", "")"""
+        a = """import sys\nsys.intern("y%s" % 5).replace("y", "")"""
+        self.check(b, a)
+
+    # These should not be refactored
+
+    def test_unchanged(self):
+        s = """intern(a=1)"""
+        self.unchanged(s)
+
+        s = """intern(f, g)"""
+        self.unchanged(s)
+
+        s = """intern(*h)"""
+        self.unchanged(s)
+
+        s = """intern(**i)"""
+        self.unchanged(s)
+
+        s = """intern()"""
+        self.unchanged(s)
+
+class Test_reduce(FixerTestCase):
+    fixer = "reduce"
+
+    def test_simple_call(self):
+        b = "reduce(a, b, c)"
+        a = "from functools import reduce\nreduce(a, b, c)"
+        self.check(b, a)
+
+    def test_call_with_lambda(self):
+        b = "reduce(lambda x, y: x + y, seq)"
+        a = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
+        self.check(b, a)
+
+    def test_unchanged(self):
+        s = "reduce(a)"
+        self.unchanged(s)
+
+        s = "reduce(a, b=42)"
+        self.unchanged(s)
+
+        s = "reduce(a, b, c, d)"
+        self.unchanged(s)
+
+        s = "reduce(**c)"
+        self.unchanged(s)
+
+        s = "reduce()"
+        self.unchanged(s)
+
+class Test_print(FixerTestCase):
+    fixer = "print"
+
+    def test_prefix_preservation(self):
+        b = """print 1,   1+1,   1+1+1"""
+        a = """print(1,   1+1,   1+1+1)"""
+        self.check(b, a)
+
+    def test_idempotency(self):
+        s = """print()"""
+        self.unchanged(s)
+
+        s = """print('')"""
+        self.unchanged(s)
+
+    def test_idempotency_print_as_function(self):
+        print_stmt = pygram.python_grammar.keywords.pop("print")
+        try:
+            s = """print(1, 1+1, 1+1+1)"""
+            self.unchanged(s)
+
+            s = """print()"""
+            self.unchanged(s)
+
+            s = """print('')"""
+            self.unchanged(s)
+        finally:
+            pygram.python_grammar.keywords["print"] = print_stmt
+
+    def test_1(self):
+        b = """print 1, 1+1, 1+1+1"""
+        a = """print(1, 1+1, 1+1+1)"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """print 1, 2"""
+        a = """print(1, 2)"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """print"""
+        a = """print()"""
+        self.check(b, a)
+
+    def test_4(self):
+        # from bug 3000
+        b = """print whatever; print"""
+        a = """print(whatever); print()"""
+        self.check(b, a)
+
+    def test_5(self):
+        b = """print; print whatever;"""
+        a = """print(); print(whatever);"""
+
+    def test_tuple(self):
+        b = """print (a, b, c)"""
+        a = """print((a, b, c))"""
+        self.check(b, a)
+
+    # trailing commas
+
+    def test_trailing_comma_1(self):
+        b = """print 1, 2, 3,"""
+        a = """print(1, 2, 3, end=' ')"""
+        self.check(b, a)
+
+    def test_trailing_comma_2(self):
+        b = """print 1, 2,"""
+        a = """print(1, 2, end=' ')"""
+        self.check(b, a)
+
+    def test_trailing_comma_3(self):
+        b = """print 1,"""
+        a = """print(1, end=' ')"""
+        self.check(b, a)
+
+    # >> stuff
+
+    def test_vargs_without_trailing_comma(self):
+        b = """print >>sys.stderr, 1, 2, 3"""
+        a = """print(1, 2, 3, file=sys.stderr)"""
+        self.check(b, a)
+
+    def test_with_trailing_comma(self):
+        b = """print >>sys.stderr, 1, 2,"""
+        a = """print(1, 2, end=' ', file=sys.stderr)"""
+        self.check(b, a)
+
+    def test_no_trailing_comma(self):
+        b = """print >>sys.stderr, 1+1"""
+        a = """print(1+1, file=sys.stderr)"""
+        self.check(b, a)
+
+    def test_spaces_before_file(self):
+        b = """print >>  sys.stderr"""
+        a = """print(file=sys.stderr)"""
+        self.check(b, a)
+
+    # With from __future__ import print_function
+    def test_with_future_print_function(self):
+        # XXX: These tests won't actually do anything until the parser
+        #      is fixed so it won't crash when it sees print(x=y).
+        #      When #2412 is fixed, the try/except block can be taken
+        #      out and the tests can be run like normal.
+        try:
+            s = "from __future__ import print_function\n"\
+                "print('Hai!', end=' ')"
+            self.unchanged(s)
+
+            b = "print 'Hello, world!'"
+            a = "print('Hello, world!')"
+            self.check(b, a)
+
+            s = "from __future__ import *\n"\
+                "print('Hai!', end=' ')"
+            self.unchanged(s)
+        except:
+            return
+        else:
+            self.assertFalse(True, "#2412 has been fixed -- printing tests "\
+                                   "need to be updated!")
+
+class Test_exec(FixerTestCase):
+    fixer = "exec"
+
+    def test_prefix_preservation(self):
+        b = """  exec code in ns1,   ns2"""
+        a = """  exec(code, ns1,   ns2)"""
+        self.check(b, a)
+
+    def test_basic(self):
+        b = """exec code"""
+        a = """exec(code)"""
+        self.check(b, a)
+
+    def test_with_globals(self):
+        b = """exec code in ns"""
+        a = """exec(code, ns)"""
+        self.check(b, a)
+
+    def test_with_globals_locals(self):
+        b = """exec code in ns1, ns2"""
+        a = """exec(code, ns1, ns2)"""
+        self.check(b, a)
+
+    def test_complex_1(self):
+        b = """exec (a.b()) in ns"""
+        a = """exec((a.b()), ns)"""
+        self.check(b, a)
+
+    def test_complex_2(self):
+        b = """exec a.b() + c in ns"""
+        a = """exec(a.b() + c, ns)"""
+        self.check(b, a)
+
+    # These should not be touched
+
+    def test_unchanged_1(self):
+        s = """exec(code)"""
+        self.unchanged(s)
+
+    def test_unchanged_2(self):
+        s = """exec (code)"""
+        self.unchanged(s)
+
+    def test_unchanged_3(self):
+        s = """exec(code, ns)"""
+        self.unchanged(s)
+
+    def test_unchanged_4(self):
+        s = """exec(code, ns1, ns2)"""
+        self.unchanged(s)
+
+class Test_repr(FixerTestCase):
+    fixer = "repr"
+
+    def test_prefix_preservation(self):
+        b = """x =   `1 + 2`"""
+        a = """x =   repr(1 + 2)"""
+        self.check(b, a)
+
+    def test_simple_1(self):
+        b = """x = `1 + 2`"""
+        a = """x = repr(1 + 2)"""
+        self.check(b, a)
+
+    def test_simple_2(self):
+        b = """y = `x`"""
+        a = """y = repr(x)"""
+        self.check(b, a)
+
+    def test_complex(self):
+        b = """z = `y`.__repr__()"""
+        a = """z = repr(y).__repr__()"""
+        self.check(b, a)
+
+    def test_tuple(self):
+        b = """x = `1, 2, 3`"""
+        a = """x = repr((1, 2, 3))"""
+        self.check(b, a)
+
+    def test_nested(self):
+        b = """x = `1 + `2``"""
+        a = """x = repr(1 + repr(2))"""
+        self.check(b, a)
+
+    def test_nested_tuples(self):
+        b = """x = `1, 2 + `3, 4``"""
+        a = """x = repr((1, 2 + repr((3, 4))))"""
+        self.check(b, a)
+
+class Test_except(FixerTestCase):
+    fixer = "except"
+
+    def test_prefix_preservation(self):
+        b = """
+            try:
+                pass
+            except (RuntimeError, ImportError),    e:
+                pass"""
+        a = """
+            try:
+                pass
+            except (RuntimeError, ImportError) as    e:
+                pass"""
+        self.check(b, a)
+
+    def test_simple(self):
+        b = """
+            try:
+                pass
+            except Foo, e:
+                pass"""
+        a = """
+            try:
+                pass
+            except Foo as e:
+                pass"""
+        self.check(b, a)
+
+    def test_simple_no_space_before_target(self):
+        b = """
+            try:
+                pass
+            except Foo,e:
+                pass"""
+        a = """
+            try:
+                pass
+            except Foo as e:
+                pass"""
+        self.check(b, a)
+
+    def test_tuple_unpack(self):
+        b = """
+            def foo():
+                try:
+                    pass
+                except Exception, (f, e):
+                    pass
+                except ImportError, e:
+                    pass"""
+
+        a = """
+            def foo():
+                try:
+                    pass
+                except Exception as xxx_todo_changeme:
+                    (f, e) = xxx_todo_changeme.args
+                    pass
+                except ImportError as e:
+                    pass"""
+        self.check(b, a)
+
+    def test_multi_class(self):
+        b = """
+            try:
+                pass
+            except (RuntimeError, ImportError), e:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except (RuntimeError, ImportError) as e:
+                pass"""
+        self.check(b, a)
+
+    def test_list_unpack(self):
+        b = """
+            try:
+                pass
+            except Exception, [a, b]:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except Exception as xxx_todo_changeme:
+                [a, b] = xxx_todo_changeme.args
+                pass"""
+        self.check(b, a)
+
+    def test_weird_target_1(self):
+        b = """
+            try:
+                pass
+            except Exception, d[5]:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except Exception as xxx_todo_changeme:
+                d[5] = xxx_todo_changeme
+                pass"""
+        self.check(b, a)
+
+    def test_weird_target_2(self):
+        b = """
+            try:
+                pass
+            except Exception, a.foo:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except Exception as xxx_todo_changeme:
+                a.foo = xxx_todo_changeme
+                pass"""
+        self.check(b, a)
+
+    def test_weird_target_3(self):
+        b = """
+            try:
+                pass
+            except Exception, a().foo:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except Exception as xxx_todo_changeme:
+                a().foo = xxx_todo_changeme
+                pass"""
+        self.check(b, a)
+
+    def test_bare_except(self):
+        b = """
+            try:
+                pass
+            except Exception, a:
+                pass
+            except:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except Exception as a:
+                pass
+            except:
+                pass"""
+        self.check(b, a)
+
+    def test_bare_except_and_else_finally(self):
+        b = """
+            try:
+                pass
+            except Exception, a:
+                pass
+            except:
+                pass
+            else:
+                pass
+            finally:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except Exception as a:
+                pass
+            except:
+                pass
+            else:
+                pass
+            finally:
+                pass"""
+        self.check(b, a)
+
+    def test_multi_fixed_excepts_before_bare_except(self):
+        b = """
+            try:
+                pass
+            except TypeError, b:
+                pass
+            except Exception, a:
+                pass
+            except:
+                pass"""
+
+        a = """
+            try:
+                pass
+            except TypeError as b:
+                pass
+            except Exception as a:
+                pass
+            except:
+                pass"""
+        self.check(b, a)
+
+    # These should not be touched:
+
+    def test_unchanged_1(self):
+        s = """
+            try:
+                pass
+            except:
+                pass"""
+        self.unchanged(s)
+
+    def test_unchanged_2(self):
+        s = """
+            try:
+                pass
+            except Exception:
+                pass"""
+        self.unchanged(s)
+
+    def test_unchanged_3(self):
+        s = """
+            try:
+                pass
+            except (Exception, SystemExit):
+                pass"""
+        self.unchanged(s)
+
+class Test_raise(FixerTestCase):
+    fixer = "raise"
+
+    def test_basic(self):
+        b = """raise Exception, 5"""
+        a = """raise Exception(5)"""
+        self.check(b, a)
+
+    def test_prefix_preservation(self):
+        b = """raise Exception,5"""
+        a = """raise Exception(5)"""
+        self.check(b, a)
+
+        b = """raise   Exception,    5"""
+        a = """raise   Exception(5)"""
+        self.check(b, a)
+
+    def test_with_comments(self):
+        b = """raise Exception, 5 # foo"""
+        a = """raise Exception(5) # foo"""
+        self.check(b, a)
+
+        b = """raise E, (5, 6) % (a, b) # foo"""
+        a = """raise E((5, 6) % (a, b)) # foo"""
+        self.check(b, a)
+
+        b = """def foo():
+                    raise Exception, 5, 6 # foo"""
+        a = """def foo():
+                    raise Exception(5).with_traceback(6) # foo"""
+        self.check(b, a)
+
+    def test_tuple_value(self):
+        b = """raise Exception, (5, 6, 7)"""
+        a = """raise Exception(5, 6, 7)"""
+        self.check(b, a)
+
+    def test_tuple_detection(self):
+        b = """raise E, (5, 6) % (a, b)"""
+        a = """raise E((5, 6) % (a, b))"""
+        self.check(b, a)
+
+    def test_tuple_exc_1(self):
+        b = """raise (((E1, E2), E3), E4), V"""
+        a = """raise E1(V)"""
+        self.check(b, a)
+
+    def test_tuple_exc_2(self):
+        b = """raise (E1, (E2, E3), E4), V"""
+        a = """raise E1(V)"""
+        self.check(b, a)
+
+    # These should produce a warning
+
+    def test_string_exc(self):
+        s = """raise 'foo'"""
+        self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+    def test_string_exc_val(self):
+        s = """raise "foo", 5"""
+        self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+    def test_string_exc_val_tb(self):
+        s = """raise "foo", 5, 6"""
+        self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+    # These should result in traceback-assignment
+
+    def test_tb_1(self):
+        b = """def foo():
+                    raise Exception, 5, 6"""
+        a = """def foo():
+                    raise Exception(5).with_traceback(6)"""
+        self.check(b, a)
+
+    def test_tb_2(self):
+        b = """def foo():
+                    a = 5
+                    raise Exception, 5, 6
+                    b = 6"""
+        a = """def foo():
+                    a = 5
+                    raise Exception(5).with_traceback(6)
+                    b = 6"""
+        self.check(b, a)
+
+    def test_tb_3(self):
+        b = """def foo():
+                    raise Exception,5,6"""
+        a = """def foo():
+                    raise Exception(5).with_traceback(6)"""
+        self.check(b, a)
+
+    def test_tb_4(self):
+        b = """def foo():
+                    a = 5
+                    raise Exception,5,6
+                    b = 6"""
+        a = """def foo():
+                    a = 5
+                    raise Exception(5).with_traceback(6)
+                    b = 6"""
+        self.check(b, a)
+
+    def test_tb_5(self):
+        b = """def foo():
+                    raise Exception, (5, 6, 7), 6"""
+        a = """def foo():
+                    raise Exception(5, 6, 7).with_traceback(6)"""
+        self.check(b, a)
+
+    def test_tb_6(self):
+        b = """def foo():
+                    a = 5
+                    raise Exception, (5, 6, 7), 6
+                    b = 6"""
+        a = """def foo():
+                    a = 5
+                    raise Exception(5, 6, 7).with_traceback(6)
+                    b = 6"""
+        self.check(b, a)
+
+class Test_throw(FixerTestCase):
+    fixer = "throw"
+
+    def test_1(self):
+        b = """g.throw(Exception, 5)"""
+        a = """g.throw(Exception(5))"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """g.throw(Exception,5)"""
+        a = """g.throw(Exception(5))"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """g.throw(Exception, (5, 6, 7))"""
+        a = """g.throw(Exception(5, 6, 7))"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """5 + g.throw(Exception, 5)"""
+        a = """5 + g.throw(Exception(5))"""
+        self.check(b, a)
+
+    # These should produce warnings
+
+    def test_warn_1(self):
+        s = """g.throw("foo")"""
+        self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+    def test_warn_2(self):
+        s = """g.throw("foo", 5)"""
+        self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+    def test_warn_3(self):
+        s = """g.throw("foo", 5, 6)"""
+        self.warns_unchanged(s, "Python 3 does not support string exceptions")
+
+    # These should not be touched
+
+    def test_untouched_1(self):
+        s = """g.throw(Exception)"""
+        self.unchanged(s)
+
+    def test_untouched_2(self):
+        s = """g.throw(Exception(5, 6))"""
+        self.unchanged(s)
+
+    def test_untouched_3(self):
+        s = """5 + g.throw(Exception(5, 6))"""
+        self.unchanged(s)
+
+    # These should result in traceback-assignment
+
+    def test_tb_1(self):
+        b = """def foo():
+                    g.throw(Exception, 5, 6)"""
+        a = """def foo():
+                    g.throw(Exception(5).with_traceback(6))"""
+        self.check(b, a)
+
+    def test_tb_2(self):
+        b = """def foo():
+                    a = 5
+                    g.throw(Exception, 5, 6)
+                    b = 6"""
+        a = """def foo():
+                    a = 5
+                    g.throw(Exception(5).with_traceback(6))
+                    b = 6"""
+        self.check(b, a)
+
+    def test_tb_3(self):
+        b = """def foo():
+                    g.throw(Exception,5,6)"""
+        a = """def foo():
+                    g.throw(Exception(5).with_traceback(6))"""
+        self.check(b, a)
+
+    def test_tb_4(self):
+        b = """def foo():
+                    a = 5
+                    g.throw(Exception,5,6)
+                    b = 6"""
+        a = """def foo():
+                    a = 5
+                    g.throw(Exception(5).with_traceback(6))
+                    b = 6"""
+        self.check(b, a)
+
+    def test_tb_5(self):
+        b = """def foo():
+                    g.throw(Exception, (5, 6, 7), 6)"""
+        a = """def foo():
+                    g.throw(Exception(5, 6, 7).with_traceback(6))"""
+        self.check(b, a)
+
+    def test_tb_6(self):
+        b = """def foo():
+                    a = 5
+                    g.throw(Exception, (5, 6, 7), 6)
+                    b = 6"""
+        a = """def foo():
+                    a = 5
+                    g.throw(Exception(5, 6, 7).with_traceback(6))
+                    b = 6"""
+        self.check(b, a)
+
+    def test_tb_7(self):
+        b = """def foo():
+                    a + g.throw(Exception, 5, 6)"""
+        a = """def foo():
+                    a + g.throw(Exception(5).with_traceback(6))"""
+        self.check(b, a)
+
+    def test_tb_8(self):
+        b = """def foo():
+                    a = 5
+                    a + g.throw(Exception, 5, 6)
+                    b = 6"""
+        a = """def foo():
+                    a = 5
+                    a + g.throw(Exception(5).with_traceback(6))
+                    b = 6"""
+        self.check(b, a)
+
+class Test_long(FixerTestCase):
+    fixer = "long"
+
+    def test_1(self):
+        b = """x = long(x)"""
+        a = """x = int(x)"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """y = isinstance(x, long)"""
+        a = """y = isinstance(x, int)"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """z = type(x) in (int, long)"""
+        a = """z = type(x) in (int, int)"""
+        self.check(b, a)
+
+    def test_unchanged(self):
+        s = """long = True"""
+        self.unchanged(s)
+
+        s = """s.long = True"""
+        self.unchanged(s)
+
+        s = """def long(): pass"""
+        self.unchanged(s)
+
+        s = """class long(): pass"""
+        self.unchanged(s)
+
+        s = """def f(long): pass"""
+        self.unchanged(s)
+
+        s = """def f(g, long): pass"""
+        self.unchanged(s)
+
+        s = """def f(x, long=True): pass"""
+        self.unchanged(s)
+
+    def test_prefix_preservation(self):
+        b = """x =   long(  x  )"""
+        a = """x =   int(  x  )"""
+        self.check(b, a)
+
+
+class Test_execfile(FixerTestCase):
+    fixer = "execfile"
+
+    def test_conversion(self):
+        b = """execfile("fn")"""
+        a = """exec(compile(open("fn").read(), "fn", 'exec'))"""
+        self.check(b, a)
+
+        b = """execfile("fn", glob)"""
+        a = """exec(compile(open("fn").read(), "fn", 'exec'), glob)"""
+        self.check(b, a)
+
+        b = """execfile("fn", glob, loc)"""
+        a = """exec(compile(open("fn").read(), "fn", 'exec'), glob, loc)"""
+        self.check(b, a)
+
+        b = """execfile("fn", globals=glob)"""
+        a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob)"""
+        self.check(b, a)
+
+        b = """execfile("fn", locals=loc)"""
+        a = """exec(compile(open("fn").read(), "fn", 'exec'), locals=loc)"""
+        self.check(b, a)
+
+        b = """execfile("fn", globals=glob, locals=loc)"""
+        a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob, locals=loc)"""
+        self.check(b, a)
+
+    def test_spacing(self):
+        b = """execfile( "fn" )"""
+        a = """exec(compile(open( "fn" ).read(), "fn", 'exec'))"""
+        self.check(b, a)
+
+        b = """execfile("fn",  globals = glob)"""
+        a = """exec(compile(open("fn").read(), "fn", 'exec'),  globals = glob)"""
+        self.check(b, a)
+
+
+class Test_isinstance(FixerTestCase):
+    fixer = "isinstance"
+
+    def test_remove_multiple_items(self):
+        b = """isinstance(x, (int, int, int))"""
+        a = """isinstance(x, int)"""
+        self.check(b, a)
+
+        b = """isinstance(x, (int, float, int, int, float))"""
+        a = """isinstance(x, (int, float))"""
+        self.check(b, a)
+
+        b = """isinstance(x, (int, float, int, int, float, str))"""
+        a = """isinstance(x, (int, float, str))"""
+        self.check(b, a)
+
+        b = """isinstance(foo() + bar(), (x(), y(), x(), int, int))"""
+        a = """isinstance(foo() + bar(), (x(), y(), x(), int))"""
+        self.check(b, a)
+
+    def test_prefix_preservation(self):
+        b = """if    isinstance(  foo(), (  bar, bar, baz )) : pass"""
+        a = """if    isinstance(  foo(), (  bar, baz )) : pass"""
+        self.check(b, a)
+
+    def test_unchanged(self):
+        self.unchanged("isinstance(x, (str, int))")
+
+class Test_dict(FixerTestCase):
+    fixer = "dict"
+
+    def test_prefix_preservation(self):
+        b = "if   d. keys  (  )  : pass"
+        a = "if   list(d. keys  (  ))  : pass"
+        self.check(b, a)
+
+        b = "if   d. items  (  )  : pass"
+        a = "if   list(d. items  (  ))  : pass"
+        self.check(b, a)
+
+        b = "if   d. iterkeys  ( )  : pass"
+        a = "if   iter(d. keys  ( ))  : pass"
+        self.check(b, a)
+
+        b = "[i for i in    d.  iterkeys(  )  ]"
+        a = "[i for i in    d.  keys(  )  ]"
+        self.check(b, a)
+
+    def test_trailing_comment(self):
+        b = "d.keys() # foo"
+        a = "list(d.keys()) # foo"
+        self.check(b, a)
+
+        b = "d.items()  # foo"
+        a = "list(d.items())  # foo"
+        self.check(b, a)
+
+        b = "d.iterkeys()  # foo"
+        a = "iter(d.keys())  # foo"
+        self.check(b, a)
+
+        b = """[i for i in d.iterkeys() # foo
+               ]"""
+        a = """[i for i in d.keys() # foo
+               ]"""
+        self.check(b, a)
+
+    def test_unchanged(self):
+        for wrapper in fixer_util.consuming_calls:
+            s = "s = %s(d.keys())" % wrapper
+            self.unchanged(s)
+
+            s = "s = %s(d.values())" % wrapper
+            self.unchanged(s)
+
+            s = "s = %s(d.items())" % wrapper
+            self.unchanged(s)
+
+    def test_01(self):
+        b = "d.keys()"
+        a = "list(d.keys())"
+        self.check(b, a)
+
+        b = "a[0].foo().keys()"
+        a = "list(a[0].foo().keys())"
+        self.check(b, a)
+
+    def test_02(self):
+        b = "d.items()"
+        a = "list(d.items())"
+        self.check(b, a)
+
+    def test_03(self):
+        b = "d.values()"
+        a = "list(d.values())"
+        self.check(b, a)
+
+    def test_04(self):
+        b = "d.iterkeys()"
+        a = "iter(d.keys())"
+        self.check(b, a)
+
+    def test_05(self):
+        b = "d.iteritems()"
+        a = "iter(d.items())"
+        self.check(b, a)
+
+    def test_06(self):
+        b = "d.itervalues()"
+        a = "iter(d.values())"
+        self.check(b, a)
+
+    def test_07(self):
+        s = "list(d.keys())"
+        self.unchanged(s)
+
+    def test_08(self):
+        s = "sorted(d.keys())"
+        self.unchanged(s)
+
+    def test_09(self):
+        b = "iter(d.keys())"
+        a = "iter(list(d.keys()))"
+        self.check(b, a)
+
+    def test_10(self):
+        b = "foo(d.keys())"
+        a = "foo(list(d.keys()))"
+        self.check(b, a)
+
+    def test_11(self):
+        b = "for i in d.keys(): print i"
+        a = "for i in list(d.keys()): print i"
+        self.check(b, a)
+
+    def test_12(self):
+        b = "for i in d.iterkeys(): print i"
+        a = "for i in d.keys(): print i"
+        self.check(b, a)
+
+    def test_13(self):
+        b = "[i for i in d.keys()]"
+        a = "[i for i in list(d.keys())]"
+        self.check(b, a)
+
+    def test_14(self):
+        b = "[i for i in d.iterkeys()]"
+        a = "[i for i in d.keys()]"
+        self.check(b, a)
+
+    def test_15(self):
+        b = "(i for i in d.keys())"
+        a = "(i for i in list(d.keys()))"
+        self.check(b, a)
+
+    def test_16(self):
+        b = "(i for i in d.iterkeys())"
+        a = "(i for i in d.keys())"
+        self.check(b, a)
+
+    def test_17(self):
+        b = "iter(d.iterkeys())"
+        a = "iter(d.keys())"
+        self.check(b, a)
+
+    def test_18(self):
+        b = "list(d.iterkeys())"
+        a = "list(d.keys())"
+        self.check(b, a)
+
+    def test_19(self):
+        b = "sorted(d.iterkeys())"
+        a = "sorted(d.keys())"
+        self.check(b, a)
+
+    def test_20(self):
+        b = "foo(d.iterkeys())"
+        a = "foo(iter(d.keys()))"
+        self.check(b, a)
+
+    def test_21(self):
+        b = "print h.iterkeys().next()"
+        a = "print iter(h.keys()).next()"
+        self.check(b, a)
+
+    def test_22(self):
+        b = "print h.keys()[0]"
+        a = "print list(h.keys())[0]"
+        self.check(b, a)
+
+    def test_23(self):
+        b = "print list(h.iterkeys().next())"
+        a = "print list(iter(h.keys()).next())"
+        self.check(b, a)
+
+    def test_24(self):
+        b = "for x in h.keys()[0]: print x"
+        a = "for x in list(h.keys())[0]: print x"
+        self.check(b, a)
+
+class Test_xrange(FixerTestCase):
+    fixer = "xrange"
+
+    def test_prefix_preservation(self):
+        b = """x =    xrange(  10  )"""
+        a = """x =    range(  10  )"""
+        self.check(b, a)
+
+        b = """x = xrange(  1  ,  10   )"""
+        a = """x = range(  1  ,  10   )"""
+        self.check(b, a)
+
+        b = """x = xrange(  0  ,  10 ,  2 )"""
+        a = """x = range(  0  ,  10 ,  2 )"""
+        self.check(b, a)
+
+    def test_single_arg(self):
+        b = """x = xrange(10)"""
+        a = """x = range(10)"""
+        self.check(b, a)
+
+    def test_two_args(self):
+        b = """x = xrange(1, 10)"""
+        a = """x = range(1, 10)"""
+        self.check(b, a)
+
+    def test_three_args(self):
+        b = """x = xrange(0, 10, 2)"""
+        a = """x = range(0, 10, 2)"""
+        self.check(b, a)
+
+    def test_wrap_in_list(self):
+        b = """x = range(10, 3, 9)"""
+        a = """x = list(range(10, 3, 9))"""
+        self.check(b, a)
+
+        b = """x = foo(range(10, 3, 9))"""
+        a = """x = foo(list(range(10, 3, 9)))"""
+        self.check(b, a)
+
+        b = """x = range(10, 3, 9) + [4]"""
+        a = """x = list(range(10, 3, 9)) + [4]"""
+        self.check(b, a)
+
+        b = """x = range(10)[::-1]"""
+        a = """x = list(range(10))[::-1]"""
+        self.check(b, a)
+
+        b = """x = range(10)  [3]"""
+        a = """x = list(range(10))  [3]"""
+        self.check(b, a)
+
+    def test_xrange_in_for(self):
+        b = """for i in xrange(10):\n    j=i"""
+        a = """for i in range(10):\n    j=i"""
+        self.check(b, a)
+
+        b = """[i for i in xrange(10)]"""
+        a = """[i for i in range(10)]"""
+        self.check(b, a)
+
+    def test_range_in_for(self):
+        self.unchanged("for i in range(10): pass")
+        self.unchanged("[i for i in range(10)]")
+
+    def test_in_contains_test(self):
+        self.unchanged("x in range(10, 3, 9)")
+
+    def test_in_consuming_context(self):
+        for call in fixer_util.consuming_calls:
+            self.unchanged("a = %s(range(10))" % call)
+
+class Test_raw_input(FixerTestCase):
+    fixer = "raw_input"
+
+    def test_prefix_preservation(self):
+        b = """x =    raw_input(   )"""
+        a = """x =    input(   )"""
+        self.check(b, a)
+
+        b = """x = raw_input(   ''   )"""
+        a = """x = input(   ''   )"""
+        self.check(b, a)
+
+    def test_1(self):
+        b = """x = raw_input()"""
+        a = """x = input()"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """x = raw_input('')"""
+        a = """x = input('')"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """x = raw_input('prompt')"""
+        a = """x = input('prompt')"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """x = raw_input(foo(a) + 6)"""
+        a = """x = input(foo(a) + 6)"""
+        self.check(b, a)
+
+    def test_5(self):
+        b = """x = raw_input(invite).split()"""
+        a = """x = input(invite).split()"""
+        self.check(b, a)
+
+    def test_6(self):
+        b = """x = raw_input(invite) . split ()"""
+        a = """x = input(invite) . split ()"""
+        self.check(b, a)
+
+    def test_8(self):
+        b = "x = int(raw_input())"
+        a = "x = int(input())"
+        self.check(b, a)
+
+class Test_funcattrs(FixerTestCase):
+    fixer = "funcattrs"
+
+    attrs = ["closure", "doc", "name", "defaults", "code", "globals", "dict"]
+
+    def test(self):
+        for attr in self.attrs:
+            b = "a.func_%s" % attr
+            a = "a.__%s__" % attr
+            self.check(b, a)
+
+            b = "self.foo.func_%s.foo_bar" % attr
+            a = "self.foo.__%s__.foo_bar" % attr
+            self.check(b, a)
+
+    def test_unchanged(self):
+        for attr in self.attrs:
+            s = "foo(func_%s + 5)" % attr
+            self.unchanged(s)
+
+            s = "f(foo.__%s__)" % attr
+            self.unchanged(s)
+
+            s = "f(foo.__%s__.foo)" % attr
+            self.unchanged(s)
+
+class Test_xreadlines(FixerTestCase):
+    fixer = "xreadlines"
+
+    def test_call(self):
+        b = "for x in f.xreadlines(): pass"
+        a = "for x in f: pass"
+        self.check(b, a)
+
+        b = "for x in foo().xreadlines(): pass"
+        a = "for x in foo(): pass"
+        self.check(b, a)
+
+        b = "for x in (5 + foo()).xreadlines(): pass"
+        a = "for x in (5 + foo()): pass"
+        self.check(b, a)
+
+    def test_attr_ref(self):
+        b = "foo(f.xreadlines + 5)"
+        a = "foo(f.__iter__ + 5)"
+        self.check(b, a)
+
+        b = "foo(f().xreadlines + 5)"
+        a = "foo(f().__iter__ + 5)"
+        self.check(b, a)
+
+        b = "foo((5 + f()).xreadlines + 5)"
+        a = "foo((5 + f()).__iter__ + 5)"
+        self.check(b, a)
+
+    def test_unchanged(self):
+        s = "for x in f.xreadlines(5): pass"
+        self.unchanged(s)
+
+        s = "for x in f.xreadlines(k=5): pass"
+        self.unchanged(s)
+
+        s = "for x in f.xreadlines(*k, **v): pass"
+        self.unchanged(s)
+
+        s = "foo(xreadlines)"
+        self.unchanged(s)
+
+
+class ImportsFixerTests:
+
+    def test_import_module(self):
+        for old, new in self.modules.items():
+            b = "import %s" % old
+            a = "import %s" % new
+            self.check(b, a)
+
+            b = "import foo, %s, bar" % old
+            a = "import foo, %s, bar" % new
+            self.check(b, a)
+
+    def test_import_from(self):
+        for old, new in self.modules.items():
+            b = "from %s import foo" % old
+            a = "from %s import foo" % new
+            self.check(b, a)
+
+            b = "from %s import foo, bar" % old
+            a = "from %s import foo, bar" % new
+            self.check(b, a)
+
+            b = "from %s import (yes, no)" % old
+            a = "from %s import (yes, no)" % new
+            self.check(b, a)
+
+    def test_import_module_as(self):
+        for old, new in self.modules.items():
+            b = "import %s as foo_bar" % old
+            a = "import %s as foo_bar" % new
+            self.check(b, a)
+
+            b = "import %s as foo_bar" % old
+            a = "import %s as foo_bar" % new
+            self.check(b, a)
+
+    def test_import_from_as(self):
+        for old, new in self.modules.items():
+            b = "from %s import foo as bar" % old
+            a = "from %s import foo as bar" % new
+            self.check(b, a)
+
+    def test_star(self):
+        for old, new in self.modules.items():
+            b = "from %s import *" % old
+            a = "from %s import *" % new
+            self.check(b, a)
+
+    def test_import_module_usage(self):
+        for old, new in self.modules.items():
+            b = """
+                import %s
+                foo(%s.bar)
+                """ % (old, old)
+            a = """
+                import %s
+                foo(%s.bar)
+                """ % (new, new)
+            self.check(b, a)
+
+            b = """
+                from %s import x
+                %s = 23
+                """ % (old, old)
+            a = """
+                from %s import x
+                %s = 23
+                """ % (new, old)
+            self.check(b, a)
+
+            s = """
+                def f():
+                    %s.method()
+                """ % (old,)
+            self.unchanged(s)
+
+            # test nested usage
+            b = """
+                import %s
+                %s.bar(%s.foo)
+                """ % (old, old, old)
+            a = """
+                import %s
+                %s.bar(%s.foo)
+                """ % (new, new, new)
+            self.check(b, a)
+
+            b = """
+                import %s
+                x.%s
+                """ % (old, old)
+            a = """
+                import %s
+                x.%s
+                """ % (new, old)
+            self.check(b, a)
+
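+# Concrete classes combine the mixin above with FixerTestCase and a `modules`
+# mapping of old names to replacements, e.g. {"urlparse": "urllib.parse"}
+# (illustrative entry from the fix_imports MAPPING).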
+
+class Test_imports(FixerTestCase, ImportsFixerTests):
+    fixer = "imports"
+    from refactor.fixes.from2.fix_imports import MAPPING as modules
+
+    def test_multiple_imports(self):
+        b = """import urlparse, cStringIO"""
+        a = """import urllib.parse, io"""
+        self.check(b, a)
+
+    def test_multiple_imports_as(self):
+        b = """
+            import copy_reg as bar, HTMLParser as foo, urlparse
+            s = urlparse.spam(bar.foo())
+            """
+        a = """
+            import copyreg as bar, html.parser as foo, urllib.parse
+            s = urllib.parse.spam(bar.foo())
+            """
+        self.check(b, a)
+
+
+class Test_imports2(FixerTestCase, ImportsFixerTests):
+    fixer = "imports2"
+    from refactor.fixes.from2.fix_imports2 import MAPPING as modules
+
+
+class Test_imports_fixer_order(FixerTestCase, ImportsFixerTests):
+
+    def setUp(self):
+        super(Test_imports_fixer_order, self).setUp(['imports', 'imports2'])
+        from refactor.fixes.from2.fix_imports2 import MAPPING as mapping2
+        self.modules = mapping2.copy()
+        from refactor.fixes.from2.fix_imports import MAPPING as mapping1
+        for key in ('dbhash', 'dumbdbm', 'dbm', 'gdbm'):
+            self.modules[key] = mapping1[key]
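+        # fix_imports2 only maps anydbm and whichdb (both to 'dbm'); the
+        # dbm-family keys above come from fix_imports, so the shared tests
+        # exercise both fixers' replacements in a single combined run.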
+
+
+class Test_urllib(FixerTestCase):
+    fixer = "urllib"
+    from refactor.fixes.from2.fix_urllib import MAPPING as modules
+
+    def test_import_module(self):
+        for old, changes in self.modules.items():
+            b = "import %s" % old
+            a = "import %s" % ", ".join(map(itemgetter(0), changes))
+            self.check(b, a)
+
+    def test_import_from(self):
+        for old, changes in self.modules.items():
+            all_members = []
+            for new, members in changes:
+                for member in members:
+                    all_members.append(member)
+                    b = "from %s import %s" % (old, member)
+                    a = "from %s import %s" % (new, member)
+                    self.check(b, a)
+
+                    s = "from foo import %s" % member
+                    self.unchanged(s)
+
+                b = "from %s import %s" % (old, ", ".join(members))
+                a = "from %s import %s" % (new, ", ".join(members))
+                self.check(b, a)
+
+                s = "from foo import %s" % ", ".join(members)
+                self.unchanged(s)
+
+            # test the breaking of a module into multiple replacements
+            b = "from %s import %s" % (old, ", ".join(all_members))
+            a = "\n".join(["from %s import %s" % (new, ", ".join(members))
+                            for (new, members) in changes])
+            self.check(b, a)
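+            # Illustrative example, given fix_urllib's MAPPING:
+            #   "from urllib import urlopen, urlencode" should become
+            #   "from urllib.request import urlopen" followed by
+            #   "from urllib.parse import urlencode".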
+
+    def test_import_module_as(self):
+        for old in self.modules:
+            s = "import %s as foo" % old
+            self.warns_unchanged(s, "This module is now multiple modules")
+
+    def test_import_from_as(self):
+        for old, changes in self.modules.items():
+            for new, members in changes:
+                for member in members:
+                    b = "from %s import %s as foo_bar" % (old, member)
+                    a = "from %s import %s as foo_bar" % (new, member)
+                    self.check(b, a)
+
+    def test_star(self):
+        for old in self.modules:
+            s = "from %s import *" % old
+            self.warns_unchanged(s, "Cannot handle star imports")
+
+    def test_import_module_usage(self):
+        for old, changes in self.modules.items():
+            for new, members in changes:
+                for member in members:
+                    b = """
+                        import %s
+                        foo(%s.%s)
+                        """ % (old, old, member)
+                    a = """
+                        import %s
+                        foo(%s.%s)
+                        """ % (", ".join([n for (n, mems)
+                                           in self.modules[old]]),
+                                         new, member)
+                    self.check(b, a)
+
+
+class Test_input(FixerTestCase):
+    fixer = "input"
+
+    def test_prefix_preservation(self):
+        b = """x =   input(   )"""
+        a = """x =   eval(input(   ))"""
+        self.check(b, a)
+
+        b = """x = input(   ''   )"""
+        a = """x = eval(input(   ''   ))"""
+        self.check(b, a)
+
+    def test_trailing_comment(self):
+        b = """x = input()  #  foo"""
+        a = """x = eval(input())  #  foo"""
+        self.check(b, a)
+
+    def test_idempotency(self):
+        s = """x = eval(input())"""
+        self.unchanged(s)
+
+        s = """x = eval(input(''))"""
+        self.unchanged(s)
+
+        s = """x = eval(input(foo(5) + 9))"""
+        self.unchanged(s)
+
+    def test_1(self):
+        b = """x = input()"""
+        a = """x = eval(input())"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """x = input('')"""
+        a = """x = eval(input(''))"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """x = input('prompt')"""
+        a = """x = eval(input('prompt'))"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """x = input(foo(5) + 9)"""
+        a = """x = eval(input(foo(5) + 9))"""
+        self.check(b, a)
+
+class Test_tuple_params(FixerTestCase):
+    fixer = "tuple_params"
+
+    def test_unchanged_1(self):
+        s = """def foo(): pass"""
+        self.unchanged(s)
+
+    def test_unchanged_2(self):
+        s = """def foo(a, b, c): pass"""
+        self.unchanged(s)
+
+    def test_unchanged_3(self):
+        s = """def foo(a=3, b=4, c=5): pass"""
+        self.unchanged(s)
+
+    def test_1(self):
+        b = """
+            def foo(((a, b), c)):
+                x = 5"""
+
+        a = """
+            def foo(xxx_todo_changeme):
+                ((a, b), c) = xxx_todo_changeme
+                x = 5"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """
+            def foo(((a, b), c), d):
+                x = 5"""
+
+        a = """
+            def foo(xxx_todo_changeme, d):
+                ((a, b), c) = xxx_todo_changeme
+                x = 5"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """
+            def foo(((a, b), c), d) -> e:
+                x = 5"""
+
+        a = """
+            def foo(xxx_todo_changeme, d) -> e:
+                ((a, b), c) = xxx_todo_changeme
+                x = 5"""
+        self.check(b, a)
+
+    def test_semicolon(self):
+        b = """
+            def foo(((a, b), c)): x = 5; y = 7"""
+
+        a = """
+            def foo(xxx_todo_changeme): ((a, b), c) = xxx_todo_changeme; x = 5; y = 7"""
+        self.check(b, a)
+
+    def test_keywords(self):
+        b = """
+            def foo(((a, b), c), d, e=5) -> z:
+                x = 5"""
+
+        a = """
+            def foo(xxx_todo_changeme, d, e=5) -> z:
+                ((a, b), c) = xxx_todo_changeme
+                x = 5"""
+        self.check(b, a)
+
+    def test_varargs(self):
+        b = """
+            def foo(((a, b), c), d, *vargs, **kwargs) -> z:
+                x = 5"""
+
+        a = """
+            def foo(xxx_todo_changeme, d, *vargs, **kwargs) -> z:
+                ((a, b), c) = xxx_todo_changeme
+                x = 5"""
+        self.check(b, a)
+
+    def test_multi_1(self):
+        b = """
+            def foo(((a, b), c), (d, e, f)) -> z:
+                x = 5"""
+
+        a = """
+            def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
+                ((a, b), c) = xxx_todo_changeme
+                (d, e, f) = xxx_todo_changeme1
+                x = 5"""
+        self.check(b, a)
+
+    def test_multi_2(self):
+        b = """
+            def foo(x, ((a, b), c), d, (e, f, g), y) -> z:
+                x = 5"""
+
+        a = """
+            def foo(x, xxx_todo_changeme, d, xxx_todo_changeme1, y) -> z:
+                ((a, b), c) = xxx_todo_changeme
+                (e, f, g) = xxx_todo_changeme1
+                x = 5"""
+        self.check(b, a)
+
+    def test_docstring(self):
+        b = """
+            def foo(((a, b), c), (d, e, f)) -> z:
+                "foo foo foo foo"
+                x = 5"""
+
+        a = """
+            def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
+                "foo foo foo foo"
+                ((a, b), c) = xxx_todo_changeme
+                (d, e, f) = xxx_todo_changeme1
+                x = 5"""
+        self.check(b, a)
+
+    def test_lambda_no_change(self):
+        s = """lambda x: x + 5"""
+        self.unchanged(s)
+
+    def test_lambda_parens_single_arg(self):
+        b = """lambda (x): x + 5"""
+        a = """lambda x: x + 5"""
+        self.check(b, a)
+
+        b = """lambda(x): x + 5"""
+        a = """lambda x: x + 5"""
+        self.check(b, a)
+
+        b = """lambda ((((x)))): x + 5"""
+        a = """lambda x: x + 5"""
+        self.check(b, a)
+
+        b = """lambda((((x)))): x + 5"""
+        a = """lambda x: x + 5"""
+        self.check(b, a)
+
+    def test_lambda_simple(self):
+        b = """lambda (x, y): x + f(y)"""
+        a = """lambda x_y: x_y[0] + f(x_y[1])"""
+        self.check(b, a)
+
+        b = """lambda(x, y): x + f(y)"""
+        a = """lambda x_y: x_y[0] + f(x_y[1])"""
+        self.check(b, a)
+
+        b = """lambda (((x, y))): x + f(y)"""
+        a = """lambda x_y: x_y[0] + f(x_y[1])"""
+        self.check(b, a)
+
+        b = """lambda(((x, y))): x + f(y)"""
+        a = """lambda x_y: x_y[0] + f(x_y[1])"""
+        self.check(b, a)
+
+    def test_lambda_one_tuple(self):
+        b = """lambda (x,): x + f(x)"""
+        a = """lambda x1: x1[0] + f(x1[0])"""
+        self.check(b, a)
+
+        b = """lambda (((x,))): x + f(x)"""
+        a = """lambda x1: x1[0] + f(x1[0])"""
+        self.check(b, a)
+
+    def test_lambda_simple_multi_use(self):
+        b = """lambda (x, y): x + x + f(x) + x"""
+        a = """lambda x_y: x_y[0] + x_y[0] + f(x_y[0]) + x_y[0]"""
+        self.check(b, a)
+
+    def test_lambda_simple_reverse(self):
+        b = """lambda (x, y): y + x"""
+        a = """lambda x_y: x_y[1] + x_y[0]"""
+        self.check(b, a)
+
+    def test_lambda_nested(self):
+        b = """lambda (x, (y, z)): x + y + z"""
+        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
+        self.check(b, a)
+
+        b = """lambda (((x, (y, z)))): x + y + z"""
+        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
+        self.check(b, a)
+
+    def test_lambda_nested_multi_use(self):
+        b = """lambda (x, (y, z)): x + y + f(y)"""
+        a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + f(x_y_z[1][0])"""
+        self.check(b, a)
+
+class Test_methodattrs(FixerTestCase):
+    fixer = "methodattrs"
+
+    attrs = ["func", "self", "class"]
+
+    def test(self):
+        for attr in self.attrs:
+            b = "a.im_%s" % attr
+            if attr == "class":
+                a = "a.__self__.__class__"
+            else:
+                a = "a.__%s__" % attr
+            self.check(b, a)
+
+            b = "self.foo.im_%s.foo_bar" % attr
+            if attr == "class":
+                a = "self.foo.__self__.__class__.foo_bar"
+            else:
+                a = "self.foo.__%s__.foo_bar" % attr
+            self.check(b, a)
+
+    def test_unchanged(self):
+        for attr in self.attrs:
+            s = "foo(im_%s + 5)" % attr
+            self.unchanged(s)
+
+            s = "f(foo.__%s__)" % attr
+            self.unchanged(s)
+
+            s = "f(foo.__%s__.foo)" % attr
+            self.unchanged(s)
+
+class Test_next(FixerTestCase):
+    fixer = "next"
+
+    def test_1(self):
+        b = """it.next()"""
+        a = """next(it)"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """a.b.c.d.next()"""
+        a = """next(a.b.c.d)"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """(a + b).next()"""
+        a = """next((a + b))"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """a().next()"""
+        a = """next(a())"""
+        self.check(b, a)
+
+    def test_5(self):
+        b = """a().next() + b"""
+        a = """next(a()) + b"""
+        self.check(b, a)
+
+    def test_6(self):
+        b = """c(      a().next() + b)"""
+        a = """c(      next(a()) + b)"""
+        self.check(b, a)
+
+    def test_prefix_preservation_1(self):
+        b = """
+            for a in b:
+                foo(a)
+                a.next()
+            """
+        a = """
+            for a in b:
+                foo(a)
+                next(a)
+            """
+        self.check(b, a)
+
+    def test_prefix_preservation_2(self):
+        b = """
+            for a in b:
+                foo(a) # abc
+                # def
+                a.next()
+            """
+        a = """
+            for a in b:
+                foo(a) # abc
+                # def
+                next(a)
+            """
+        self.check(b, a)
+
+    def test_prefix_preservation_3(self):
+        b = """
+            next = 5
+            for a in b:
+                foo(a)
+                a.next()
+            """
+        a = """
+            next = 5
+            for a in b:
+                foo(a)
+                a.__next__()
+            """
+        self.check(b, a, ignore_warnings=True)
+
+    def test_prefix_preservation_4(self):
+        b = """
+            next = 5
+            for a in b:
+                foo(a) # abc
+                # def
+                a.next()
+            """
+        a = """
+            next = 5
+            for a in b:
+                foo(a) # abc
+                # def
+                a.__next__()
+            """
+        self.check(b, a, ignore_warnings=True)
+
+    def test_prefix_preservation_5(self):
+        b = """
+            next = 5
+            for a in b:
+                foo(foo(a), # abc
+                    a.next())
+            """
+        a = """
+            next = 5
+            for a in b:
+                foo(foo(a), # abc
+                    a.__next__())
+            """
+        self.check(b, a, ignore_warnings=True)
+
+    def test_prefix_preservation_6(self):
+        b = """
+            for a in b:
+                foo(foo(a), # abc
+                    a.next())
+            """
+        a = """
+            for a in b:
+                foo(foo(a), # abc
+                    next(a))
+            """
+        self.check(b, a)
+
+    def test_method_1(self):
+        b = """
+            class A:
+                def next(self):
+                    pass
+            """
+        a = """
+            class A:
+                def __next__(self):
+                    pass
+            """
+        self.check(b, a)
+
+    def test_method_2(self):
+        b = """
+            class A(object):
+                def next(self):
+                    pass
+            """
+        a = """
+            class A(object):
+                def __next__(self):
+                    pass
+            """
+        self.check(b, a)
+
+    def test_method_3(self):
+        b = """
+            class A:
+                def next(x):
+                    pass
+            """
+        a = """
+            class A:
+                def __next__(x):
+                    pass
+            """
+        self.check(b, a)
+
+    def test_method_4(self):
+        b = """
+            class A:
+                def __init__(self, foo):
+                    self.foo = foo
+
+                def next(self):
+                    pass
+
+                def __iter__(self):
+                    return self
+            """
+        a = """
+            class A:
+                def __init__(self, foo):
+                    self.foo = foo
+
+                def __next__(self):
+                    pass
+
+                def __iter__(self):
+                    return self
+            """
+        self.check(b, a)
+
+    def test_method_unchanged(self):
+        s = """
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.unchanged(s)
+
+    def test_shadowing_assign_simple(self):
+        s = """
+            next = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_assign_tuple_1(self):
+        s = """
+            (next, a) = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_assign_tuple_2(self):
+        s = """
+            (a, (b, (next, c)), a) = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_assign_list_1(self):
+        s = """
+            [next, a] = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_assign_list_2(self):
+        s = """
+            [a, [b, [next, c]], a] = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_builtin_assign(self):
+        s = """
+            def foo():
+                __builtin__.next = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_builtin_assign_in_tuple(self):
+        s = """
+            def foo():
+                (a, __builtin__.next) = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_builtin_assign_in_list(self):
+        s = """
+            def foo():
+                [a, __builtin__.next] = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_assign_to_next(self):
+        s = """
+            def foo():
+                A.next = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.unchanged(s)
+
+    def test_assign_to_next_in_tuple(self):
+        s = """
+            def foo():
+                (a, A.next) = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.unchanged(s)
+
+    def test_assign_to_next_in_list(self):
+        s = """
+            def foo():
+                [a, A.next] = foo
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.unchanged(s)
+
+    def test_shadowing_import_1(self):
+        s = """
+            import foo.bar as next
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_import_2(self):
+        s = """
+            import bar, bar.foo as next
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_import_3(self):
+        s = """
+            import bar, bar.foo as next, baz
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_import_from_1(self):
+        s = """
+            from x import next
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_import_from_2(self):
+        s = """
+            from x.a import next
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_import_from_3(self):
+        s = """
+            from x import a, next, b
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_import_from_4(self):
+        s = """
+            from x.a import a, next, b
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_funcdef_1(self):
+        s = """
+            def next(a):
+                pass
+
+            class A:
+                def next(self, a, b):
+                    pass
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_funcdef_2(self):
+        b = """
+            def next(a):
+                pass
+
+            class A:
+                def next(self):
+                    pass
+
+            it.next()
+            """
+        a = """
+            def next(a):
+                pass
+
+            class A:
+                def __next__(self):
+                    pass
+
+            it.__next__()
+            """
+        self.warns(b, a, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_global_1(self):
+        s = """
+            def f():
+                global next
+                next = 5
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_global_2(self):
+        s = """
+            def f():
+                global a, next, b
+                next = 5
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_for_simple(self):
+        s = """
+            for next in it():
+                pass
+
+            b = 5
+            c = 6
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_for_tuple_1(self):
+        s = """
+            for next, b in it():
+                pass
+
+            b = 5
+            c = 6
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_shadowing_for_tuple_2(self):
+        s = """
+            for a, (next, c), b in it():
+                pass
+
+            b = 5
+            c = 6
+            """
+        self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
+
+    def test_noncall_access_1(self):
+        b = """gnext = g.next"""
+        a = """gnext = g.__next__"""
+        self.check(b, a)
+
+    def test_noncall_access_2(self):
+        b = """f(g.next + 5)"""
+        a = """f(g.__next__ + 5)"""
+        self.check(b, a)
+
+    def test_noncall_access_3(self):
+        b = """f(g().next + 5)"""
+        a = """f(g().__next__ + 5)"""
+        self.check(b, a)
+
+class Test_nonzero(FixerTestCase):
+    fixer = "nonzero"
+
+    def test_1(self):
+        b = """
+            class A:
+                def __nonzero__(self):
+                    pass
+            """
+        a = """
+            class A:
+                def __bool__(self):
+                    pass
+            """
+        self.check(b, a)
+
+    def test_2(self):
+        b = """
+            class A(object):
+                def __nonzero__(self):
+                    pass
+            """
+        a = """
+            class A(object):
+                def __bool__(self):
+                    pass
+            """
+        self.check(b, a)
+
+    def test_unchanged_1(self):
+        s = """
+            class A(object):
+                def __bool__(self):
+                    pass
+            """
+        self.unchanged(s)
+
+    def test_unchanged_2(self):
+        s = """
+            class A(object):
+                def __nonzero__(self, a):
+                    pass
+            """
+        self.unchanged(s)
+
+    def test_unchanged_func(self):
+        s = """
+            def __nonzero__(self):
+                pass
+            """
+        self.unchanged(s)
+
+class Test_numliterals(FixerTestCase):
+    fixer = "numliterals"
+
+    def test_octal_1(self):
+        b = """0755"""
+        a = """0o755"""
+        self.check(b, a)
+
+    def test_long_int_1(self):
+        b = """a = 12L"""
+        a = """a = 12"""
+        self.check(b, a)
+
+    def test_long_int_2(self):
+        b = """a = 12l"""
+        a = """a = 12"""
+        self.check(b, a)
+
+    def test_long_hex(self):
+        b = """b = 0x12l"""
+        a = """b = 0x12"""
+        self.check(b, a)
+
+    def test_comments_and_spacing(self):
+        b = """b =   0x12L"""
+        a = """b =   0x12"""
+        self.check(b, a)
+
+        b = """b = 0755 # spam"""
+        a = """b = 0o755 # spam"""
+        self.check(b, a)
+
+    def test_unchanged_int(self):
+        s = """5"""
+        self.unchanged(s)
+
+    def test_unchanged_float(self):
+        s = """5.0"""
+        self.unchanged(s)
+
+    def test_unchanged_octal(self):
+        s = """0o755"""
+        self.unchanged(s)
+
+    def test_unchanged_hex(self):
+        s = """0xABC"""
+        self.unchanged(s)
+
+    def test_unchanged_exp(self):
+        s = """5.0e10"""
+        self.unchanged(s)
+
+    def test_unchanged_complex_int(self):
+        s = """5 + 4j"""
+        self.unchanged(s)
+
+    def test_unchanged_complex_float(self):
+        s = """5.4 + 4.9j"""
+        self.unchanged(s)
+
+    def test_unchanged_complex_bare(self):
+        s = """4j"""
+        self.unchanged(s)
+        s = """4.4j"""
+        self.unchanged(s)
+
+class Test_renames(FixerTestCase):
+    fixer = "renames"
+
+    modules = {"sys":  ("maxint", "maxsize"),
+              }
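+    # sys.maxint is gone in Python 3; sys.maxsize is its closest
+    # replacement and the only rename exercised here.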
+
+    def test_import_from(self):
+        for mod, (old, new) in self.modules.items():
+            b = "from %s import %s" % (mod, old)
+            a = "from %s import %s" % (mod, new)
+            self.check(b, a)
+
+            s = "from foo import %s" % old
+            self.unchanged(s)
+
+    def test_import_from_as(self):
+        for mod, (old, new) in self.modules.items():
+            b = "from %s import %s as foo_bar" % (mod, old)
+            a = "from %s import %s as foo_bar" % (mod, new)
+            self.check(b, a)
+
+    def test_import_module_usage(self):
+        for mod, (old, new) in self.modules.items():
+            b = """
+                import %s
+                foo(%s, %s.%s)
+                """ % (mod, mod, mod, old)
+            a = """
+                import %s
+                foo(%s, %s.%s)
+                """ % (mod, mod, mod, new)
+            self.check(b, a)
+
+    def XXX_test_from_import_usage(self):
+        # not implemented yet
+        for mod, (old, new) in self.modules.items():
+            b = """
+                from %s import %s
+                foo(%s, %s)
+                """ % (mod, old, mod, old)
+            a = """
+                from %s import %s
+                foo(%s, %s)
+                """ % (mod, new, mod, new)
+            self.check(b, a)
+
+class Test_unicode(FixerTestCase):
+    fixer = "unicode"
+
+    def test_unicode_call(self):
+        b = """unicode(x, y, z)"""
+        a = """str(x, y, z)"""
+        self.check(b, a)
+
+    def test_unicode_literal_1(self):
+        b = '''u"x"'''
+        a = '''"x"'''
+        self.check(b, a)
+
+    def test_unicode_literal_2(self):
+        b = """ur'x'"""
+        a = """r'x'"""
+        self.check(b, a)
+
+    def test_unicode_literal_3(self):
+        b = """UR'''x'''"""
+        a = """R'''x'''"""
+        self.check(b, a)
+
+class Test_callable(FixerTestCase):
+    fixer = "callable"
+
+    def test_prefix_preservation(self):
+        b = """callable(    x)"""
+        a = """hasattr(    x, '__call__')"""
+        self.check(b, a)
+
+        b = """if     callable(x): pass"""
+        a = """if     hasattr(x, '__call__'): pass"""
+        self.check(b, a)
+
+    def test_callable_call(self):
+        b = """callable(x)"""
+        a = """hasattr(x, '__call__')"""
+        self.check(b, a)
+
+    def test_callable_should_not_change(self):
+        a = """callable(*x)"""
+        self.unchanged(a)
+
+        a = """callable(x, y)"""
+        self.unchanged(a)
+
+        a = """callable(x, kw=y)"""
+        self.unchanged(a)
+
+        a = """callable()"""
+        self.unchanged(a)
+
+class Test_filter(FixerTestCase):
+    fixer = "filter"
+
+    def test_prefix_preservation(self):
+        b = """x =   filter(    foo,     'abc'   )"""
+        a = """x =   list(filter(    foo,     'abc'   ))"""
+        self.check(b, a)
+
+        b = """x =   filter(  None , 'abc'  )"""
+        a = """x =   [_f for _f in 'abc' if _f]"""
+        self.check(b, a)
+
+    def test_filter_basic(self):
+        b = """x = filter(None, 'abc')"""
+        a = """x = [_f for _f in 'abc' if _f]"""
+        self.check(b, a)
+
+        b = """x = len(filter(f, 'abc'))"""
+        a = """x = len(list(filter(f, 'abc')))"""
+        self.check(b, a)
+
+        b = """x = filter(lambda x: x%2 == 0, range(10))"""
+        a = """x = [x for x in range(10) if x%2 == 0]"""
+        self.check(b, a)
+
+        # Note the parens around x
+        b = """x = filter(lambda (x): x%2 == 0, range(10))"""
+        a = """x = [x for x in range(10) if x%2 == 0]"""
+        self.check(b, a)
+
+        # XXX This (rare) case is not supported
+##         b = """x = filter(f, 'abc')[0]"""
+##         a = """x = list(filter(f, 'abc'))[0]"""
+##         self.check(b, a)
+
+    def test_filter_nochange(self):
+        a = """b.join(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """(a + foo(5)).join(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """iter(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """list(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """list(filter(f, 'abc'))[0]"""
+        self.unchanged(a)
+        a = """set(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """set(filter(f, 'abc')).pop()"""
+        self.unchanged(a)
+        a = """tuple(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """any(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """all(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """sum(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """sorted(filter(f, 'abc'))"""
+        self.unchanged(a)
+        a = """sorted(filter(f, 'abc'), key=blah)"""
+        self.unchanged(a)
+        a = """sorted(filter(f, 'abc'), key=blah)[0]"""
+        self.unchanged(a)
+        a = """for i in filter(f, 'abc'): pass"""
+        self.unchanged(a)
+        a = """[x for x in filter(f, 'abc')]"""
+        self.unchanged(a)
+        a = """(x for x in filter(f, 'abc'))"""
+        self.unchanged(a)
+
+    def test_future_builtins(self):
+        a = "from future_builtins import spam, filter; filter(f, 'ham')"
+        self.unchanged(a)
+
+        b = """from future_builtins import spam; x = filter(f, 'abc')"""
+        a = """from future_builtins import spam; x = list(filter(f, 'abc'))"""
+        self.check(b, a)
+
+        a = "from future_builtins import *; filter(f, 'ham')"
+        self.unchanged(a)
+
+class Test_map(FixerTestCase):
+    fixer = "map"
+
+    def check(self, b, a):
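+        # First assert that importing map from future_builtins suppresses
+        # the fix entirely, then run the normal before/after check, so every
+        # test in this class exercises both paths.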
+        self.unchanged("from future_builtins import map; " + b, a)
+        super(Test_map, self).check(b, a)
+
+    def test_prefix_preservation(self):
+        b = """x =    map(   f,    'abc'   )"""
+        a = """x =    list(map(   f,    'abc'   ))"""
+        self.check(b, a)
+
+    def test_trailing_comment(self):
+        b = """x = map(f, 'abc')   #   foo"""
+        a = """x = list(map(f, 'abc'))   #   foo"""
+        self.check(b, a)
+
+    def test_map_basic(self):
+        b = """x = map(f, 'abc')"""
+        a = """x = list(map(f, 'abc'))"""
+        self.check(b, a)
+
+        b = """x = len(map(f, 'abc', 'def'))"""
+        a = """x = len(list(map(f, 'abc', 'def')))"""
+        self.check(b, a)
+
+        b = """x = map(None, 'abc')"""
+        a = """x = list('abc')"""
+        self.check(b, a)
+
+        b = """x = map(None, 'abc', 'def')"""
+        a = """x = list(map(None, 'abc', 'def'))"""
+        self.check(b, a)
+
+        b = """x = map(lambda x: x+1, range(4))"""
+        a = """x = [x+1 for x in range(4)]"""
+        self.check(b, a)
+
+        # Note the parens around x
+        b = """x = map(lambda (x): x+1, range(4))"""
+        a = """x = [x+1 for x in range(4)]"""
+        self.check(b, a)
+
+        b = """
+            foo()
+            # foo
+            map(f, x)
+            """
+        a = """
+            foo()
+            # foo
+            list(map(f, x))
+            """
+        self.warns(b, a, "You should use a for loop here")
+
+        # XXX This (rare) case is not supported
+##         b = """x = map(f, 'abc')[0]"""
+##         a = """x = list(map(f, 'abc'))[0]"""
+##         self.check(b, a)
+
+    def test_map_nochange(self):
+        a = """b.join(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """(a + foo(5)).join(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """iter(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """list(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """list(map(f, 'abc'))[0]"""
+        self.unchanged(a)
+        a = """set(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """set(map(f, 'abc')).pop()"""
+        self.unchanged(a)
+        a = """tuple(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """any(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """all(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """sum(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """sorted(map(f, 'abc'))"""
+        self.unchanged(a)
+        a = """sorted(map(f, 'abc'), key=blah)"""
+        self.unchanged(a)
+        a = """sorted(map(f, 'abc'), key=blah)[0]"""
+        self.unchanged(a)
+        a = """for i in map(f, 'abc'): pass"""
+        self.unchanged(a)
+        a = """[x for x in map(f, 'abc')]"""
+        self.unchanged(a)
+        a = """(x for x in map(f, 'abc'))"""
+        self.unchanged(a)
+
+    def test_future_builtins(self):
+        a = "from future_builtins import spam, map, eggs; map(f, 'ham')"
+        self.unchanged(a)
+
+        b = """from future_builtins import spam, eggs; x = map(f, 'abc')"""
+        a = """from future_builtins import spam, eggs; x = list(map(f, 'abc'))"""
+        self.check(b, a)
+
+        a = "from future_builtins import *; map(f, 'ham')"
+        self.unchanged(a)
+
+class Test_zip(FixerTestCase):
+    fixer = "zip"
+
+    def check(self, b, a):
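+        # Same pattern as Test_map.check: the future_builtins import must
+        # turn the fixer into a no-op before the regular check runs.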
+        self.unchanged("from future_builtins import zip; " + b, a)
+        super(Test_zip, self).check(b, a)
+
+    def test_zip_basic(self):
+        b = """x = zip(a, b, c)"""
+        a = """x = list(zip(a, b, c))"""
+        self.check(b, a)
+
+        b = """x = len(zip(a, b))"""
+        a = """x = len(list(zip(a, b)))"""
+        self.check(b, a)
+
+    def test_zip_nochange(self):
+        a = """b.join(zip(a, b))"""
+        self.unchanged(a)
+        a = """(a + foo(5)).join(zip(a, b))"""
+        self.unchanged(a)
+        a = """iter(zip(a, b))"""
+        self.unchanged(a)
+        a = """list(zip(a, b))"""
+        self.unchanged(a)
+        a = """list(zip(a, b))[0]"""
+        self.unchanged(a)
+        a = """set(zip(a, b))"""
+        self.unchanged(a)
+        a = """set(zip(a, b)).pop()"""
+        self.unchanged(a)
+        a = """tuple(zip(a, b))"""
+        self.unchanged(a)
+        a = """any(zip(a, b))"""
+        self.unchanged(a)
+        a = """all(zip(a, b))"""
+        self.unchanged(a)
+        a = """sum(zip(a, b))"""
+        self.unchanged(a)
+        a = """sorted(zip(a, b))"""
+        self.unchanged(a)
+        a = """sorted(zip(a, b), key=blah)"""
+        self.unchanged(a)
+        a = """sorted(zip(a, b), key=blah)[0]"""
+        self.unchanged(a)
+        a = """for i in zip(a, b): pass"""
+        self.unchanged(a)
+        a = """[x for x in zip(a, b)]"""
+        self.unchanged(a)
+        a = """(x for x in zip(a, b))"""
+        self.unchanged(a)
+
+    def test_future_builtins(self):
+        a = "from future_builtins import spam, zip, eggs; zip(a, b)"
+        self.unchanged(a)
+
+        b = """from future_builtins import spam, eggs; x = zip(a, b)"""
+        a = """from future_builtins import spam, eggs; x = list(zip(a, b))"""
+        self.check(b, a)
+
+        a = "from future_builtins import *; zip(a, b)"
+        self.unchanged(a)
+
+class Test_standarderror(FixerTestCase):
+    fixer = "standarderror"
+
+    def test(self):
+        b = """x =    StandardError()"""
+        a = """x =    Exception()"""
+        self.check(b, a)
+
+        b = """x = StandardError(a, b, c)"""
+        a = """x = Exception(a, b, c)"""
+        self.check(b, a)
+
+        b = """f(2 + StandardError(a, b, c))"""
+        a = """f(2 + Exception(a, b, c))"""
+        self.check(b, a)
+
+class Test_types(FixerTestCase):
+    fixer = "types"
+
+    def test_basic_types_convert(self):
+        b = """types.StringType"""
+        a = """bytes"""
+        self.check(b, a)
+
+        b = """types.DictType"""
+        a = """dict"""
+        self.check(b, a)
+
+        b = """types . IntType"""
+        a = """int"""
+        self.check(b, a)
+
+        b = """types.ListType"""
+        a = """list"""
+        self.check(b, a)
+
+        b = """types.LongType"""
+        a = """int"""
+        self.check(b, a)
+
+        b = """types.NoneType"""
+        a = """type(None)"""
+        self.check(b, a)
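+        # types.StringType maps to bytes because a Python 2 str is a byte
+        # string, and NoneType has no builtin name, hence type(None).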
+
+class Test_idioms(FixerTestCase):
+    fixer = "idioms"
+
+    def test_while(self):
+        b = """while 1: foo()"""
+        a = """while True: foo()"""
+        self.check(b, a)
+
+        b = """while   1: foo()"""
+        a = """while   True: foo()"""
+        self.check(b, a)
+
+        b = """
+            while 1:
+                foo()
+            """
+        a = """
+            while True:
+                foo()
+            """
+        self.check(b, a)
+
+    def test_while_unchanged(self):
+        s = """while 11: foo()"""
+        self.unchanged(s)
+
+        s = """while 0: foo()"""
+        self.unchanged(s)
+
+        s = """while foo(): foo()"""
+        self.unchanged(s)
+
+        s = """while []: foo()"""
+        self.unchanged(s)
+
+    def test_eq_simple(self):
+        b = """type(x) == T"""
+        a = """isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   type(x) == T: pass"""
+        a = """if   isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_eq_reverse(self):
+        b = """T == type(x)"""
+        a = """isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   T == type(x): pass"""
+        a = """if   isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_eq_expression(self):
+        b = """type(x+y) == d.get('T')"""
+        a = """isinstance(x+y, d.get('T'))"""
+        self.check(b, a)
+
+        b = """type(   x  +  y) == d.get('T')"""
+        a = """isinstance(x  +  y, d.get('T'))"""
+        self.check(b, a)
+
+    def test_is_simple(self):
+        b = """type(x) is T"""
+        a = """isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   type(x) is T: pass"""
+        a = """if   isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_is_reverse(self):
+        b = """T is type(x)"""
+        a = """isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   T is type(x): pass"""
+        a = """if   isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_is_expression(self):
+        b = """type(x+y) is d.get('T')"""
+        a = """isinstance(x+y, d.get('T'))"""
+        self.check(b, a)
+
+        b = """type(   x  +  y) is d.get('T')"""
+        a = """isinstance(x  +  y, d.get('T'))"""
+        self.check(b, a)
+
+    def test_is_not_simple(self):
+        b = """type(x) is not T"""
+        a = """not isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   type(x) is not T: pass"""
+        a = """if   not isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_is_not_reverse(self):
+        b = """T is not type(x)"""
+        a = """not isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   T is not type(x): pass"""
+        a = """if   not isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_is_not_expression(self):
+        b = """type(x+y) is not d.get('T')"""
+        a = """not isinstance(x+y, d.get('T'))"""
+        self.check(b, a)
+
+        b = """type(   x  +  y) is not d.get('T')"""
+        a = """not isinstance(x  +  y, d.get('T'))"""
+        self.check(b, a)
+
+    def test_ne_simple(self):
+        b = """type(x) != T"""
+        a = """not isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   type(x) != T: pass"""
+        a = """if   not isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_ne_reverse(self):
+        b = """T != type(x)"""
+        a = """not isinstance(x, T)"""
+        self.check(b, a)
+
+        b = """if   T != type(x): pass"""
+        a = """if   not isinstance(x, T): pass"""
+        self.check(b, a)
+
+    def test_ne_expression(self):
+        b = """type(x+y) != d.get('T')"""
+        a = """not isinstance(x+y, d.get('T'))"""
+        self.check(b, a)
+
+        b = """type(   x  +  y) != d.get('T')"""
+        a = """not isinstance(x  +  y, d.get('T'))"""
+        self.check(b, a)
+
+    def test_type_unchanged(self):
+        a = """type(x).__name__"""
+        self.unchanged(a)
+
+    def test_sort_list_call(self):
+        b = """
+            v = list(t)
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(t)
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            v = list(foo(b) + d)
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(foo(b) + d)
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            while x:
+                v = list(t)
+                v.sort()
+                foo(v)
+            """
+        a = """
+            while x:
+                v = sorted(t)
+                foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            v = list(t)
+            # foo
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(t)
+            # foo
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = r"""
+            v = list(   t)
+            v.sort()
+            foo(v)
+            """
+        a = r"""
+            v = sorted(   t)
+            foo(v)
+            """
+        self.check(b, a)
+
+    def test_sort_simple_expr(self):
+        b = """
+            v = t
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(t)
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            v = foo(b)
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(foo(b))
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            v = b.keys()
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(b.keys())
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            v = foo(b) + d
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(foo(b) + d)
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            while x:
+                v = t
+                v.sort()
+                foo(v)
+            """
+        a = """
+            while x:
+                v = sorted(t)
+                foo(v)
+            """
+        self.check(b, a)
+
+        b = """
+            v = t
+            # foo
+            v.sort()
+            foo(v)
+            """
+        a = """
+            v = sorted(t)
+            # foo
+            foo(v)
+            """
+        self.check(b, a)
+
+        b = r"""
+            v =   t
+            v.sort()
+            foo(v)
+            """
+        a = r"""
+            v =   sorted(t)
+            foo(v)
+            """
+        self.check(b, a)
+
+    def test_sort_unchanged(self):
+        s = """
+            v = list(t)
+            w.sort()
+            foo(w)
+            """
+        self.unchanged(s)
+
+        s = """
+            v = list(t)
+            v.sort(u)
+            foo(v)
+            """
+        self.unchanged(s)
+
+class Test_basestring(FixerTestCase):
+    fixer = "basestring"
+
+    def test_basestring(self):
+        b = """isinstance(x, basestring)"""
+        a = """isinstance(x, str)"""
+        self.check(b, a)
+
+class Test_buffer(FixerTestCase):
+    fixer = "buffer"
+
+    def test_buffer(self):
+        b = """x = buffer(y)"""
+        a = """x = memoryview(y)"""
+        self.check(b, a)
+
+class Test_future(FixerTestCase):
+    fixer = "future"
+
+    def test_future(self):
+        b = """from __future__ import braces"""
+        a = """"""
+        self.check(b, a)
+
+        b = """# comment\nfrom __future__ import braces"""
+        a = """# comment\n"""
+        self.check(b, a)
+
+        b = """from __future__ import braces\n# comment"""
+        a = """\n# comment"""
+        self.check(b, a)
+
+    def test_run_order(self):
+        self.assert_runs_after('print')
+
+class Test_itertools(FixerTestCase):
+    fixer = "itertools"
+
+    def checkall(self, before, after):
+        # We need to check each of the three functions both with and without
+        # the 'itertools.' prefix, so these loops keep the individual tests
+        # short.
+        for i in ('itertools.', ''):
+            for f in ('map', 'filter', 'zip'):
+                b = before % (i + 'i' + f)
+                a = after % f
+                self.check(b, a)
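+        # e.g. with before = after = "%s(f, a)", this verifies that both
+        # "itertools.imap(f, a)" and "imap(f, a)" become "map(f, a)", and
+        # likewise for ifilter/filter and izip/zip.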
+
+    def test_0(self):
+        # A simple example -- test_1 covers exactly the same thing,
+        # but it's not quite as clear.
+        b = "itertools.izip(a, b)"
+        a = "zip(a, b)"
+        self.check(b, a)
+
+    def test_1(self):
+        b = """%s(f, a)"""
+        a = """%s(f, a)"""
+        self.checkall(b, a)
+
+    def test_2(self):
+        b = """itertools.ifilterfalse(a, b)"""
+        a = """itertools.filterfalse(a, b)"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """ifilterfalse(a, b)"""
+        a = """filterfalse(a, b)"""
+        self.check(b, a)
+
+    def test_space_1(self):
+        b = """    %s(f, a)"""
+        a = """    %s(f, a)"""
+        self.checkall(b, a)
+
+    def test_space_2(self):
+        b = """    itertools.ifilterfalse(a, b)"""
+        a = """    itertools.filterfalse(a, b)"""
+        self.check(b, a)
+
+    def test_run_order(self):
+        self.assert_runs_after('map', 'zip', 'filter')
+
+class Test_itertools_imports(FixerTestCase):
+    fixer = 'itertools_imports'
+
+    def test_reduced(self):
+        b = "from itertools import imap, izip, foo"
+        a = "from itertools import foo"
+        self.check(b, a)
+
+        b = "from itertools import bar, imap, izip, foo"
+        a = "from itertools import bar, foo"
+        self.check(b, a)
+
+    def test_comments(self):
+        b = "#foo\nfrom itertools import imap, izip"
+        a = "#foo\n"
+        self.check(b, a)
+
+    def test_none(self):
+        b = "from itertools import imap, izip"
+        a = ""
+        self.check(b, a)
+
+        b = "from itertools import izip"
+        a = ""
+        self.check(b, a)
+
+    def test_import_as(self):
+        b = "from itertools import izip, bar as bang, imap"
+        a = "from itertools import bar as bang"
+        self.check(b, a)
+
+        b = "from itertools import izip as _zip, imap, bar"
+        a = "from itertools import bar"
+        self.check(b, a)
+
+        b = "from itertools import imap as _map"
+        a = ""
+        self.check(b, a)
+
+        b = "from itertools import imap as _map, izip as _zip"
+        a = ""
+        self.check(b, a)
+
+        s = "from itertools import bar as bang"
+        self.unchanged(s)
+
+    def test_ifilter(self):
+        b = "from itertools import ifilterfalse"
+        a = "from itertools import filterfalse"
+        self.check(b, a)
+
+        b = "from itertools import imap, ifilterfalse, foo"
+        a = "from itertools import filterfalse, foo"
+        self.check(b, a)
+
+        b = "from itertools import bar, ifilterfalse, foo"
+        a = "from itertools import bar, filterfalse, foo"
+        self.check(b, a)
+
+
+    def test_unchanged(self):
+        s = "from itertools import foo"
+        self.unchanged(s)
+
+class Test_import(FixerTestCase):
+    fixer = "import"
+
+    def setUp(self):
+        super(Test_import, self).setUp()
+        # Need to replace fix_import's exists method
+        # so we can check that it's doing the right thing
+        self.files_checked = []
+        self.present_files = set()
+        self.always_exists = True
+        def fake_exists(name):
+            self.files_checked.append(name)
+            return self.always_exists or (name in self.present_files)
+
+        from refactor.fixes.from2 import fix_import
+        fix_import.exists = fake_exists
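+        # fake_exists records every path fix_import probes (see
+        # test_files_checked) and reports it present according to
+        # always_exists/present_files.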
+
+    def tearDown(self):
+        from refactor.fixes.from2 import fix_import
+        fix_import.exists = os.path.exists
+
+    def check_both(self, b, a):
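+        # Run each case twice: once with every existence probe succeeding
+        # (the relative-import rewrite must fire) and once with every probe
+        # failing (the statement must be left alone).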
+        self.always_exists = True
+        super(Test_import, self).check(b, a)
+        self.always_exists = False
+        super(Test_import, self).unchanged(b)
+
+    def test_files_checked(self):
+        def p(path):
+            # Takes a unix path and returns a path with correct separators
+            return os.path.sep.join(path.split("/"))
+
+        self.always_exists = False
+        self.present_files = set(['__init__.py'])
+        expected_extensions = ('.py', os.path.sep, '.pyc', '.so',
+                               '.sl', '.pyd')
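+        # These mirror the candidate suffixes fix_import probes when deciding
+        # whether a bare import names a sibling module or package
+        # (os.path.sep is the package-directory probe).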
+        names_to_test = (p("/spam/eggs.py"), "ni.py", p("../../shrubbery.py"))
+
+        for name in names_to_test:
+            self.files_checked = []
+            self.filename = name
+            self.unchanged("import jam")
+
+            if os.path.dirname(name):
+                name = os.path.dirname(name) + '/jam'
+            else:
+                name = 'jam'
+            expected_checks = set(name + ext for ext in expected_extensions)
+            expected_checks.add("__init__.py")
+
+            self.assertEqual(set(self.files_checked), expected_checks)
+
+    def test_not_in_package(self):
+        s = "import bar"
+        self.always_exists = False
+        self.present_files = set(["bar.py"])
+        self.unchanged(s)
+
+    def test_in_package(self):
+        b = "import bar"
+        a = "from . import bar"
+        self.always_exists = False
+        self.present_files = set(["__init__.py", "bar.py"])
+        self.check(b, a)
+
+    def test_comments_and_indent(self):
+        b = "import bar # Foo"
+        a = "from . import bar # Foo"
+        self.check(b, a)
+
+    def test_from(self):
+        b = "from foo import bar, baz"
+        a = "from .foo import bar, baz"
+        self.check_both(b, a)
+
+        b = "from foo import bar"
+        a = "from .foo import bar"
+        self.check_both(b, a)
+
+        b = "from foo import (bar, baz)"
+        a = "from .foo import (bar, baz)"
+        self.check_both(b, a)
+
+    def test_dotted_from(self):
+        b = "from green.eggs import ham"
+        a = "from .green.eggs import ham"
+        self.check_both(b, a)
+
+    def test_from_as(self):
+        b = "from green.eggs import ham as spam"
+        a = "from .green.eggs import ham as spam"
+        self.check_both(b, a)
+
+    def test_import(self):
+        b = "import foo"
+        a = "from . import foo"
+        self.check_both(b, a)
+
+        b = "import foo, bar"
+        a = "from . import foo, bar"
+        self.check_both(b, a)
+
+        b = "import foo, bar, x"
+        a = "from . import foo, bar, x"
+        self.check_both(b, a)
+
+        b = "import x, y, z"
+        a = "from . import x, y, z"
+        self.check_both(b, a)
+
+    def test_import_as(self):
+        b = "import foo as x"
+        a = "from . import foo as x"
+        self.check_both(b, a)
+
+        b = "import a as b, b as c, c as d"
+        a = "from . import a as b, b as c, c as d"
+        self.check_both(b, a)
+
+    def test_local_and_absolute(self):
+        self.always_exists = False
+        self.present_files = set(["foo.py", "__init__.py"])
+
+        s = "import foo, bar"
+        self.warns_unchanged(s, "absolute and local imports together")
+
+    def test_dotted_import(self):
+        b = "import foo.bar"
+        a = "from . import foo.bar"
+        self.check_both(b, a)
+
+    def test_dotted_import_as(self):
+        b = "import foo.bar as bang"
+        a = "from . import foo.bar as bang"
+        self.check_both(b, a)
+
+    def test_prefix(self):
+        b = """
+        # prefix
+        import foo.bar
+        """
+        a = """
+        # prefix
+        from . import foo.bar
+        """
+        self.check_both(b, a)
+
+
+class Test_set_literal(FixerTestCase):
+
+    fixer = "set_literal"
+
+    def test_basic(self):
+        b = """set([1, 2, 3])"""
+        a = """{1, 2, 3}"""
+        self.check(b, a)
+
+        b = """set((1, 2, 3))"""
+        a = """{1, 2, 3}"""
+        self.check(b, a)
+
+        b = """set((1,))"""
+        a = """{1}"""
+        self.check(b, a)
+
+        b = """set([1])"""
+        self.check(b, a)
+
+        b = """set((a, b))"""
+        a = """{a, b}"""
+        self.check(b, a)
+
+        b = """set([a, b])"""
+        self.check(b, a)
+
+        b = """set((a*234, f(args=23)))"""
+        a = """{a*234, f(args=23)}"""
+        self.check(b, a)
+
+        b = """set([a*23, f(23)])"""
+        a = """{a*23, f(23)}"""
+        self.check(b, a)
+
+        b = """set([a-234**23])"""
+        a = """{a-234**23}"""
+        self.check(b, a)
+
+    def test_listcomps(self):
+        b = """set([x for x in y])"""
+        a = """{x for x in y}"""
+        self.check(b, a)
+
+        b = """set([x for x in y if x == m])"""
+        a = """{x for x in y if x == m}"""
+        self.check(b, a)
+
+        b = """set([x for x in y for a in b])"""
+        a = """{x for x in y for a in b}"""
+        self.check(b, a)
+
+        b = """set([f(x) - 23 for x in y])"""
+        a = """{f(x) - 23 for x in y}"""
+        self.check(b, a)
+
+    def test_whitespace(self):
+        b = """set( [1, 2])"""
+        a = """{1, 2}"""
+        self.check(b, a)
+
+        b = """set([1 ,  2])"""
+        a = """{1 ,  2}"""
+        self.check(b, a)
+
+        b = """set([ 1 ])"""
+        a = """{ 1 }"""
+        self.check(b, a)
+
+        b = """set( [1] )"""
+        a = """{1}"""
+        self.check(b, a)
+
+        b = """set([  1,  2  ])"""
+        a = """{  1,  2  }"""
+        self.check(b, a)
+
+        b = """set([x  for x in y ])"""
+        a = """{x  for x in y }"""
+        self.check(b, a)
+
+        b = """set(
+                   [1, 2]
+               )
+            """
+        a = """{1, 2}\n"""
+        self.check(b, a)
+
+    def test_comments(self):
+        b = """set((1, 2)) # Hi"""
+        a = """{1, 2} # Hi"""
+        self.check(b, a)
+
+        # This isn't optimal behavior, but the fixer is optional.
+        b = """
+            # Foo
+            set( # Bar
+               (1, 2)
+            )
+            """
+        a = """
+            # Foo
+            {1, 2}
+            """
+        self.check(b, a)
+
+    def test_unchanged(self):
+        s = """set()"""
+        self.unchanged(s)
+
+        s = """set(a)"""
+        self.unchanged(s)
+
+        s = """set(a, b, c)"""
+        self.unchanged(s)
+
+        # Don't transform generators because they might have to be lazy.
+        s = """set(x for x in y)"""
+        self.unchanged(s)
+
+        s = """set(x for x in y if z)"""
+        self.unchanged(s)
+
+        s = """set(a*823-23**2 + f(23))"""
+        self.unchanged(s)
+
+
+class Test_sys_exc(FixerTestCase):
+    fixer = "sys_exc"
+
+    def test_0(self):
+        b = "sys.exc_type"
+        a = "sys.exc_info()[0]"
+        self.check(b, a)
+
+    def test_1(self):
+        b = "sys.exc_value"
+        a = "sys.exc_info()[1]"
+        self.check(b, a)
+
+    def test_2(self):
+        b = "sys.exc_traceback"
+        a = "sys.exc_info()[2]"
+        self.check(b, a)
+
+    def test_3(self):
+        b = "sys.exc_type # Foo"
+        a = "sys.exc_info()[0] # Foo"
+        self.check(b, a)
+
+    def test_4(self):
+        b = "sys.  exc_type"
+        a = "sys.  exc_info()[0]"
+        self.check(b, a)
+
+    def test_5(self):
+        b = "sys  .exc_type"
+        a = "sys  .exc_info()[0]"
+        self.check(b, a)
+
+
+class Test_paren(FixerTestCase):
+    fixer = "paren"
+
+    def test_0(self):
+        b = """[i for i in 1, 2 ]"""
+        a = """[i for i in (1, 2) ]"""
+        self.check(b, a)
+
+    def test_1(self):
+        b = """[i for i in 1, 2, ]"""
+        a = """[i for i in (1, 2,) ]"""
+        self.check(b, a)
+
+    def test_2(self):
+        b = """[i for i  in     1, 2 ]"""
+        a = """[i for i  in     (1, 2) ]"""
+        self.check(b, a)
+
+    def test_3(self):
+        b = """[i for i in 1, 2 if i]"""
+        a = """[i for i in (1, 2) if i]"""
+        self.check(b, a)
+
+    def test_4(self):
+        b = """[i for i in 1,    2    ]"""
+        a = """[i for i in (1,    2)    ]"""
+        self.check(b, a)
+
+    def test_5(self):
+        b = """(i for i in 1, 2)"""
+        a = """(i for i in (1, 2))"""
+        self.check(b, a)
+
+    def test_6(self):
+        b = """(i for i in 1   ,2   if i)"""
+        a = """(i for i in (1   ,2)   if i)"""
+        self.check(b, a)
+
+    def test_unchanged_0(self):
+        s = """[i for i in (1, 2)]"""
+        self.unchanged(s)
+
+    def test_unchanged_1(self):
+        s = """[i for i in foo()]"""
+        self.unchanged(s)
+
+    def test_unchanged_2(self):
+        s = """[i for i in (1, 2) if nothing]"""
+        self.unchanged(s)
+
+    def test_unchanged_3(self):
+        s = """(i for i in (1, 2))"""
+        self.unchanged(s)
+
+    def test_unchanged_4(self):
+        s = """[i for i in m]"""
+        self.unchanged(s)
+
+class Test_metaclass(FixerTestCase):
+
+    fixer = 'metaclass'
+
+    def test_unchanged(self):
+        self.unchanged("class X(): pass")
+        self.unchanged("class X(object): pass")
+        self.unchanged("class X(object1, object2): pass")
+        self.unchanged("class X(object1, object2, object3): pass")
+        self.unchanged("class X(metaclass=Meta): pass")
+        self.unchanged("class X(b, arg=23, metclass=Meta): pass")
+        self.unchanged("class X(b, arg=23, metaclass=Meta, other=42): pass")
+
+        s = """
+        class X:
+            def __metaclass__(self): pass
+        """
+        self.unchanged(s)
+
+        s = """
+        class X:
+            a[23] = 74
+        """
+        self.unchanged(s)
+
+    def test_comments(self):
+        b = """
+        class X:
+            # hi
+            __metaclass__ = AppleMeta
+        """
+        a = """
+        class X(metaclass=AppleMeta):
+            # hi
+            pass
+        """
+        self.check(b, a)
+
+        b = """
+        class X:
+            __metaclass__ = Meta
+            # Bedtime!
+        """
+        a = """
+        class X(metaclass=Meta):
+            pass
+            # Bedtime!
+        """
+        self.check(b, a)
+
+    def test_meta(self):
+        # no-parent class, odd body
+        b = """
+        class X():
+            __metaclass__ = Q
+            pass
+        """
+        a = """
+        class X(metaclass=Q):
+            pass
+        """
+        self.check(b, a)
+
+        # one parent class, no body
+        b = """class X(object): __metaclass__ = Q"""
+        a = """class X(object, metaclass=Q): pass"""
+        self.check(b, a)
+
+
+        # one parent, simple body
+        b = """
+        class X(object):
+            __metaclass__ = Meta
+            bar = 7
+        """
+        a = """
+        class X(object, metaclass=Meta):
+            bar = 7
+        """
+        self.check(b, a)
+
+        b = """
+        class X:
+            __metaclass__ = Meta; x = 4; g = 23
+        """
+        a = """
+        class X(metaclass=Meta):
+            x = 4; g = 23
+        """
+        self.check(b, a)
+
+        # one parent, simple body, __metaclass__ last
+        b = """
+        class X(object):
+            bar = 7
+            __metaclass__ = Meta
+        """
+        a = """
+        class X(object, metaclass=Meta):
+            bar = 7
+        """
+        self.check(b, a)
+
+        # redefining __metaclass__
+        b = """
+        class X():
+            __metaclass__ = A
+            __metaclass__ = B
+            bar = 7
+        """
+        a = """
+        class X(metaclass=B):
+            bar = 7
+        """
+        self.check(b, a)
+
+        # multiple inheritance, simple body
+        b = """
+        class X(clsA, clsB):
+            __metaclass__ = Meta
+            bar = 7
+        """
+        a = """
+        class X(clsA, clsB, metaclass=Meta):
+            bar = 7
+        """
+        self.check(b, a)
+
+        # keywords in the class statement
+        b = """class m(a, arg=23): __metaclass__ = Meta"""
+        a = """class m(a, arg=23, metaclass=Meta): pass"""
+        self.check(b, a)
+
+        b = """
+        class X(expression(2 + 4)):
+            __metaclass__ = Meta
+        """
+        a = """
+        class X(expression(2 + 4), metaclass=Meta):
+            pass
+        """
+        self.check(b, a)
+
+        b = """
+        class X(expression(2 + 4), x**4):
+            __metaclass__ = Meta
+        """
+        a = """
+        class X(expression(2 + 4), x**4, metaclass=Meta):
+            pass
+        """
+        self.check(b, a)
+
+        b = """
+        class X:
+            __metaclass__ = Meta
+            save.py = 23
+        """
+        a = """
+        class X(metaclass=Meta):
+            save.py = 23
+        """
+        self.check(b, a)
+
+
+class Test_getcwdu(FixerTestCase):
+
+    fixer = 'getcwdu'
+
+    def test_basic(self):
+        b = """os.getcwdu"""
+        a = """os.getcwd"""
+        self.check(b, a)
+
+        b = """os.getcwdu()"""
+        a = """os.getcwd()"""
+        self.check(b, a)
+
+        b = """meth = os.getcwdu"""
+        a = """meth = os.getcwd"""
+        self.check(b, a)
+
+        b = """os.getcwdu(args)"""
+        a = """os.getcwd(args)"""
+        self.check(b, a)
+
+    def test_comment(self):
+        b = """os.getcwdu() # Foo"""
+        a = """os.getcwd() # Foo"""
+        self.check(b, a)
+
+    def test_unchanged(self):
+        s = """os.getcwd()"""
+        self.unchanged(s)
+
+        s = """getcwdu()"""
+        self.unchanged(s)
+
+        s = """os.getcwdb()"""
+        self.unchanged(s)
+
+    def test_indentation(self):
+        b = """
+            if 1:
+                os.getcwdu()
+            """
+        a = """
+            if 1:
+                os.getcwd()
+            """
+        self.check(b, a)
+
+    def test_mutilation(self):
+        b = """os .getcwdu()"""
+        a = """os .getcwd()"""
+        self.check(b, a)
+
+        b = """os.  getcwdu"""
+        a = """os.  getcwd"""
+        self.check(b, a)
+
+        b = """os.getcwdu (  )"""
+        a = """os.getcwd (  )"""
+        self.check(b, a)
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/test_parser.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/test_parser.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,202 @@
+#!/usr/bin/env python2.5
+"""Test suite for 2to3's parser and grammar files.
+
+This is the place to add tests for changes to 2to3's grammar, such as those
+merging the grammars for Python 2 and 3. In addition to specific tests for
+parts of the grammar we've changed, we also make sure we can parse the
+test_grammar.py files from both Python 2 and Python 3.
+"""
+# Author: Collin Winter
+
+# Testing imports
+from . import support
+from .support import driver, test_dir
+
+# Python imports
+import os
+import os.path
+
+# Local imports
+from refactor.pgen2.parse import ParseError
+
+
+class GrammarTest(support.TestCase):
+    def validate(self, code):
+        support.parse_string(code)
+
+    def invalid_syntax(self, code):
+        try:
+            self.validate(code)
+        except ParseError:
+            pass
+        else:
+            raise AssertionError("Syntax shouldn't have been valid")
+
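+# Usage sketch (illustrative): the test classes below drive these helpers
+# from both directions, e.g.
+#
+#     class TestAssignment(GrammarTest):
+#         def test_valid(self):
+#             self.validate("x = 1")
+#         def test_invalid(self):
+#             self.invalid_syntax("x === 1")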
+
+class TestRaiseChanges(GrammarTest):
+    def test_2x_style_1(self):
+        self.validate("raise")
+
+    def test_2x_style_2(self):
+        self.validate("raise E, V")
+
+    def test_2x_style_3(self):
+        self.validate("raise E, V, T")
+
+    def test_2x_style_invalid_1(self):
+        self.invalid_syntax("raise E, V, T, Z")
+
+    def test_3x_style(self):
+        self.validate("raise E1 from E2")
+
+    def test_3x_style_invalid_1(self):
+        self.invalid_syntax("raise E, V from E1")
+
+    def test_3x_style_invalid_2(self):
+        self.invalid_syntax("raise E from E1, E2")
+
+    def test_3x_style_invalid_3(self):
+        self.invalid_syntax("raise from E1, E2")
+
+    def test_3x_style_invalid_4(self):
+        self.invalid_syntax("raise E from")
+
+
+# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
+class TestFunctionAnnotations(GrammarTest):
+    def test_1(self):
+        self.validate("""def f(x) -> list: pass""")
+
+    def test_2(self):
+        self.validate("""def f(x:int): pass""")
+
+    def test_3(self):
+        self.validate("""def f(*x:str): pass""")
+
+    def test_4(self):
+        self.validate("""def f(**x:float): pass""")
+
+    def test_5(self):
+        self.validate("""def f(x, y:1+2): pass""")
+
+    def test_6(self):
+        self.validate("""def f(a, (b:1, c:2, d)): pass""")
+
+    def test_7(self):
+        self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
+
+    def test_8(self):
+        s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
+                        *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
+        self.validate(s)
+
+
+class TestExcept(GrammarTest):
+    def test_new(self):
+        s = """
+            try:
+                x
+            except E as N:
+                y"""
+        self.validate(s)
+
+    def test_old(self):
+        s = """
+            try:
+                x
+            except E, N:
+                y"""
+        self.validate(s)
+
+
+# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
+class TestSetLiteral(GrammarTest):
+    def test_1(self):
+        self.validate("""x = {'one'}""")
+
+    def test_2(self):
+        self.validate("""x = {'one', 1,}""")
+
+    def test_3(self):
+        self.validate("""x = {'one', 'two', 'three'}""")
+
+    def test_4(self):
+        self.validate("""x = {2, 3, 4,}""")
+
+
+class TestNumericLiterals(GrammarTest):
+    def test_new_octal_notation(self):
+        self.validate("""0o7777777777777""")
+        self.invalid_syntax("""0o7324528887""")
+
+    def test_new_binary_notation(self):
+        self.validate("""0b101010""")
+        self.invalid_syntax("""0b0101021""")
+
+
+class TestClassDef(GrammarTest):
+    def test_new_syntax(self):
+        self.validate("class B(t=7): pass")
+        self.validate("class B(t, *args): pass")
+        self.validate("class B(t, **kwargs): pass")
+        self.validate("class B(t, *args, **kwargs): pass")
+        self.validate("class B(t, y=9, *args, **kwargs): pass")
+
+
+class TestParserIdempotency(support.TestCase):
+
+    """A cut-down version of pytree_idempotency.py."""
+
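+    # "Idempotency" here means that str() of the parsed tree reproduces the
+    # source file exactly; any difference reported by diff() below is a
+    # failure.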
+    def test_all_project_files(self):
+        for filepath in support.all_project_files():
+            print "Parsing %s..." % filepath
+            tree = driver.parse_file(filepath, debug=True)
+            if diff(filepath, tree):
+                self.fail("Idempotency failed: %s" % filepath)
+
+
+class TestLiterals(GrammarTest):
+
+    def test_multiline_bytes_literals(self):
+        s = """
+            md5test(b"\xaa" * 80,
+                    (b"Test Using Larger Than Block-Size Key "
+                     b"and Larger Than One Block-Size Data"),
+                    "6f630fad67cda0ee1fb1f562db3aa53e")
+            """
+        self.validate(s)
+
+    def test_multiline_bytes_tripquote_literals(self):
+        s = '''
+            b"""
+            <?xml version="1.0" encoding="UTF-8"?>
+            <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
+            """
+            '''
+        self.validate(s)
+
+    def test_multiline_str_literals(self):
+        s = """
+            md5test("\xaa" * 80,
+                    ("Test Using Larger Than Block-Size Key "
+                     "and Larger Than One Block-Size Data"),
+                    "6f630fad67cda0ee1fb1f562db3aa53e")
+            """
+        self.validate(s)
+
+
+def diff(fn, tree):
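+    # Dump the tree's textual form to a scratch file named "@" and compare it
+    # to the original with the system diff(1); a nonzero exit status means
+    # the round-tripped source differs.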
+    f = open("@", "w")
+    try:
+        f.write(str(tree))
+    finally:
+        f.close()
+    try:
+        return os.system("diff -u %s @" % fn)
+    finally:
+        os.remove("@")
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/test_pytree.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/test_pytree.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,458 @@
+#!/usr/bin/env python2.5
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Unit tests for pytree.py.
+
+NOTE: Please *don't* add doc strings to individual test methods!
+In verbose mode, printing of the module, class and method name is much
+more helpful than printing of (the first line of) the docstring,
+especially when debugging a test.
+"""
+
+# Testing imports
+from . import support
+
+# Local imports (XXX should become a package)
+from .. import pytree
+
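+# Compatibility shim: the sorted() builtin only exists on Python 2.4+.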
+try:
+    sorted
+except NameError:
+    def sorted(lst):
+        l = list(lst)
+        l.sort()
+        return l
+
+class TestNodes(support.TestCase):
+
+    """Unit tests for nodes (Base, Leaf, Node)."""
+
+    def testBaseCantConstruct(self):
+        if __debug__:
+            # Test that instantiating Base() raises an AssertionError
+            self.assertRaises(AssertionError, pytree.Base)
+
+    def testLeaf(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(l1.type, 100)
+        self.assertEqual(l1.value, "foo")
+
+    def testLeafRepr(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(repr(l1), "Leaf(100, 'foo')")
+
+    def testLeafStr(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(str(l1), "foo")
+        l2 = pytree.Leaf(100, "foo", context=(" ", (10, 1)))
+        self.assertEqual(str(l2), " foo")
+
+    def testLeafStrNumericValue(self):
+        # Make sure that the Leaf's value is stringified. Failing to
+        #  do this can cause a TypeError in certain situations.
+        l1 = pytree.Leaf(2, 5)
+        l1.set_prefix("foo_")
+        self.assertEqual(str(l1), "foo_5")
+
+    def testLeafEq(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "foo", context=(" ", (1, 0)))
+        self.assertEqual(l1, l2)
+        l3 = pytree.Leaf(101, "foo")
+        l4 = pytree.Leaf(100, "bar")
+        self.assertNotEqual(l1, l3)
+        self.assertNotEqual(l1, l4)
+
+    def testLeafPrefix(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(l1.get_prefix(), "")
+        self.failIf(l1.was_changed)
+        l1.set_prefix("  ##\n\n")
+        self.assertEqual(l1.get_prefix(), "  ##\n\n")
+        self.failUnless(l1.was_changed)
+
+    def testNode(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(200, "bar")
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(n1.type, 1000)
+        self.assertEqual(n1.children, [l1, l2])
+
+    def testNodeRepr(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(repr(n1),
+                         "Node(1000, [%s, %s])" % (repr(l1), repr(l2)))
+
+    def testNodeStr(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(str(n1), "foo bar")
+
+    def testNodePrefix(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(l1.get_prefix(), "")
+        n1 = pytree.Node(1000, [l1])
+        self.assertEqual(n1.get_prefix(), "")
+        n1.set_prefix(" ")
+        self.assertEqual(n1.get_prefix(), " ")
+        self.assertEqual(l1.get_prefix(), " ")
+
+    def testGetSuffix(self):
+        l1 = pytree.Leaf(100, "foo", prefix="a")
+        l2 = pytree.Leaf(100, "bar", prefix="b")
+        n1 = pytree.Node(1000, [l1, l2])
+
+        self.assertEqual(l1.get_suffix(), l2.get_prefix())
+        self.assertEqual(l2.get_suffix(), "")
+        self.assertEqual(n1.get_suffix(), "")
+
+        l3 = pytree.Leaf(100, "bar", prefix="c")
+        n2 = pytree.Node(1000, [n1, l3])
+
+        self.assertEqual(n1.get_suffix(), l3.get_prefix())
+        self.assertEqual(l3.get_suffix(), "")
+        self.assertEqual(n2.get_suffix(), "")
+
+    def testNodeEq(self):
+        n1 = pytree.Node(1000, ())
+        n2 = pytree.Node(1000, [], context=(" ", (1, 0)))
+        self.assertEqual(n1, n2)
+        n3 = pytree.Node(1001, ())
+        self.assertNotEqual(n1, n3)
+
+    def testNodeEqRecursive(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1])
+        n2 = pytree.Node(1000, [l2])
+        self.assertEqual(n1, n2)
+        l3 = pytree.Leaf(100, "bar")
+        n3 = pytree.Node(1000, [l3])
+        self.assertNotEqual(n1, n3)
+
+    def testReplace(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "+")
+        l3 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2, l3])
+        self.assertEqual(n1.children, [l1, l2, l3])
+        self.failUnless(isinstance(n1.children, list))
+        self.failIf(n1.was_changed)
+        l2new = pytree.Leaf(100, "-")
+        l2.replace(l2new)
+        self.assertEqual(n1.children, [l1, l2new, l3])
+        self.failUnless(isinstance(n1.children, list))
+        self.failUnless(n1.was_changed)
+
+    def testReplaceWithList(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "+")
+        l3 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2, l3])
+
+        l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
+        self.assertEqual(str(n1), "foo**bar")
+        self.failUnless(isinstance(n1.children, list))
+
+    def testPostOrder(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(list(n1.post_order()), [l1, l2, n1])
+
+    def testPreOrder(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(list(n1.pre_order()), [n1, l1, l2])
+
+    def testChangedLeaf(self):
+        l1 = pytree.Leaf(100, "f")
+        self.failIf(l1.was_changed)
+
+        l1.changed()
+        self.failUnless(l1.was_changed)
+
+    def testChangedNode(self):
+        l1 = pytree.Leaf(100, "f")
+        n1 = pytree.Node(1000, [l1])
+        self.failIf(n1.was_changed)
+
+        n1.changed()
+        self.failUnless(n1.was_changed)
+
+    def testChangedRecursive(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "+")
+        l3 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2, l3])
+        n2 = pytree.Node(1000, [n1])
+        self.failIf(l1.was_changed)
+        self.failIf(n1.was_changed)
+        self.failIf(n2.was_changed)
+
+        n1.changed()
+        self.failUnless(n1.was_changed)
+        self.failUnless(n2.was_changed)
+        self.failIf(l1.was_changed)
+
+    def testLeafConstructorPrefix(self):
+        for prefix in ("xyz_", ""):
+            l1 = pytree.Leaf(100, "self", prefix=prefix)
+            self.assertEqual(str(l1), prefix + "self")
+            self.assertEqual(l1.get_prefix(), prefix)
+
+    def testNodeConstructorPrefix(self):
+        for prefix in ("xyz_", ""):
+            l1 = pytree.Leaf(100, "self")
+            l2 = pytree.Leaf(100, "foo", prefix="_")
+            n1 = pytree.Node(1000, [l1, l2], prefix=prefix)
+            self.assertEqual(str(n1), prefix + "self_foo")
+            self.assertEqual(n1.get_prefix(), prefix)
+            self.assertEqual(l1.get_prefix(), prefix)
+            self.assertEqual(l2.get_prefix(), "_")
+
+    def testRemove(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1, l2])
+        n2 = pytree.Node(1000, [n1])
+
+        self.assertEqual(n1.remove(), 0)
+        self.assertEqual(n2.children, [])
+        self.assertEqual(l1.parent, n1)
+        self.assertEqual(n1.parent, None)
+        self.assertEqual(n2.parent, None)
+        self.failIf(n1.was_changed)
+        self.failUnless(n2.was_changed)
+
+        self.assertEqual(l2.remove(), 1)
+        self.assertEqual(l1.remove(), 0)
+        self.assertEqual(n1.children, [])
+        self.assertEqual(l1.parent, None)
+        self.assertEqual(n1.parent, None)
+        self.assertEqual(n2.parent, None)
+        self.failUnless(n1.was_changed)
+        self.failUnless(n2.was_changed)
+
+    def testRemoveParentless(self):
+        n1 = pytree.Node(1000, [])
+        n1.remove()
+        self.assertEqual(n1.parent, None)
+
+        l1 = pytree.Leaf(100, "foo")
+        l1.remove()
+        self.assertEqual(l1.parent, None)
+
+    def testNodeSetChild(self):
+        l1 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1])
+
+        l2 = pytree.Leaf(100, "bar")
+        n1.set_child(0, l2)
+        self.assertEqual(l1.parent, None)
+        self.assertEqual(l2.parent, n1)
+        self.assertEqual(n1.children, [l2])
+
+        n2 = pytree.Node(1000, [l1])
+        n2.set_child(0, n1)
+        self.assertEqual(l1.parent, None)
+        self.assertEqual(n1.parent, n2)
+        self.assertEqual(n2.parent, None)
+        self.assertEqual(n2.children, [n1])
+
+        self.assertRaises(IndexError, n1.set_child, 4, l2)
+        # I don't care what it raises, so long as it's an exception
+        self.assertRaises(Exception, n1.set_child, 0, list)
+
+    def testNodeInsertChild(self):
+        l1 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1])
+
+        l2 = pytree.Leaf(100, "bar")
+        n1.insert_child(0, l2)
+        self.assertEqual(l2.parent, n1)
+        self.assertEqual(n1.children, [l2, l1])
+
+        l3 = pytree.Leaf(100, "abc")
+        n1.insert_child(2, l3)
+        self.assertEqual(n1.children, [l2, l1, l3])
+
+        # I don't care what it raises, so long as it's an exception
+        self.assertRaises(Exception, n1.insert_child, 0, list)
+
+    def testNodeAppendChild(self):
+        n1 = pytree.Node(1000, [])
+
+        l1 = pytree.Leaf(100, "foo")
+        n1.append_child(l1)
+        self.assertEqual(l1.parent, n1)
+        self.assertEqual(n1.children, [l1])
+
+        l2 = pytree.Leaf(100, "bar")
+        n1.append_child(l2)
+        self.assertEqual(l2.parent, n1)
+        self.assertEqual(n1.children, [l1, l2])
+
+        # I don't care what it raises, so long as it's an exception
+        self.assertRaises(Exception, n1.append_child, list)
+
+    def testNodeNextSibling(self):
+        n1 = pytree.Node(1000, [])
+        n2 = pytree.Node(1000, [])
+        p1 = pytree.Node(1000, [n1, n2])
+
+        self.failUnless(n1.next_sibling is n2)
+        self.assertEqual(n2.next_sibling, None)
+        self.assertEqual(p1.next_sibling, None)
+
+    def testLeafNextSibling(self):
+        l1 = pytree.Leaf(100, "a")
+        l2 = pytree.Leaf(100, "b")
+        p1 = pytree.Node(1000, [l1, l2])
+
+        self.failUnless(l1.next_sibling is l2)
+        self.assertEqual(l2.next_sibling, None)
+        self.assertEqual(p1.next_sibling, None)
+
+    def testNodePrevSibling(self):
+        n1 = pytree.Node(1000, [])
+        n2 = pytree.Node(1000, [])
+        p1 = pytree.Node(1000, [n1, n2])
+
+        self.failUnless(n2.prev_sibling is n1)
+        self.assertEqual(n1.prev_sibling, None)
+        self.assertEqual(p1.prev_sibling, None)
+
+    def testLeafPrevSibling(self):
+        l1 = pytree.Leaf(100, "a")
+        l2 = pytree.Leaf(100, "b")
+        p1 = pytree.Node(1000, [l1, l2])
+
+        self.failUnless(l2.prev_sibling is l1)
+        self.assertEqual(l1.prev_sibling, None)
+        self.assertEqual(p1.prev_sibling, None)
+
+
+class TestPatterns(support.TestCase):
+
+    """Unit tests for tree matching patterns."""
+
+    def testBasicPatterns(self):
+        # Build a tree
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        l3 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1, l2])
+        n2 = pytree.Node(1000, [l3])
+        root = pytree.Node(1000, [n1, n2])
+        # Build a pattern matching a leaf
+        pl = pytree.LeafPattern(100, "foo", name="pl")
+        r = {}
+        self.assertFalse(pl.match(root, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pl.match(n1, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pl.match(n2, results=r))
+        self.assertEqual(r, {})
+        self.assertTrue(pl.match(l1, results=r))
+        self.assertEqual(r, {"pl": l1})
+        r = {}
+        self.assertFalse(pl.match(l2, results=r))
+        self.assertEqual(r, {})
+        # Build a pattern matching a node
+        pn = pytree.NodePattern(1000, [pl], name="pn")
+        self.assertFalse(pn.match(root, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pn.match(n1, results=r))
+        self.assertEqual(r, {})
+        self.assertTrue(pn.match(n2, results=r))
+        self.assertEqual(r, {"pn": n2, "pl": l3})
+        r = {}
+        self.assertFalse(pn.match(l1, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pn.match(l2, results=r))
+        self.assertEqual(r, {})
+
+    def testWildcardPatterns(self):
+        # Build a tree for testing
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        l3 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1, l2])
+        n2 = pytree.Node(1000, [l3])
+        root = pytree.Node(1000, [n1, n2])
+        # Build a pattern
+        pl = pytree.LeafPattern(100, "foo", name="pl")
+        pn = pytree.NodePattern(1000, [pl], name="pn")
+        pw = pytree.WildcardPattern([[pn], [pl, pl]], name="pw")
+        r = {}
+        self.assertFalse(pw.match_seq([root], r))
+        self.assertEqual(r, {})
+        self.assertFalse(pw.match_seq([n1], r))
+        self.assertEqual(r, {})
+        self.assertTrue(pw.match_seq([n2], r))
+        # These are easier to debug
+        self.assertEqual(sorted(r.keys()), ["pl", "pn", "pw"])
+        self.assertEqual(r["pl"], l1)
+        self.assertEqual(r["pn"], n2)
+        self.assertEqual(r["pw"], [n2])
+        # But this is equivalent
+        self.assertEqual(r, {"pl": l1, "pn": n2, "pw": [n2]})
+        r = {}
+        self.assertTrue(pw.match_seq([l1, l3], r))
+        self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
+        self.assert_(r["pl"] is l3)
+        r = {}
+
+    def testGenerateMatches(self):
+        la = pytree.Leaf(1, "a")
+        lb = pytree.Leaf(1, "b")
+        lc = pytree.Leaf(1, "c")
+        ld = pytree.Leaf(1, "d")
+        le = pytree.Leaf(1, "e")
+        lf = pytree.Leaf(1, "f")
+        leaves = [la, lb, lc, ld, le, lf]
+        root = pytree.Node(1000, leaves)
+        pa = pytree.LeafPattern(1, "a", "pa")
+        pb = pytree.LeafPattern(1, "b", "pb")
+        pc = pytree.LeafPattern(1, "c", "pc")
+        pd = pytree.LeafPattern(1, "d", "pd")
+        pe = pytree.LeafPattern(1, "e", "pe")
+        pf = pytree.LeafPattern(1, "f", "pf")
+        pw = pytree.WildcardPattern([[pa, pb, pc], [pd, pe],
+                                     [pa, pb], [pc, pd], [pe, pf]],
+                                    min=1, max=4, name="pw")
+        self.assertEqual([x[0] for x in pw.generate_matches(leaves)],
+                         [3, 5, 2, 4, 6])
+        pr = pytree.NodePattern(type=1000, content=[pw], name="pr")
+        matches = list(pytree.generate_matches([pr], [root]))
+        self.assertEqual(len(matches), 1)
+        c, r = matches[0]
+        self.assertEqual(c, 1)
+        self.assertEqual(str(r["pr"]), "abcdef")
+        self.assertEqual(r["pw"], [la, lb, lc, ld, le, lf])
+        for c in "abcdef":
+            self.assertEqual(r["p" + c], pytree.Leaf(1, c))
+
+    def testHasKeyExample(self):
+        pattern = pytree.NodePattern(331,
+                                     (pytree.LeafPattern(7),
+                                      pytree.WildcardPattern(name="args"),
+                                      pytree.LeafPattern(8)))
+        l1 = pytree.Leaf(7, "(")
+        l2 = pytree.Leaf(3, "x")
+        l3 = pytree.Leaf(8, ")")
+        node = pytree.Node(331, [l1, l2, l3])
+        r = {}
+        self.assert_(pattern.match(node, r))
+        self.assertEqual(r["args"], [l2])
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/test_refactor.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/test_refactor.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,168 @@
+"""
+Unit tests for refactor.py.
+"""
+
+import sys
+import os
+import operator
+import StringIO
+import tempfile
+import unittest
+
+from lib2to3 import refactor, pygram, fixer_base
+
+from . import support
+
+
+FIXER_DIR = os.path.join(os.path.dirname(__file__), "data/fixers")
+
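+# The "myfixes" package lives under tests/data/fixers, so it must be
+# importable while the default fixer list is computed; the path entry is
+# removed again in the finally block.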
+sys.path.append(FIXER_DIR)
+try:
+    _DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
+finally:
+    sys.path.pop()
+
+class TestRefactoringTool(unittest.TestCase):
+
+    def setUp(self):
+        sys.path.append(FIXER_DIR)
+
+    def tearDown(self):
+        sys.path.pop()
+
+    def check_instances(self, instances, classes):
+        for inst, cls in zip(instances, classes):
+            if not isinstance(inst, cls):
+                self.fail("%s are not instances of %s" % (instances, classes))
+
+    def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
+        return refactor.RefactoringTool(fixers, options, explicit)
+
+    def test_print_function_option(self):
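+        # Enabling print_function removes "print" from the grammar's keyword
+        # table so that "print(x)" parses as an ordinary call; the keyword is
+        # restored afterwards to avoid leaking state into other tests.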
+        gram = pygram.python_grammar
+        save = gram.keywords["print"]
+        try:
+            rt = self.rt({"print_function" : True})
+            self.assertRaises(KeyError, operator.itemgetter("print"),
+                              gram.keywords)
+        finally:
+            gram.keywords["print"] = save
+
+    def test_fixer_loading_helpers(self):
+        contents = ["explicit", "first", "last", "parrot", "preorder"]
+        non_prefixed = refactor.get_all_fix_names("myfixes")
+        prefixed = refactor.get_all_fix_names("myfixes", False)
+        full_names = refactor.get_fixers_from_package("myfixes")
+        self.assertEqual(prefixed, ["fix_" + name for name in contents])
+        self.assertEqual(non_prefixed, contents)
+        self.assertEqual(full_names,
+                         ["myfixes.fix_" + name for name in contents])
+
+    def test_get_headnode_dict(self):
+        class NoneFix(fixer_base.BaseFix):
+            PATTERN = None
+
+        class FileInputFix(fixer_base.BaseFix):
+            PATTERN = "file_input< any * >"
+
+        no_head = NoneFix({}, [])
+        with_head = FileInputFix({}, [])
+        d = refactor.get_headnode_dict([no_head, with_head])
+        expected = {None: [no_head],
+                    pygram.python_symbols.file_input : [with_head]}
+        self.assertEqual(d, expected)
+
+    def test_fixer_loading(self):
+        from myfixes.fix_first import FixFirst
+        from myfixes.fix_last import FixLast
+        from myfixes.fix_parrot import FixParrot
+        from myfixes.fix_preorder import FixPreorder
+
+        rt = self.rt()
+        pre, post = rt.get_fixers()
+
+        self.check_instances(pre, [FixPreorder])
+        self.check_instances(post, [FixFirst, FixParrot, FixLast])
+
+    def test_naughty_fixers(self):
+        self.assertRaises(ImportError, self.rt, fixers=["not_here"])
+        self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
+        self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
+
+    def test_refactor_string(self):
+        rt = self.rt()
+        input = "def parrot(): pass\n\n"
+        tree = rt.refactor_string(input, "<test>")
+        self.assertNotEqual(str(tree), input)
+
+        input = "def f(): pass\n\n"
+        tree = rt.refactor_string(input, "<test>")
+        self.assertEqual(str(tree), input)
+
+    def test_refactor_stdin(self):
+
+        class MyRT(refactor.RefactoringTool):
+
+            def print_output(self, lines):
+                diff_lines.extend(lines)
+
+        diff_lines = []
+        rt = MyRT(_DEFAULT_FIXERS)
+        save = sys.stdin
+        sys.stdin = StringIO.StringIO("def parrot(): pass\n\n")
+        try:
+            rt.refactor_stdin()
+        finally:
+            sys.stdin = save
+        expected = """--- <stdin> (original)
++++ <stdin> (refactored)
+@@ -1,2 +1,2 @@
+-def parrot(): pass
++def cheese(): pass""".splitlines()
+        self.assertEqual(diff_lines[:-1], expected)
+
+    def test_refactor_file(self):
+        test_file = os.path.join(FIXER_DIR, "parrot_example.py")
+        old_contents = open(test_file, "r").read()
+        rt = self.rt()
+
+        rt.refactor_file(test_file)
+        self.assertEqual(old_contents, open(test_file, "r").read())
+
+        rt.refactor_file(test_file, True)
+        try:
+            self.assertNotEqual(old_contents, open(test_file, "r").read())
+        finally:
+            open(test_file, "w").write(old_contents)
+
+    def test_refactor_docstring(self):
+        rt = self.rt()
+
+        def example():
+            """
+            >>> example()
+            42
+            """
+        out = rt.refactor_docstring(example.__doc__, "<test>")
+        self.assertEqual(out, example.__doc__)
+
+        def parrot():
+            """
+            >>> def parrot():
+            ...      return 43
+            """
+        out = rt.refactor_docstring(parrot.__doc__, "<test>")
+        self.assertNotEqual(out, parrot.__doc__)
+
+    def test_explicit(self):
+        from myfixes.fix_explicit import FixExplicit
+
+        rt = self.rt(fixers=["myfixes.fix_explicit"])
+        self.assertEqual(len(rt.post_order), 0)
+
+        rt = self.rt(explicit=["myfixes.fix_explicit"])
+        for fix in rt.post_order:
+            if isinstance(fix, FixExplicit):
+                break
+        else:
+            self.fail("explicit fixer not loaded")

Added: sandbox/trunk/refactor_pkg/lib2to3/tests/test_util.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/lib2to3/tests/test_util.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,559 @@
+#!/usr/bin/env python2.5
+""" Test suite for the code in fixes.util """
+# Author: Collin Winter
+
+# Testing imports
+from . import support
+
+# Python imports
+import os.path
+
+# Local imports
+from .. import pytree
+from .. import fixer_util
+from refactor.fixer_util import Attr, Name
+
+
+def parse(code, strip_levels=0):
+    # The topmost node is file_input, which we don't care about.
+    # The next-topmost node is a *_stmt node, which we also don't care about
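+    # (e.g. strip_levels=2 peels off both, leaving the bare expression node)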
+    tree = support.parse_string(code)
+    for i in range(strip_levels):
+        tree = tree.children[0]
+    tree.parent = None
+    return tree
+
+class MacroTestCase(support.TestCase):
+    def assertStr(self, node, string):
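+        # Helpers such as Attr() return plain lists of nodes; wrap those in a
+        # simple_stmt Node so str() can be used for the comparison.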
+        if isinstance(node, (tuple, list)):
+            node = pytree.Node(fixer_util.syms.simple_stmt, node)
+        self.assertEqual(str(node), string)
+
+
+class Test_is_tuple(support.TestCase):
+    def is_tuple(self, string):
+        return fixer_util.is_tuple(parse(string, strip_levels=2))
+
+    def test_valid(self):
+        self.failUnless(self.is_tuple("(a, b)"))
+        self.failUnless(self.is_tuple("(a, (b, c))"))
+        self.failUnless(self.is_tuple("((a, (b, c)),)"))
+        self.failUnless(self.is_tuple("(a,)"))
+        self.failUnless(self.is_tuple("()"))
+
+    def test_invalid(self):
+        self.failIf(self.is_tuple("(a)"))
+        self.failIf(self.is_tuple("('foo') % (b, c)"))
+
+
+class Test_is_list(support.TestCase):
+    def is_list(self, string):
+        return fixer_util.is_list(parse(string, strip_levels=2))
+
+    def test_valid(self):
+        self.failUnless(self.is_list("[]"))
+        self.failUnless(self.is_list("[a]"))
+        self.failUnless(self.is_list("[a, b]"))
+        self.failUnless(self.is_list("[a, [b, c]]"))
+        self.failUnless(self.is_list("[[a, [b, c]],]"))
+
+    def test_invalid(self):
+        self.failIf(self.is_list("[]+[]"))
+
+
+class Test_Attr(MacroTestCase):
+    def test(self):
+        call = parse("foo()", strip_levels=2)
+
+        self.assertStr(Attr(Name("a"), Name("b")), "a.b")
+        self.assertStr(Attr(call, Name("b")), "foo().b")
+
+    def test_returns(self):
+        attr = Attr(Name("a"), Name("b"))
+        self.assertEqual(type(attr), list)
+
+
+class Test_Name(MacroTestCase):
+    def test(self):
+        self.assertStr(Name("a"), "a")
+        self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
+        self.assertStr(Name("a", prefix="b"), "ba")
+
+
+class Test_does_tree_import(support.TestCase):
+    def _find_bind_rec(self, name, node):
+        # Search a tree for a binding -- used to find the starting
+        # point for these tests.
+        c = fixer_util.find_binding(name, node)
+        if c: return c
+        for child in node.children:
+            c = self._find_bind_rec(name, child)
+            if c: return c
+
+    def does_tree_import(self, package, name, string):
+        node = parse(string)
+        # Find the binding of start -- that's what we'll go from
+        node = self._find_bind_rec('start', node)
+        return fixer_util.does_tree_import(package, name, node)
+
+    def try_with(self, string):
+        failing_tests = (("a", "a", "from a import b"),
+                         ("a.d", "a", "from a.d import b"),
+                         ("d.a", "a", "from d.a import b"),
+                         (None, "a", "import b"),
+                         (None, "a", "import b, c, d"))
+        for package, name, import_ in failing_tests:
+            n = self.does_tree_import(package, name, import_ + "\n" + string)
+            self.failIf(n)
+            n = self.does_tree_import(package, name, string + "\n" + import_)
+            self.failIf(n)
+
+        passing_tests = (("a", "a", "from a import a"),
+                         ("x", "a", "from x import a"),
+                         ("x", "a", "from x import b, c, a, d"),
+                         ("x.b", "a", "from x.b import a"),
+                         ("x.b", "a", "from x.b import b, c, a, d"),
+                         (None, "a", "import a"),
+                         (None, "a", "import b, c, a, d"))
+        for package, name, import_ in passing_tests:
+            n = self.does_tree_import(package, name, import_ + "\n" + string)
+            self.failUnless(n)
+            n = self.does_tree_import(package, name, string + "\n" + import_)
+            self.failUnless(n)
+
+    def test_in_function(self):
+        self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
+
+class Test_find_binding(support.TestCase):
+    def find_binding(self, name, string, package=None):
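+        # fixer_util.find_binding returns the node that binds `name` in the
+        # tree (assignment, import, def/class, for target, ...), or None.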
+        return fixer_util.find_binding(name, parse(string), package)
+
+    def test_simple_assignment(self):
+        self.failUnless(self.find_binding("a", "a = b"))
+        self.failUnless(self.find_binding("a", "a = [b, c, d]"))
+        self.failUnless(self.find_binding("a", "a = foo()"))
+        self.failUnless(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
+        self.failIf(self.find_binding("a", "foo = a"))
+        self.failIf(self.find_binding("a", "foo = (a, b, c)"))
+
+    def test_tuple_assignment(self):
+        self.failUnless(self.find_binding("a", "(a,) = b"))
+        self.failUnless(self.find_binding("a", "(a, b, c) = [b, c, d]"))
+        self.failUnless(self.find_binding("a", "(c, (d, a), b) = foo()"))
+        self.failUnless(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
+        self.failIf(self.find_binding("a", "(foo, b) = (b, a)"))
+        self.failIf(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
+
+    def test_list_assignment(self):
+        self.failUnless(self.find_binding("a", "[a] = b"))
+        self.failUnless(self.find_binding("a", "[a, b, c] = [b, c, d]"))
+        self.failUnless(self.find_binding("a", "[c, [d, a], b] = foo()"))
+        self.failUnless(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
+        self.failIf(self.find_binding("a", "[foo, b] = (b, a)"))
+        self.failIf(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
+
+    def test_invalid_assignments(self):
+        self.failIf(self.find_binding("a", "foo.a = 5"))
+        self.failIf(self.find_binding("a", "foo[a] = 5"))
+        self.failIf(self.find_binding("a", "foo(a) = 5"))
+        self.failIf(self.find_binding("a", "foo(a, b) = 5"))
+
+    def test_simple_import(self):
+        self.failUnless(self.find_binding("a", "import a"))
+        self.failUnless(self.find_binding("a", "import b, c, a, d"))
+        self.failIf(self.find_binding("a", "import b"))
+        self.failIf(self.find_binding("a", "import b, c, d"))
+
+    def test_from_import(self):
+        self.failUnless(self.find_binding("a", "from x import a"))
+        self.failUnless(self.find_binding("a", "from a import a"))
+        self.failUnless(self.find_binding("a", "from x import b, c, a, d"))
+        self.failUnless(self.find_binding("a", "from x.b import a"))
+        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d"))
+        self.failIf(self.find_binding("a", "from a import b"))
+        self.failIf(self.find_binding("a", "from a.d import b"))
+        self.failIf(self.find_binding("a", "from d.a import b"))
+
+    def test_import_as(self):
+        self.failUnless(self.find_binding("a", "import b as a"))
+        self.failUnless(self.find_binding("a", "import b as a, c, a as f, d"))
+        self.failIf(self.find_binding("a", "import a as f"))
+        self.failIf(self.find_binding("a", "import b, c as f, d as e"))
+
+    def test_from_import_as(self):
+        self.failUnless(self.find_binding("a", "from x import b as a"))
+        self.failUnless(self.find_binding("a", "from x import g as a, d as b"))
+        self.failUnless(self.find_binding("a", "from x.b import t as a"))
+        self.failUnless(self.find_binding("a", "from x.b import g as a, d"))
+        self.failIf(self.find_binding("a", "from a import b as t"))
+        self.failIf(self.find_binding("a", "from a.d import b as t"))
+        self.failIf(self.find_binding("a", "from d.a import b as t"))
+
+    def test_simple_import_with_package(self):
+        self.failUnless(self.find_binding("b", "import b"))
+        self.failUnless(self.find_binding("b", "import b, c, d"))
+        self.failIf(self.find_binding("b", "import b", "b"))
+        self.failIf(self.find_binding("b", "import b, c, d", "c"))
+
+    def test_from_import_with_package(self):
+        self.failUnless(self.find_binding("a", "from x import a", "x"))
+        self.failUnless(self.find_binding("a", "from a import a", "a"))
+        self.failUnless(self.find_binding("a", "from x import *", "x"))
+        self.failUnless(self.find_binding("a", "from x import b, c, a, d", "x"))
+        self.failUnless(self.find_binding("a", "from x.b import a", "x.b"))
+        self.failUnless(self.find_binding("a", "from x.b import *", "x.b"))
+        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
+        self.failIf(self.find_binding("a", "from a import b", "a"))
+        self.failIf(self.find_binding("a", "from a.d import b", "a.d"))
+        self.failIf(self.find_binding("a", "from d.a import b", "a.d"))
+        self.failIf(self.find_binding("a", "from x.y import *", "a.b"))
+
+    def test_import_as_with_package(self):
+        self.failIf(self.find_binding("a", "import b.c as a", "b.c"))
+        self.failIf(self.find_binding("a", "import a as f", "f"))
+        self.failIf(self.find_binding("a", "import a as f", "a"))
+
+    def test_from_import_as_with_package(self):
+        # Because it would take a lot of special-case code in the fixers
+        # to deal with from foo import bar as baz, we'll simply always
+        # fail if there is a "from ... import ... as ..."
+        self.failIf(self.find_binding("a", "from x import b as a", "x"))
+        self.failIf(self.find_binding("a", "from x import g as a, d as b", "x"))
+        self.failIf(self.find_binding("a", "from x.b import t as a", "x.b"))
+        self.failIf(self.find_binding("a", "from x.b import g as a, d", "x.b"))
+        self.failIf(self.find_binding("a", "from a import b as t", "a"))
+        self.failIf(self.find_binding("a", "from a import b as t", "b"))
+        self.failIf(self.find_binding("a", "from a import b as t", "t"))
+
+    def test_function_def(self):
+        self.failUnless(self.find_binding("a", "def a(): pass"))
+        self.failUnless(self.find_binding("a", "def a(b, c, d): pass"))
+        self.failUnless(self.find_binding("a", "def a(): b = 7"))
+        self.failIf(self.find_binding("a", "def d(b, (c, a), e): pass"))
+        self.failIf(self.find_binding("a", "def d(a=7): pass"))
+        self.failIf(self.find_binding("a", "def d(a): pass"))
+        self.failIf(self.find_binding("a", "def d(): a = 7"))
+
+        s = """
+            def d():
+                def a():
+                    pass"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_class_def(self):
+        self.failUnless(self.find_binding("a", "class a: pass"))
+        self.failUnless(self.find_binding("a", "class a(): pass"))
+        self.failUnless(self.find_binding("a", "class a(b): pass"))
+        self.failUnless(self.find_binding("a", "class a(b, c=8): pass"))
+        self.failIf(self.find_binding("a", "class d: pass"))
+        self.failIf(self.find_binding("a", "class d(a): pass"))
+        self.failIf(self.find_binding("a", "class d(b, a=7): pass"))
+        self.failIf(self.find_binding("a", "class d(b, *a): pass"))
+        self.failIf(self.find_binding("a", "class d(b, **a): pass"))
+        self.failIf(self.find_binding("a", "class d: a = 7"))
+
+        s = """
+            class d():
+                class a():
+                    pass"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_for(self):
+        self.failUnless(self.find_binding("a", "for a in r: pass"))
+        self.failUnless(self.find_binding("a", "for a, b in r: pass"))
+        self.failUnless(self.find_binding("a", "for (a, b) in r: pass"))
+        self.failUnless(self.find_binding("a", "for c, (a,) in r: pass"))
+        self.failUnless(self.find_binding("a", "for c, (a, b) in r: pass"))
+        self.failUnless(self.find_binding("a", "for c in r: a = c"))
+        self.failIf(self.find_binding("a", "for c in a: pass"))
+
+    def test_for_nested(self):
+        s = """
+            for b in r:
+                for a in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for a, c in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for (a, c) in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for (a,) in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c, (a, d) in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c in b:
+                    a = 7"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c in b:
+                    d = a"""
+        self.failIf(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c in a:
+                    d = 7"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_if(self):
+        self.failUnless(self.find_binding("a", "if b in r: a = c"))
+        self.failIf(self.find_binding("a", "if a in r: d = e"))
+
+    def test_if_nested(self):
+        s = """
+            if b in r:
+                if c in d:
+                    a = c"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            if b in r:
+                if c in d:
+                    c = a"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_while(self):
+        self.failUnless(self.find_binding("a", "while b in r: a = c"))
+        self.failIf(self.find_binding("a", "while a in r: d = e"))
+
+    def test_while_nested(self):
+        s = """
+            while b in r:
+                while c in d:
+                    a = c"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            while b in r:
+                while c in d:
+                    c = a"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except(self):
+        s = """
+            try:
+                a = 6
+            except:
+                b = 8"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except KeyError:
+                pass
+            except:
+                a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except_nested(self):
+        s = """
+            try:
+                try:
+                    a = 6
+                except:
+                    pass
+            except:
+                b = 8"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                try:
+                    a = 6
+                except:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                try:
+                    pass
+                except:
+                    a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                try:
+                    b = 8
+                except KeyError:
+                    pass
+                except:
+                    a = 6
+            except:
+                pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                pass
+            except:
+                try:
+                    b = 8
+                except KeyError:
+                    pass
+                except:
+                    a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+        s = """
+            try:
+                try:
+                    b = 8
+                except:
+                    c = d
+            except:
+                try:
+                    b = 6
+                except:
+                    t = 8
+                except:
+                    o = y"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except_finally(self):
+        s = """
+            try:
+                c = 6
+            except:
+                b = 8
+            finally:
+                a = 9"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                b = 9
+            finally:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except_finally_nested(self):
+        s = """
+            try:
+                c = 6
+            except:
+                b = 8
+            finally:
+                try:
+                    a = 9
+                except:
+                    b = 9
+                finally:
+                    c = 9"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                try:
+                    pass
+                finally:
+                    a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                try:
+                    b = 6
+                finally:
+                    b = 7"""
+        self.failIf(self.find_binding("a", s))
+
+class Test_touch_import(support.TestCase):
+
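+    # touch_import(package, name, node) inserts "import name" (or "from
+    # package import name") after the docstring and any existing imports,
+    # unless an equivalent import is already present.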
+    def test_after_docstring(self):
+        node = parse('"""foo"""\nbar()')
+        fixer_util.touch_import(None, "foo", node)
+        self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
+
+    def test_after_imports(self):
+        node = parse('"""foo"""\nimport bar\nbar()')
+        fixer_util.touch_import(None, "foo", node)
+        self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
+
+    def test_beginning(self):
+        node = parse('bar()')
+        fixer_util.touch_import(None, "foo", node)
+        self.assertEqual(str(node), 'import foo\nbar()\n\n')
+
+    def test_from_import(self):
+        node = parse('bar()')
+        fixer_util.touch_import("cgi", "escape", node)
+        self.assertEqual(str(node), 'from cgi import escape\nbar()\n\n')
+
+    def test_name_import(self):
+        node = parse('bar()')
+        fixer_util.touch_import(None, "cgi", node)
+        self.assertEqual(str(node), 'import cgi\nbar()\n\n')
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

Added: sandbox/trunk/refactor_pkg/refactor/Grammar.txt
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/Grammar.txt	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,155 @@
+# Grammar for Python
+
+# Note:  Changing the grammar specified in this file will most likely
+#        require corresponding changes in the parser module
+#        (../Modules/parsermodule.c).  If you can't make the changes to
+#        that module yourself, please co-ordinate the required changes
+#        with someone who can; ask around on python-dev for help.  Fred
+#        Drake <fdrake at acm.org> will probably be listening there.
+
+# NOTE WELL: You should also follow all the steps listed in PEP 306,
+# "How to Change Python's Grammar"
+
+# Commands for Kees Blom's railroad program
+#diagram:token NAME
+#diagram:token NUMBER
+#diagram:token STRING
+#diagram:token NEWLINE
+#diagram:token ENDMARKER
+#diagram:token INDENT
+#diagram:output\input python.bla
+#diagram:token DEDENT
+#diagram:output\textwidth 20.04cm\oddsidemargin  0.0cm\evensidemargin 0.0cm
+#diagram:rules
+
+# Start symbols for the grammar:
+#	file_input is a module or sequence of commands read from an input file;
+#	single_input is a single interactive statement;
+#	eval_input is the input for the eval() and input() functions.
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+file_input: (NEWLINE | stmt)* ENDMARKER
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+eval_input: testlist NEWLINE* ENDMARKER
+
+decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+decorators: decorator+
+decorated: decorators (classdef | funcdef)
+funcdef: 'def' NAME parameters ['->' test] ':' suite
+parameters: '(' [typedargslist] ')'
+typedargslist: ((tfpdef ['=' test] ',')*
+                ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname)
+                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+tname: NAME [':' test]
+tfpdef: tname | '(' tfplist ')'
+tfplist: tfpdef (',' tfpdef)* [',']
+varargslist: ((vfpdef ['=' test] ',')*
+              ('*' [vname] (',' vname ['=' test])*  [',' '**' vname] | '**' vname)
+              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+vname: NAME
+vfpdef: vname | '(' vfplist ')'
+vfplist: vfpdef (',' vfpdef)* [',']
+
+stmt: simple_stmt | compound_stmt
+simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+small_stmt: (expr_stmt | print_stmt  | del_stmt | pass_stmt | flow_stmt |
+             import_stmt | global_stmt | exec_stmt | assert_stmt)
+expr_stmt: testlist (augassign (yield_expr|testlist) |
+                     ('=' (yield_expr|testlist))*)
+augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+            '<<=' | '>>=' | '**=' | '//=')
+# For normal assignments, additional restrictions enforced by the interpreter
+print_stmt: 'print' ( [ test (',' test)* [','] ] |
+                      '>>' test [ (',' test)+ [','] ] )
+del_stmt: 'del' exprlist
+pass_stmt: 'pass'
+flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
+break_stmt: 'break'
+continue_stmt: 'continue'
+return_stmt: 'return' [testlist]
+yield_stmt: yield_expr
+raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
+import_stmt: import_name | import_from
+import_name: 'import' dotted_as_names
+import_from: ('from' ('.'* dotted_name | '.'+)
+              'import' ('*' | '(' import_as_names ')' | import_as_names))
+import_as_name: NAME ['as' NAME]
+dotted_as_name: dotted_name ['as' NAME]
+import_as_names: import_as_name (',' import_as_name)* [',']
+dotted_as_names: dotted_as_name (',' dotted_as_name)*
+dotted_name: NAME ('.' NAME)*
+global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
+exec_stmt: 'exec' expr ['in' test [',' test]]
+assert_stmt: 'assert' test [',' test]
+
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
+if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+while_stmt: 'while' test ':' suite ['else' ':' suite]
+for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
+try_stmt: ('try' ':' suite
+           ((except_clause ':' suite)+
+	    ['else' ':' suite]
+	    ['finally' ':' suite] |
+	   'finally' ':' suite))
+with_stmt: 'with' test [ with_var ] ':' suite
+with_var: 'as' expr
+# NB compile.c makes sure that the default except clause is last
+except_clause: 'except' [test [(',' | 'as') test]]
+suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
+
+# Backward compatibility cruft to support:
+# [ x for x in lambda: True, lambda: False if x() ]
+# even while also allowing:
+# lambda x: 5 if x else 2
+# (But not a mix of the two)
+testlist_safe: old_test [(',' old_test)+ [',']]
+old_test: or_test | old_lambdef
+old_lambdef: 'lambda' [varargslist] ':' old_test
+
+test: or_test ['if' or_test 'else' test] | lambdef
+or_test: and_test ('or' and_test)*
+and_test: not_test ('and' not_test)*
+not_test: 'not' not_test | comparison
+comparison: expr (comp_op expr)*
+comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+expr: xor_expr ('|' xor_expr)*
+xor_expr: and_expr ('^' and_expr)*
+and_expr: shift_expr ('&' shift_expr)*
+shift_expr: arith_expr (('<<'|'>>') arith_expr)*
+arith_expr: term (('+'|'-') term)*
+term: factor (('*'|'/'|'%'|'//') factor)*
+factor: ('+'|'-'|'~') factor | power
+power: atom trailer* ['**' factor]
+atom: ('(' [yield_expr|testlist_gexp] ')' |
+       '[' [listmaker] ']' |
+       '{' [dictsetmaker] '}' |
+       '`' testlist1 '`' |
+       NAME | NUMBER | STRING+ | '.' '.' '.')
+listmaker: test ( comp_for | (',' test)* [','] )
+testlist_gexp: test ( comp_for | (',' test)* [','] )
+lambdef: 'lambda' [varargslist] ':' test
+trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+subscriptlist: subscript (',' subscript)* [',']
+subscript: test | [test] ':' [test] [sliceop]
+sliceop: ':' [test]
+exprlist: expr (',' expr)* [',']
+testlist: test (',' test)* [',']
+dictsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
+                (test (comp_for | (',' test)* [','])) )
+
+classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
+
+arglist: (argument ',')* (argument [',']
+                         |'*' test (',' argument)* [',' '**' test] 
+                         |'**' test)
+argument: test [comp_for] | test '=' test  # Really [keyword '='] test
+
+comp_iter: comp_for | comp_if
+comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
+comp_if: 'if' old_test [comp_iter]
+
+testlist1: test (',' test)*
+
+# not used in grammar, but may appear in "node" passed from Parser to Compiler
+encoding_decl: NAME
+
+yield_expr: 'yield' [testlist]

Added: sandbox/trunk/refactor_pkg/refactor/PatternGrammar.txt
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/PatternGrammar.txt	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,28 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# A grammar to describe tree matching patterns.
+# Not shown here:
+# - 'TOKEN' stands for any token (leaf node)
+# - 'any' stands for any node (leaf or interior)
+# With 'any' we can still specify the sub-structure.
+
+# The start symbol is 'Matcher'.
+
+Matcher: Alternatives ENDMARKER
+
+Alternatives: Alternative ('|' Alternative)*
+
+Alternative: (Unit | NegatedUnit)+
+
+Unit: [NAME '='] ( STRING [Repeater]
+                 | NAME [Details] [Repeater]
+                 | '(' Alternatives ')' [Repeater]
+                 | '[' Alternatives ']'
+		 )
+
+NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
+
+Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
+
+Details: '<' Alternatives '>'
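
As a concrete instance of this pattern grammar, fix_buffer.py later in
this commit matches calls to buffer() with a single Alternative whose
Units use Details (angle brackets) and a name= binding:

    power< name='buffer' trailer< '(' [any] ')' > >

Patterns like this are compiled at fixer start-up via
PatternCompiler().compile_pattern() (see fixer_base.py below).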

Added: sandbox/trunk/refactor_pkg/refactor/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,8 @@
+from . import fixer_base
+from . import fixer_util
+from . import main
+from . import patcomp
+from . import pgen2
+from . import pygram
+from . import pytree
+from . import refactor

Added: sandbox/trunk/refactor_pkg/refactor/fixer_base.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixer_base.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,178 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Base class for fixers (optional, but recommended)."""
+
+# Python imports
+import logging
+import itertools
+
+# Local imports
+from .patcomp import PatternCompiler
+from . import pygram
+from .fixer_util import does_tree_import
+
+class BaseFix(object):
+
+    """Optional base class for fixers.
+
+    The subclass name must be FixFooBar where FooBar is the result of
+    removing underscores and capitalizing the words of the fix name.
+    For example, the class name for a fixer named 'has_key' should be
+    FixHasKey.
+    """
+
+    PATTERN = None  # Most subclasses should override with a string literal
+    pattern = None  # Compiled pattern, set by compile_pattern()
+    options = None  # Options object passed to initializer
+    filename = None # The filename (set by set_filename)
+    logger = None   # A logger (set by set_filename)
+    numbers = itertools.count(1) # For new_name()
+    used_names = set() # A set of all used NAMEs
+    order = "post" # Does the fixer prefer pre- or post-order traversal
+    explicit = False # Is this ignored by refactor.py -f all?
+    run_order = 5   # Fixers will be sorted by run order before execution
+                    # Lower numbers will be run first.
+
+    # Shortcut for access to Python grammar symbols
+    syms = pygram.python_symbols
+
+    def __init__(self, options, log):
+        """Initializer.  Subclass may override.
+
+        Args:
+            options: a dict containing the options passed to RefactoringTool
+            that could be used to customize the fixer through the command line.
+            log: a list to append warnings and other messages to.
+        """
+        self.options = options
+        self.log = log
+        self.compile_pattern()
+
+    def compile_pattern(self):
+        """Compiles self.PATTERN into self.pattern.
+
+        Subclass may override if it doesn't want to use
+        self.{pattern,PATTERN} in .match().
+        """
+        if self.PATTERN is not None:
+            self.pattern = PatternCompiler().compile_pattern(self.PATTERN)
+
+    def set_filename(self, filename):
+        """Set the filename, and a logger derived from it.
+
+        The main refactoring tool should call this.
+        """
+        self.filename = filename
+        self.logger = logging.getLogger(filename)
+
+    def match(self, node):
+        """Returns match for a given parse tree node.
+
+        Should return a true or false object (not necessarily a bool).
+        It may return a non-empty dict of matching sub-nodes as
+        returned by a matching pattern.
+
+        Subclass may override.
+        """
+        results = {"node": node}
+        return self.pattern.match(node, results) and results
+
+    def transform(self, node, results):
+        """Returns the transformation for a given parse tree node.
+
+        Args:
+          node: the root of the parse tree that matched the fixer.
+          results: a dict mapping symbolic names to part of the match.
+
+        Returns:
+          None, or a node that is a modified copy of the
+          argument node.  The node argument may also be modified in-place to
+          effect the same change.
+
+        Subclass *must* override.
+        """
+        raise NotImplementedError()
+
+    def new_name(self, template="xxx_todo_changeme"):
+        """Return a string suitable for use as an identifier
+
+        The new name is guaranteed not to conflict with other identifiers.
+        """
+        name = template
+        while name in self.used_names:
+            name = template + str(self.numbers.next())
+        self.used_names.add(name)
+        return name
+
+    def log_message(self, message):
+        if self.first_log:
+            self.first_log = False
+            self.log.append("### In file %s ###" % self.filename)
+        self.log.append(message)
+
+    def cannot_convert(self, node, reason=None):
+        """Warn the user that a given chunk of code is not valid Python 3,
+        and that it cannot be converted automatically.
+
+        First argument is the top-level node for the code in question.
+        Optional second argument is why it can't be converted.
+        """
+        lineno = node.get_lineno()
+        for_output = node.clone()
+        for_output.set_prefix("")
+        msg = "Line %d: could not convert: %s"
+        self.log_message(msg % (lineno, for_output))
+        if reason:
+            self.log_message(reason)
+
+    def warning(self, node, reason):
+        """Used for warning the user about possible uncertainty in the
+        translation.
+
+        First argument is the top-level node for the code in question.
+        Second argument is the reason for the warning.
+        """
+        lineno = node.get_lineno()
+        self.log_message("Line %d: %s" % (lineno, reason))
+
+    def start_tree(self, tree, filename):
+        """Some fixers need to maintain tree-wide state.
+        This method is called once, at the start of tree fix-up.
+
+        tree - the root node of the tree to be processed.
+        filename - the name of the file the tree came from.
+        """
+        self.used_names = tree.used_names
+        self.set_filename(filename)
+        self.numbers = itertools.count(1)
+        self.first_log = True
+
+    def finish_tree(self, tree, filename):
+        """Some fixers need to maintain tree-wide state.
+        This method is called once, at the conclusion of tree fix-up.
+
+        tree - the root node of the tree to be processed.
+        filename - the name of the file the tree came from.
+        """
+        pass
+
+
+class ConditionalFix(BaseFix):
+    """ Base class for fixers which not execute if an import is found. """
+
+    # This is the name of the import which, if found, will cause the fixer to be skipped
+    skip_on = None
+
+    def start_tree(self, *args):
+        super(ConditionalFix, self).start_tree(*args)
+        self._should_skip = None
+
+    def should_skip(self, node):
+        if self._should_skip is not None:
+            return self._should_skip
+        pkg = self.skip_on.split(".")
+        name = pkg[-1]
+        pkg = ".".join(pkg[:-1])
+        self._should_skip = does_tree_import(pkg, name, node)
+        return self._should_skip

Added: sandbox/trunk/refactor_pkg/refactor/fixer_util.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixer_util.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,425 @@
+"""Utility functions, node construction macros, etc."""
+# Author: Collin Winter
+
+# Local imports
+from .pgen2 import token
+from .pytree import Leaf, Node
+from .pygram import python_symbols as syms
+from . import patcomp
+
+
+###########################################################
+### Common node-construction "macros"
+###########################################################
+
+def KeywordArg(keyword, value):
+    return Node(syms.argument,
+                [keyword, Leaf(token.EQUAL, '='), value])
+
+def LParen():
+    return Leaf(token.LPAR, "(")
+
+def RParen():
+    return Leaf(token.RPAR, ")")
+
+def Assign(target, source):
+    """Build an assignment statement"""
+    if not isinstance(target, list):
+        target = [target]
+    if not isinstance(source, list):
+        source.set_prefix(" ")
+        source = [source]
+
+    return Node(syms.atom,
+                target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
+
+def Name(name, prefix=None):
+    """Return a NAME leaf"""
+    return Leaf(token.NAME, name, prefix=prefix)
+
+def Attr(obj, attr):
+    """A node tuple for obj.attr"""
+    return [obj, Node(syms.trailer, [Dot(), attr])]
+
+def Comma():
+    """A comma leaf"""
+    return Leaf(token.COMMA, ",")
+
+def Dot():
+    """A period (.) leaf"""
+    return Leaf(token.DOT, ".")
+
+def ArgList(args, lparen=LParen(), rparen=RParen()):
+    """A parenthesised argument list, used by Call()"""
+    node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
+    if args:
+        node.insert_child(1, Node(syms.arglist, args))
+    return node
+
+def Call(func_name, args=None, prefix=None):
+    """A function call"""
+    node = Node(syms.power, [func_name, ArgList(args)])
+    if prefix is not None:
+        node.set_prefix(prefix)
+    return node
+
+def Newline():
+    """A newline literal"""
+    return Leaf(token.NEWLINE, "\n")
+
+def BlankLine():
+    """A blank line"""
+    return Leaf(token.NEWLINE, "")
+
+def Number(n, prefix=None):
+    return Leaf(token.NUMBER, n, prefix=prefix)
+
+def Subscript(index_node):
+    """A numeric or string subscript"""
+    return Node(syms.trailer, [Leaf(token.LBRACE, '['),
+                               index_node,
+                               Leaf(token.RBRACE, ']')])
+
+def String(string, prefix=None):
+    """A string leaf"""
+    return Leaf(token.STRING, string, prefix=prefix)
+
+def ListComp(xp, fp, it, test=None):
+    """A list comprehension of the form [xp for fp in it if test].
+
+    If test is None, the "if test" part is omitted.
+    """
+    xp.set_prefix("")
+    fp.set_prefix(" ")
+    it.set_prefix(" ")
+    for_leaf = Leaf(token.NAME, "for")
+    for_leaf.set_prefix(" ")
+    in_leaf = Leaf(token.NAME, "in")
+    in_leaf.set_prefix(" ")
+    inner_args = [for_leaf, fp, in_leaf, it]
+    if test:
+        test.set_prefix(" ")
+        if_leaf = Leaf(token.NAME, "if")
+        if_leaf.set_prefix(" ")
+        inner_args.append(Node(syms.comp_if, [if_leaf, test]))
+    inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
+    return Node(syms.atom,
+                       [Leaf(token.LBRACE, "["),
+                        inner,
+                        Leaf(token.RBRACE, "]")])
+
+def FromImport(package_name, name_leafs):
+    """ Return an import statement in the form:
+        from package import name_leafs"""
+    # XXX: May not handle dotted imports properly (e.g., package_name='foo.bar')
+    #assert package_name == '.' or '.' not in package_name, "FromImport has "\
+    #       "not been tested with dotted package names -- use at your own "\
+    #       "peril!"
+
+    for leaf in name_leafs:
+        # Pull the leaves out of their old tree
+        leaf.remove()
+
+    children = [Leaf(token.NAME, 'from'),
+                Leaf(token.NAME, package_name, prefix=" "),
+                Leaf(token.NAME, 'import', prefix=" "),
+                Node(syms.import_as_names, name_leafs)]
+    imp = Node(syms.import_from, children)
+    return imp
+
+
+###########################################################
+### Determine whether a node represents a given literal
+###########################################################
+
+def is_tuple(node):
+    """Does the node represent a tuple literal?"""
+    if isinstance(node, Node) and node.children == [LParen(), RParen()]:
+        return True
+    return (isinstance(node, Node)
+            and len(node.children) == 3
+            and isinstance(node.children[0], Leaf)
+            and isinstance(node.children[1], Node)
+            and isinstance(node.children[2], Leaf)
+            and node.children[0].value == "("
+            and node.children[2].value == ")")
+
+def is_list(node):
+    """Does the node represent a list literal?"""
+    return (isinstance(node, Node)
+            and len(node.children) > 1
+            and isinstance(node.children[0], Leaf)
+            and isinstance(node.children[-1], Leaf)
+            and node.children[0].value == "["
+            and node.children[-1].value == "]")
+
+
+###########################################################
+### Misc
+###########################################################
+
+def parenthesize(node):
+    return Node(syms.atom, [LParen(), node, RParen()])
+
+
+consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
+                       "min", "max"])
+
+def attr_chain(obj, attr):
+    """Follow an attribute chain.
+
+    If you have a chain of objects where a.foo -> b, b.foo -> c, etc.,
+    use this to iterate over all objects in the chain. Iteration
+    terminates when getattr(x, attr) is None.
+
+    Args:
+        obj: the starting object
+        attr: the name of the chaining attribute
+
+    Yields:
+        Each successive object in the chain.
+    """
+    next = getattr(obj, attr)
+    while next:
+        yield next
+        next = getattr(next, attr)
+
+p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
+        | comp_for< 'for' any 'in' node=any any* >
+     """
+p1 = """
+power<
+    ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
+      'any' | 'all' | (any* trailer< '.' 'join' >) )
+    trailer< '(' node=any ')' >
+    any*
+>
+"""
+p2 = """
+power<
+    'sorted'
+    trailer< '(' arglist<node=any any*> ')' >
+    any*
+>
+"""
+pats_built = False
+def in_special_context(node):
+    """ Returns true if node is in an environment where all that is required
+        of it is being itterable (ie, it doesn't matter if it returns a list
+        or an itterator).
+        See test_map_nochange in test_fixers.py for some examples and tests.
+        """
+    global p0, p1, p2, pats_built
+    if not pats_built:
+        p1 = patcomp.compile_pattern(p1)
+        p0 = patcomp.compile_pattern(p0)
+        p2 = patcomp.compile_pattern(p2)
+        pats_built = True
+    patterns = [p0, p1, p2]
+    for pattern, parent in zip(patterns, attr_chain(node, "parent")):
+        results = {}
+        if pattern.match(parent, results) and results["node"] is node:
+            return True
+    return False
+
+def is_probably_builtin(node):
+    """
+    Check that a name is not an attribute lookup, a definition, an assignment
+    target or a parameter name, i.e. that it probably refers to the builtin.
+    """
+    prev = node.prev_sibling
+    if prev is not None and prev.type == token.DOT:
+        # Attribute lookup.
+        return False
+    parent = node.parent
+    if parent.type in (syms.funcdef, syms.classdef):
+        return False
+    if parent.type == syms.expr_stmt and parent.children[0] is node:
+        # Assignment.
+        return False
+    if parent.type == syms.parameters or \
+            (parent.type == syms.typedargslist and (
+            (prev is not None and prev.type == token.COMMA) or
+            parent.children[0] is node
+            )):
+        # The name of an argument.
+        return False
+    return True
+
+###########################################################
+### The following functions are to find bindings in a suite
+###########################################################
+
+def make_suite(node):
+    if node.type == syms.suite:
+        return node
+    node = node.clone()
+    parent, node.parent = node.parent, None
+    suite = Node(syms.suite, [node])
+    suite.parent = parent
+    return suite
+
+def find_root(node):
+    """Find the top level namespace."""
+    # Scamper up to the top level namespace
+    while node.type != syms.file_input:
+        assert node.parent, "Tree is insane! root found before "\
+                           "file_input node was found."
+        node = node.parent
+    return node
+
+def does_tree_import(package, name, node):
+    """ Returns true if name is imported from package at the
+        top level of the tree which node belongs to.
+        To cover the case of an import like 'import foo', use
+        None for the package and 'foo' for the name. """
+    binding = find_binding(name, find_root(node), package)
+    return bool(binding)
+
+def is_import(node):
+    """Returns true if the node is an import statement."""
+    return node.type in (syms.import_name, syms.import_from)
+
+def touch_import(package, name, node):
+    """ Works like `does_tree_import` but adds an import statement
+        if it was not imported. """
+    def is_import_stmt(node):
+        return node.type == syms.simple_stmt and node.children and \
+               is_import(node.children[0])
+
+    root = find_root(node)
+
+    if does_tree_import(package, name, root):
+        return
+
+
+    # figure out where to insert the new import.  First try to find
+    # the first import and then skip to the last one.
+    insert_pos = offset = 0
+    for idx, node in enumerate(root.children):
+        if not is_import_stmt(node):
+            continue
+        for offset, node2 in enumerate(root.children[idx:]):
+            if not is_import_stmt(node2):
+                break
+        insert_pos = idx + offset
+        break
+
+    # if there are no imports where we can insert, find the docstring.
+    # if that also fails, we stick to the beginning of the file
+    if insert_pos == 0:
+        for idx, node in enumerate(root.children):
+            if node.type == syms.simple_stmt and node.children and \
+               node.children[0].type == token.STRING:
+                insert_pos = idx + 1
+                break
+
+    if package is None:
+        import_ = Node(syms.import_name, [
+            Leaf(token.NAME, 'import'),
+            Leaf(token.NAME, name, prefix=' ')
+        ])
+    else:
+        import_ = FromImport(package, [Leaf(token.NAME, name, prefix=' ')])
+
+    children = [import_, Newline()]
+    root.insert_child(insert_pos, Node(syms.simple_stmt, children))
+
+
+_def_syms = set([syms.classdef, syms.funcdef])
+def find_binding(name, node, package=None):
+    """ Returns the node which binds variable name, otherwise None.
+        If optional argument package is supplied, only imports will
+        be returned.
+        See test cases for examples."""
+    for child in node.children:
+        ret = None
+        if child.type == syms.for_stmt:
+            if _find(name, child.children[1]):
+                return child
+            n = find_binding(name, make_suite(child.children[-1]), package)
+            if n: ret = n
+        elif child.type in (syms.if_stmt, syms.while_stmt):
+            n = find_binding(name, make_suite(child.children[-1]), package)
+            if n: ret = n
+        elif child.type == syms.try_stmt:
+            n = find_binding(name, make_suite(child.children[2]), package)
+            if n:
+                ret = n
+            else:
+                for i, kid in enumerate(child.children[3:]):
+                    if kid.type == token.COLON and kid.value == ":":
+                        # i+3 is the colon, i+4 is the suite
+                        n = find_binding(name, make_suite(child.children[i+4]), package)
+                        if n: ret = n
+        elif child.type in _def_syms and child.children[1].value == name:
+            ret = child
+        elif _is_import_binding(child, name, package):
+            ret = child
+        elif child.type == syms.simple_stmt:
+            ret = find_binding(name, child, package)
+        elif child.type == syms.expr_stmt:
+            if _find(name, child.children[0]):
+                ret = child
+
+        if ret:
+            if not package:
+                return ret
+            if is_import(ret):
+                return ret
+    return None
+
+_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
+def _find(name, node):
+    nodes = [node]
+    while nodes:
+        node = nodes.pop()
+        if node.type > 256 and node.type not in _block_syms:
+            nodes.extend(node.children)
+        elif node.type == token.NAME and node.value == name:
+            return node
+    return None
+
+def _is_import_binding(node, name, package=None):
+    """ Will reuturn node if node will import name, or node
+        will import * from package.  None is returned otherwise.
+        See test cases for examples. """
+
+    if node.type == syms.import_name and not package:
+        imp = node.children[1]
+        if imp.type == syms.dotted_as_names:
+            for child in imp.children:
+                if child.type == syms.dotted_as_name:
+                    if child.children[2].value == name:
+                        return node
+                elif child.type == token.NAME and child.value == name:
+                    return node
+        elif imp.type == syms.dotted_as_name:
+            last = imp.children[-1]
+            if last.type == token.NAME and last.value == name:
+                return node
+        elif imp.type == token.NAME and imp.value == name:
+            return node
+    elif node.type == syms.import_from:
+        # unicode(...) is used to make life easier here, because
+        # from a.b import parses to ['import', ['a', '.', 'b'], ...]
+        if package and unicode(node.children[1]).strip() != package:
+            return None
+        n = node.children[3]
+        if package and _find('as', n):
+            # See test_from_import_as for explanation
+            return None
+        elif n.type == syms.import_as_names and _find(name, n):
+            return node
+        elif n.type == syms.import_as_name:
+            child = n.children[2]
+            if child.type == token.NAME and child.value == name:
+                return node
+        elif n.type == token.NAME and n.value == name:
+            return node
+        elif package and n.type == token.STAR:
+            return node
+    return None
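
A short usage sketch for the binding and import helpers above (this
assumes the package is importable as 'refactor' and that the pgen2
driver API matches lib2to3's; the tests in test_util.py earlier in this
commit are the authoritative reference):

    from refactor import pytree, pygram, fixer_util
    from refactor.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string('"""doc"""\nbar()\n')
    # Insert 'import foo' after the module docstring (no-op if present).
    fixer_util.touch_import(None, "foo", tree)
    # The new import statement now binds 'foo' at the top level.
    assert fixer_util.find_binding("foo", tree) is not None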

Added: sandbox/trunk/refactor_pkg/refactor/fixes/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,3 @@
+from . import from2
+from . import from3
+from .from2 import *

Added: sandbox/trunk/refactor_pkg/refactor/fixes/fixer_common.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/fixer_common.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,4 @@
+# Common fixer imports
+from .. import fixer_base
+from ..fixer_util import Name, Call, consuming_calls, attr_chain
+from .. import patcomp

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,49 @@
+from . import fix_apply
+from . import fix_basestring
+from . import fix_buffer
+from . import fix_callable
+from . import fix_dict
+from . import fix_except
+from . import fix_exec
+from . import fix_execfile
+from . import fix_filter
+from . import fix_funcattrs
+from . import fix_future
+from . import fix_getcwdu
+from . import fix_has_key
+from . import fix_idioms
+from . import fix_import
+from . import fix_imports
+from . import fix_imports2
+from . import fix_input
+from . import fix_intern
+from . import fix_isinstance
+from . import fix_itertools
+from . import fix_itertools_imports
+from . import fix_long
+from . import fix_map
+from . import fix_metaclass
+from . import fix_methodattrs
+from . import fix_ne
+from . import fix_next
+from . import fix_nonzero
+from . import fix_numliterals
+from . import fix_paren
+from . import fix_print
+from . import fix_raise
+from . import fix_raw_input
+from . import fix_reduce
+from . import fix_renames
+from . import fix_repr
+from . import fix_set_literal
+from . import fix_standarderror
+from . import fix_sys_exc
+from . import fix_throw
+from . import fix_tuple_params
+from . import fix_types
+from . import fix_unicode
+from . import fix_urllib
+from . import fix_ws_comma
+from . import fix_xrange
+from . import fix_xreadlines
+from . import fix_zip

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_apply.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_apply.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,58 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for apply().
+
+This converts apply(func, v, k) into (func)(*v, **k)."""
+
+# Local imports
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Call, Comma, parenthesize
+
+class FixApply(fixer_base.BaseFix):
+
+    PATTERN = """
+    power< 'apply'
+        trailer<
+            '('
+            arglist<
+                (not argument<NAME '=' any>) func=any ','
+                (not argument<NAME '=' any>) args=any [','
+                (not argument<NAME '=' any>) kwds=any] [',']
+            >
+            ')'
+        >
+    >
+    """
+
+    def transform(self, node, results):
+        syms = self.syms
+        assert results
+        func = results["func"]
+        args = results["args"]
+        kwds = results.get("kwds")
+        prefix = node.get_prefix()
+        func = func.clone()
+        if (func.type not in (token.NAME, syms.atom) and
+            (func.type != syms.power or
+             func.children[-2].type == token.DOUBLESTAR)):
+            # Need to parenthesize
+            func = parenthesize(func)
+        func.set_prefix("")
+        args = args.clone()
+        args.set_prefix("")
+        if kwds is not None:
+            kwds = kwds.clone()
+            kwds.set_prefix("")
+        l_newargs = [pytree.Leaf(token.STAR, "*"), args]
+        if kwds is not None:
+            l_newargs.extend([Comma(),
+                              pytree.Leaf(token.DOUBLESTAR, "**"),
+                              kwds])
+            l_newargs[-2].set_prefix(" ") # that's the ** token
+        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
+        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
+        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
+        return Call(func, l_newargs, prefix=prefix)
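
Illustrative input/output for this fixer (a sketch, not taken from the
test suite):

    apply(f, args)          ->  f(*args)
    apply(f, args, kwds)    ->  f(*args, **kwds)
    apply(f + g, args)      ->  (f + g)(*args)    # callee is parenthesized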

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_basestring.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_basestring.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,13 @@
+"""Fixer for basestring -> str."""
+# Author: Christian Heimes
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+class FixBasestring(fixer_base.BaseFix):
+
+    PATTERN = "'basestring'"
+
+    def transform(self, node, results):
+        return Name("str", prefix=node.get_prefix())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_buffer.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_buffer.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,21 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes buffer(...) into memoryview(...)."""
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+
+class FixBuffer(fixer_base.BaseFix):
+
+    explicit = True # The user must ask for this fixer
+
+    PATTERN = """
+              power< name='buffer' trailer< '(' [any] ')' > >
+              """
+
+    def transform(self, node, results):
+        name = results["name"]
+        name.replace(Name("memoryview", prefix=name.get_prefix()))

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_callable.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_callable.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,31 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for callable().
+
+This converts callable(obj) into hasattr(obj, '__call__')."""
+
+# Local imports
+from ... import pytree
+from ... import fixer_base
+from ...fixer_util import Call, Name, String
+
+class FixCallable(fixer_base.BaseFix):
+
+    # Ignore callable(*args) or use of keywords.
+    # Either could be a hint that the builtin callable() is not being used.
+    PATTERN = """
+    power< 'callable'
+           trailer< lpar='('
+                    ( not(arglist | argument<any '=' any>) func=any
+                      | func=arglist<(not argument<any '=' any>) any ','> )
+                    rpar=')' >
+           after=any*
+    >
+    """
+
+    def transform(self, node, results):
+        func = results["func"]
+
+        args = [func.clone(), String(', '), String("'__call__'")]
+        return Call(Name("hasattr"), args, prefix=node.get_prefix())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_dict.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_dict.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,99 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for dict methods.
+
+d.keys() -> list(d.keys())
+d.items() -> list(d.items())
+d.values() -> list(d.values())
+
+d.iterkeys() -> iter(d.keys())
+d.iteritems() -> iter(d.items())
+d.itervalues() -> iter(d.values())
+
+Except in certain very specific contexts: the iter() can be dropped
+when the context is list(), sorted(), iter() or for...in; the list()
+can be dropped when the context is list() or sorted() (but not iter()
+or for...in!). Special contexts that apply to both: list(), sorted(), tuple(),
+set(), any(), all(), sum().
+
+Note: iter(d.keys()) could be written as iter(d) but since the
+original d.iterkeys() was also redundant we don't fix this.  And there
+are (rare) contexts where it makes a difference (e.g. when passing it
+as an argument to a function that introspects the argument).
+"""
+
+# Local imports
+from ... import pytree
+from ... import patcomp
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name, Call, LParen, RParen, ArgList, Dot
+from ... import fixer_util
+
+
+iter_exempt = fixer_util.consuming_calls | set(["iter"])
+
+
+class FixDict(fixer_base.BaseFix):
+    PATTERN = """
+    power< head=any+
+         trailer< '.' method=('keys'|'items'|'values'|
+                              'iterkeys'|'iteritems'|'itervalues') >
+         parens=trailer< '(' ')' >
+         tail=any*
+    >
+    """
+
+    def transform(self, node, results):
+        head = results["head"]
+        method = results["method"][0] # Extract node for method name
+        tail = results["tail"]
+        syms = self.syms
+        method_name = method.value
+        isiter = method_name.startswith("iter")
+        if isiter:
+            method_name = method_name[4:]
+        assert method_name in ("keys", "items", "values"), repr(method)
+        head = [n.clone() for n in head]
+        tail = [n.clone() for n in tail]
+        special = not tail and self.in_special_context(node, isiter)
+        args = head + [pytree.Node(syms.trailer,
+                                   [Dot(),
+                                    Name(method_name,
+                                         prefix=method.get_prefix())]),
+                       results["parens"].clone()]
+        new = pytree.Node(syms.power, args)
+        if not special:
+            new.set_prefix("")
+            new = Call(Name(isiter and "iter" or "list"), [new])
+        if tail:
+            new = pytree.Node(syms.power, [new] + tail)
+        new.set_prefix(node.get_prefix())
+        return new
+
+    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+    p1 = patcomp.compile_pattern(P1)
+
+    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+            | comp_for< 'for' any 'in' node=any any* >
+         """
+    p2 = patcomp.compile_pattern(P2)
+
+    def in_special_context(self, node, isiter):
+        if node.parent is None:
+            return False
+        results = {}
+        if (node.parent.parent is not None and
+               self.p1.match(node.parent.parent, results) and
+               results["node"] is node):
+            if isiter:
+                # iter(d.iterkeys()) -> iter(d.keys()), etc.
+                return results["func"].value in iter_exempt
+            else:
+                # list(d.keys()) -> list(d.keys()), etc.
+                return results["func"].value in fixer_util.consuming_calls
+        if not isiter:
+            return False
+        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+        return self.p2.match(node.parent, results) and results["node"] is node
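
Illustrative input/output (sketch):

    d.keys()                    ->  list(d.keys())
    d.iteritems()               ->  iter(d.items())
    for k in d.iterkeys(): ...  ->  for k in d.keys(): ...   # iter() dropped
    sorted(d.values())          ->  sorted(d.values())        # list() dropped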

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_except.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_except.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,92 @@
+"""Fixer for except statements with named exceptions.
+
+The following cases will be converted:
+
+- "except E, T:" where T is a name:
+
+    except E as T:
+
+- "except E, T:" where T is not a name, tuple or list:
+
+        except E as t:
+            T = t
+
+    This is done because the target of an "except" clause must be a
+    name.
+
+- "except E, T:" where T is a tuple or list literal:
+
+        except E as t:
+            T = t.args
+"""
+# Author: Collin Winter
+
+# Local imports
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
+
+def find_excepts(nodes):
+    for i, n in enumerate(nodes):
+        if n.type == syms.except_clause:
+            if n.children[0].value == 'except':
+                yield (n, nodes[i+2])
+
+class FixExcept(fixer_base.BaseFix):
+
+    PATTERN = """
+    try_stmt< 'try' ':' suite
+                  cleanup=(except_clause ':' suite)+
+                  tail=(['except' ':' suite]
+                        ['else' ':' suite]
+                        ['finally' ':' suite]) >
+    """
+
+    def transform(self, node, results):
+        syms = self.syms
+
+        tail = [n.clone() for n in results["tail"]]
+
+        try_cleanup = [ch.clone() for ch in results["cleanup"]]
+        for except_clause, e_suite in find_excepts(try_cleanup):
+            if len(except_clause.children) == 4:
+                (E, comma, N) = except_clause.children[1:4]
+                comma.replace(Name("as", prefix=" "))
+
+                if N.type != token.NAME:
+                    # Generate a new N for the except clause
+                    new_N = Name(self.new_name(), prefix=" ")
+                    target = N.clone()
+                    target.set_prefix("")
+                    N.replace(new_N)
+                    new_N = new_N.clone()
+
+                    # Insert "old_N = new_N" as the first statement in
+                    #  the except body. This loop skips leading whitespace
+                    #  and indents
+                    #TODO(cwinter) suite-cleanup
+                    suite_stmts = e_suite.children
+                    for i, stmt in enumerate(suite_stmts):
+                        if isinstance(stmt, pytree.Node):
+                            break
+
+                    # The assignment is different if old_N is a tuple or list
+                    # In that case, the assignment is old_N = new_N.args
+                    if is_tuple(N) or is_list(N):
+                        assign = Assign(target, Attr(new_N, Name('args')))
+                    else:
+                        assign = Assign(target, new_N)
+
+                    #TODO(cwinter) stopgap until children becomes a smart list
+                    for child in reversed(suite_stmts[:i]):
+                        e_suite.insert_child(0, child)
+                    e_suite.insert_child(i, assign)
+                elif N.get_prefix() == "":
+                    # No space after a comma is legal; no space after "as",
+                    # not so much.
+                    N.set_prefix(" ")
+
+        #TODO(cwinter) fix this when children becomes a smart list
+        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
+        return pytree.Node(node.type, children)
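
Illustrative input/output (sketch; xxx_todo_changeme is the default
template of BaseFix.new_name() in fixer_base.py):

    except ValueError, e:        ->  except ValueError as e:
    except ValueError, e.msg:    ->  except ValueError as xxx_todo_changeme:
                                         e.msg = xxx_todo_changeme
    except ValueError, (a, b):   ->  except ValueError as xxx_todo_changeme:
                                         (a, b) = xxx_todo_changeme.args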

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_exec.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_exec.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,39 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for exec.
+
+This converts usages of the exec statement into calls to a built-in
+exec() function.
+
+exec code in ns1, ns2 -> exec(code, ns1, ns2)
+"""
+
+# Local imports
+from ... import pytree
+from ... import fixer_base
+from ...fixer_util import Comma, Name, Call
+
+
+class FixExec(fixer_base.BaseFix):
+
+    PATTERN = """
+    exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
+    |
+    exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
+    """
+
+    def transform(self, node, results):
+        assert results
+        syms = self.syms
+        a = results["a"]
+        b = results.get("b")
+        c = results.get("c")
+        args = [a.clone()]
+        args[0].set_prefix("")
+        if b is not None:
+            args.extend([Comma(), b.clone()])
+        if c is not None:
+            args.extend([Comma(), c.clone()])
+
+        return Call(Name("exec"), args, prefix=node.get_prefix())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_execfile.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_execfile.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,51 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for execfile.
+
+This converts usages of the execfile function into calls to the built-in
+exec() function.
+"""
+
+from ... import fixer_base
+from ...fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
+                          ArgList, String, syms)
+
+
+class FixExecfile(fixer_base.BaseFix):
+
+    PATTERN = """
+    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
+    |
+    power< 'execfile' trailer< '(' filename=any ')' > >
+    """
+
+    def transform(self, node, results):
+        assert results
+        filename = results["filename"]
+        globals = results.get("globals")
+        locals = results.get("locals")
+
+        # Copy over the prefix from the right parentheses end of the execfile
+        # call.
+        execfile_paren = node.children[-1].children[-1].clone()
+        # Construct open().read().
+        open_args = ArgList([filename.clone()], rparen=execfile_paren)
+        open_call = Node(syms.power, [Name("open"), open_args])
+        read = [Node(syms.trailer, [Dot(), Name('read')]),
+                Node(syms.trailer, [LParen(), RParen()])]
+        open_expr = [open_call] + read
+        # Wrap the open call in a compile call. This is so the filename will be
+        # preserved in the execed code.
+        filename_arg = filename.clone()
+        filename_arg.set_prefix(" ")
+        exec_str = String("'exec'", " ")
+        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
+        compile_call = Call(Name("compile"), compile_args, "")
+        # Finally, replace the execfile call with an exec call.
+        args = [compile_call]
+        if globals is not None:
+            args.extend([Comma(), globals.clone()])
+        if locals is not None:
+            args.extend([Comma(), locals.clone()])
+        return Call(Name("exec"), args, prefix=node.get_prefix())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_filter.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_filter.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,75 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes filter(F, X) into list(filter(F, X)).
+
+We avoid the transformation if the filter() call is directly contained
+in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
+for V in <>:.
+
+NOTE: This is still not correct if the original code was depending on
+filter(F, X) to return a string if X is a string and a tuple if X is a
+tuple.  That would require type inference, which we don't do.  Let
+Python 2.6 figure it out.
+"""
+
+# Local imports
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name, Call, ListComp, in_special_context
+
+class FixFilter(fixer_base.ConditionalFix):
+
+    PATTERN = """
+    filter_lambda=power<
+        'filter'
+        trailer<
+            '('
+            arglist<
+                lambdef< 'lambda'
+                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+                >
+                ','
+                it=any
+            >
+            ')'
+        >
+    >
+    |
+    power<
+        'filter'
+        trailer< '(' arglist< none='None' ',' seq=any > ')' >
+    >
+    |
+    power<
+        'filter'
+        args=trailer< '(' [any] ')' >
+    >
+    """
+
+    skip_on = "future_builtins.filter"
+
+    def transform(self, node, results):
+        if self.should_skip(node):
+            return
+
+        if "filter_lambda" in results:
+            new = ListComp(results.get("fp").clone(),
+                           results.get("fp").clone(),
+                           results.get("it").clone(),
+                           results.get("xp").clone())
+
+        elif "none" in results:
+            new = ListComp(Name("_f"),
+                           Name("_f"),
+                           results["seq"].clone(),
+                           Name("_f"))
+
+        else:
+            if in_special_context(node):
+                return None
+            new = node.clone()
+            new.set_prefix("")
+            new = Call(Name("list"), [new])
+        new.set_prefix(node.get_prefix())
+        return new
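
Illustrative input/output (sketch):

    filter(lambda x: x > 0, seq)   ->  [x for x in seq if x > 0]
    filter(None, seq)              ->  [_f for _f in seq if _f]
    filter(f, seq)                 ->  list(filter(f, seq))
    for x in filter(f, seq): ...   ->  unchanged (special context)

As a ConditionalFix, the whole fixer is skipped when the module does
'from future_builtins import filter'.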

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_funcattrs.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_funcattrs.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,19 @@
+"""Fix function attribute names (f.func_x -> f.__x__)."""
+# Author: Collin Winter
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+
+class FixFuncattrs(fixer_base.BaseFix):
+    PATTERN = """
+    power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
+                                  | 'func_name' | 'func_defaults' | 'func_code'
+                                  | 'func_dict') > any* >
+    """
+
+    def transform(self, node, results):
+        attr = results["attr"][0]
+        attr.replace(Name(("__%s__" % attr.value[5:]),
+                          prefix=attr.get_prefix()))

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_future.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_future.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,20 @@
+"""Remove __future__ imports
+
+from __future__ import foo is replaced with an empty line.
+"""
+# Author: Christian Heimes
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import BlankLine
+
+class FixFuture(fixer_base.BaseFix):
+    PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
+
+    # This should be run last -- some things check for the import
+    run_order = 10
+
+    def transform(self, node, results):
+        new = BlankLine()
+        new.set_prefix(node.get_prefix())
+        return new

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_getcwdu.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_getcwdu.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,18 @@
+"""
+Fixer that changes os.getcwdu() to os.getcwd().
+"""
+# Author: Victor Stinner
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+class FixGetcwdu(fixer_base.BaseFix):
+
+    PATTERN = """
+              power< 'os' trailer< dot='.' name='getcwdu' > any* >
+              """
+
+    def transform(self, node, results):
+        name = results["name"]
+        name.replace(Name("getcwd", prefix=name.get_prefix()))

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_has_key.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_has_key.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,109 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for has_key().
+
+Calls to .has_key() methods are expressed in terms of the 'in'
+operator:
+
+    d.has_key(k) -> k in d
+
+CAVEATS:
+1) While the primary target of this fixer is dict.has_key(), the
+   fixer will change any has_key() method call, regardless of its
+   class.
+
+2) Cases like this will not be converted:
+
+    m = d.has_key
+    if m(k):
+        ...
+
+   Only *calls* to has_key() are converted. While it is possible to
+   convert the above to something like
+
+    m = d.__contains__
+    if m(k):
+        ...
+
+   this is currently not done.
+"""
+
+# Local imports
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name, parenthesize
+
+
+class FixHasKey(fixer_base.BaseFix):
+
+    PATTERN = """
+    anchor=power<
+        before=any+
+        trailer< '.' 'has_key' >
+        trailer<
+            '('
+            ( not(arglist | argument<any '=' any>) arg=any
+            | arglist<(not argument<any '=' any>) arg=any ','>
+            )
+            ')'
+        >
+        after=any*
+    >
+    |
+    negation=not_test<
+        'not'
+        anchor=power<
+            before=any+
+            trailer< '.' 'has_key' >
+            trailer<
+                '('
+                ( not(arglist | argument<any '=' any>) arg=any
+                | arglist<(not argument<any '=' any>) arg=any ','>
+                )
+                ')'
+            >
+        >
+    >
+    """
+
+    def transform(self, node, results):
+        assert results
+        syms = self.syms
+        if (node.parent.type == syms.not_test and
+            self.pattern.match(node.parent)):
+            # Don't transform a node matching the first alternative of the
+            # pattern when its parent matches the second alternative
+            return None
+        negation = results.get("negation")
+        anchor = results["anchor"]
+        prefix = node.get_prefix()
+        before = [n.clone() for n in results["before"]]
+        arg = results["arg"].clone()
+        after = results.get("after")
+        if after:
+            after = [n.clone() for n in after]
+        if arg.type in (syms.comparison, syms.not_test, syms.and_test,
+                        syms.or_test, syms.test, syms.lambdef, syms.argument):
+            arg = parenthesize(arg)
+        if len(before) == 1:
+            before = before[0]
+        else:
+            before = pytree.Node(syms.power, before)
+        before.set_prefix(" ")
+        n_op = Name("in", prefix=" ")
+        if negation:
+            n_not = Name("not", prefix=" ")
+            n_op = pytree.Node(syms.comp_op, (n_not, n_op))
+        new = pytree.Node(syms.comparison, (arg, n_op, before))
+        if after:
+            new = parenthesize(new)
+            new = pytree.Node(syms.power, (new,) + tuple(after))
+        if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
+                                syms.and_expr, syms.shift_expr,
+                                syms.arith_expr, syms.term,
+                                syms.factor, syms.power):
+            new = parenthesize(new)
+        new.set_prefix(prefix)
+        return new
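
Illustrative input/output (sketch):

    d.has_key(k)         ->  k in d
    not d.has_key(k)     ->  k not in d
    d.has_key(a or b)    ->  (a or b) in d    # argument is parenthesized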

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_idioms.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_idioms.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,134 @@
+"""Adjust some old Python 2 idioms to their modern counterparts.
+
+* Change some type comparisons to isinstance() calls:
+    type(x) == T -> isinstance(x, T)
+    type(x) is T -> isinstance(x, T)
+    type(x) != T -> not isinstance(x, T)
+    type(x) is not T -> not isinstance(x, T)
+
+* Change "while 1:" into "while True:".
+
+* Change both
+
+    v = list(EXPR)
+    v.sort()
+    foo(v)
+
+and the more general
+
+    v = EXPR
+    v.sort()
+    foo(v)
+
+into
+
+    v = sorted(EXPR)
+    foo(v)
+"""
+# Author: Jacques Frechet, Collin Winter
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Call, Comma, Name, Node, syms
+
+CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
+TYPE = "power< 'type' trailer< '(' x=any ')' > >"
+
+class FixIdioms(fixer_base.BaseFix):
+
+    explicit = True # The user must ask for this fixer
+
+    PATTERN = r"""
+        isinstance=comparison< %s %s T=any >
+        |
+        isinstance=comparison< T=any %s %s >
+        |
+        while_stmt< 'while' while='1' ':' any+ >
+        |
+        sorted=any<
+            any*
+            simple_stmt<
+              expr_stmt< id1=any '='
+                         power< list='list' trailer< '(' (not arglist<any+>) any ')' > >
+              >
+              '\n'
+            >
+            sort=
+            simple_stmt<
+              power< id2=any
+                     trailer< '.' 'sort' > trailer< '(' ')' >
+              >
+              '\n'
+            >
+            next=any*
+        >
+        |
+        sorted=any<
+            any*
+            simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
+            sort=
+            simple_stmt<
+              power< id2=any
+                     trailer< '.' 'sort' > trailer< '(' ')' >
+              >
+              '\n'
+            >
+            next=any*
+        >
+    """ % (TYPE, CMP, CMP, TYPE)
+
+    def match(self, node):
+        r = super(FixIdioms, self).match(node)
+        # If we've matched one of the sort/sorted subpatterns above, we
+        # want to reject matches where the initial assignment and the
+        # subsequent .sort() call involve different identifiers.
+        if r and "sorted" in r:
+            if r["id1"] == r["id2"]:
+                return r
+            return None
+        return r
+
+    def transform(self, node, results):
+        if "isinstance" in results:
+            return self.transform_isinstance(node, results)
+        elif "while" in results:
+            return self.transform_while(node, results)
+        elif "sorted" in results:
+            return self.transform_sort(node, results)
+        else:
+            raise RuntimeError("Invalid match")
+
+    def transform_isinstance(self, node, results):
+        x = results["x"].clone() # The thing inside of type()
+        T = results["T"].clone() # The type being compared against
+        x.set_prefix("")
+        T.set_prefix(" ")
+        test = Call(Name("isinstance"), [x, Comma(), T])
+        if "n" in results:
+            test.set_prefix(" ")
+            test = Node(syms.not_test, [Name("not"), test])
+        test.set_prefix(node.get_prefix())
+        return test
+
+    def transform_while(self, node, results):
+        one = results["while"]
+        one.replace(Name("True", prefix=one.get_prefix()))
+
+    def transform_sort(self, node, results):
+        sort_stmt = results["sort"]
+        next_stmt = results["next"]
+        list_call = results.get("list")
+        simple_expr = results.get("expr")
+
+        if list_call:
+            list_call.replace(Name("sorted", prefix=list_call.get_prefix()))
+        elif simple_expr:
+            new = simple_expr.clone()
+            new.set_prefix("")
+            simple_expr.replace(Call(Name("sorted"), [new],
+                                     prefix=simple_expr.get_prefix()))
+        else:
+            raise RuntimeError("should not have reached here")
+        sort_stmt.remove()
+        if next_stmt:
+            next_stmt[0].set_prefix(sort_stmt.get_prefix())

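A runnable sketch of the sort idiom this fixer targets; per match() above it
only fires when the assigned name and the name calling .sort() are the same
(names below are illustrative):

    # Before (old idiom):
    #   v = list(range(10, 0, -1))
    #   v.sort()
    # After fix_idioms:
    v = sorted(range(10, 0, -1))
    print(v)                      # [1, 2, ..., 10]
    # Likewise, type(v) == list becomes:
    print(isinstance(v, list))    # True
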
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_import.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_import.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,90 @@
+"""Fixer for import statements.
+If spam is being imported from the local directory, this import:
+    from spam import eggs
+Becomes:
+    from .spam import eggs
+
+And this import:
+    import spam
+Becomes:
+    from . import spam
+"""
+
+# Local imports
+from ... import fixer_base
+from os.path import dirname, join, exists, sep
+from ...fixer_util import FromImport, syms, token
+
+
+def traverse_imports(names):
+    """
+    Walks over all the names imported in a dotted_as_names node.
+    """
+    pending = [names]
+    while pending:
+        node = pending.pop()
+        if node.type == token.NAME:
+            yield node.value
+        elif node.type == syms.dotted_name:
+            yield "".join([ch.value for ch in node.children])
+        elif node.type == syms.dotted_as_name:
+            pending.append(node.children[0])
+        elif node.type == syms.dotted_as_names:
+            pending.extend(node.children[::-2])
+        else:
+            raise AssertionError("unknown node type")
+
+
+class FixImport(fixer_base.BaseFix):
+
+    PATTERN = """
+    import_from< 'from' imp=any 'import' ['('] any [')'] >
+    |
+    import_name< 'import' imp=any >
+    """
+
+    def transform(self, node, results):
+        imp = results['imp']
+
+        if node.type == syms.import_from:
+            # The module NAME can sit at different depths in the tree:
+            # at the top (e.g. 'import ham'), one level down
+            # (e.g. 'import ham.eggs'), or deeper still
+            # (e.g. 'import ham.eggs as spam').  Hence the loop.
+            while not hasattr(imp, 'value'):
+                imp = imp.children[0]
+            if self.probably_a_local_import(imp.value):
+                imp.value = "." + imp.value
+                imp.changed()
+                return node
+        else:
+            have_local = False
+            have_absolute = False
+            for mod_name in traverse_imports(imp):
+                if self.probably_a_local_import(mod_name):
+                    have_local = True
+                else:
+                    have_absolute = True
+            if have_absolute:
+                if have_local:
+                    # We won't handle both sibling and absolute imports in the
+                    # same statement at the moment.
+                    self.warning(node, "absolute and local imports together")
+                return
+
+            new = FromImport('.', [imp])
+            new.set_prefix(node.get_prefix())
+            return new
+
+    def probably_a_local_import(self, imp_name):
+        imp_name = imp_name.split('.', 1)[0]
+        base_path = dirname(self.filename)
+        base_path = join(base_path, imp_name)
+        # If there is no __init__.py next to the file, it's not in a package,
+        # so it can't be a relative import.
+        if not exists(join(dirname(base_path), '__init__.py')):
+            return False
+        for ext in ['.py', sep, '.pyc', '.so', '.sl', '.pyd']:
+            if exists(base_path + ext):
+                return True
+        return False

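Roughly, given a package directory holding __init__.py, spam.py and the file
being fixed, the rewrite looks like this (illustrative sketch, not from the
patch):

    # Before (implicit relative imports, Python 2):
    #   import spam
    #   from spam import eggs
    # After fix_import (explicit relative imports):
    #   from . import spam
    #   from .spam import eggs
    # Statements mixing local and absolute modules ('import spam, os')
    # are only warned about, per the have_local/have_absolute check.
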
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_imports.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_imports.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,145 @@
+"""Fix incompatible imports and module references."""
+# Authors: Collin Winter, Nick Edds
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name, attr_chain
+
+MAPPING = {'StringIO':  'io',
+           'cStringIO': 'io',
+           'cPickle': 'pickle',
+           '__builtin__' : 'builtins',
+           'copy_reg': 'copyreg',
+           'Queue': 'queue',
+           'SocketServer': 'socketserver',
+           'ConfigParser': 'configparser',
+           'repr': 'reprlib',
+           'FileDialog': 'tkinter.filedialog',
+           'tkFileDialog': 'tkinter.filedialog',
+           'SimpleDialog': 'tkinter.simpledialog',
+           'tkSimpleDialog': 'tkinter.simpledialog',
+           'tkColorChooser': 'tkinter.colorchooser',
+           'tkCommonDialog': 'tkinter.commondialog',
+           'Dialog': 'tkinter.dialog',
+           'Tkdnd': 'tkinter.dnd',
+           'tkFont': 'tkinter.font',
+           'tkMessageBox': 'tkinter.messagebox',
+           'ScrolledText': 'tkinter.scrolledtext',
+           'Tkconstants': 'tkinter.constants',
+           'Tix': 'tkinter.tix',
+           'ttk': 'tkinter.ttk',
+           'Tkinter': 'tkinter',
+           'markupbase': '_markupbase',
+           '_winreg': 'winreg',
+           'thread': '_thread',
+           'dummy_thread': '_dummy_thread',
+           # anydbm and whichdb are handled by fix_imports2
+           'dbhash': 'dbm.bsd',
+           'dumbdbm': 'dbm.dumb',
+           'dbm': 'dbm.ndbm',
+           'gdbm': 'dbm.gnu',
+           'xmlrpclib': 'xmlrpc.client',
+           'DocXMLRPCServer': 'xmlrpc.server',
+           'SimpleXMLRPCServer': 'xmlrpc.server',
+           'httplib': 'http.client',
+           'htmlentitydefs' : 'html.entities',
+           'HTMLParser' : 'html.parser',
+           'Cookie': 'http.cookies',
+           'cookielib': 'http.cookiejar',
+           'BaseHTTPServer': 'http.server',
+           'SimpleHTTPServer': 'http.server',
+           'CGIHTTPServer': 'http.server',
+           #'test.test_support': 'test.support',
+           'commands': 'subprocess',
+           'UserString' : 'collections',
+           'UserList' : 'collections',
+           'urlparse' : 'urllib.parse',
+           'robotparser' : 'urllib.robotparser',
+}
+
+
+def alternates(members):
+    return "(" + "|".join(map(repr, members)) + ")"
+
+
+def build_pattern(mapping=MAPPING):
+    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
+    bare_names = alternates(mapping.keys())
+
+    yield """name_import=import_name< 'import' ((%s) |
+               multiple_imports=dotted_as_names< any* (%s) any* >) >
+          """ % (mod_list, mod_list)
+    yield """import_from< 'from' (%s) 'import' ['(']
+              ( any | import_as_name< any 'as' any > |
+                import_as_names< any* >)  [')'] >
+          """ % mod_list
+    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
+               multiple_imports=dotted_as_names<
+                 any* dotted_as_name< (%s) 'as' any > any* >) >
+          """ % (mod_list, mod_list)
+
+    # Find usages of module members in code e.g. thread.foo(bar)
+    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names
+
+
+class FixImports(fixer_base.BaseFix):
+
+    order = "pre" # Pre-order tree traversal
+
+    # This is overridden in fix_imports2.
+    mapping = MAPPING
+
+    # We want to run this fixer late, so fix_import doesn't try to make stdlib
+    # renames into relative imports.
+    run_order = 6
+
+    def build_pattern(self):
+        return "|".join(build_pattern(self.mapping))
+
+    def compile_pattern(self):
+        # We override this, so MAPPING can be programmatically altered and the
+        # changes will be reflected in PATTERN.
+        self.PATTERN = self.build_pattern()
+        super(FixImports, self).compile_pattern()
+
+    # Don't match the node if it's within another match.
+    def match(self, node):
+        match = super(FixImports, self).match
+        results = match(node)
+        if results:
+            # Module usage could be in the trailer of an attribute lookup, so we
+            # might have nested matches when "bare_with_attr" is present.
+            if "bare_with_attr" not in results and \
+                    any([match(obj) for obj in attr_chain(node, "parent")]):
+                return False
+            return results
+        return False
+
+    def start_tree(self, tree, filename):
+        super(FixImports, self).start_tree(tree, filename)
+        self.replace = {}
+
+    def transform(self, node, results):
+        import_mod = results.get("module_name")
+        if import_mod:
+            mod_name = import_mod.value
+            new_name = self.mapping[mod_name]
+            import_mod.replace(Name(new_name, prefix=import_mod.get_prefix()))
+            if "name_import" in results:
+                # If it's not a "from x import x, y" or "import x as y" import,
+                # mark its usage to be replaced.
+                self.replace[mod_name] = new_name
+            if "multiple_imports" in results:
+                # This is a nasty hack to fix multiple imports on a line (e.g.,
+                # "import StringIO, urlparse"). The problem is that I can't
+                # figure out an easy way to make a pattern recognize the keys of
+                # MAPPING randomly sprinkled in an import statement.
+                results = self.match(node)
+                if results:
+                    self.transform(node, results)
+        else:
+            # Replace usage of the module.
+            bare_name = results["bare_with_attr"][0]
+            new_name = self.replace.get(bare_name.value)
+            if new_name:
+                bare_name.replace(Name(new_name, prefix=bare_name.get_prefix()))

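The mapping can be exercised end to end; a minimal sketch, assuming the
package mirrors lib2to3's refactoring API (the module path and fixer name
below are assumptions, not from the patch):

    from refactor.refactor import RefactoringTool  # assumed location

    rt = RefactoringTool(['refactor.fixes.from2.fix_imports'])
    src = 'import StringIO\ns = StringIO.StringIO()\n'
    # refactor_string() returns the rewritten tree; str() gives source:
    print(rt.refactor_string(src, '<example>'))
    # -> import io
    #    s = io.StringIO()
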
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_imports2.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_imports2.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,16 @@
+"""Fix incompatible imports and module references that must be fixed after
+fix_imports."""
+from . import fix_imports
+
+
+MAPPING = {
+            'whichdb': 'dbm',
+            'anydbm': 'dbm',
+          }
+
+
+class FixImports2(fix_imports.FixImports):
+
+    run_order = 7
+
+    mapping = MAPPING

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_input.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_input.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,26 @@
+"""Fixer that changes input(...) into eval(input(...))."""
+# Author: Andre Roberge
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Call, Name
+from ... import patcomp
+
+
+context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")
+
+
+class FixInput(fixer_base.BaseFix):
+
+    PATTERN = """
+              power< 'input' args=trailer< '(' [any] ')' > >
+              """
+
+    def transform(self, node, results):
+        # If we're already wrapped in an eval() call, we're done.
+        if context.match(node.parent.parent):
+            return
+
+        new = node.clone()
+        new.set_prefix("")
+        return Call(Name("eval"), [new], prefix=node.get_prefix())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_intern.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_intern.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,44 @@
+# Copyright 2006 Georg Brandl.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for intern().
+
+intern(s) -> sys.intern(s)"""
+
+# Local imports
+from ... import pytree
+from ... import fixer_base
+from ...fixer_util import Name, Attr, touch_import
+
+
+class FixIntern(fixer_base.BaseFix):
+
+    PATTERN = """
+    power< 'intern'
+           trailer< lpar='('
+                    ( not(arglist | argument<any '=' any>) obj=any
+                      | obj=arglist<(not argument<any '=' any>) any ','> )
+                    rpar=')' >
+           after=any*
+    >
+    """
+
+    def transform(self, node, results):
+        syms = self.syms
+        obj = results["obj"].clone()
+        if obj.type == syms.arglist:
+            newarglist = obj.clone()
+        else:
+            newarglist = pytree.Node(syms.arglist, [obj.clone()])
+        after = results["after"]
+        if after:
+            after = [n.clone() for n in after]
+        new = pytree.Node(syms.power,
+                          Attr(Name("sys"), Name("intern")) +
+                          [pytree.Node(syms.trailer,
+                                       [results["lpar"].clone(),
+                                        newarglist,
+                                        results["rpar"].clone()])] + after)
+        new.set_prefix(node.get_prefix())
+        touch_import(None, 'sys', node)
+        return new

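The resulting call is always valid because touch_import() above guarantees
the sys import is present; a runnable sketch of the output shape:

    import sys                    # inserted by touch_import(None, 'sys', node)
    s = sys.intern('interned')    # was: intern('interned')
    print(s)
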
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_isinstance.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_isinstance.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,52 @@
+# Copyright 2008 Armin Ronacher.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that cleans up a tuple argument to isinstance after the tokens
+in it were fixed.  This is mainly used to remove double occurrences of
+tokens as a leftover of the long -> int / unicode -> str conversion.
+
+e.g. isinstance(x, (int, long)) -> isinstance(x, (int, int))
+       -> isinstance(x, int)
+"""
+
+from ... import fixer_base
+from ...fixer_util import token
+
+
+class FixIsinstance(fixer_base.BaseFix):
+
+    PATTERN = """
+    power<
+        'isinstance'
+        trailer< '(' arglist< any ',' atom< '('
+            args=testlist_gexp< any+ >
+        ')' > > ')' >
+    >
+    """
+
+    run_order = 6
+
+    def transform(self, node, results):
+        names_inserted = set()
+        testlist = results["args"]
+        args = testlist.children
+        new_args = []
+        iterator = enumerate(args)
+        for idx, arg in iterator:
+            if arg.type == token.NAME and arg.value in names_inserted:
+                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
+                    iterator.next()
+                    continue
+            else:
+                new_args.append(arg)
+                if arg.type == token.NAME:
+                    names_inserted.add(arg.value)
+        if new_args and new_args[-1].type == token.COMMA:
+            del new_args[-1]
+        if len(new_args) == 1:
+            atom = testlist.parent
+            new_args[0].set_prefix(atom.get_prefix())
+            atom.replace(new_args[0])
+        else:
+            args[:] = new_args
+            node.changed()

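This cleanup matters because fix_long can leave duplicates behind (hence the
late run_order above); a runnable illustration:

    x = 5
    # fix_long alone: isinstance(x, (int, long)) -> isinstance(x, (int, int))
    print(isinstance(x, (int, int)))   # True, but redundant
    # after this fixer the one-element tuple collapses:
    print(isinstance(x, int))          # True
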
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_itertools.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_itertools.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,41 @@
+""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
+    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)
+
+    Imports from itertools are fixed in fix_itertools_imports.py.
+
+    If itertools is imported under another name (e.g. import itertools as it;
+    it.izip(spam, eggs)), such calls will not be fixed.
+    """
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+class FixItertools(fixer_base.BaseFix):
+    it_funcs = "('imap'|'ifilter'|'izip'|'ifilterfalse')"
+    PATTERN = """
+              power< it='itertools'
+                  trailer<
+                     dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
+              |
+              power< func=%(it_funcs)s trailer< '(' [any] ')' > >
+              """ %(locals())
+
+    # Needs to be run after fix_(map|zip|filter)
+    run_order = 6
+
+    def transform(self, node, results):
+        prefix = None
+        func = results['func'][0]
+        if 'it' in results and func.value != 'ifilterfalse':
+            dot, it = (results['dot'], results['it'])
+            # Remove the 'itertools'
+            prefix = it.get_prefix()
+            it.remove()
+            # Replace the node which contains ('.', 'function') with the
+            # function (to be consistent with the second part of the pattern)
+            dot.remove()
+            func.parent.replace(func)
+
+        prefix = prefix or func.get_prefix()
+        func.replace(Name(func.value[1:], prefix=prefix))

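The i-prefixed names map onto the builtins, while ifilterfalse merely loses
its leading 'i' and stays in itertools; a runnable sketch:

    import itertools
    # itertools.imap(f, xs)  ->  map(f, xs)
    print(list(map(abs, [-1, 2])))                                 # [1, 2]
    # itertools.ifilterfalse(p, xs)  ->  itertools.filterfalse(p, xs)
    print(list(itertools.filterfalse(lambda n: n % 2, range(6))))  # [0, 2, 4]
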
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_itertools_imports.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_itertools_imports.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,52 @@
+""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import BlankLine, syms, token
+
+
+class FixItertoolsImports(fixer_base.BaseFix):
+    PATTERN = """
+              import_from< 'from' 'itertools' 'import' imports=any >
+              """
+
+    def transform(self, node, results):
+        imports = results['imports']
+        if imports.type == syms.import_as_name or not imports.children:
+            children = [imports]
+        else:
+            children = imports.children
+        for child in children[::2]:
+            if child.type == token.NAME:
+                member = child.value
+                name_node = child
+            else:
+                assert child.type == syms.import_as_name
+                name_node = child.children[0]
+            member_name = name_node.value
+            if member_name in ('imap', 'izip', 'ifilter'):
+                child.value = None
+                child.remove()
+            elif member_name == 'ifilterfalse':
+                node.changed()
+                name_node.value = 'filterfalse'
+
+        # Make sure the import statement is still sane
+        children = imports.children[:] or [imports]
+        remove_comma = True
+        for child in children:
+            if remove_comma and child.type == token.COMMA:
+                child.remove()
+            else:
+                remove_comma ^= True
+
+        if children[-1].type == token.COMMA:
+            children[-1].remove()
+
+        # If there are no imports left, just get rid of the entire statement
+        if not (imports.children or getattr(imports, 'value', None)) or \
+                imports.parent is None:
+            p = node.get_prefix()
+            node = BlankLine()
+            node.prefix = p
+        return node

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_long.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_long.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,22 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that turns 'long' into 'int' everywhere.
+"""
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name, Number, is_probably_builtin
+
+
+class FixLong(fixer_base.BaseFix):
+
+    PATTERN = "'long'"
+
+    static_int = Name("int")
+
+    def transform(self, node, results):
+        if is_probably_builtin(node):
+            new = self.static_int.clone()
+            new.set_prefix(node.get_prefix())
+            return new

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_map.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_map.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,82 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
+exists a 'from future_builtins import map' statement in the top-level
+namespace.
+
+As a special case, map(None, X) is changed into list(X).  (This is
+necessary because the semantics are changed in this case -- the new
+map(None, X) is equivalent to [(x,) for x in X].)
+
+We avoid the transformation (except for the special case mentioned
+above) if the map() call is directly contained in iter(<>), list(<>),
+tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+
+NOTE: This is still not correct if the original code was depending on
+map(F, X, Y, ...) to go on until the longest argument is exhausted,
+substituting None for missing values -- like zip(), it now stops as
+soon as the shortest argument is exhausted.
+"""
+
+# Local imports
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name, Call, ListComp, in_special_context
+from ...pygram import python_symbols as syms
+
+class FixMap(fixer_base.ConditionalFix):
+
+    PATTERN = """
+    map_none=power<
+        'map'
+        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
+    >
+    |
+    map_lambda=power<
+        'map'
+        trailer<
+            '('
+            arglist<
+                lambdef< 'lambda'
+                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
+                >
+                ','
+                it=any
+            >
+            ')'
+        >
+    >
+    |
+    power<
+        'map'
+        args=trailer< '(' [any] ')' >
+    >
+    """
+
+    skip_on = 'future_builtins.map'
+
+    def transform(self, node, results):
+        if self.should_skip(node):
+            return
+
+        if node.parent.type == syms.simple_stmt:
+            self.warning(node, "You should use a for loop here")
+            new = node.clone()
+            new.set_prefix("")
+            new = Call(Name("list"), [new])
+        elif "map_lambda" in results:
+            new = ListComp(results.get("xp").clone(),
+                           results.get("fp").clone(),
+                           results.get("it").clone())
+        else:
+            if "map_none" in results:
+                new = results["arg"].clone()
+            else:
+                if in_special_context(node):
+                    return None
+                new = node.clone()
+            new.set_prefix("")
+            new = Call(Name("list"), [new])
+        new.set_prefix(node.get_prefix())
+        return new

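The list() wrapper preserves Python 2's eager semantics, and the lambda form
becomes a list comprehension; a runnable check of both equivalences:

    items = [1, -2, 3]
    # generic case: map(abs, items) -> list(map(abs, items))
    print(list(map(abs, items)))     # [1, 2, 3]
    # map_lambda case: map(lambda x: x * 2, items) -> [x * 2 for x in items]
    print([x * 2 for x in items])    # [2, -4, 6]
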
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_metaclass.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_metaclass.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,227 @@
+"""Fixer for __metaclass__ = X -> (metaclass=X) class definitions.
+
+   The various forms of classdef (inherits nothing, inherits once, inherits
+   many) don't parse the same in the CST, so we look at ALL classes for
+   a __metaclass__ and, if we find one, normalize the inherits to all be
+   an arglist.
+
+   For one-liner classes ('class X: pass') there is no indent/dedent so
+   we normalize those into having a suite.
+
+   Moving the __metaclass__ into the classdef can also cause the class
+   body to be empty so there is some special casing for that as well.
+
+   This fixer also tries very hard to keep original indenting and spacing
+   in all those corner cases.
+
+"""
+# Author: Jack Diederich
+
+# Local imports
+from ... import fixer_base
+from ...pygram import token
+from ...fixer_util import Name, syms, Node, Leaf
+
+
+def has_metaclass(parent):
+    """ we have to check the cls_node without changing it.
+    """ We have to check the cls_node without changing it.
+        There are two possibilities:
+          2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
+    """
+    for node in parent.children:
+        if node.type == syms.suite:
+            return has_metaclass(node)
+        elif node.type == syms.simple_stmt and node.children:
+            expr_node = node.children[0]
+            if expr_node.type == syms.expr_stmt and expr_node.children:
+                left_side = expr_node.children[0]
+                if isinstance(left_side, Leaf) and \
+                        left_side.value == '__metaclass__':
+                    return True
+    return False
+
+
+def fixup_parse_tree(cls_node):
+    """ one-line classes don't get a suite in the parse tree so we add
+        one to normalize the tree
+    """
+    for node in cls_node.children:
+        if node.type == syms.suite:
+            # already in the preferred format, do nothing
+            return
+
+    # !%@#! oneliners have no suite node, we have to fake one up
+    for i, node in enumerate(cls_node.children):
+        if node.type == token.COLON:
+            break
+    else:
+        raise ValueError("No class suite and no ':'!")
+
+    # move everything into a suite node
+    suite = Node(syms.suite, [])
+    while cls_node.children[i+1:]:
+        move_node = cls_node.children[i+1]
+        suite.append_child(move_node.clone())
+        move_node.remove()
+    cls_node.append_child(suite)
+    node = suite
+
+
+def fixup_simple_stmt(parent, i, stmt_node):
+    """ If there is a semicolon, all the parts count as part of the same
+        simple_stmt.  We just want the __metaclass__ part, so we move
+        everything after the semicolon into its own simple_stmt node.
+    """
+    for semi_ind, node in enumerate(stmt_node.children):
+        if node.type == token.SEMI: # *sigh*
+            break
+    else:
+        return
+
+    node.remove() # kill the semicolon
+    new_expr = Node(syms.expr_stmt, [])
+    new_stmt = Node(syms.simple_stmt, [new_expr])
+    while stmt_node.children[semi_ind:]:
+        move_node = stmt_node.children[semi_ind]
+        new_expr.append_child(move_node.clone())
+        move_node.remove()
+    parent.insert_child(i, new_stmt)
+    new_leaf1 = new_stmt.children[0].children[0]
+    old_leaf1 = stmt_node.children[0].children[0]
+    new_leaf1.set_prefix(old_leaf1.get_prefix())
+
+
+def remove_trailing_newline(node):
+    if node.children and node.children[-1].type == token.NEWLINE:
+        node.children[-1].remove()
+
+
+def find_metas(cls_node):
+    # find the suite node (Mmm, sweet nodes)
+    for node in cls_node.children:
+        if node.type == syms.suite:
+            break
+    else:
+        raise ValueError("No class suite!")
+
+    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
+    for i, simple_node in list(enumerate(node.children)):
+        if simple_node.type == syms.simple_stmt and simple_node.children:
+            expr_node = simple_node.children[0]
+            if expr_node.type == syms.expr_stmt and expr_node.children:
+                # Check if the expr_node is a simple assignment.
+                left_node = expr_node.children[0]
+                if isinstance(left_node, Leaf) and \
+                        left_node.value == '__metaclass__':
+                    # We found an assignment to __metaclass__.
+                    fixup_simple_stmt(node, i, simple_node)
+                    remove_trailing_newline(simple_node)
+                    yield (node, i, simple_node)
+
+
+def fixup_indent(suite):
+    """ If an INDENT is followed by a thing with a prefix, then nuke the prefix.
+        Otherwise we get in trouble when removing __metaclass__ at suite start.
+    """
+    kids = suite.children[::-1]
+    # find the first indent
+    while kids:
+        node = kids.pop()
+        if node.type == token.INDENT:
+            break
+
+    # find the first Leaf
+    while kids:
+        node = kids.pop()
+        if isinstance(node, Leaf) and node.type != token.DEDENT:
+            if node.prefix:
+                node.set_prefix('')
+            return
+        else:
+            kids.extend(node.children[::-1])
+
+
+class FixMetaclass(fixer_base.BaseFix):
+
+    PATTERN = """
+    classdef<any*>
+    """
+
+    def transform(self, node, results):
+        if not has_metaclass(node):
+            return node
+
+        fixup_parse_tree(node)
+
+        # find metaclasses, keep the last one
+        last_metaclass = None
+        for suite, i, stmt in find_metas(node):
+            last_metaclass = stmt
+            stmt.remove()
+
+        text_type = node.children[0].type # always Leaf(nnn, 'class')
+
+        # figure out what kind of classdef we have
+        if len(node.children) == 7:
+            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
+            #                 0        1       2    3        4    5    6
+            if node.children[3].type == syms.arglist:
+                arglist = node.children[3]
+            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
+            else:
+                parent = node.children[3].clone()
+                arglist = Node(syms.arglist, [parent])
+                node.set_child(3, arglist)
+        elif len(node.children) == 6:
+            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
+            #                 0        1       2     3    4    5
+            arglist = Node(syms.arglist, [])
+            node.insert_child(3, arglist)
+        elif len(node.children) == 4:
+            # Node(classdef, ['class', 'name', ':', suite])
+            #                 0        1       2    3
+            arglist = Node(syms.arglist, [])
+            node.insert_child(2, Leaf(token.RPAR, ')'))
+            node.insert_child(2, arglist)
+            node.insert_child(2, Leaf(token.LPAR, '('))
+        else:
+            raise ValueError("Unexpected class definition")
+
+        # now stick the metaclass in the arglist
+        meta_txt = last_metaclass.children[0].children[0]
+        meta_txt.value = 'metaclass'
+        orig_meta_prefix = meta_txt.get_prefix()
+
+        if arglist.children:
+            arglist.append_child(Leaf(token.COMMA, ','))
+            meta_txt.set_prefix(' ')
+        else:
+            meta_txt.set_prefix('')
+
+        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
+        expr_stmt = last_metaclass.children[0]
+        assert expr_stmt.type == syms.expr_stmt
+        expr_stmt.children[1].set_prefix('')
+        expr_stmt.children[2].set_prefix('')
+
+        arglist.append_child(last_metaclass)
+
+        fixup_indent(suite)
+
+        # check for empty suite
+        if not suite.children:
+            # one-liner that was just __metaclass__
+            suite.remove()
+            pass_leaf = Leaf(text_type, 'pass')
+            pass_leaf.set_prefix(orig_meta_prefix)
+            node.append_child(pass_leaf)
+            node.append_child(Leaf(token.NEWLINE, '\n'))
+
+        elif len(suite.children) > 1 and \
+                 (suite.children[-2].type == token.INDENT and
+                  suite.children[-1].type == token.DEDENT):
+            # there was only one line in the class body and it was __metaclass__
+            pass_leaf = Leaf(text_type, 'pass')
+            suite.insert_child(-1, pass_leaf)
+            suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))

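A runnable sketch of the rewrite (class names illustrative):

    class Meta(type):
        pass

    # Before (Python 2):
    #   class C(object):
    #       __metaclass__ = Meta
    # After this fixer:
    class C(object, metaclass=Meta):
        pass

    print(type(C) is Meta)   # True
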
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_methodattrs.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_methodattrs.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,23 @@
+"""Fix bound method attributes (method.im_? -> method.__?__).
+"""
+# Author: Christian Heimes
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+MAP = {
+    "im_func" : "__func__",
+    "im_self" : "__self__",
+    "im_class" : "__self__.__class__"
+    }
+
+class FixMethodattrs(fixer_base.BaseFix):
+    PATTERN = """
+    power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
+    """
+
+    def transform(self, node, results):
+        attr = results["attr"][0]
+        new = MAP[attr.value]
+        attr.replace(Name(new, prefix=attr.get_prefix()))

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_ne.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_ne.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,22 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that turns <> into !=."""
+
+# Local imports
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+
+
+class FixNe(fixer_base.BaseFix):
+    # This is so simple that we don't need the pattern compiler.
+
+    def match(self, node):
+        # Override
+        return node.type == token.NOTEQUAL and node.value == "<>"
+
+    def transform(self, node, results):
+        new = pytree.Leaf(token.NOTEQUAL, "!=")
+        new.set_prefix(node.get_prefix())
+        return new

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_next.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_next.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,103 @@
+"""Fixer for it.next() -> next(it), per PEP 3114."""
+# Author: Collin Winter
+
+# Things that currently aren't covered:
+#   - listcomp "next" names aren't warned
+#   - "with" statement targets aren't checked
+
+# Local imports
+from ...pgen2 import token
+from ...pygram import python_symbols as syms
+from ... import fixer_base
+from ...fixer_util import Name, Call, find_binding
+
+bind_warning = "Calls to builtin next() possibly shadowed by global binding"
+
+
+class FixNext(fixer_base.BaseFix):
+    PATTERN = """
+    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
+    |
+    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
+    |
+    classdef< 'class' any+ ':'
+              suite< any*
+                     funcdef< 'def'
+                              name='next'
+                              parameters< '(' NAME ')' > any+ >
+                     any* > >
+    |
+    global=global_stmt< 'global' any* 'next' any* >
+    """
+
+    order = "pre" # Pre-order tree traversal
+
+    def start_tree(self, tree, filename):
+        super(FixNext, self).start_tree(tree, filename)
+
+        n = find_binding('next', tree)
+        if n:
+            self.warning(n, bind_warning)
+            self.shadowed_next = True
+        else:
+            self.shadowed_next = False
+
+    def transform(self, node, results):
+        assert results
+
+        base = results.get("base")
+        attr = results.get("attr")
+        name = results.get("name")
+        mod = results.get("mod")
+
+        if base:
+            if self.shadowed_next:
+                attr.replace(Name("__next__", prefix=attr.get_prefix()))
+            else:
+                base = [n.clone() for n in base]
+                base[0].set_prefix("")
+                node.replace(Call(Name("next", prefix=node.get_prefix()), base))
+        elif name:
+            n = Name("__next__", prefix=name.get_prefix())
+            name.replace(n)
+        elif attr:
+            # We don't do this transformation if we're assigning to "x.next".
+            # Unfortunately, it doesn't seem possible to do this in PATTERN,
+            #  so it's being done here.
+            if is_assign_target(node):
+                head = results["head"]
+                if "".join([str(n) for n in head]).strip() == '__builtin__':
+                    self.warning(node, bind_warning)
+                return
+            attr.replace(Name("__next__"))
+        elif "global" in results:
+            self.warning(node, bind_warning)
+            self.shadowed_next = True
+
+
+### The following functions help test if node is part of an assignment
+###  target.
+
+def is_assign_target(node):
+    assign = find_assign(node)
+    if assign is None:
+        return False
+
+    for child in assign.children:
+        if child.type == token.EQUAL:
+            return False
+        elif is_subtree(child, node):
+            return True
+    return False
+
+def find_assign(node):
+    if node.type == syms.expr_stmt:
+        return node
+    if node.type == syms.simple_stmt or node.parent is None:
+        return None
+    return find_assign(node.parent)
+
+def is_subtree(root, node):
+    if root == node:
+        return True
+    return any([is_subtree(c, node) for c in root.children])

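A runnable sketch of the call rewrite; note that method *definitions* named
next are renamed to __next__ via the classdef alternative in PATTERN instead:

    it = iter([1, 2, 3])
    # Before (Python 2): it.next()
    # After (PEP 3114):
    print(next(it))   # 1
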
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_nonzero.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_nonzero.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,20 @@
+"""Fixer for __nonzero__ -> __bool__ methods."""
+# Author: Collin Winter
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name, syms
+
+class FixNonzero(fixer_base.BaseFix):
+    PATTERN = """
+    classdef< 'class' any+ ':'
+              suite< any*
+                     funcdef< 'def' name='__nonzero__'
+                              parameters< '(' NAME ')' > any+ >
+                     any* > >
+    """
+
+    def transform(self, node, results):
+        name = results["name"]
+        new = Name("__bool__", prefix=name.get_prefix())
+        name.replace(new)

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_numliterals.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_numliterals.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,27 @@
+"""Fixer that turns 1L into 1, 0755 into 0o755.
+"""
+# Copyright 2007 Georg Brandl.
+# Licensed to PSF under a Contributor Agreement.
+
+# Local imports
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Number
+
+
+class FixNumliterals(fixer_base.BaseFix):
+    # This is so simple that we don't need the pattern compiler.
+
+    def match(self, node):
+        # Override
+        return (node.type == token.NUMBER and
+                (node.value.startswith("0") or node.value[-1] in "Ll"))
+
+    def transform(self, node, results):
+        val = node.value
+        if val[-1] in 'Ll':
+            val = val[:-1]
+        elif val.startswith('0') and val.isdigit() and len(set(val)) > 1:
+            val = "0o" + val[1:]
+
+        return Number(val, prefix=node.get_prefix())

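Both literal rewrites in one runnable sketch:

    # 1L    ->  1      (the trailing L is dropped)
    # 0755  ->  0o755  (old-style octal gains the 0o prefix)
    print(1, 0o755)       # 1 493
    print(0o755 == 493)   # True
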
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_paren.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_paren.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,42 @@
+"""Fixer that adds parentheses where they are required
+
+This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."""
+
+# By Taek Joo Kim and Benjamin Peterson
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import LParen, RParen
+
+# XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2]
+class FixParen(fixer_base.BaseFix):
+    PATTERN = """
+        atom< ('[' | '(')
+            (listmaker< any
+                comp_for<
+                    'for' NAME 'in'
+                    target=testlist_safe< any (',' any)+ [',']
+                     >
+                    [any]
+                >
+            >
+            |
+            testlist_gexp< any
+                comp_for<
+                    'for' NAME 'in'
+                    target=testlist_safe< any (',' any)+ [',']
+                     >
+                    [any]
+                >
+            >)
+        (']' | ')') >
+    """
+
+    def transform(self, node, results):
+        target = results["target"]
+
+        lparen = LParen()
+        lparen.set_prefix(target.get_prefix())
+        target.set_prefix("") # Make it hug the parentheses
+        target.insert_child(0, lparen)
+        target.append_child(RParen())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_print.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_print.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,90 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for print.
+
+Change:
+    'print'          into 'print()'
+    'print ...'      into 'print(...)'
+    'print ... ,'    into 'print(..., end=" ")'
+    'print >>x, ...' into 'print(..., file=x)'
+
+No changes are applied if print_function is imported from __future__
+
+"""
+
+# Local imports
+from ... import patcomp
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name, Call, Comma, String, is_tuple
+
+
+parend_expr = patcomp.compile_pattern(
+              """atom< '(' [atom|STRING|NAME] ')' >"""
+              )
+
+
+class FixPrint(fixer_base.ConditionalFix):
+
+    PATTERN = """
+              simple_stmt< any* bare='print' any* > | print_stmt
+              """
+
+    skip_on = '__future__.print_function'
+
+    def transform(self, node, results):
+        assert results
+
+        if self.should_skip(node):
+            return
+
+        bare_print = results.get("bare")
+
+        if bare_print:
+            # Special-case print all by itself
+            bare_print.replace(Call(Name("print"), [],
+                               prefix=bare_print.get_prefix()))
+            return
+        assert node.children[0] == Name("print")
+        args = node.children[1:]
+        if len(args) == 1 and parend_expr.match(args[0]):
+            # We don't want to keep sticking parens around an
+            # already-parenthesised expression.
+            return
+
+        sep = end = file = None
+        if args and args[-1] == Comma():
+            args = args[:-1]
+            end = " "
+        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
+            assert len(args) >= 2
+            file = args[1].clone()
+            args = args[3:] # Strip a possible comma after the file expression
+        # Now synthesize a print(args, sep=..., end=..., file=...) node.
+        l_args = [arg.clone() for arg in args]
+        if l_args:
+            l_args[0].set_prefix("")
+        if sep is not None or end is not None or file is not None:
+            if sep is not None:
+                self.add_kwarg(l_args, "sep", String(repr(sep)))
+            if end is not None:
+                self.add_kwarg(l_args, "end", String(repr(end)))
+            if file is not None:
+                self.add_kwarg(l_args, "file", file)
+        n_stmt = Call(Name("print"), l_args)
+        n_stmt.set_prefix(node.get_prefix())
+        return n_stmt
+
+    def add_kwarg(self, l_nodes, s_kwd, n_expr):
+        # XXX All this prefix-setting may lose comments (though rarely)
+        n_expr.set_prefix("")
+        n_argument = pytree.Node(self.syms.argument,
+                                 (Name(s_kwd),
+                                  pytree.Leaf(token.EQUAL, "="),
+                                  n_expr))
+        if l_nodes:
+            l_nodes.append(Comma())
+            n_argument.set_prefix(" ")
+        l_nodes.append(n_argument)

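A runnable sketch covering the trailing-comma and '>>' forms:

    import sys
    # print "x",                 ->  print("x", end=" ")
    print("x", end=" ")
    # print >>sys.stderr, "err"  ->  print("err", file=sys.stderr)
    print("err", file=sys.stderr)
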
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_raise.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_raise.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,82 @@
+"""Fixer for 'raise E, V, T'
+
+raise         -> raise
+raise E       -> raise E
+raise E, V    -> raise E(V)
+raise E, V, T -> raise E(V).with_traceback(T)
+
+raise (((E, E'), E''), E'''), V -> raise E(V)
+raise "foo", V, T               -> warns about string exceptions
+
+
+CAVEATS:
+1) "raise E, V" will be incorrectly translated if V is an exception
+   instance. The correct Python 3 idiom is
+
+        raise E from V
+
+   but since we can't detect instance-hood by syntax alone and since
+   any client code would have to be changed as well, we don't automate
+   this.
+"""
+# Author: Collin Winter
+
+# Local imports
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name, Call, Attr, ArgList, is_tuple
+
+class FixRaise(fixer_base.BaseFix):
+
+    PATTERN = """
+    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
+    """
+
+    def transform(self, node, results):
+        syms = self.syms
+
+        exc = results["exc"].clone()
+        if exc.type is token.STRING:
+            self.cannot_convert(node, "Python 3 does not support string exceptions")
+            return
+
+        # Python 2 supports
+        #  raise ((((E1, E2), E3), E4), E5), V
+        # as a synonym for
+        #  raise E1, V
+        # Since Python 3 will not support this, we recurse down any tuple
+        # literals, always taking the first element.
+        if is_tuple(exc):
+            while is_tuple(exc):
+                # exc.children[1:-1] is the unparenthesized tuple
+                # exc.children[1].children[0] is the first element of the tuple
+                exc = exc.children[1].children[0].clone()
+            exc.set_prefix(" ")
+
+        if "val" not in results:
+            # One-argument raise
+            new = pytree.Node(syms.raise_stmt, [Name("raise"), exc])
+            new.set_prefix(node.get_prefix())
+            return new
+
+        val = results["val"].clone()
+        if is_tuple(val):
+            args = [c.clone() for c in val.children[1:-1]]
+        else:
+            val.set_prefix("")
+            args = [val]
+
+        if "tb" in results:
+            tb = results["tb"].clone()
+            tb.set_prefix("")
+
+            e = Call(exc, args)
+            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+            new = pytree.Node(syms.simple_stmt, [Name("raise")] + with_tb)
+            new.set_prefix(node.get_prefix())
+            return new
+        else:
+            return pytree.Node(syms.raise_stmt,
+                               [Name("raise"), Call(exc, args)],
+                               prefix=node.get_prefix())

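The three-argument form maps onto with_traceback(); a runnable sketch of the
equivalence:

    import sys
    try:
        try:
            1 / 0
        except ZeroDivisionError:
            tb = sys.exc_info()[2]
            # Before (Python 2): raise ValueError, "bad", tb
            # After this fixer:
            raise ValueError("bad").with_traceback(tb)
    except ValueError as e:
        print(e)   # bad
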
Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_raw_input.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_raw_input.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,16 @@
+"""Fixer that changes raw_input(...) into input(...)."""
+# Author: Andre Roberge
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+class FixRawInput(fixer_base.BaseFix):
+
+    PATTERN = """
+              power< name='raw_input' trailer< '(' [any] ')' > any* >
+              """
+
+    def transform(self, node, results):
+        name = results["name"]
+        name.replace(Name("input", prefix=name.get_prefix()))

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_reduce.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_reduce.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,33 @@
+# Copyright 2008 Armin Ronacher.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for reduce().
+
+Makes sure reduce() is imported from the functools module if reduce is
+used in that module.
+"""
+
+from ... import pytree
+from ... import fixer_base
+from ...fixer_util import Name, Attr, touch_import
+
+
+
+class FixReduce(fixer_base.BaseFix):
+
+    PATTERN = """
+    power< 'reduce'
+        trailer< '('
+            arglist< (
+                (not(argument<any '=' any>) any ','
+                 not(argument<any '=' any>) any) |
+                (not(argument<any '=' any>) any ','
+                 not(argument<any '=' any>) any ','
+                 not(argument<any '=' any>) any)
+            ) >
+        ')' >
+    >
+    """
+
+    def transform(self, node, results):
+        touch_import('functools', 'reduce', node)

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_renames.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_renames.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,69 @@
+"""Fix incompatible renames
+
+Fixes:
+  * sys.maxint -> sys.maxsize
+"""
+# Author: Christian Heimes
+# based on Collin Winter's fix_import
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name, attr_chain
+
+MAPPING = {"sys":  {"maxint" : "maxsize"},
+          }
+LOOKUP = {}
+
+def alternates(members):
+    return "(" + "|".join(map(repr, members)) + ")"
+
+
+def build_pattern():
+    #bare = set()
+    for module, replace in MAPPING.items():
+        for old_attr, new_attr in replace.items():
+            LOOKUP[(module, old_attr)] = new_attr
+            #bare.add(module)
+            #bare.add(old_attr)
+            #yield """
+            #      import_name< 'import' (module=%r
+            #          | dotted_as_names< any* module=%r any* >) >
+            #      """ % (module, module)
+            yield """
+                  import_from< 'from' module_name=%r 'import'
+                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
+                  """ % (module, old_attr, old_attr)
+            yield """
+                  power< module_name=%r trailer< '.' attr_name=%r > any* >
+                  """ % (module, old_attr)
+    #yield """bare_name=%s""" % alternates(bare)
+
+
+class FixRenames(fixer_base.BaseFix):
+    PATTERN = "|".join(build_pattern())
+
+    order = "pre" # Pre-order tree traversal
+
+    # Don't match the node if it's within another match
+    def match(self, node):
+        match = super(FixRenames, self).match
+        results = match(node)
+        if results:
+            if any([match(obj) for obj in attr_chain(node, "parent")]):
+                return False
+            return results
+        return False
+
+    #def start_tree(self, tree, filename):
+    #    super(FixRenames, self).start_tree(tree, filename)
+    #    self.replace = {}
+
+    def transform(self, node, results):
+        mod_name = results.get("module_name")
+        attr_name = results.get("attr_name")
+        #bare_name = results.get("bare_name")
+        #import_mod = results.get("module")
+
+        if mod_name and attr_name:
+            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
+            attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_repr.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_repr.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,22 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Call, Name, parenthesize
+
+
+class FixRepr(fixer_base.BaseFix):
+
+    PATTERN = """
+              atom < '`' expr=any '`' >
+              """
+
+    def transform(self, node, results):
+        expr = results["expr"].clone()
+
+        if expr.type == self.syms.testlist1:
+            expr = parenthesize(expr)
+        return Call(Name("repr"), [expr], prefix=node.get_prefix())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_set_literal.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_set_literal.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,50 @@
+"""
+Optional fixer to transform set() calls to set literals.
+"""
+
+# Author: Benjamin Peterson
+
+from ... import fixer_base, pytree
+from ...fixer_util import token, syms
+
+class FixSetLiteral(fixer_base.BaseFix):
+
+    explicit = True
+
+    PATTERN = """power< 'set' trailer< '('
+                     (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) >
+                                |
+                                single=any) ']' >
+                     |
+                     atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' >
+                     )
+                     ')' > >
+              """
+
+    def transform(self, node, results):
+        single = results.get("single")
+        if single:
+            # Make a fake listmaker
+            fake = pytree.Node(syms.listmaker, [single.clone()])
+            single.replace(fake)
+            items = fake
+        else:
+            items = results["items"]
+
+        # Build the contents of the literal
+        literal = [pytree.Leaf(token.LBRACE, "{")]
+        literal.extend(n.clone() for n in items.children)
+        literal.append(pytree.Leaf(token.RBRACE, "}"))
+        # Set the prefix of the right brace to that of the ')' or ']'
+        literal[-1].set_prefix(items.next_sibling.get_prefix())
+        maker = pytree.Node(syms.dictsetmaker, literal)
+        maker.set_prefix(node.get_prefix())
+
+        # If the original was a one tuple, we need to remove the extra comma.
+        if len(maker.children) == 4:
+            n = maker.children[2]
+            n.remove()
+            maker.children[-1].set_prefix(n.get_prefix())
+
+        # Finally, replace the set call with our shiny new literal.
+        return maker

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_standarderror.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_standarderror.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,18 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for StandardError -> Exception."""
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+
+class FixStandarderror(fixer_base.BaseFix):
+
+    PATTERN = """
+              'StandardError'
+              """
+
+    def transform(self, node, results):
+        return Name("Exception", prefix=node.get_prefix())

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_sys_exc.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_sys_exc.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,29 @@
+"""Fixer for sys.exc_{type, value, traceback}
+
+sys.exc_type -> sys.exc_info()[0]
+sys.exc_value -> sys.exc_info()[1]
+sys.exc_traceback -> sys.exc_info()[2]
+"""
+
+# By Jeff Balogh and Benjamin Peterson
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Attr, Call, Name, Number, Subscript, Node, syms
+
+class FixSysExc(fixer_base.BaseFix):
+    # This order matches the ordering of sys.exc_info().
+    exc_info = ["exc_type", "exc_value", "exc_traceback"]
+    PATTERN = """
+              power< 'sys' trailer< dot='.' attribute=(%s) > >
+              """ % '|'.join("'%s'" % e for e in exc_info)
+
+    def transform(self, node, results):
+        sys_attr = results["attribute"][0]
+        index = Number(self.exc_info.index(sys_attr.value))
+
+        call = Call(Name("exc_info"), prefix=sys_attr.get_prefix())
+        attr = Attr(Name("sys"), call)
+        attr[1].children[0].set_prefix(results["dot"].get_prefix())
+        attr.append(Subscript(index))
+        return Node(syms.power, attr, prefix=node.get_prefix())
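
A concrete instance of the mapping in the docstring (hypothetical input):

    t = sys.exc_type          # before
    t = sys.exc_info()[0]     # after fix_sys_exc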

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_throw.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_throw.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,56 @@
+"""Fixer for generator.throw(E, V, T).
+
+g.throw(E)       -> g.throw(E)
+g.throw(E, V)    -> g.throw(E(V))
+g.throw(E, V, T) -> g.throw(E(V).with_traceback(T))
+
+g.throw("foo"[, V[, T]]) will warn about string exceptions."""
+# Author: Collin Winter
+
+# Local imports
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name, Call, ArgList, Attr, is_tuple
+
+class FixThrow(fixer_base.BaseFix):
+
+    PATTERN = """
+    power< any trailer< '.' 'throw' >
+           trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' >
+    >
+    |
+    power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > >
+    """
+
+    def transform(self, node, results):
+        syms = self.syms
+
+        exc = results["exc"].clone()
+        if exc.type is token.STRING:
+            self.cannot_convert(node, "Python 3 does not support string exceptions")
+            return
+
+        # Leave "g.throw(E)" alone
+        val = results.get("val")
+        if val is None:
+            return
+
+        val = val.clone()
+        if is_tuple(val):
+            args = [c.clone() for c in val.children[1:-1]]
+        else:
+            val.set_prefix("")
+            args = [val]
+
+        throw_args = results["args"]
+
+        if "tb" in results:
+            tb = results["tb"].clone()
+            tb.set_prefix("")
+
+            e = Call(exc, args)
+            with_tb = Attr(e, Name('with_traceback')) + [ArgList([tb])]
+            throw_args.replace(pytree.Node(syms.power, with_tb))
+        else:
+            throw_args.replace(Call(exc, args))
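
A sketch of the three-argument form on a hypothetical generator g:

    g.throw(ValueError, "bad", tb)                   # Python 2
    g.throw(ValueError("bad").with_traceback(tb))    # after fix_throw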

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_tuple_params.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_tuple_params.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,169 @@
+"""Fixer for function definitions with tuple parameters.
+
+def func(((a, b), c), d):
+    ...
+
+    ->
+
+def func(x, d):
+    ((a, b), c) = x
+    ...
+
+It will also support lambdas:
+
+    lambda (x, y): x + y -> lambda t: t[0] + t[1]
+
+    # The parens are a syntax error in Python 3
+    lambda (x): x + y -> lambda x: x + y
+"""
+# Author: Collin Winter
+
+# Local imports
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Assign, Name, Newline, Number, Subscript, syms
+
+def is_docstring(stmt):
+    return isinstance(stmt, pytree.Node) and \
+           stmt.children[0].type == token.STRING
+
+class FixTupleParams(fixer_base.BaseFix):
+    PATTERN = """
+              funcdef< 'def' any parameters< '(' args=any ')' >
+                       ['->' any] ':' suite=any+ >
+              |
+              lambda=
+              lambdef< 'lambda' args=vfpdef< '(' inner=any ')' >
+                       ':' body=any
+              >
+              """
+
+    def transform(self, node, results):
+        if "lambda" in results:
+            return self.transform_lambda(node, results)
+
+        new_lines = []
+        suite = results["suite"]
+        args = results["args"]
+        # This crap is so "def foo(...): x = 5; y = 7" is handled correctly.
+        # TODO(cwinter): suite-cleanup
+        if suite[0].children[1].type == token.INDENT:
+            start = 2
+            indent = suite[0].children[1].value
+            end = Newline()
+        else:
+            start = 0
+            indent = "; "
+            end = pytree.Leaf(token.INDENT, "")
+
+        # We need access to self for new_name(), and making this a method
+        #  doesn't feel right. Closing over self and new_lines makes the
+        #  code below cleaner.
+        def handle_tuple(tuple_arg, add_prefix=False):
+            n = Name(self.new_name())
+            arg = tuple_arg.clone()
+            arg.set_prefix("")
+            stmt = Assign(arg, n.clone())
+            if add_prefix:
+                n.set_prefix(" ")
+            tuple_arg.replace(n)
+            new_lines.append(pytree.Node(syms.simple_stmt,
+                                         [stmt, end.clone()]))
+
+        if args.type == syms.tfpdef:
+            handle_tuple(args)
+        elif args.type == syms.typedargslist:
+            for i, arg in enumerate(args.children):
+                if arg.type == syms.tfpdef:
+                    # Without add_prefix, the emitted code is correct,
+                    #  just ugly.
+                    handle_tuple(arg, add_prefix=(i > 0))
+
+        if not new_lines:
+            return node
+
+        # This isn't strictly necessary, but it plays nicely with other fixers.
+        # TODO(cwinter) get rid of this when children becomes a smart list
+        for line in new_lines:
+            line.parent = suite[0]
+
+        # TODO(cwinter) suite-cleanup
+        after = start
+        if start == 0:
+            new_lines[0].set_prefix(" ")
+        elif is_docstring(suite[0].children[start]):
+            new_lines[0].set_prefix(indent)
+            after = start + 1
+
+        suite[0].children[after:after] = new_lines
+        for i in range(after+1, after+len(new_lines)+1):
+            suite[0].children[i].set_prefix(indent)
+        suite[0].changed()
+
+    def transform_lambda(self, node, results):
+        args = results["args"]
+        body = results["body"]
+        inner = simplify_args(results["inner"])
+
+        # Replace lambda ((((x)))): x  with lambda x: x
+        if inner.type == token.NAME:
+            inner = inner.clone()
+            inner.set_prefix(" ")
+            args.replace(inner)
+            return
+
+        params = find_params(args)
+        to_index = map_to_index(params)
+        tup_name = self.new_name(tuple_name(params))
+
+        new_param = Name(tup_name, prefix=" ")
+        args.replace(new_param.clone())
+        for n in body.post_order():
+            if n.type == token.NAME and n.value in to_index:
+                subscripts = [c.clone() for c in to_index[n.value]]
+                new = pytree.Node(syms.power,
+                                  [new_param.clone()] + subscripts)
+                new.set_prefix(n.get_prefix())
+                n.replace(new)
+
+
+### Helper functions for transform_lambda()
+
+def simplify_args(node):
+    if node.type in (syms.vfplist, token.NAME):
+        return node
+    elif node.type == syms.vfpdef:
+        # These look like vfpdef< '(' x ')' > where x is NAME
+        # or another vfpdef instance (leading to recursion).
+        while node.type == syms.vfpdef:
+            node = node.children[1]
+        return node
+    raise RuntimeError("Received unexpected node %s" % node)
+
+def find_params(node):
+    if node.type == syms.vfpdef:
+        return find_params(node.children[1])
+    elif node.type == token.NAME:
+        return node.value
+    return [find_params(c) for c in node.children if c.type != token.COMMA]
+
+def map_to_index(param_list, prefix=[], d=None):
+    if d is None:
+        d = {}
+    for i, obj in enumerate(param_list):
+        trailer = [Subscript(Number(i))]
+        if isinstance(obj, list):
+            map_to_index(obj, trailer, d=d)
+        else:
+            d[obj] = prefix + trailer
+    return d
+
+def tuple_name(param_list):
+    l = []
+    for obj in param_list:
+        if isinstance(obj, list):
+            l.append(tuple_name(obj))
+        else:
+            l.append(obj)
+    return "_".join(l)

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_types.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_types.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,62 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer for removing uses of the types module.
+
+These work for only the known names in the types module.  The forms above
+can include types. or not.  ie, It is assumed the module is imported either as:
+
+    import types
+    from types import ... # either * or specific types
+
+The import statements are not modified.
+
+There should be another fixer that handles at least the following constants:
+
+   type([]) -> list
+   type(()) -> tuple
+   type('') -> str
+
+"""
+
+# Local imports
+from ...pgen2 import token
+from ... import fixer_base
+from ...fixer_util import Name
+
+_TYPE_MAPPING = {
+        'BooleanType' : 'bool',
+        'BufferType' : 'memoryview',
+        'ClassType' : 'type',
+        'ComplexType' : 'complex',
+        'DictType': 'dict',
+        'DictionaryType' : 'dict',
+        'EllipsisType' : 'type(Ellipsis)',
+        #'FileType' : 'io.IOBase',
+        'FloatType': 'float',
+        'IntType': 'int',
+        'ListType': 'list',
+        'LongType': 'int',
+        'ObjectType' : 'object',
+        'NoneType': 'type(None)',
+        'NotImplementedType' : 'type(NotImplemented)',
+        'SliceType' : 'slice',
+        'StringType': 'bytes', # XXX ?
+        'StringTypes' : 'str', # XXX ?
+        'TupleType': 'tuple',
+        'TypeType' : 'type',
+        'UnicodeType': 'str',
+        'XRangeType' : 'range',
+    }
+
+_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
+
+class FixTypes(fixer_base.BaseFix):
+
+    PATTERN = '|'.join(_pats)
+
+    def transform(self, node, results):
+        new_value = _TYPE_MAPPING.get(results["name"].value)
+        if new_value:
+            return Name(new_value, prefix=node.get_prefix())
+        return None
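
A before/after sketch on hypothetical input:

    isinstance(x, types.IntType)      # before
    isinstance(x, int)                # after fix_types
    t = types.StringType              # before
    t = bytes                         # after (the XXX-flagged mapping)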

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_unicode.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_unicode.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,28 @@
+"""Fixer that changes unicode to str, unichr to chr, and u"..." into "...".
+
+"""
+
+import re
+from ...pgen2 import token
+from ... import fixer_base
+
+class FixUnicode(fixer_base.BaseFix):
+
+    PATTERN = "STRING | NAME<'unicode' | 'unichr'>"
+
+    def transform(self, node, results):
+        if node.type == token.NAME:
+            if node.value == "unicode":
+                new = node.clone()
+                new.value = "str"
+                return new
+            if node.value == "unichr":
+                new = node.clone()
+                new.value = "chr"
+                return new
+            # XXX Warn when __unicode__ found?
+        elif node.type == token.STRING:
+            if re.match(r"[uU][rR]?[\'\"]", node.value):
+                new = node.clone()
+                new.value = new.value[1:]
+                return new
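
A sketch of the three rewrites on hypothetical input:

    s = unicode(x)      # -> s = str(x)
    c = unichr(65)      # -> c = chr(65)
    u = u"text"         # -> u = "text"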

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_urllib.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_urllib.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,180 @@
+"""Fix changes imports of urllib which are now incompatible.
+   This is rather similar to fix_imports, but because of the more
+   complex nature of the fixing for urllib, it has its own fixer.
+"""
+# Author: Nick Edds
+
+# Local imports
+from .fix_imports import alternates, FixImports
+from ... import fixer_base
+from ...fixer_util import Name, Comma, FromImport, Newline, attr_chain
+
+MAPPING = {'urllib':  [
+                ('urllib.request',
+                    ['URLOpener', 'FancyURLOpener', 'urlretrieve',
+                     '_urlopener', 'urlcleanup']),
+                ('urllib.parse',
+                    ['quote', 'quote_plus', 'unquote', 'unquote_plus',
+                     'urlencode', 'pathname2url', 'url2pathname', 'splitattr',
+                     'splithost', 'splitnport', 'splitpasswd', 'splitport',
+                     'splitquery', 'splittag', 'splittype', 'splituser',
+                     'splitvalue', ]),
+                ('urllib.error',
+                    ['ContentTooShortError'])],
+           'urllib2' : [
+                ('urllib.request',
+                    ['urlopen', 'install_opener', 'build_opener',
+                     'Request', 'OpenerDirector', 'BaseHandler',
+                     'HTTPDefaultErrorHandler', 'HTTPRedirectHandler',
+                     'HTTPCookieProcessor', 'ProxyHandler',
+                     'HTTPPasswordMgr',
+                     'HTTPPasswordMgrWithDefaultRealm',
+                     'AbstractBasicAuthHandler',
+                     'HTTPBasicAuthHandler', 'ProxyBasicAuthHandler',
+                     'AbstractDigestAuthHandler',
+                     'HTTPDigestAuthHandler', 'ProxyDigestAuthHandler',
+                     'HTTPHandler', 'HTTPSHandler', 'FileHandler',
+                     'FTPHandler', 'CacheFTPHandler',
+                     'UnknownHandler']),
+                ('urllib.error',
+                    ['URLError', 'HTTPError']),
+           ]
+}
+
+# Duplicate the url parsing functions for urllib2.
+MAPPING["urllib2"].append(MAPPING["urllib"][1])
+
+
+def build_pattern():
+    bare = set()
+    for old_module, changes in MAPPING.items():
+        for change in changes:
+            new_module, members = change
+            members = alternates(members)
+            yield """import_name< 'import' (module=%r
+                                  | dotted_as_names< any* module=%r any* >) >
+                  """ % (old_module, old_module)
+            yield """import_from< 'from' mod_member=%r 'import'
+                       ( member=%s | import_as_name< member=%s 'as' any > |
+                         import_as_names< members=any*  >) >
+                  """ % (old_module, members, members)
+            yield """import_from< 'from' module_star=%r 'import' star='*' >
+                  """ % old_module
+            yield """import_name< 'import'
+                                  dotted_as_name< module_as=%r 'as' any > >
+                  """ % old_module
+            yield """power< module_dot=%r trailer< '.' member=%s > any* >
+                  """ % (old_module, members)
+
+
+class FixUrllib(FixImports):
+
+    def build_pattern(self):
+        return "|".join(build_pattern())
+
+    def transform_import(self, node, results):
+        """Transform for the basic import case. Replaces the old
+           import name with a comma-separated list of its
+           replacements.
+        """
+        import_mod = results.get('module')
+        pref = import_mod.get_prefix()
+
+        names = []
+
+        # create a Node list of the replacement modules
+        for name in MAPPING[import_mod.value][:-1]:
+            names.extend([Name(name[0], prefix=pref), Comma()])
+        names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref))
+        import_mod.replace(names)
+
+    def transform_member(self, node, results):
+        """Transform for imports of specific module elements. Replaces
+           the module to be imported from with the appropriate new
+           module.
+        """
+        mod_member = results.get('mod_member')
+        pref = mod_member.get_prefix()
+        member = results.get('member')
+
+        # Simple case with only a single member being imported
+        if member:
+            # this may be a list of length one, or just a node
+            if isinstance(member, list):
+                member = member[0]
+            new_name = None
+            for change in MAPPING[mod_member.value]:
+                if member.value in change[1]:
+                    new_name = change[0]
+                    break
+            if new_name:
+                mod_member.replace(Name(new_name, prefix=pref))
+            else:
+                self.cannot_convert(node,
+                                    'This is an invalid module element')
+
+        # Multiple members being imported
+        else:
+            # a dictionary for replacements, order matters
+            modules = []
+            mod_dict = {}
+            members = results.get('members')
+            for member in members:
+                member = member.value
+                # we only care about the actual members
+                if member != ',':
+                    for change in MAPPING[mod_member.value]:
+                        if member in change[1]:
+                            if change[0] in mod_dict:
+                                mod_dict[change[0]].append(member)
+                            else:
+                                mod_dict[change[0]] = [member]
+                                modules.append(change[0])
+
+            new_nodes = []
+            for module in modules:
+                elts = mod_dict[module]
+                names = []
+                for elt in elts[:-1]:
+                    names.extend([Name(elt, prefix=pref), Comma()])
+                names.append(Name(elts[-1], prefix=pref))
+                new_nodes.append(FromImport(module, names))
+            if new_nodes:
+                nodes = []
+                for new_node in new_nodes[:-1]:
+                    nodes.extend([new_node, Newline()])
+                nodes.append(new_nodes[-1])
+                node.replace(nodes)
+            else:
+                self.cannot_convert(node, 'All module elements are invalid')
+
+    def transform_dot(self, node, results):
+        """Transform for calls to module members in code."""
+        module_dot = results.get('module_dot')
+        member = results.get('member')
+        # this may be a list of length one, or just a node
+        if isinstance(member, list):
+            member = member[0]
+        new_name = None
+        for change in MAPPING[module_dot.value]:
+            if member.value in change[1]:
+                new_name = change[0]
+                break
+        if new_name:
+            module_dot.replace(Name(new_name,
+                                    prefix=module_dot.get_prefix()))
+        else:
+            self.cannot_convert(node, 'This is an invalid module element')
+
+    def transform(self, node, results):
+        if results.get('module'):
+            self.transform_import(node, results)
+        elif results.get('mod_member'):
+            self.transform_member(node, results)
+        elif results.get('module_dot'):
+            self.transform_dot(node, results)
+        # Renaming and star imports are not supported for these modules.
+        elif results.get('module_star'):
+            self.cannot_convert(node, 'Cannot handle star imports.')
+        elif results.get('module_as'):
+            self.cannot_convert(node, 'This module is now multiple modules')
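
A sketch of the member-import case, where members of the old module land in
different new modules and the import is split across lines (hypothetical
input):

    from urllib import urlencode, urlretrieve    # before
    from urllib.parse import urlencode           # after fix_urllib
    from urllib.request import urlretrieve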

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_ws_comma.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_ws_comma.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,39 @@
+"""Fixer that changes 'a ,b' into 'a, b'.
+
+This also changes '{a :b}' into '{a: b}', but does not touch other
+uses of colons.  It does not touch other uses of whitespace.
+
+"""
+
+from ... import pytree
+from ...pgen2 import token
+from ... import fixer_base
+
+class FixWsComma(fixer_base.BaseFix):
+
+    explicit = True # The user must ask for this fixer
+
+    PATTERN = """
+    any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]>
+    """
+
+    COMMA = pytree.Leaf(token.COMMA, ",")
+    COLON = pytree.Leaf(token.COLON, ":")
+    SEPS = (COMMA, COLON)
+
+    def transform(self, node, results):
+        new = node.clone()
+        comma = False
+        for child in new.children:
+            if child in self.SEPS:
+                prefix = child.get_prefix()
+                if prefix.isspace() and "\n" not in prefix:
+                    child.set_prefix("")
+                comma = True
+            else:
+                if comma:
+                    prefix = child.get_prefix()
+                    if not prefix:
+                        child.set_prefix(" ")
+                comma = False
+        return new
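
A before/after sketch on hypothetical input, covering both separators:

    d = {1 :'a' , 2 :'b'}    # before
    d = {1: 'a', 2: 'b'}     # after fix_ws_comma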

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_xrange.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_xrange.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,64 @@
+# Copyright 2007 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Fixer that changes xrange(...) into range(...)."""
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name, Call, consuming_calls
+from ... import patcomp
+
+
+class FixXrange(fixer_base.BaseFix):
+
+    PATTERN = """
+              power<
+                 (name='range'|name='xrange') trailer< '(' args=any ')' >
+              rest=any* >
+              """
+
+    def transform(self, node, results):
+        name = results["name"]
+        if name.value == "xrange":
+            return self.transform_xrange(node, results)
+        elif name.value == "range":
+            return self.transform_range(node, results)
+        else:
+            raise ValueError(repr(name))
+
+    def transform_xrange(self, node, results):
+        name = results["name"]
+        name.replace(Name("range", prefix=name.get_prefix()))
+
+    def transform_range(self, node, results):
+        if not self.in_special_context(node):
+            range_call = Call(Name("range"), [results["args"].clone()])
+            # Encase the range call in list().
+            list_call = Call(Name("list"), [range_call],
+                             prefix=node.get_prefix())
+            # Put things that were after the range() call after the list call.
+            for n in results["rest"]:
+                list_call.append_child(n)
+            return list_call
+        return node
+
+    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
+    p1 = patcomp.compile_pattern(P1)
+
+    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
+            | comp_for< 'for' any 'in' node=any any* >
+            | comparison< any 'in' node=any any*>
+         """
+    p2 = patcomp.compile_pattern(P2)
+
+    def in_special_context(self, node):
+        if node.parent is None:
+            return False
+        results = {}
+        if (node.parent.parent is not None and
+               self.p1.match(node.parent.parent, results) and
+               results["node"] is node):
+            # list(d.keys()) -> list(d.keys()), etc.
+            return results["func"].value in consuming_calls
+        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
+        return self.p2.match(node.parent, results) and results["node"] is node
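
A sketch of the three cases on hypothetical input:

    for i in xrange(10): pass    # -> for i in range(10): pass
    lst = range(5)               # -> lst = list(range(5))
    for i in range(5): pass      # unchanged: a for-loop is a special context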

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_xreadlines.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_xreadlines.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,24 @@
+"""Fix "for x in f.xreadlines()" -> "for x in f".
+
+This fixer will also convert g(f.xreadlines) into g(f.__iter__)."""
+# Author: Collin Winter
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name
+
+
+class FixXreadlines(fixer_base.BaseFix):
+    PATTERN = """
+    power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > >
+    |
+    power< any+ trailer< '.' no_call='xreadlines' > >
+    """
+
+    def transform(self, node, results):
+        no_call = results.get("no_call")
+
+        if no_call:
+            no_call.replace(Name("__iter__", prefix=no_call.get_prefix()))
+        else:
+            node.replace([x.clone() for x in results["call"]])
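
A sketch of both patterns on a hypothetical file object f:

    for line in f.xreadlines():    # -> for line in f:
        pass
    g(f.xreadlines)                # -> g(f.__iter__)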

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_zip.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from2/fix_zip.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,34 @@
+"""
+Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...))
+unless there exists a 'from future_builtins import zip' statement in the
+top-level namespace.
+
+We avoid the transformation if the zip() call is directly contained in
+iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:.
+"""
+
+# Local imports
+from ... import fixer_base
+from ...fixer_util import Name, Call, in_special_context
+
+class FixZip(fixer_base.ConditionalFix):
+
+    PATTERN = """
+    power< 'zip' args=trailer< '(' [any] ')' >
+    >
+    """
+
+    skip_on = "future_builtins.zip"
+
+    def transform(self, node, results):
+        if self.should_skip(node):
+            return
+
+        if in_special_context(node):
+            return None
+
+        new = node.clone()
+        new.set_prefix("")
+        new = Call(Name("list"), [new])
+        new.set_prefix(node.get_prefix())
+        return new
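
A sketch on hypothetical input, including a special context where the
wrapping is suppressed:

    pairs = zip(a, b)              # -> pairs = list(zip(a, b))
    for x, y in zip(a, b): pass    # unchanged: the loop consumes the iterator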

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from3/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from3/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,2 @@
+from . import fix_range
+from . import fix_renames

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from3/fix_range.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from3/fix_range.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,21 @@
+# Based on fix_xrange.py
+# 3to2 modification by Paul Kippes
+
+"""
+range(...) -> xrange(...)
+"""
+
+from ..fixer_common import *
+
+class FixRange(fixer_base.BaseFix):
+
+    PATTERN = """
+              power<
+                 (name='range') trailer< '(' args=any ')' >
+              rest=any* >
+              """
+
+    def transform(self, node, results):
+        name = results["name"]
+        name.replace(Name("xrange", prefix=name.get_prefix()))
+        return node
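
A concrete instance on hypothetical input:

    for i in range(10): pass     # Python 3 input
    for i in xrange(10): pass    # after the 3to2 fix_range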

Added: sandbox/trunk/refactor_pkg/refactor/fixes/from3/fix_renames.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/fixes/from3/fix_renames.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,69 @@
+"""Fix incompatible renames
+
+Incorporates simple complementary 3-to-2 transforms:
+
+Fixes:
+  * sys.maxsize -> sys.maxint
+"""
+
+# Local imports
+from ..fixer_common import *
+
+MAPPING = {'sys':  {'maxsize' : 'maxint',
+                    },
+           }
+LOOKUP = {}
+
+def alternates(members):
+    return "(" + "|".join(map(repr, members)) + ")"
+
+
+def build_pattern():
+    #bare = set()
+    for module, replace in MAPPING.items():
+        for old_attr, new_attr in replace.items():
+            LOOKUP[(module, old_attr)] = new_attr
+            #bare.add(module)
+            #bare.add(old_attr)
+            #yield """
+            #      import_name< 'import' (module=%r
+            #          | dotted_as_names< any* module=%r any* >) >
+            #      """ % (module, module)
+            yield """
+                  import_from< 'from' module_name=%r 'import'
+                      ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) >
+                  """ % (module, old_attr, old_attr)
+            yield """
+                  power< module_name=%r trailer< '.' attr_name=%r > any* >
+                  """ % (module, old_attr)
+    #yield """bare_name=%s""" % alternates(bare)
+
+
+class FixRenames(fixer_base.BaseFix):
+    PATTERN = "|".join(build_pattern())
+
+    order = "pre" # Pre-order tree traversal
+
+    # Don't match the node if it's within another match
+    def match(self, node):
+        match = super(FixRenames, self).match
+        results = match(node)
+        if results:
+            if any(match(obj) for obj in attr_chain(node, "parent")):
+                return False
+            return results
+        return False
+
+    #def start_tree(self, tree, filename):
+    #    super(FixRenames, self).start_tree(tree, filename)
+    #    self.replace = {}
+
+    def transform(self, node, results):
+        mod_name = results.get("module_name")
+        attr_name = results.get("attr_name")
+        #bare_name = results.get("bare_name")
+        #import_mod = results.get("module")
+
+        if mod_name and attr_name:
+            new_attr = LOOKUP[(mod_name.value, attr_name.value)]
+            attr_name.replace(Name(new_attr, prefix=attr_name.get_prefix()))
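
A concrete instance of the two matched forms (hypothetical input):

    from sys import maxsize    # -> from sys import maxint
    n = sys.maxsize            # -> n = sys.maxint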

Added: sandbox/trunk/refactor_pkg/refactor/main.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/main.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,134 @@
+"""
+Main program for refactor.
+"""
+
+import sys
+import os
+import logging
+import shutil
+import optparse
+
+from . import refactor
+
+
+class StdoutRefactoringTool(refactor.RefactoringTool):
+    """
+    Prints output to stdout.
+    """
+
+    def __init__(self, fixers, options, explicit, nobackups):
+        self.nobackups = nobackups
+        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
+
+    def log_error(self, msg, *args, **kwargs):
+        self.errors.append((msg, args, kwargs))
+        self.logger.error(msg, *args, **kwargs)
+
+    def write_file(self, new_text, filename, old_text):
+        if not self.nobackups:
+            # Make backup
+            backup = filename + ".bak"
+            if os.path.lexists(backup):
+                try:
+                    os.remove(backup)
+                except os.error, err:
+                    self.log_message("Can't remove backup %s", backup)
+            try:
+                os.rename(filename, backup)
+            except os.error, err:
+                self.log_message("Can't rename %s to %s", filename, backup)
+        # Actually write the new file
+        super(StdoutRefactoringTool, self).write_file(new_text,
+                                                      filename, old_text)
+        if not self.nobackups:
+            shutil.copymode(backup, filename)
+
+    def print_output(self, lines):
+        for line in lines:
+            print line
+
+
+def main(fixer_pkg, args=None):
+    """Main program.
+
+    Args:
+        fixer_pkg: the name of a package where the fixers are located.
+        args: optional; a list of command line arguments. If omitted,
+              sys.argv[1:] is used.
+
+    Returns a suggested exit status (0, 1, 2).
+    """
+    # Set up option parser
+    parser = optparse.OptionParser(usage="%s [options] file|dir ..." %
+                                   sys.argv[0])
+    parser.add_option("-d", "--doctests_only", action="store_true",
+                      help="Fix up doctests only")
+    parser.add_option("-f", "--fix", action="append", default=[],
+                      help="Each FIX specifies a transformation; default: all")
+    parser.add_option("-x", "--nofix", action="append", default=[],
+                      help="Prevent a fixer from being run.")
+    parser.add_option("-l", "--list-fixes", action="store_true",
+                      help="List available transformations (fixes/fix_*.py)")
+    parser.add_option("-p", "--print-function", action="store_true",
+                      help="Modify the grammar so that print() is a function")
+    parser.add_option("-v", "--verbose", action="store_true",
+                      help="More verbose logging")
+    parser.add_option("-w", "--write", action="store_true",
+                      help="Write back modified files")
+    parser.add_option("-n", "--nobackups", action="store_true", default=False,
+                      help="Don't write backups for modified files.")
+
+    # Parse command line arguments
+    refactor_stdin = False
+    options, args = parser.parse_args(args)
+    if not options.write and options.nobackups:
+        parser.error("Can't use -n without -w")
+    if options.list_fixes:
+        print "Available transformations for the -f/--fix option:"
+        for fixname in refactor.get_all_fix_names(fixer_pkg):
+            print fixname
+        if not args:
+            return 0
+    if not args:
+        print >>sys.stderr, "At least one file or directory argument required."
+        print >>sys.stderr, "Use --help to show usage."
+        return 2
+    if "-" in args:
+        refactor_stdin = True
+        if options.write:
+            print >>sys.stderr, "Can't write to stdin."
+            return 2
+
+    # Set up logging handler
+    level = logging.DEBUG if options.verbose else logging.INFO
+    logging.basicConfig(format='%(name)s: %(message)s', level=level)
+
+    # Initialize the refactoring tool
+    rt_opts = {"print_function" : options.print_function}
+    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
+    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
+    explicit = set()
+    if options.fix:
+        all_present = False
+        for fix in options.fix:
+            if fix == "all":
+                all_present = True
+            else:
+                explicit.add(fixer_pkg + ".fix_" + fix)
+        requested = avail_fixes.union(explicit) if all_present else explicit
+    else:
+        requested = avail_fixes.union(explicit)
+    fixer_names = requested.difference(unwanted_fixes)
+    rt = StdoutRefactoringTool(sorted(fixer_names), rt_opts, sorted(explicit),
+                               options.nobackups)
+
+    # Refactor all files and directories passed as arguments
+    if not rt.errors:
+        if refactor_stdin:
+            rt.refactor_stdin()
+        else:
+            rt.refactor(args, options.write, options.doctests_only)
+        rt.summarize()
+
+    # Return error status (0 if rt.errors is zero)
+    return int(bool(rt.errors))
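
A minimal driver sketch; the fixer package name and the file argument are
assumptions based on this package layout, not a documented entry point:

    import sys
    from refactor import main

    # Rewrite example.py in place (with a .bak backup), using the 2to3 fixers.
    sys.exit(main.main("refactor.fixes.from2", args=["-w", "example.py"]))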

Added: sandbox/trunk/refactor_pkg/refactor/patcomp.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/patcomp.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,186 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Pattern compiler.
+
+The grammar is taken from PatternGrammar.txt.
+
+The compiler compiles a pattern to a pytree.*Pattern instance.
+"""
+
+__author__ = "Guido van Rossum <guido at python.org>"
+
+# Python imports
+import os
+
+# Fairly local imports
+from .pgen2 import driver
+from .pgen2 import literals
+from .pgen2 import token
+from .pgen2 import tokenize
+
+# Really local imports
+from . import pytree
+from . import pygram
+
+# The pattern grammar file
+_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
+                                     "PatternGrammar.txt")
+
+
+def tokenize_wrapper(input):
+    """Tokenizes a string suppressing significant whitespace."""
+    skip = set((token.NEWLINE, token.INDENT, token.DEDENT))
+    tokens = tokenize.generate_tokens(driver.generate_lines(input).next)
+    for quintuple in tokens:
+        type, value, start, end, line_text = quintuple
+        if type not in skip:
+            yield quintuple
+
+
+class PatternCompiler(object):
+
+    def __init__(self, grammar_file=_PATTERN_GRAMMAR_FILE):
+        """Initializer.
+
+        Takes an optional alternative filename for the pattern grammar.
+        """
+        self.grammar = driver.load_grammar(grammar_file)
+        self.syms = pygram.Symbols(self.grammar)
+        self.pygrammar = pygram.python_grammar
+        self.pysyms = pygram.python_symbols
+        self.driver = driver.Driver(self.grammar, convert=pattern_convert)
+
+    def compile_pattern(self, input, debug=False):
+        """Compiles a pattern string to a nested pytree.*Pattern object."""
+        tokens = tokenize_wrapper(input)
+        root = self.driver.parse_tokens(tokens, debug=debug)
+        return self.compile_node(root)
+
+    def compile_node(self, node):
+        """Compiles a node, recursively.
+
+        This is one big switch on the node type.
+        """
+        # XXX Optimize certain Wildcard-containing-Wildcard patterns
+        # that can be merged
+        if node.type == self.syms.Matcher:
+            node = node.children[0] # Avoid unneeded recursion
+
+        if node.type == self.syms.Alternatives:
+            # Skip the odd children since they are just '|' tokens
+            alts = [self.compile_node(ch) for ch in node.children[::2]]
+            if len(alts) == 1:
+                return alts[0]
+            p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
+            return p.optimize()
+
+        if node.type == self.syms.Alternative:
+            units = [self.compile_node(ch) for ch in node.children]
+            if len(units) == 1:
+                return units[0]
+            p = pytree.WildcardPattern([units], min=1, max=1)
+            return p.optimize()
+
+        if node.type == self.syms.NegatedUnit:
+            pattern = self.compile_basic(node.children[1:])
+            p = pytree.NegatedPattern(pattern)
+            return p.optimize()
+
+        assert node.type == self.syms.Unit
+
+        name = None
+        nodes = node.children
+        if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
+            name = nodes[0].value
+            nodes = nodes[2:]
+        repeat = None
+        if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
+            repeat = nodes[-1]
+            nodes = nodes[:-1]
+
+        # Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
+        pattern = self.compile_basic(nodes, repeat)
+
+        if repeat is not None:
+            assert repeat.type == self.syms.Repeater
+            children = repeat.children
+            child = children[0]
+            if child.type == token.STAR:
+                min = 0
+                max = pytree.HUGE
+            elif child.type == token.PLUS:
+                min = 1
+                max = pytree.HUGE
+            elif child.type == token.LBRACE:
+                assert children[-1].type == token.RBRACE
+                assert len(children) in (3, 5)
+                min = max = self.get_int(children[1])
+                if len(children) == 5:
+                    max = self.get_int(children[3])
+            else:
+                assert False
+            if min != 1 or max != 1:
+                pattern = pattern.optimize()
+                pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
+
+        if name is not None:
+            pattern.name = name
+        return pattern.optimize()
+
+    def compile_basic(self, nodes, repeat=None):
+        # Compile STRING | NAME [Details] | (...) | [...]
+        assert len(nodes) >= 1
+        node = nodes[0]
+        if node.type == token.STRING:
+            value = literals.evalString(node.value)
+            return pytree.LeafPattern(content=value)
+        elif node.type == token.NAME:
+            value = node.value
+            if value.isupper():
+                if value not in TOKEN_MAP:
+                    raise SyntaxError("Invalid token: %r" % value)
+                return pytree.LeafPattern(TOKEN_MAP[value])
+            else:
+                if value == "any":
+                    type = None
+                elif not value.startswith("_"):
+                    type = getattr(self.pysyms, value, None)
+                    if type is None:
+                        raise SyntaxError("Invalid symbol: %r" % value)
+                if nodes[1:]: # Details present
+                    content = [self.compile_node(nodes[1].children[1])]
+                else:
+                    content = None
+                return pytree.NodePattern(type, content)
+        elif node.value == "(":
+            return self.compile_node(nodes[1])
+        elif node.value == "[":
+            assert repeat is None
+            subpattern = self.compile_node(nodes[1])
+            return pytree.WildcardPattern([[subpattern]], min=0, max=1)
+        assert False, node
+
+    def get_int(self, node):
+        assert node.type == token.NUMBER
+        return int(node.value)
+
+
+# Map named tokens to the type value for a LeafPattern
+TOKEN_MAP = {"NAME": token.NAME,
+             "STRING": token.STRING,
+             "NUMBER": token.NUMBER,
+             "TOKEN": None}
+
+
+def pattern_convert(grammar, raw_node_info):
+    """Converts raw node information to a Node or Leaf instance."""
+    type, value, context, children = raw_node_info
+    if children or type in grammar.number2symbol:
+        return pytree.Node(type, children, context=context)
+    else:
+        return pytree.Leaf(type, value, context=context)
+
+
+def compile_pattern(pattern):
+    return PatternCompiler().compile_pattern(pattern)
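
A usage sketch for the module-level helper; the pattern text and the node
being matched are illustrative:

    from refactor import patcomp

    pat = patcomp.compile_pattern("power< 'sys' trailer< '.' attr=NAME > >")
    results = {}
    if pat.match(node, results):      # node: a pytree node parsed elsewhere
        attr_leaf = results["attr"]   # the captured NAME leaf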

Added: sandbox/trunk/refactor_pkg/refactor/pgen2/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,12 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""The pgen2 package."""
+import conv
+import driver
+import grammar
+import literals
+import parse
+import pgen
+import tokenize
+import token

Added: sandbox/trunk/refactor_pkg/refactor/pgen2/conv.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/conv.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,257 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Convert graminit.[ch] spit out by pgen to Python code.
+
+Pgen is the Python parser generator.  It is useful to quickly create a
+parser from a grammar file in Python's grammar notation.  But I don't
+want my parsers to be written in C (yet), so I'm translating the
+parsing tables to Python data structures and writing a Python parse
+engine.
+
+Note that the token numbers are constants determined by the standard
+Python tokenizer.  The standard token module defines these numbers and
+their names (the names are not used much).  The token numbers are
+hardcoded into the Python tokenizer and into pgen.  A Python
+implementation of the Python tokenizer is also available, in the
+standard tokenize module.
+
+On the other hand, symbol numbers (representing the grammar's
+non-terminals) are assigned by pgen based on the actual grammar
+input.
+
+Note: this module is pretty much obsolete; the pgen module generates
+equivalent grammar tables directly from the Grammar.txt input file
+without having to invoke the Python pgen C program.
+
+"""
+
+# Python imports
+import re
+
+# Local imports
+from . import grammar, token
+
+
+class Converter(grammar.Grammar):
+    """Grammar subclass that reads classic pgen output files.
+
+    The run() method reads the tables as produced by the pgen parser
+    generator, typically contained in two C files, graminit.h and
+    graminit.c.  The other methods are for internal use only.
+
+    See the base class for more documentation.
+
+    """
+
+    def run(self, graminit_h, graminit_c):
+        """Load the grammar tables from the text files written by pgen."""
+        self.parse_graminit_h(graminit_h)
+        self.parse_graminit_c(graminit_c)
+        self.finish_off()
+
+    def parse_graminit_h(self, filename):
+        """Parse the .h file writen by pgen.  (Internal)
+
+        This file is a sequence of #define statements defining the
+        nonterminals of the grammar as numbers.  We build two tables
+        mapping the numbers to names and back.
+
+        """
+        try:
+            f = open(filename)
+        except IOError, err:
+            print "Can't open %s: %s" % (filename, err)
+            return False
+        self.symbol2number = {}
+        self.number2symbol = {}
+        lineno = 0
+        for line in f:
+            lineno += 1
+            mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
+            if not mo and line.strip():
+                print "%s(%s): can't parse %s" % (filename, lineno,
+                                                  line.strip())
+            else:
+                symbol, number = mo.groups()
+                number = int(number)
+                assert symbol not in self.symbol2number
+                assert number not in self.number2symbol
+                self.symbol2number[symbol] = number
+                self.number2symbol[number] = symbol
+        return True
+
+    def parse_graminit_c(self, filename):
+        """Parse the .c file writen by pgen.  (Internal)
+
+        The file looks as follows.  The first two lines are always this:
+
+        #include "pgenheaders.h"
+        #include "grammar.h"
+
+        After that come four blocks:
+
+        1) one or more state definitions
+        2) a table defining dfas
+        3) a table defining labels
+        4) a struct defining the grammar
+
+        A state definition has the following form:
+        - one or more arc arrays, each of the form:
+          static arc arcs_<n>_<m>[<k>] = {
+                  {<i>, <j>},
+                  ...
+          };
+        - followed by a state array, of the form:
+          static state states_<s>[<t>] = {
+                  {<k>, arcs_<n>_<m>},
+                  ...
+          };
+
+        """
+        try:
+            f = open(filename)
+        except IOError, err:
+            print "Can't open %s: %s" % (filename, err)
+            return False
+        # The code below essentially uses f's iterator-ness!
+        lineno = 0
+
+        # Expect the two #include lines
+        lineno, line = lineno+1, f.next()
+        assert line == '#include "pgenheaders.h"\n', (lineno, line)
+        lineno, line = lineno+1, f.next()
+        assert line == '#include "grammar.h"\n', (lineno, line)
+
+        # Parse the state definitions
+        lineno, line = lineno+1, f.next()
+        allarcs = {}
+        states = []
+        while line.startswith("static arc "):
+            while line.startswith("static arc "):
+                mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
+                              line)
+                assert mo, (lineno, line)
+                n, m, k = map(int, mo.groups())
+                arcs = []
+                for _ in range(k):
+                    lineno, line = lineno+1, f.next()
+                    mo = re.match(r"\s+{(\d+), (\d+)},$", line)
+                    assert mo, (lineno, line)
+                    i, j = map(int, mo.groups())
+                    arcs.append((i, j))
+                lineno, line = lineno+1, f.next()
+                assert line == "};\n", (lineno, line)
+                allarcs[(n, m)] = arcs
+                lineno, line = lineno+1, f.next()
+            mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
+            assert mo, (lineno, line)
+            s, t = map(int, mo.groups())
+            assert s == len(states), (lineno, line)
+            state = []
+            for _ in range(t):
+                lineno, line = lineno+1, f.next()
+                mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
+                assert mo, (lineno, line)
+                k, n, m = map(int, mo.groups())
+                arcs = allarcs[n, m]
+                assert k == len(arcs), (lineno, line)
+                state.append(arcs)
+            states.append(state)
+            lineno, line = lineno+1, f.next()
+            assert line == "};\n", (lineno, line)
+            lineno, line = lineno+1, f.next()
+        self.states = states
+
+        # Parse the dfas
+        dfas = {}
+        mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
+        assert mo, (lineno, line)
+        ndfas = int(mo.group(1))
+        for i in range(ndfas):
+            lineno, line = lineno+1, f.next()
+            mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
+                          line)
+            assert mo, (lineno, line)
+            symbol = mo.group(2)
+            number, x, y, z = map(int, mo.group(1, 3, 4, 5))
+            assert self.symbol2number[symbol] == number, (lineno, line)
+            assert self.number2symbol[number] == symbol, (lineno, line)
+            assert x == 0, (lineno, line)
+            state = states[z]
+            assert y == len(state), (lineno, line)
+            lineno, line = lineno+1, f.next()
+            mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
+            assert mo, (lineno, line)
+            first = {}
+            rawbitset = eval(mo.group(1))
+            for i, c in enumerate(rawbitset):
+                byte = ord(c)
+                for j in range(8):
+                    if byte & (1<<j):
+                        first[i*8 + j] = 1
+            dfas[number] = (state, first)
+        lineno, line = lineno+1, f.next()
+        assert line == "};\n", (lineno, line)
+        self.dfas = dfas
+
+        # Parse the labels
+        labels = []
+        lineno, line = lineno+1, f.next()
+        mo = re.match(r"static label labels\[(\d+)\] = {$", line)
+        assert mo, (lineno, line)
+        nlabels = int(mo.group(1))
+        for i in range(nlabels):
+            lineno, line = lineno+1, f.next()
+            mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
+            assert mo, (lineno, line)
+            x, y = mo.groups()
+            x = int(x)
+            if y == "0":
+                y = None
+            else:
+                y = eval(y)
+            labels.append((x, y))
+        lineno, line = lineno+1, f.next()
+        assert line == "};\n", (lineno, line)
+        self.labels = labels
+
+        # Parse the grammar struct
+        lineno, line = lineno+1, f.next()
+        assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
+        lineno, line = lineno+1, f.next()
+        mo = re.match(r"\s+(\d+),$", line)
+        assert mo, (lineno, line)
+        ndfas = int(mo.group(1))
+        assert ndfas == len(self.dfas)
+        lineno, line = lineno+1, f.next()
+        assert line == "\tdfas,\n", (lineno, line)
+        lineno, line = lineno+1, f.next()
+        mo = re.match(r"\s+{(\d+), labels},$", line)
+        assert mo, (lineno, line)
+        nlabels = int(mo.group(1))
+        assert nlabels == len(self.labels), (lineno, line)
+        lineno, line = lineno+1, f.next()
+        mo = re.match(r"\s+(\d+)$", line)
+        assert mo, (lineno, line)
+        start = int(mo.group(1))
+        assert start in self.number2symbol, (lineno, line)
+        self.start = start
+        lineno, line = lineno+1, f.next()
+        assert line == "};\n", (lineno, line)
+        try:
+            lineno, line = lineno+1, f.next()
+        except StopIteration:
+            pass
+        else:
+            assert 0, (lineno, line)
+
+    def finish_off(self):
+        """Create additional useful structures.  (Internal)."""
+        self.keywords = {} # map from keyword strings to arc labels
+        self.tokens = {}   # map from numeric token values to arc labels
+        for ilabel, (type, value) in enumerate(self.labels):
+            if type == token.NAME and value is not None:
+                self.keywords[value] = ilabel
+            elif value is None:
+                self.tokens[type] = ilabel
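
A minimal usage sketch, assuming classic pgen output files exist at the
given paths:

    from refactor.pgen2.conv import Converter

    c = Converter()
    c.run("graminit.h", "graminit.c")
    # c.symbol2number, c.dfas, c.labels and c.start are now populated.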

Added: sandbox/trunk/refactor_pkg/refactor/pgen2/driver.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/driver.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,146 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# Modifications:
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Parser driver.
+
+This provides a high-level interface to parse a file into a syntax tree.
+
+"""
+
+__author__ = "Guido van Rossum <guido at python.org>"
+
+__all__ = ["Driver", "load_grammar"]
+
+# Python imports
+import os
+import logging
+import sys
+
+# Pgen imports
+from . import grammar, parse, token, tokenize, pgen
+
+
+class Driver(object):
+
+    def __init__(self, grammar, convert=None, logger=None):
+        self.grammar = grammar
+        if logger is None:
+            logger = logging.getLogger()
+        self.logger = logger
+        self.convert = convert
+
+    def parse_tokens(self, tokens, debug=False):
+        """Parse a series of tokens and return the syntax tree."""
+        # XXX Move the prefix computation into a wrapper around tokenize.
+        p = parse.Parser(self.grammar, self.convert)
+        p.setup()
+        lineno = 1
+        column = 0
+        type = value = start = end = line_text = None
+        prefix = ""
+        for quintuple in tokens:
+            type, value, start, end, line_text = quintuple
+            if start != (lineno, column):
+                assert (lineno, column) <= start, ((lineno, column), start)
+                s_lineno, s_column = start
+                if lineno < s_lineno:
+                    prefix += "\n" * (s_lineno - lineno)
+                    lineno = s_lineno
+                    column = 0
+                if column < s_column:
+                    prefix += line_text[column:s_column]
+                    column = s_column
+            if type in (tokenize.COMMENT, tokenize.NL):
+                prefix += value
+                lineno, column = end
+                if value.endswith("\n"):
+                    lineno += 1
+                    column = 0
+                continue
+            if type == token.OP:
+                type = grammar.opmap[value]
+            if debug:
+                self.logger.debug("%s %r (prefix=%r)",
+                                  token.tok_name[type], value, prefix)
+            if p.addtoken(type, value, (prefix, start)):
+                if debug:
+                    self.logger.debug("Stop.")
+                break
+            prefix = ""
+            lineno, column = end
+            if value.endswith("\n"):
+                lineno += 1
+                column = 0
+        else:
+            # We never broke out -- EOF is too soon (how can this happen???)
+            raise parse.ParseError("incomplete input",
+                                   type, value, (prefix, start))
+        return p.rootnode
+
+    def parse_stream_raw(self, stream, debug=False):
+        """Parse a stream and return the syntax tree."""
+        tokens = tokenize.generate_tokens(stream.readline)
+        return self.parse_tokens(tokens, debug)
+
+    def parse_stream(self, stream, debug=False):
+        """Parse a stream and return the syntax tree."""
+        return self.parse_stream_raw(stream, debug)
+
+    def parse_file(self, filename, debug=False):
+        """Parse a file and return the syntax tree."""
+        stream = open(filename)
+        try:
+            return self.parse_stream(stream, debug)
+        finally:
+            stream.close()
+
+    def parse_string(self, text, debug=False):
+        """Parse a string and return the syntax tree."""
+        tokens = tokenize.generate_tokens(generate_lines(text).next)
+        return self.parse_tokens(tokens, debug)
+
+
+def generate_lines(text):
+    """Generator that behaves like readline without using StringIO."""
+    for line in text.splitlines(True):
+        yield line
+    while True:
+        yield ""
+
+
+def load_grammar(gt="Grammar.txt", gp=None,
+                 save=True, force=False, logger=None):
+    """Load the grammar (maybe from a pickle)."""
+    if logger is None:
+        logger = logging.getLogger()
+    if gp is None:
+        head, tail = os.path.splitext(gt)
+        if tail == ".txt":
+            tail = ""
+        gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
+    if force or not _newer(gp, gt):
+        logger.info("Generating grammar tables from %s", gt)
+        g = pgen.generate_grammar(gt)
+        if save:
+            logger.info("Writing grammar tables to %s", gp)
+            try:
+                g.dump(gp)
+            except IOError, e:
+                logger.info("Writing failed:"+str(e))
+    else:
+        g = grammar.Grammar()
+        g.load(gp)
+    return g
+
+
+def _newer(a, b):
+    """Inquire whether file a was written since file b."""
+    if not os.path.exists(a):
+        return False
+    if not os.path.exists(b):
+        return True
+    return os.path.getmtime(a) >= os.path.getmtime(b)

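A minimal sketch of the intended entry points, assuming a Grammar.txt sits in
the current directory and the source text ends with a newline:

    from refactor.pgen2 import driver

    g = driver.load_grammar("Grammar.txt")  # generate tables or load the pickle
    d = driver.Driver(g)                    # no convert: concrete syntax tree
    tree = d.parse_string("x = 1\n")
    print tree
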
Added: sandbox/trunk/refactor_pkg/refactor/pgen2/grammar.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/grammar.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,171 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""This module defines the data structures used to represent a grammar.
+
+These are a bit arcane because they are derived from the data
+structures used by Python's 'pgen' parser generator.
+
+There's also a table here mapping operators to their names in the
+token module; the Python tokenize module reports all operators as the
+fallback token code OP, but the parser needs the actual token code.
+
+"""
+
+# Python imports
+import pickle
+
+# Local imports
+from . import token, tokenize
+
+
+class Grammar(object):
+    """Pgen parsing tables tables conversion class.
+
+    Once initialized, this class supplies the grammar tables for the
+    parsing engine implemented by parse.py.  The parsing engine
+    accesses the instance variables directly.  The class here does not
+    provide initialization of the tables; several subclasses exist to
+    do this (see the conv and pgen modules).
+
+    The load() method reads the tables from a pickle file, which is
+    much faster than the other ways offered by subclasses.  The pickle
+    file is written by calling dump() (after loading the grammar
+    tables using a subclass).  The report() method prints a readable
+    representation of the tables to stdout, for debugging.
+
+    The instance variables are as follows:
+
+    symbol2number -- a dict mapping symbol names to numbers.  Symbol
+                     numbers are always 256 or higher, to distinguish
+                     them from token numbers, which are between 0 and
+                     255 (inclusive).
+
+    number2symbol -- a dict mapping numbers to symbol names;
+                     these two are each other's inverse.
+
+    states        -- a list of DFAs, where each DFA is a list of
+                     states, each state is a list of arcs, and each
+                     arc is an (i, j) pair where i is a label and j is
+                     a state number.  The DFA number is the index into
+                     this list.  (This name is slightly confusing.)
+                     Final states are represented by a special arc of
+                     the form (0, j) where j is its own state number.
+
+    dfas          -- a dict mapping symbol numbers to (DFA, first)
+                     pairs, where DFA is an item from the states list
+                     above, and first is a set of tokens that can
+                     begin this grammar rule (represented by a dict
+                     whose values are always 1).
+
+    labels        -- a list of (x, y) pairs where x is either a token
+                     number or a symbol number, and y is either None
+                     or a string; the strings are keywords.  The label
+                     number is the index in this list; label numbers
+                     are used to mark state transitions (arcs) in the
+                     DFAs.
+
+    start         -- the number of the grammar's start symbol.
+
+    keywords      -- a dict mapping keyword strings to arc labels.
+
+    tokens        -- a dict mapping token numbers to arc labels.
+
+    """
+
+    def __init__(self):
+        self.symbol2number = {}
+        self.number2symbol = {}
+        self.states = []
+        self.dfas = {}
+        self.labels = [(0, "EMPTY")]
+        self.keywords = {}
+        self.tokens = {}
+        self.symbol2label = {}
+        self.start = 256
+
+    def dump(self, filename):
+        """Dump the grammar tables to a pickle file."""
+        f = open(filename, "wb")
+        pickle.dump(self.__dict__, f, 2)
+        f.close()
+
+    def load(self, filename):
+        """Load the grammar tables from a pickle file."""
+        f = open(filename, "rb")
+        d = pickle.load(f)
+        f.close()
+        self.__dict__.update(d)
+
+    def report(self):
+        """Dump the grammar tables to standard output, for debugging."""
+        from pprint import pprint
+        print "s2n"
+        pprint(self.symbol2number)
+        print "n2s"
+        pprint(self.number2symbol)
+        print "states"
+        pprint(self.states)
+        print "dfas"
+        pprint(self.dfas)
+        print "labels"
+        pprint(self.labels)
+        print "start", self.start
+
+
+# Map from operator to number (since tokenize doesn't do this)
+
+opmap_raw = """
+( LPAR
+) RPAR
+[ LSQB
+] RSQB
+: COLON
+, COMMA
+; SEMI
++ PLUS
+- MINUS
+* STAR
+/ SLASH
+| VBAR
+& AMPER
+< LESS
+> GREATER
+= EQUAL
+. DOT
+% PERCENT
+` BACKQUOTE
+{ LBRACE
+} RBRACE
+@ AT
+== EQEQUAL
+!= NOTEQUAL
+<> NOTEQUAL
+<= LESSEQUAL
+>= GREATEREQUAL
+~ TILDE
+^ CIRCUMFLEX
+<< LEFTSHIFT
+>> RIGHTSHIFT
+** DOUBLESTAR
++= PLUSEQUAL
+-= MINEQUAL
+*= STAREQUAL
+/= SLASHEQUAL
+%= PERCENTEQUAL
+&= AMPEREQUAL
+|= VBAREQUAL
+^= CIRCUMFLEXEQUAL
+<<= LEFTSHIFTEQUAL
+>>= RIGHTSHIFTEQUAL
+**= DOUBLESTAREQUAL
+// DOUBLESLASH
+//= DOUBLESLASHEQUAL
+-> RARROW
+"""
+
+opmap = {}
+for line in opmap_raw.splitlines():
+    if line:
+        op, name = line.split()
+        opmap[op] = getattr(token, name)

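Two details worth calling out: opmap resolves the generic OP tokens that
tokenize reports, and dump()/load() round-trip the tables through a pickle.
A small sketch (the pickle filename is arbitrary):

    from refactor.pgen2 import grammar, token

    assert grammar.opmap["=="] == token.EQEQUAL
    assert grammar.opmap["//="] == token.DOUBLESLASHEQUAL

    g = grammar.Grammar()
    g.dump("empty-tables.pickle")
    h = grammar.Grammar()
    h.load("empty-tables.pickle")
    assert h.labels == [(0, "EMPTY")]  # tables survive the round trip
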
Added: sandbox/trunk/refactor_pkg/refactor/pgen2/literals.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/literals.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,60 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Safely evaluate Python string literals without using eval()."""
+
+import re
+
+simple_escapes = {"a": "\a",
+                  "b": "\b",
+                  "f": "\f",
+                  "n": "\n",
+                  "r": "\r",
+                  "t": "\t",
+                  "v": "\v",
+                  "'": "'",
+                  '"': '"',
+                  "\\": "\\"}
+
+def escape(m):
+    all, tail = m.group(0, 1)
+    assert all.startswith("\\")
+    esc = simple_escapes.get(tail)
+    if esc is not None:
+        return esc
+    if tail.startswith("x"):
+        hexes = tail[1:]
+        if len(hexes) < 2:
+            raise ValueError("invalid hex string escape ('\\%s')" % tail)
+        try:
+            i = int(hexes, 16)
+        except ValueError:
+            raise ValueError("invalid hex string escape ('\\%s')" % tail)
+    else:
+        try:
+            i = int(tail, 8)
+        except ValueError:
+            raise ValueError("invalid octal string escape ('\\%s')" % tail)
+    return chr(i)
+
+def evalString(s):
+    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
+    q = s[0]
+    if s[:3] == q*3:
+        q = q*3
+    assert s.endswith(q), repr(s[-len(q):])
+    assert len(s) >= 2*len(q)
+    s = s[len(q):-len(q)]
+    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)
+
+def test():
+    for i in range(256):
+        c = chr(i)
+        s = repr(c)
+        e = evalString(s)
+        if e != c:
+            print i, c, s, e
+
+
+if __name__ == "__main__":
+    test()

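A tiny usage sketch; evalString() expects the quotes to be part of the input,
exactly as they appear in a raw STRING token:

    from refactor.pgen2 import literals

    s = literals.evalString(r"'a\x41\101\n'")
    assert s == "aAA\n"  # hex 41 and octal 101 both decode to "A"
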
Added: sandbox/trunk/refactor_pkg/refactor/pgen2/parse.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/parse.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,201 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Parser engine for the grammar tables generated by pgen.
+
+The grammar table must be loaded first.
+
+See Parser/parser.c in the Python distribution for additional info on
+how this parsing engine works.
+
+"""
+
+# Local imports
+from . import token
+
+class ParseError(Exception):
+    """Exception to signal the parser is stuck."""
+
+    def __init__(self, msg, type, value, context):
+        Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
+                           (msg, type, value, context))
+        self.msg = msg
+        self.type = type
+        self.value = value
+        self.context = context
+
+class Parser(object):
+    """Parser engine.
+
+    The proper usage sequence is:
+
+    p = Parser(grammar, [converter])  # create instance
+    p.setup([start])                  # prepare for parsing
+    <for each input token>:
+        if p.addtoken(...):           # parse a token; may raise ParseError
+            break
+    root = p.rootnode                 # root of abstract syntax tree
+
+    A Parser instance may be reused by calling setup() repeatedly.
+
+    A Parser instance contains state pertaining to the current token
+    sequence, and should not be used concurrently by different threads
+    to parse separate token sequences.
+
+    See driver.py for how to get input tokens by tokenizing a file or
+    string.
+
+    Parsing is complete when addtoken() returns True; the root of the
+    abstract syntax tree can then be retrieved from the rootnode
+    instance variable.  When a syntax error occurs, addtoken() raises
+    the ParseError exception.  There is no error recovery; the parser
+    cannot be used after a syntax error was reported (but it can be
+    reinitialized by calling setup()).
+
+    """
+
+    def __init__(self, grammar, convert=None):
+        """Constructor.
+
+        The grammar argument is a grammar.Grammar instance; see the
+        grammar module for more information.
+
+        The parser is not ready yet for parsing; you must call the
+        setup() method to get it started.
+
+        The optional convert argument is a function mapping concrete
+        syntax tree nodes to abstract syntax tree nodes.  If not
+        given, no conversion is done and the syntax tree produced is
+        the concrete syntax tree.  If given, it must be a function of
+        two arguments, the first being the grammar (a grammar.Grammar
+        instance), and the second being the concrete syntax tree node
+        to be converted.  The syntax tree is converted from the bottom
+        up.
+
+        A concrete syntax tree node is a (type, value, context, nodes)
+        tuple, where type is the node type (a token or symbol number),
+        value is None for symbols and a string for tokens, context is
+        None or an opaque value used for error reporting (typically a
+        (lineno, offset) pair), and nodes is a list of children for
+        symbols, and None for tokens.
+
+        An abstract syntax tree node may be anything; this is entirely
+        up to the converter function.
+
+        """
+        self.grammar = grammar
+        self.convert = convert or (lambda grammar, node: node)
+
+    def setup(self, start=None):
+        """Prepare for parsing.
+
+        This *must* be called before starting to parse.
+
+        The optional argument is an alternative start symbol; it
+        defaults to the grammar's start symbol.
+
+        You can use a Parser instance to parse any number of programs;
+        each time you call setup() the parser is reset to an initial
+        state determined by the (implicit or explicit) start symbol.
+
+        """
+        if start is None:
+            start = self.grammar.start
+        # Each stack entry is a tuple: (dfa, state, node).
+        # A node is a tuple: (type, value, context, children),
+        # where children is a list of nodes or None, and context may be None.
+        newnode = (start, None, None, [])
+        stackentry = (self.grammar.dfas[start], 0, newnode)
+        self.stack = [stackentry]
+        self.rootnode = None
+        self.used_names = set() # Aliased to self.rootnode.used_names in pop()
+
+    def addtoken(self, type, value, context):
+        """Add a token; return True iff this is the end of the program."""
+        # Map from token to label
+        ilabel = self.classify(type, value, context)
+        # Loop until the token is shifted; may raise exceptions
+        while True:
+            dfa, state, node = self.stack[-1]
+            states, first = dfa
+            arcs = states[state]
+            # Look for a state with this label
+            for i, newstate in arcs:
+                t, v = self.grammar.labels[i]
+                if ilabel == i:
+                    # Look it up in the list of labels
+                    assert t < 256
+                    # Shift a token; we're done with it
+                    self.shift(type, value, newstate, context)
+                    # Pop while we are in an accept-only state
+                    state = newstate
+                    while states[state] == [(0, state)]:
+                        self.pop()
+                        if not self.stack:
+                            # Done parsing!
+                            return True
+                        dfa, state, node = self.stack[-1]
+                        states, first = dfa
+                    # Done with this token
+                    return False
+                elif t >= 256:
+                    # See if it's a symbol and if we're in its first set
+                    itsdfa = self.grammar.dfas[t]
+                    itsstates, itsfirst = itsdfa
+                    if ilabel in itsfirst:
+                        # Push a symbol
+                        self.push(t, self.grammar.dfas[t], newstate, context)
+                        break # To continue the outer while loop
+            else:
+                if (0, state) in arcs:
+                    # An accepting state, pop it and try something else
+                    self.pop()
+                    if not self.stack:
+                        # Done parsing, but another token is input
+                        raise ParseError("too much input",
+                                         type, value, context)
+                else:
+                    # No success finding a transition
+                    raise ParseError("bad input", type, value, context)
+
+    def classify(self, type, value, context):
+        """Turn a token into a label.  (Internal)"""
+        if type == token.NAME:
+            # Keep a listing of all used names
+            self.used_names.add(value)
+            # Check for reserved words
+            ilabel = self.grammar.keywords.get(value)
+            if ilabel is not None:
+                return ilabel
+        ilabel = self.grammar.tokens.get(type)
+        if ilabel is None:
+            raise ParseError("bad token", type, value, context)
+        return ilabel
+
+    def shift(self, type, value, newstate, context):
+        """Shift a token.  (Internal)"""
+        dfa, state, node = self.stack[-1]
+        newnode = (type, value, context, None)
+        newnode = self.convert(self.grammar, newnode)
+        if newnode is not None:
+            node[-1].append(newnode)
+        self.stack[-1] = (dfa, newstate, node)
+
+    def push(self, type, newdfa, newstate, context):
+        """Push a nonterminal.  (Internal)"""
+        dfa, state, node = self.stack[-1]
+        newnode = (type, None, context, [])
+        self.stack[-1] = (dfa, newstate, node)
+        self.stack.append((newdfa, 0, newnode))
+
+    def pop(self):
+        """Pop a nonterminal.  (Internal)"""
+        popdfa, popstate, popnode = self.stack.pop()
+        newnode = self.convert(self.grammar, popnode)
+        if newnode is not None:
+            if self.stack:
+                dfa, state, node = self.stack[-1]
+                node[-1].append(newnode)
+            else:
+                self.rootnode = newnode
+                self.rootnode.used_names = self.used_names

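Restating the usage sequence from the class docstring as a sketch; here g is
assumed to be a loaded Grammar and token_stream to yield (type, value,
context) triples of the kind driver.py produces:

    from refactor.pgen2 import parse

    p = parse.Parser(g)      # optionally pass a convert function
    p.setup()
    for type, value, context in token_stream:
        if p.addtoken(type, value, context):
            break            # the start symbol was accepted
    root = p.rootnode
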
Added: sandbox/trunk/refactor_pkg/refactor/pgen2/pgen.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/pgen.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,384 @@
+# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+# Pgen imports
+from . import grammar, token, tokenize
+
+class PgenGrammar(grammar.Grammar):
+    pass
+
+class ParserGenerator(object):
+
+    def __init__(self, filename, stream=None):
+        close_stream = None
+        if stream is None:
+            stream = open(filename)
+            close_stream = stream.close
+        self.filename = filename
+        self.stream = stream
+        self.generator = tokenize.generate_tokens(stream.readline)
+        self.gettoken() # Initialize lookahead
+        self.dfas, self.startsymbol = self.parse()
+        if close_stream is not None:
+            close_stream()
+        self.first = {} # map from symbol name to set of tokens
+        self.addfirstsets()
+
+    def make_grammar(self):
+        c = PgenGrammar()
+        names = self.dfas.keys()
+        names.sort()
+        names.remove(self.startsymbol)
+        names.insert(0, self.startsymbol)
+        for name in names:
+            i = 256 + len(c.symbol2number)
+            c.symbol2number[name] = i
+            c.number2symbol[i] = name
+        for name in names:
+            dfa = self.dfas[name]
+            states = []
+            for state in dfa:
+                arcs = []
+                for label, next in state.arcs.iteritems():
+                    arcs.append((self.make_label(c, label), dfa.index(next)))
+                if state.isfinal:
+                    arcs.append((0, dfa.index(state)))
+                states.append(arcs)
+            c.states.append(states)
+            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
+        c.start = c.symbol2number[self.startsymbol]
+        return c
+
+    def make_first(self, c, name):
+        rawfirst = self.first[name]
+        first = {}
+        for label in rawfirst:
+            ilabel = self.make_label(c, label)
+            ##assert ilabel not in first # XXX failed on <> ... !=
+            first[ilabel] = 1
+        return first
+
+    def make_label(self, c, label):
+        # XXX Maybe this should be a method on a subclass of converter?
+        ilabel = len(c.labels)
+        if label[0].isalpha():
+            # Either a symbol name or a named token
+            if label in c.symbol2number:
+                # A symbol name (a non-terminal)
+                if label in c.symbol2label:
+                    return c.symbol2label[label]
+                else:
+                    c.labels.append((c.symbol2number[label], None))
+                    c.symbol2label[label] = ilabel
+                    return ilabel
+            else:
+                # A named token (NAME, NUMBER, STRING)
+                itoken = getattr(token, label, None)
+                assert isinstance(itoken, int), label
+                assert itoken in token.tok_name, label
+                if itoken in c.tokens:
+                    return c.tokens[itoken]
+                else:
+                    c.labels.append((itoken, None))
+                    c.tokens[itoken] = ilabel
+                    return ilabel
+        else:
+            # Either a keyword or an operator
+            assert label[0] in ('"', "'"), label
+            value = eval(label)
+            if value[0].isalpha():
+                # A keyword
+                if value in c.keywords:
+                    return c.keywords[value]
+                else:
+                    c.labels.append((token.NAME, value))
+                    c.keywords[value] = ilabel
+                    return ilabel
+            else:
+                # An operator (any non-numeric token)
+                itoken = grammar.opmap[value] # Fails if unknown token
+                if itoken in c.tokens:
+                    return c.tokens[itoken]
+                else:
+                    c.labels.append((itoken, None))
+                    c.tokens[itoken] = ilabel
+                    return ilabel
+
+    def addfirstsets(self):
+        names = self.dfas.keys()
+        names.sort()
+        for name in names:
+            if name not in self.first:
+                self.calcfirst(name)
+            #print name, self.first[name].keys()
+
+    def calcfirst(self, name):
+        dfa = self.dfas[name]
+        self.first[name] = None # dummy to detect left recursion
+        state = dfa[0]
+        totalset = {}
+        overlapcheck = {}
+        for label, next in state.arcs.iteritems():
+            if label in self.dfas:
+                if label in self.first:
+                    fset = self.first[label]
+                    if fset is None:
+                        raise ValueError("recursion for rule %r" % name)
+                else:
+                    self.calcfirst(label)
+                    fset = self.first[label]
+                totalset.update(fset)
+                overlapcheck[label] = fset
+            else:
+                totalset[label] = 1
+                overlapcheck[label] = {label: 1}
+        inverse = {}
+        for label, itsfirst in overlapcheck.iteritems():
+            for symbol in itsfirst:
+                if symbol in inverse:
+                    raise ValueError("rule %s is ambiguous; %s is in the"
+                                     " first sets of %s as well as %s" %
+                                     (name, symbol, label, inverse[symbol]))
+                inverse[symbol] = label
+        self.first[name] = totalset
+
+    def parse(self):
+        dfas = {}
+        startsymbol = None
+        # MSTART: (NEWLINE | RULE)* ENDMARKER
+        while self.type != token.ENDMARKER:
+            while self.type == token.NEWLINE:
+                self.gettoken()
+            # RULE: NAME ':' RHS NEWLINE
+            name = self.expect(token.NAME)
+            self.expect(token.OP, ":")
+            a, z = self.parse_rhs()
+            self.expect(token.NEWLINE)
+            #self.dump_nfa(name, a, z)
+            dfa = self.make_dfa(a, z)
+            #self.dump_dfa(name, dfa)
+            oldlen = len(dfa)
+            self.simplify_dfa(dfa)
+            newlen = len(dfa)
+            dfas[name] = dfa
+            #print name, oldlen, newlen
+            if startsymbol is None:
+                startsymbol = name
+        return dfas, startsymbol
+
+    def make_dfa(self, start, finish):
+        # To turn an NFA into a DFA, we define the states of the DFA
+        # to correspond to *sets* of states of the NFA.  Then do some
+        # state reduction.  Let's represent sets as dicts with 1 for
+        # values.
+        assert isinstance(start, NFAState)
+        assert isinstance(finish, NFAState)
+        def closure(state):
+            base = {}
+            addclosure(state, base)
+            return base
+        def addclosure(state, base):
+            assert isinstance(state, NFAState)
+            if state in base:
+                return
+            base[state] = 1
+            for label, next in state.arcs:
+                if label is None:
+                    addclosure(next, base)
+        states = [DFAState(closure(start), finish)]
+        for state in states: # NB states grows while we're iterating
+            arcs = {}
+            for nfastate in state.nfaset:
+                for label, next in nfastate.arcs:
+                    if label is not None:
+                        addclosure(next, arcs.setdefault(label, {}))
+            for label, nfaset in arcs.iteritems():
+                for st in states:
+                    if st.nfaset == nfaset:
+                        break
+                else:
+                    st = DFAState(nfaset, finish)
+                    states.append(st)
+                state.addarc(st, label)
+        return states # List of DFAState instances; first one is start
+
+    def dump_nfa(self, name, start, finish):
+        print "Dump of NFA for", name
+        todo = [start]
+        for i, state in enumerate(todo):
+            print "  State", i, state is finish and "(final)" or ""
+            for label, next in state.arcs:
+                if next in todo:
+                    j = todo.index(next)
+                else:
+                    j = len(todo)
+                    todo.append(next)
+                if label is None:
+                    print "    -> %d" % j
+                else:
+                    print "    %s -> %d" % (label, j)
+
+    def dump_dfa(self, name, dfa):
+        print "Dump of DFA for", name
+        for i, state in enumerate(dfa):
+            print "  State", i, state.isfinal and "(final)" or ""
+            for label, next in state.arcs.iteritems():
+                print "    %s -> %d" % (label, dfa.index(next))
+
+    def simplify_dfa(self, dfa):
+        # This is not theoretically optimal, but works well enough.
+        # Algorithm: repeatedly look for two states that have the same
+        # set of arcs (same labels pointing to the same nodes) and
+        # unify them, until things stop changing.
+
+        # dfa is a list of DFAState instances
+        changes = True
+        while changes:
+            changes = False
+            for i, state_i in enumerate(dfa):
+                for j in range(i+1, len(dfa)):
+                    state_j = dfa[j]
+                    if state_i == state_j:
+                        #print "  unify", i, j
+                        del dfa[j]
+                        for state in dfa:
+                            state.unifystate(state_j, state_i)
+                        changes = True
+                        break
+
+    def parse_rhs(self):
+        # RHS: ALT ('|' ALT)*
+        a, z = self.parse_alt()
+        if self.value != "|":
+            return a, z
+        else:
+            aa = NFAState()
+            zz = NFAState()
+            aa.addarc(a)
+            z.addarc(zz)
+            while self.value == "|":
+                self.gettoken()
+                a, z = self.parse_alt()
+                aa.addarc(a)
+                z.addarc(zz)
+            return aa, zz
+
+    def parse_alt(self):
+        # ALT: ITEM+
+        a, b = self.parse_item()
+        while (self.value in ("(", "[") or
+               self.type in (token.NAME, token.STRING)):
+            c, d = self.parse_item()
+            b.addarc(c)
+            b = d
+        return a, b
+
+    def parse_item(self):
+        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
+        if self.value == "[":
+            self.gettoken()
+            a, z = self.parse_rhs()
+            self.expect(token.OP, "]")
+            a.addarc(z)
+            return a, z
+        else:
+            a, z = self.parse_atom()
+            value = self.value
+            if value not in ("+", "*"):
+                return a, z
+            self.gettoken()
+            z.addarc(a)
+            if value == "+":
+                return a, z
+            else:
+                return a, a
+
+    def parse_atom(self):
+        # ATOM: '(' RHS ')' | NAME | STRING
+        if self.value == "(":
+            self.gettoken()
+            a, z = self.parse_rhs()
+            self.expect(token.OP, ")")
+            return a, z
+        elif self.type in (token.NAME, token.STRING):
+            a = NFAState()
+            z = NFAState()
+            a.addarc(z, self.value)
+            self.gettoken()
+            return a, z
+        else:
+            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
+                             self.type, self.value)
+
+    def expect(self, type, value=None):
+        if self.type != type or (value is not None and self.value != value):
+            self.raise_error("expected %s/%s, got %s/%s",
+                             type, value, self.type, self.value)
+        value = self.value
+        self.gettoken()
+        return value
+
+    def gettoken(self):
+        tup = self.generator.next()
+        while tup[0] in (tokenize.COMMENT, tokenize.NL):
+            tup = self.generator.next()
+        self.type, self.value, self.begin, self.end, self.line = tup
+        #print token.tok_name[self.type], repr(self.value)
+
+    def raise_error(self, msg, *args):
+        if args:
+            try:
+                msg = msg % args
+            except:
+                msg = " ".join([msg] + map(str, args))
+        raise SyntaxError(msg, (self.filename, self.end[0],
+                                self.end[1], self.line))
+
+class NFAState(object):
+
+    def __init__(self):
+        self.arcs = [] # list of (label, NFAState) pairs
+
+    def addarc(self, next, label=None):
+        assert label is None or isinstance(label, str)
+        assert isinstance(next, NFAState)
+        self.arcs.append((label, next))
+
+class DFAState(object):
+
+    def __init__(self, nfaset, final):
+        assert isinstance(nfaset, dict)
+        assert isinstance(iter(nfaset).next(), NFAState)
+        assert isinstance(final, NFAState)
+        self.nfaset = nfaset
+        self.isfinal = final in nfaset
+        self.arcs = {} # map from label to DFAState
+
+    def addarc(self, next, label):
+        assert isinstance(label, str)
+        assert label not in self.arcs
+        assert isinstance(next, DFAState)
+        self.arcs[label] = next
+
+    def unifystate(self, old, new):
+        for label, next in self.arcs.iteritems():
+            if next is old:
+                self.arcs[label] = new
+
+    def __eq__(self, other):
+        # Equality test -- ignore the nfaset instance variable
+        assert isinstance(other, DFAState)
+        if self.isfinal != other.isfinal:
+            return False
+        # Can't just return self.arcs == other.arcs, because that
+        # would invoke this method recursively, with cycles...
+        if len(self.arcs) != len(other.arcs):
+            return False
+        for label, next in self.arcs.iteritems():
+            if next is not other.arcs.get(label):
+                return False
+        return True
+
+def generate_grammar(filename="Grammar.txt"):
+    p = ParserGenerator(filename)
+    return p.make_grammar()

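The public surface of this module is generate_grammar(); the classes above
are internal. A quick sketch, assuming a Grammar.txt is present:

    from refactor.pgen2 import pgen

    g = pgen.generate_grammar("Grammar.txt")
    print len(g.dfas), "rules; start symbol:", g.number2symbol[g.start]
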
Added: sandbox/trunk/refactor_pkg/refactor/pgen2/token.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/token.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,82 @@
+#! /usr/bin/env python
+
+"""Token constants (from "token.h")."""
+
+#  Taken from Python (r53757) and modified to include some tokens
+#   originally monkeypatched in by pgen2.tokenize
+
+#--start constants--
+ENDMARKER = 0
+NAME = 1
+NUMBER = 2
+STRING = 3
+NEWLINE = 4
+INDENT = 5
+DEDENT = 6
+LPAR = 7
+RPAR = 8
+LSQB = 9
+RSQB = 10
+COLON = 11
+COMMA = 12
+SEMI = 13
+PLUS = 14
+MINUS = 15
+STAR = 16
+SLASH = 17
+VBAR = 18
+AMPER = 19
+LESS = 20
+GREATER = 21
+EQUAL = 22
+DOT = 23
+PERCENT = 24
+BACKQUOTE = 25
+LBRACE = 26
+RBRACE = 27
+EQEQUAL = 28
+NOTEQUAL = 29
+LESSEQUAL = 30
+GREATEREQUAL = 31
+TILDE = 32
+CIRCUMFLEX = 33
+LEFTSHIFT = 34
+RIGHTSHIFT = 35
+DOUBLESTAR = 36
+PLUSEQUAL = 37
+MINEQUAL = 38
+STAREQUAL = 39
+SLASHEQUAL = 40
+PERCENTEQUAL = 41
+AMPEREQUAL = 42
+VBAREQUAL = 43
+CIRCUMFLEXEQUAL = 44
+LEFTSHIFTEQUAL = 45
+RIGHTSHIFTEQUAL = 46
+DOUBLESTAREQUAL = 47
+DOUBLESLASH = 48
+DOUBLESLASHEQUAL = 49
+AT = 50
+OP = 51
+COMMENT = 52
+NL = 53
+RARROW = 54
+ERRORTOKEN = 55
+N_TOKENS = 56
+NT_OFFSET = 256
+#--end constants--
+
+tok_name = {}
+for _name, _value in globals().items():
+    if type(_value) is type(0):
+        tok_name[_value] = _name
+
+
+def ISTERMINAL(x):
+    return x < NT_OFFSET
+
+def ISNONTERMINAL(x):
+    return x >= NT_OFFSET
+
+def ISEOF(x):
+    return x == ENDMARKER

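A few sanity checks on the constants and helpers above:

    from refactor.pgen2 import token

    assert token.tok_name[token.NAME] == "NAME"
    assert token.ISTERMINAL(token.OP)
    assert token.ISNONTERMINAL(token.NT_OFFSET)  # symbol numbers start at 256
    assert token.ISEOF(token.ENDMARKER)
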
Added: sandbox/trunk/refactor_pkg/refactor/pgen2/tokenize.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pgen2/tokenize.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,405 @@
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
+# All rights reserved.
+
+"""Tokenization help for Python programs.
+
+generate_tokens(readline) is a generator that breaks a stream of
+text into Python tokens.  It accepts a readline-like method which is called
+repeatedly to get the next line of input (or "" for EOF).  It generates
+5-tuples with these members:
+
+    the token type (see token.py)
+    the token (a string)
+    the starting (row, column) indices of the token (a 2-tuple of ints)
+    the ending (row, column) indices of the token (a 2-tuple of ints)
+    the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators.
+
+Older entry points
+    tokenize_loop(readline, tokeneater)
+    tokenize(readline, tokeneater=printtoken)
+are the same, except instead of generating tokens, tokeneater is a callback
+function to which the 5 fields described above are passed as 5 arguments,
+each time a new token is found."""
+
+__author__ = 'Ka-Ping Yee <ping at lfw.org>'
+__credits__ = \
+    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
+
+import string, re
+from .token import *
+
+from . import token
+__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
+           "generate_tokens", "untokenize"]
+del token
+
+def group(*choices): return '(' + '|'.join(choices) + ')'
+def any(*choices): return group(*choices) + '*'
+def maybe(*choices): return group(*choices) + '?'
+
+Whitespace = r'[ \f\t]*'
+Comment = r'#[^\r\n]*'
+Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
+Name = r'[a-zA-Z_]\w*'
+
+Binnumber = r'0[bB][01]*'
+Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
+Octnumber = r'0[oO]?[0-7]*[lL]?'
+Decnumber = r'[1-9]\d*[lL]?'
+Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
+Exponent = r'[eE][-+]?\d+'
+Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
+Expfloat = r'\d+' + Exponent
+Floatnumber = group(Pointfloat, Expfloat)
+Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
+Number = group(Imagnumber, Floatnumber, Intnumber)
+
+# Tail end of ' string.
+Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
+# Tail end of " string.
+Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
+# Tail end of ''' string.
+Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
+# Tail end of """ string.
+Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
+Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
+# Single-line ' or " string.
+String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
+               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
+
+# Because of leftmost-then-longest match semantics, be sure to put the
+# longest operators first (e.g., if = came before ==, == would get
+# recognized as two instances of =).
+Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
+                 r"//=?", r"->",
+                 r"[+\-*/%&|^=<>]=?",
+                 r"~")
+
+Bracket = '[][(){}]'
+Special = group(r'\r?\n', r'[:;.,`@]')
+Funny = group(Operator, Bracket, Special)
+
+PlainToken = group(Number, Funny, String, Name)
+Token = Ignore + PlainToken
+
+# First (or only) line of ' or " string.
+ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
+                group("'", r'\\\r?\n'),
+                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
+                group('"', r'\\\r?\n'))
+PseudoExtras = group(r'\\\r?\n', Comment, Triple)
+PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
+
+tokenprog, pseudoprog, single3prog, double3prog = map(
+    re.compile, (Token, PseudoToken, Single3, Double3))
+endprogs = {"'": re.compile(Single), '"': re.compile(Double),
+            "'''": single3prog, '"""': double3prog,
+            "r'''": single3prog, 'r"""': double3prog,
+            "u'''": single3prog, 'u"""': double3prog,
+            "b'''": single3prog, 'b"""': double3prog,
+            "ur'''": single3prog, 'ur"""': double3prog,
+            "br'''": single3prog, 'br"""': double3prog,
+            "R'''": single3prog, 'R"""': double3prog,
+            "U'''": single3prog, 'U"""': double3prog,
+            "B'''": single3prog, 'B"""': double3prog,
+            "uR'''": single3prog, 'uR"""': double3prog,
+            "Ur'''": single3prog, 'Ur"""': double3prog,
+            "UR'''": single3prog, 'UR"""': double3prog,
+            "bR'''": single3prog, 'bR"""': double3prog,
+            "Br'''": single3prog, 'Br"""': double3prog,
+            "BR'''": single3prog, 'BR"""': double3prog,
+            'r': None, 'R': None,
+            'u': None, 'U': None,
+            'b': None, 'B': None}
+
+triple_quoted = {}
+for t in ("'''", '"""',
+          "r'''", 'r"""', "R'''", 'R"""',
+          "u'''", 'u"""', "U'''", 'U"""',
+          "b'''", 'b"""', "B'''", 'B"""',
+          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
+          "uR'''", 'uR"""', "UR'''", 'UR"""',
+          "br'''", 'br"""', "Br'''", 'Br"""',
+          "bR'''", 'bR"""', "BR'''", 'BR"""',):
+    triple_quoted[t] = t
+single_quoted = {}
+for t in ("'", '"',
+          "r'", 'r"', "R'", 'R"',
+          "u'", 'u"', "U'", 'U"',
+          "b'", 'b"', "B'", 'B"',
+          "ur'", 'ur"', "Ur'", 'Ur"',
+          "uR'", 'uR"', "UR'", 'UR"',
+          "br'", 'br"', "Br'", 'Br"',
+          "bR'", 'bR"', "BR'", 'BR"', ):
+    single_quoted[t] = t
+
+tabsize = 8
+
+class TokenError(Exception): pass
+
+class StopTokenizing(Exception): pass
+
+def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
+    print "%d,%d-%d,%d:\t%s\t%s" % \
+        (srow, scol, erow, ecol, tok_name[type], repr(token))
+
+def tokenize(readline, tokeneater=printtoken):
+    """
+    The tokenize() function accepts two parameters: one representing the
+    input stream, and one providing an output mechanism for tokenize().
+
+    The first parameter, readline, must be a callable object which provides
+    the same interface as the readline() method of built-in file objects.
+    Each call to the function should return one line of input as a string.
+
+    The second parameter, tokeneater, must also be a callable object. It is
+    called once for each token, with five arguments, corresponding to the
+    tuples generated by generate_tokens().
+    """
+    try:
+        tokenize_loop(readline, tokeneater)
+    except StopTokenizing:
+        pass
+
+# backwards compatible interface
+def tokenize_loop(readline, tokeneater):
+    for token_info in generate_tokens(readline):
+        tokeneater(*token_info)
+
+class Untokenizer:
+
+    def __init__(self):
+        self.tokens = []
+        self.prev_row = 1
+        self.prev_col = 0
+
+    def add_whitespace(self, start):
+        row, col = start
+        assert row <= self.prev_row
+        col_offset = col - self.prev_col
+        if col_offset:
+            self.tokens.append(" " * col_offset)
+
+    def untokenize(self, iterable):
+        for t in iterable:
+            if len(t) == 2:
+                self.compat(t, iterable)
+                break
+            tok_type, token, start, end, line = t
+            self.add_whitespace(start)
+            self.tokens.append(token)
+            self.prev_row, self.prev_col = end
+            if tok_type in (NEWLINE, NL):
+                self.prev_row += 1
+                self.prev_col = 0
+        return "".join(self.tokens)
+
+    def compat(self, token, iterable):
+        startline = False
+        indents = []
+        toks_append = self.tokens.append
+        toknum, tokval = token
+        if toknum in (NAME, NUMBER):
+            tokval += ' '
+        if toknum in (NEWLINE, NL):
+            startline = True
+        for tok in iterable:
+            toknum, tokval = tok[:2]
+
+            if toknum in (NAME, NUMBER):
+                tokval += ' '
+
+            if toknum == INDENT:
+                indents.append(tokval)
+                continue
+            elif toknum == DEDENT:
+                indents.pop()
+                continue
+            elif toknum in (NEWLINE, NL):
+                startline = True
+            elif startline and indents:
+                toks_append(indents[-1])
+                startline = False
+            toks_append(tokval)
+
+def untokenize(iterable):
+    """Transform tokens back into Python source code.
+
+    Each element returned by the iterable must be a token sequence
+    with at least two elements, a token number and token value.  If
+    only two tokens are passed, the resulting output is poor.
+
+    Round-trip invariant for full input:
+        Untokenized source will match input source exactly
+
+    Round-trip invariant for limited input:
+        # Output text will tokenize back to the input
+        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
+        newcode = untokenize(t1)
+        readline = iter(newcode.splitlines(1)).next
+        t2 = [tok[:2] for tok in generate_tokens(readline)]
+        assert t1 == t2
+    """
+    ut = Untokenizer()
+    return ut.untokenize(iterable)
+
+def generate_tokens(readline):
+    """
+    The generate_tokens() generator requires one argument, readline, which
+    must be a callable object which provides the same interface as the
+    readline() method of built-in file objects. Each call to the function
+    should return one line of input as a string.  Alternately, readline
+    can be a callable function terminating with StopIteration:
+        readline = open(myfile).next    # Example of alternate readline
+
+    The generator produces 5-tuples with these members: the token type; the
+    token string; a 2-tuple (srow, scol) of ints specifying the row and
+    column where the token begins in the source; a 2-tuple (erow, ecol) of
+    ints specifying the row and column where the token ends in the source;
+    and the line on which the token was found. The line passed is the
+    logical line; continuation lines are included.
+    """
+    lnum = parenlev = continued = 0
+    namechars, numchars = string.ascii_letters + '_', '0123456789'
+    contstr, needcont = '', 0
+    contline = None
+    indents = [0]
+
+    while 1:                                   # loop over lines in stream
+        try:
+            line = readline()
+        except StopIteration:
+            line = ''
+        lnum = lnum + 1
+        pos, max = 0, len(line)
+
+        if contstr:                            # continued string
+            if not line:
+                raise TokenError, ("EOF in multi-line string", strstart)
+            endmatch = endprog.match(line)
+            if endmatch:
+                pos = end = endmatch.end(0)
+                yield (STRING, contstr + line[:end],
+                       strstart, (lnum, end), contline + line)
+                contstr, needcont = '', 0
+                contline = None
+            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+                yield (ERRORTOKEN, contstr + line,
+                           strstart, (lnum, len(line)), contline)
+                contstr = ''
+                contline = None
+                continue
+            else:
+                contstr = contstr + line
+                contline = contline + line
+                continue
+
+        elif parenlev == 0 and not continued:  # new statement
+            if not line: break
+            column = 0
+            while pos < max:                   # measure leading whitespace
+                if line[pos] == ' ': column = column + 1
+                elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
+                elif line[pos] == '\f': column = 0
+                else: break
+                pos = pos + 1
+            if pos == max: break
+
+            if line[pos] in '#\r\n':           # skip comments or blank lines
+                if line[pos] == '#':
+                    comment_token = line[pos:].rstrip('\r\n')
+                    nl_pos = pos + len(comment_token)
+                    yield (COMMENT, comment_token,
+                           (lnum, pos), (lnum, pos + len(comment_token)), line)
+                    yield (NL, line[nl_pos:],
+                           (lnum, nl_pos), (lnum, len(line)), line)
+                else:
+                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+                           (lnum, pos), (lnum, len(line)), line)
+                continue
+
+            if column > indents[-1]:           # count indents or dedents
+                indents.append(column)
+                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
+            while column < indents[-1]:
+                if column not in indents:
+                    raise IndentationError(
+                        "unindent does not match any outer indentation level",
+                        ("<tokenize>", lnum, pos, line))
+                indents = indents[:-1]
+                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
+
+        else:                                  # continued statement
+            if not line:
+                raise TokenError, ("EOF in multi-line statement", (lnum, 0))
+            continued = 0
+
+        while pos < max:
+            pseudomatch = pseudoprog.match(line, pos)
+            if pseudomatch:                                # scan for tokens
+                start, end = pseudomatch.span(1)
+                spos, epos, pos = (lnum, start), (lnum, end), end
+                token, initial = line[start:end], line[start]
+
+                if initial in numchars or \
+                   (initial == '.' and token != '.'):      # ordinary number
+                    yield (NUMBER, token, spos, epos, line)
+                elif initial in '\r\n':
+                    newline = NEWLINE
+                    if parenlev > 0:
+                        newline = NL
+                    yield (newline, token, spos, epos, line)
+                elif initial == '#':
+                    assert not token.endswith("\n")
+                    yield (COMMENT, token, spos, epos, line)
+                elif token in triple_quoted:
+                    endprog = endprogs[token]
+                    endmatch = endprog.match(line, pos)
+                    if endmatch:                           # all on one line
+                        pos = endmatch.end(0)
+                        token = line[start:pos]
+                        yield (STRING, token, spos, (lnum, pos), line)
+                    else:
+                        strstart = (lnum, start)           # multiple lines
+                        contstr = line[start:]
+                        contline = line
+                        break
+                elif initial in single_quoted or \
+                    token[:2] in single_quoted or \
+                    token[:3] in single_quoted:
+                    if token[-1] == '\n':                  # continued string
+                        strstart = (lnum, start)
+                        endprog = (endprogs[initial] or endprogs[token[1]] or
+                                   endprogs[token[2]])
+                        contstr, needcont = line[start:], 1
+                        contline = line
+                        break
+                    else:                                  # ordinary string
+                        yield (STRING, token, spos, epos, line)
+                elif initial in namechars:                 # ordinary name
+                    yield (NAME, token, spos, epos, line)
+                elif initial == '\\':                      # continued stmt
+                    # This yield is new; needed for better idempotency:
+                    yield (NL, token, spos, (lnum, pos), line)
+                    continued = 1
+                else:
+                    if initial in '([{': parenlev = parenlev + 1
+                    elif initial in ')]}': parenlev = parenlev - 1
+                    yield (OP, token, spos, epos, line)
+            else:
+                yield (ERRORTOKEN, line[pos],
+                           (lnum, pos), (lnum, pos+1), line)
+                pos = pos + 1
+
+    for indent in indents[1:]:                 # pop remaining indent levels
+        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
+    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
+if __name__ == '__main__':                     # testing
+    import sys
+    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
+    else: tokenize(sys.stdin.readline)

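The exact round trip promised for full 5-tuples can be verified with an
in-memory stream (a minimal sketch):

    from StringIO import StringIO
    from refactor.pgen2 import tokenize

    src = "x = 1  # keep the comment\n"
    toks = list(tokenize.generate_tokens(StringIO(src).readline))
    assert tokenize.untokenize(toks) == src
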
Added: sandbox/trunk/refactor_pkg/refactor/pygram.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pygram.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,31 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Export the Python grammar and symbols."""
+
+# Python imports
+import os
+
+# Local imports
+from .pgen2 import token
+from .pgen2 import driver
+from . import pytree
+
+# The grammar file
+_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
+
+
+class Symbols(object):
+
+    def __init__(self, grammar):
+        """Initializer.
+
+        Creates an attribute for each grammar symbol (nonterminal),
+        whose value is the symbol's type (an int >= 256).
+        """
+        for name, symbol in grammar.symbol2number.iteritems():
+            setattr(self, name, symbol)
+
+
+python_grammar = driver.load_grammar(_GRAMMAR_FILE)
+python_symbols = Symbols(python_grammar)

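With the stock Grammar.txt the start symbol is file_input, and every
nonterminal becomes an attribute on python_symbols; the exact numbers depend
on the grammar file:

    from refactor import pygram

    start = pygram.python_grammar.start
    print pygram.python_grammar.number2symbol[start]  # typically 'file_input'
    print pygram.python_symbols.file_input            # same number, >= 256
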
Added: sandbox/trunk/refactor_pkg/refactor/pytree.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/pytree.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,846 @@
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""
+Python parse tree definitions.
+
+This is a very concrete parse tree; we need to keep every token and
+even the comments and whitespace between tokens.
+
+There's also a pattern matching implementation here.
+"""
+
+__author__ = "Guido van Rossum <guido at python.org>"
+
+import sys
+from StringIO import StringIO
+
+
+HUGE = 0x7FFFFFFF  # maximum repeat count, default max
+
+_type_reprs = {}
+def type_repr(type_num):
+    global _type_reprs
+    if not _type_reprs:
+        from .pygram import python_symbols
+        # printing tokens is possible but not as useful
+        # from .pgen2 import token // token.__dict__.items():
+        for name, val in python_symbols.__dict__.items():
+            if type(val) == int: _type_reprs[val] = name
+    return _type_reprs.setdefault(type_num, type_num)
+
+
+class Base(object):
+
+    """
+    Abstract base class for Node and Leaf.
+
+    This provides some default functionality and boilerplate using the
+    template pattern.
+
+    A node may be a subnode of at most one parent.
+    """
+
+    # Default values for instance variables
+    type = None    # int: token number (< 256) or symbol number (>= 256)
+    parent = None  # Parent node pointer, or None
+    children = ()  # Tuple of subnodes
+    was_changed = False
+
+    def __new__(cls, *args, **kwds):
+        """Constructor that prevents Base from being instantiated."""
+        assert cls is not Base, "Cannot instantiate Base"
+        return object.__new__(cls)
+
+    def __eq__(self, other):
+        """
+        Compare two nodes for equality.
+
+        This calls the method _eq().
+        """
+        if self.__class__ is not other.__class__:
+            return NotImplemented
+        return self._eq(other)
+
+    def __ne__(self, other):
+        """
+        Compare two nodes for inequality.
+
+        This calls the method _eq().
+        """
+        if self.__class__ is not other.__class__:
+            return NotImplemented
+        return not self._eq(other)
+
+    def _eq(self, other):
+        """
+        Compare two nodes for equality.
+
+        This is called by __eq__ and __ne__.  It is only called if the two nodes
+        have the same type.  This must be implemented by the concrete subclass.
+        Nodes should be considered equal if they have the same structure,
+        ignoring the prefix string and other context information.
+        """
+        raise NotImplementedError
+
+    def clone(self):
+        """
+        Return a cloned (deep) copy of self.
+
+        This must be implemented by the concrete subclass.
+        """
+        raise NotImplementedError
+
+    def post_order(self):
+        """
+        Return a post-order iterator for the tree.
+
+        This must be implemented by the concrete subclass.
+        """
+        raise NotImplementedError
+
+    def pre_order(self):
+        """
+        Return a pre-order iterator for the tree.
+
+        This must be implemented by the concrete subclass.
+        """
+        raise NotImplementedError
+
+    def set_prefix(self, prefix):
+        """
+        Set the prefix for the node (see Leaf class).
+
+        This must be implemented by the concrete subclass.
+        """
+        raise NotImplementedError
+
+    def get_prefix(self):
+        """
+        Return the prefix for the node (see Leaf class).
+
+        This must be implemented by the concrete subclass.
+        """
+        raise NotImplementedError
+
+    def replace(self, new):
+        """Replace this node with a new one in the parent."""
+        assert self.parent is not None, str(self)
+        assert new is not None
+        if not isinstance(new, list):
+            new = [new]
+        l_children = []
+        found = False
+        for ch in self.parent.children:
+            if ch is self:
+                assert not found, (self.parent.children, self, new)
+                if new is not None:
+                    l_children.extend(new)
+                found = True
+            else:
+                l_children.append(ch)
+        assert found, (self.children, self, new)
+        self.parent.changed()
+        self.parent.children = l_children
+        for x in new:
+            x.parent = self.parent
+        self.parent = None
+
+    def get_lineno(self):
+        """Return the line number which generated the invocant node."""
+        node = self
+        while not isinstance(node, Leaf):
+            if not node.children:
+                return
+            node = node.children[0]
+        return node.lineno
+
+    def changed(self):
+        if self.parent:
+            self.parent.changed()
+        self.was_changed = True
+
+    def remove(self):
+        """
+        Remove the node from the tree. Returns the position of the node in its
+        parent's children before it was removed.
+        """
+        if self.parent:
+            for i, node in enumerate(self.parent.children):
+                if node is self:
+                    self.parent.changed()
+                    del self.parent.children[i]
+                    self.parent = None
+                    return i
+
+    @property
+    def next_sibling(self):
+        """
+        The node immediately following the invocant in its parent's children
+        list. If the invocant does not have a next sibling, it is None.
+        """
+        if self.parent is None:
+            return None
+
+        # Can't use index(); we need to test by identity
+        for i, child in enumerate(self.parent.children):
+            if child is self:
+                try:
+                    return self.parent.children[i+1]
+                except IndexError:
+                    return None
+
+    @property
+    def prev_sibling(self):
+        """
+        The node immediately preceding the invocant in its parent's children
+        list. If the invocant does not have a previous sibling, it is None.
+        """
+        if self.parent is None:
+            return None
+
+        # Can't use index(); we need to test by identity
+        for i, child in enumerate(self.parent.children):
+            if child is self:
+                if i == 0:
+                    return None
+                return self.parent.children[i-1]
+
+    def get_suffix(self):
+        """
+        Return the string immediately following the invocant node. This is
+        effectively equivalent to node.next_sibling.get_prefix().
+        """
+        next_sib = self.next_sibling
+        if next_sib is None:
+            return ""
+        return next_sib.get_prefix()
+
+
+class Node(Base):
+
+    """Concrete implementation for interior nodes."""
+
+    def __init__(self, type, children, context=None, prefix=None):
+        """
+        Initializer.
+
+        Takes a type constant (a symbol number >= 256), a sequence of
+        child nodes, and an optional context keyword argument.
+
+        As a side effect, the parent pointers of the children are updated.
+        """
+        assert type >= 256, type
+        self.type = type
+        self.children = list(children)
+        for ch in self.children:
+            assert ch.parent is None, repr(ch)
+            ch.parent = self
+        if prefix is not None:
+            self.set_prefix(prefix)
+
+    def __repr__(self):
+        """Return a canonical string representation."""
+        return "%s(%s, %r)" % (self.__class__.__name__,
+                               type_repr(self.type),
+                               self.children)
+
+    def __str__(self):
+        """
+        Return a pretty string representation.
+
+        This reproduces the input source exactly.
+        """
+        return "".join(map(str, self.children))
+
+    def _eq(self, other):
+        """Compare two nodes for equality."""
+        return (self.type, self.children) == (other.type, other.children)
+
+    def clone(self):
+        """Return a cloned (deep) copy of self."""
+        return Node(self.type, [ch.clone() for ch in self.children])
+
+    def post_order(self):
+        """Return a post-order iterator for the tree."""
+        for child in self.children:
+            for node in child.post_order():
+                yield node
+        yield self
+
+    def pre_order(self):
+        """Return a pre-order iterator for the tree."""
+        yield self
+        for child in self.children:
+            for node in child.pre_order():
+                yield node
+
+    def set_prefix(self, prefix):
+        """
+        Set the prefix for the node.
+
+        This passes the responsibility on to the first child.
+        """
+        if self.children:
+            self.children[0].set_prefix(prefix)
+
+    def get_prefix(self):
+        """
+        Return the prefix for the node.
+
+        This passes the call on to the first child.
+        """
+        if not self.children:
+            return ""
+        return self.children[0].get_prefix()
+
+    def set_child(self, i, child):
+        """
+        Equivalent to 'node.children[i] = child'. This method also sets the
+        child's parent attribute appropriately.
+        """
+        child.parent = self
+        self.children[i].parent = None
+        self.children[i] = child
+        self.changed()
+
+    def insert_child(self, i, child):
+        """
+        Equivalent to 'node.children.insert(i, child)'. This method also sets
+        the child's parent attribute appropriately.
+        """
+        child.parent = self
+        self.children.insert(i, child)
+        self.changed()
+
+    def append_child(self, child):
+        """
+        Equivalent to 'node.children.append(child)'. This method also sets the
+        child's parent attribute appropriately.
+        """
+        child.parent = self
+        self.children.append(child)
+        self.changed()
+
+
+class Leaf(Base):
+
+    """Concrete implementation for leaf nodes."""
+
+    # Default values for instance variables
+    prefix = ""  # Whitespace and comments preceding this token in the input
+    lineno = 0   # Line where this token starts in the input
+    column = 0   # Column where this token starts in the input
+
+    def __init__(self, type, value, context=None, prefix=None):
+        """
+        Initializer.
+
+        Takes a type constant (a token number < 256), a string value, and an
+        optional context keyword argument.
+        """
+        assert 0 <= type < 256, type
+        if context is not None:
+            self.prefix, (self.lineno, self.column) = context
+        self.type = type
+        self.value = value
+        if prefix is not None:
+            self.prefix = prefix
+
+    def __repr__(self):
+        """Return a canonical string representation."""
+        return "%s(%r, %r)" % (self.__class__.__name__,
+                               self.type,
+                               self.value)
+
+    def __str__(self):
+        """
+        Return a pretty string representation.
+
+        This reproduces the input source exactly.
+        """
+        return self.prefix + str(self.value)
+
+    def _eq(self, other):
+        """Compare two nodes for equality."""
+        return (self.type, self.value) == (other.type, other.value)
+
+    def clone(self):
+        """Return a cloned (deep) copy of self."""
+        return Leaf(self.type, self.value,
+                    (self.prefix, (self.lineno, self.column)))
+
+    def post_order(self):
+        """Return a post-order iterator for the tree."""
+        yield self
+
+    def pre_order(self):
+        """Return a pre-order iterator for the tree."""
+        yield self
+
+    def set_prefix(self, prefix):
+        """Set the prefix for the node."""
+        self.changed()
+        self.prefix = prefix
+
+    def get_prefix(self):
+        """Return the prefix for the node."""
+        return self.prefix
+
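A minimal usage sketch (commentary, not part of the patch), assuming this
module is importable as refactor.pytree; the type numbers are illustrative
placeholders (tokens < 256, symbols >= 256):

    from refactor import pytree

    NAME = 1    # hypothetical token number (< 256)
    EXPR = 300  # hypothetical symbol number (>= 256)

    tree = pytree.Node(EXPR, [pytree.Leaf(NAME, "a"),
                              pytree.Leaf(NAME, "b", prefix=" ")])
    print str(tree)                           # "a b" -- exact source round-trip
    print [str(n) for n in tree.pre_order()]  # the node, then each leaf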
+
+def convert(gr, raw_node):
+    """
+    Convert raw node information to a Node or Leaf instance.
+
+    This is passed to the parser driver which calls it whenever a reduction of a
+    grammar rule produces a new complete node, so that the tree is built
+    strictly bottom-up.
+    """
+    type, value, context, children = raw_node
+    if children or type in gr.number2symbol:
+        # If there's exactly one child, return that child instead of
+        # creating a new node.
+        if len(children) == 1:
+            return children[0]
+        return Node(type, children, context=context)
+    else:
+        return Leaf(type, value, context=context)
+
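A sketch of convert()'s single-child collapsing (commentary, not part of the
patch); the grammar is faked with the one attribute convert() reads, and the
numbers are placeholders:

    class FakeGrammar(object):
        number2symbol = {300: "expr"}    # hypothetical symbol table

    leaf = Leaf(1, "x")
    raw = (300, None, None, [leaf])      # a completed reduction with one child
    assert convert(FakeGrammar(), raw) is leaf   # child returned, no new Node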
+
+class BasePattern(object):
+
+    """
+    A pattern is a tree matching pattern.
+
+    It looks for a specific node type (token or symbol), and
+    optionally for a specific content.
+
+    This is an abstract base class.  There are three concrete
+    subclasses:
+
+    - LeafPattern matches a single leaf node;
+    - NodePattern matches a single node (usually non-leaf);
+    - WildcardPattern matches a sequence of nodes of variable length.
+    """
+
+    # Defaults for instance variables
+    type = None     # Node type (token if < 256, symbol if >= 256)
+    content = None  # Optional content matching pattern
+    name = None     # Optional name used to store match in results dict
+
+    def __new__(cls, *args, **kwds):
+        """Constructor that prevents BasePattern from being instantiated."""
+        assert cls is not BasePattern, "Cannot instantiate BasePattern"
+        return object.__new__(cls)
+
+    def __repr__(self):
+        args = [type_repr(self.type), self.content, self.name]
+        while args and args[-1] is None:
+            del args[-1]
+        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
+
+    def optimize(self):
+        """
+        A subclass can define this as a hook for optimizations.
+
+        Returns either self or another node with the same effect.
+        """
+        return self
+
+    def match(self, node, results=None):
+        """
+        Does this pattern exactly match a node?
+
+        Returns True if it matches, False if not.
+
+        If results is not None, it must be a dict which will be
+        updated with the nodes matching named subpatterns.
+
+        Default implementation for non-wildcard patterns.
+        """
+        if self.type is not None and node.type != self.type:
+            return False
+        if self.content is not None:
+            r = None
+            if results is not None:
+                r = {}
+            if not self._submatch(node, r):
+                return False
+            if r:
+                results.update(r)
+        if results is not None and self.name:
+            results[self.name] = node
+        return True
+
+    def match_seq(self, nodes, results=None):
+        """
+        Does this pattern exactly match a sequence of nodes?
+
+        Default implementation for non-wildcard patterns.
+        """
+        if len(nodes) != 1:
+            return False
+        return self.match(nodes[0], results)
+
+    def generate_matches(self, nodes):
+        """
+        Generator yielding all matches for this pattern.
+
+        Default implementation for non-wildcard patterns.
+        """
+        r = {}
+        if nodes and self.match(nodes[0], r):
+            yield 1, r
+
+
+class LeafPattern(BasePattern):
+
+    def __init__(self, type=None, content=None, name=None):
+        """
+        Initializer.  Takes optional type, content, and name.
+
+        The type, if given, must be a token type (< 256).  If not given,
+        this matches any *leaf* node; the content may still be required.
+
+        The content, if given, must be a string.
+
+        If a name is given, the matching node is stored in the results
+        dict under that key.
+        """
+        if type is not None:
+            assert 0 <= type < 256, type
+        if content is not None:
+            assert isinstance(content, basestring), repr(content)
+        self.type = type
+        self.content = content
+        self.name = name
+
+    def match(self, node, results=None):
+        """Override match() to insist on a leaf node."""
+        if not isinstance(node, Leaf):
+            return False
+        return BasePattern.match(self, node, results)
+
+    def _submatch(self, node, results=None):
+        """
+        Match the pattern's content to the node's children.
+
+        This assumes the node type matches and self.content is not None.
+
+        Returns True if it matches, False if not.
+
+        If results is not None, it must be a dict which will be
+        updated with the nodes matching named subpatterns.
+
+        When returning False, the results dict may still be updated.
+        """
+        return self.content == node.value
+
+
+class NodePattern(BasePattern):
+
+    wildcards = False
+
+    def __init__(self, type=None, content=None, name=None):
+        """
+        Initializer.  Takes optional type, content, and name.
+
+        The type, if given, must be a symbol type (>= 256).  If the
+        type is None this matches *any* single node (leaf or not),
+        except if content is not None, in which case it only matches
+        non-leaf nodes that also match the content pattern.
+
+        The content, if not None, must be a sequence of Patterns that
+        must match the node's children exactly.  If the content is
+        given, the type must not be None.
+
+        If a name is given, the matching node is stored in the results
+        dict under that key.
+        """
+        if type is not None:
+            assert type >= 256, type
+        if content is not None:
+            assert not isinstance(content, basestring), repr(content)
+            content = list(content)
+            for i, item in enumerate(content):
+                assert isinstance(item, BasePattern), (i, item)
+                if isinstance(item, WildcardPattern):
+                    self.wildcards = True
+        self.type = type
+        self.content = content
+        self.name = name
+
+    def _submatch(self, node, results=None):
+        """
+        Match the pattern's content to the node's children.
+
+        This assumes the node type matches and self.content is not None.
+
+        Returns True if it matches, False if not.
+
+        If results is not None, it must be a dict which will be
+        updated with the nodes matching named subpatterns.
+
+        When returning False, the results dict may still be updated.
+        """
+        if self.wildcards:
+            for c, r in generate_matches(self.content, node.children):
+                if c == len(node.children):
+                    if results is not None:
+                        results.update(r)
+                    return True
+            return False
+        if len(self.content) != len(node.children):
+            return False
+        for subpattern, child in zip(self.content, node.children):
+            if not subpattern.match(child, results):
+                return False
+        return True
+
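A hedged matching example using the pattern classes above together with Node
and Leaf (commentary, not part of the patch; placeholder type numbers):

    pattern = NodePattern(300, [LeafPattern(1, "a"), LeafPattern(1, "b")],
                          name="pair")
    node = Node(300, [Leaf(1, "a"), Leaf(1, "b", prefix=" ")])
    results = {}
    assert pattern.match(node, results)
    assert results["pair"] is node       # the named match was captured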
+
+class WildcardPattern(BasePattern):
+
+    """
+    A wildcard pattern can match zero or more nodes.
+
+    This has all the flexibility needed to implement patterns like:
+
+    .*      .+      .?      .{m,n}
+    (a b c | d e | f)
+    (...)*  (...)+  (...)?  (...){m,n}
+
+    except it always uses non-greedy matching.
+    """
+
+    def __init__(self, content=None, min=0, max=HUGE, name=None):
+        """
+        Initializer.
+
+        Args:
+            content: optional sequence of subsequences of patterns;
+                     if absent, matches one node;
+                     if present, each subsequence is an alternative [*]
+            min: optional minimum number of times to match, default 0
+            max: optional maximum number of times to match, default HUGE
+            name: optional name assigned to this match
+
+        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
+            equivalent to (a b c | d e | f g h); if content is None,
+            this is equivalent to '.' in regular expression terms.
+            The min and max parameters work as follows:
+                min=0, max=maxint: .*
+                min=1, max=maxint: .+
+                min=0, max=1: .?
+                min=1, max=1: .
+            If content is not None, replace the dot with the parenthesized
+            list of alternatives, e.g. (a b c | d e | f g h)*
+        """
+        assert 0 <= min <= max <= HUGE, (min, max)
+        if content is not None:
+            content = tuple(map(tuple, content))  # Protect against alterations
+            # Check sanity of alternatives
+            assert len(content), repr(content)  # Can't have zero alternatives
+            for alt in content:
+                assert len(alt), repr(alt) # Can't have empty alternatives
+        self.content = content
+        self.min = min
+        self.max = max
+        self.name = name
+
+    def optimize(self):
+        """Optimize certain stacked wildcard patterns."""
+        subpattern = None
+        if (self.content is not None and
+            len(self.content) == 1 and len(self.content[0]) == 1):
+            subpattern = self.content[0][0]
+        if self.min == 1 and self.max == 1:
+            if self.content is None:
+                return NodePattern(name=self.name)
+            if subpattern is not None and self.name == subpattern.name:
+                return subpattern.optimize()
+        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
+            subpattern.min <= 1 and self.name == subpattern.name):
+            return WildcardPattern(subpattern.content,
+                                   self.min*subpattern.min,
+                                   self.max*subpattern.max,
+                                   subpattern.name)
+        return self
+
+    def match(self, node, results=None):
+        """Does this pattern exactly match a node?"""
+        return self.match_seq([node], results)
+
+    def match_seq(self, nodes, results=None):
+        """Does this pattern exactly match a sequence of nodes?"""
+        for c, r in self.generate_matches(nodes):
+            if c == len(nodes):
+                if results is not None:
+                    results.update(r)
+                    if self.name:
+                        results[self.name] = list(nodes)
+                return True
+        return False
+
+    def generate_matches(self, nodes):
+        """
+        Generator yielding matches for a sequence of nodes.
+
+        Args:
+            nodes: sequence of nodes
+
+        Yields:
+            (count, results) tuples where:
+            count: the match comprises nodes[:count];
+            results: dict containing named submatches.
+        """
+        if self.content is None:
+            # Shortcut for special case (see __init__.__doc__)
+            for count in xrange(self.min, 1 + min(len(nodes), self.max)):
+                r = {}
+                if self.name:
+                    r[self.name] = nodes[:count]
+                yield count, r
+        elif self.name == "bare_name":
+            yield self._bare_name_matches(nodes)
+        else:
+            # The reason for this is that hitting the recursion limit usually
+            # results in some ugly messages about how RuntimeErrors are being
+            # ignored.
+            save_stderr = sys.stderr
+            sys.stderr = StringIO()
+            try:
+                for count, r in self._recursive_matches(nodes, 0):
+                    if self.name:
+                        r[self.name] = nodes[:count]
+                    yield count, r
+            except RuntimeError:
+                # We fall back to the iterative pattern matching scheme if the recursive
+                # scheme hits the recursion limit.
+                for count, r in self._iterative_matches(nodes):
+                    if self.name:
+                        r[self.name] = nodes[:count]
+                    yield count, r
+            finally:
+                sys.stderr = save_stderr
+
+    def _iterative_matches(self, nodes):
+        """Helper to iteratively yield the matches."""
+        nodelen = len(nodes)
+        if 0 >= self.min:
+            yield 0, {}
+
+        results = []
+        # generate matches that use just one alt from self.content
+        for alt in self.content:
+            for c, r in generate_matches(alt, nodes):
+                yield c, r
+                results.append((c, r))
+
+        # for each match, iterate down the nodes
+        while results:
+            new_results = []
+            for c0, r0 in results:
+                # stop if the entire set of nodes has been matched
+                if c0 < nodelen and c0 <= self.max:
+                    for alt in self.content:
+                        for c1, r1 in generate_matches(alt, nodes[c0:]):
+                            if c1 > 0:
+                                r = {}
+                                r.update(r0)
+                                r.update(r1)
+                                yield c0 + c1, r
+                                new_results.append((c0 + c1, r))
+            results = new_results
+
+    def _bare_name_matches(self, nodes):
+        """Special optimized matcher for bare_name."""
+        count = 0
+        r = {}
+        done = False
+        max = len(nodes)
+        while not done and count < max:
+            done = True
+            for leaf in self.content:
+                if leaf[0].match(nodes[count], r):
+                    count += 1
+                    done = False
+                    break
+        r[self.name] = nodes[:count]
+        return count, r
+
+    def _recursive_matches(self, nodes, count):
+        """Helper to recursively yield the matches."""
+        assert self.content is not None
+        if count >= self.min:
+            yield 0, {}
+        if count < self.max:
+            for alt in self.content:
+                for c0, r0 in generate_matches(alt, nodes):
+                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
+                        r = {}
+                        r.update(r0)
+                        r.update(r1)
+                        yield c0 + c1, r
+
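Illustrative equivalents of the regex forms listed in the class docstring
(commentary, not part of the patch; p is a hypothetical leaf pattern):

    p = LeafPattern(1, "x")
    dot_star = WildcardPattern()                     # . repeated 0..HUGE, i.e. .*
    plus     = WildcardPattern([[p]], min=1)         # (x)+
    opt      = WildcardPattern([[p]], min=0, max=1)  # (x)?

    assert plus.match_seq([Leaf(1, "x"), Leaf(1, "x")])   # consumes both leaves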
+
+class NegatedPattern(BasePattern):
+
+    def __init__(self, content=None):
+        """
+        Initializer.
+
+        The argument is either a pattern or None.  If it is None, this
+        only matches an empty sequence (effectively '$' in regex
+        lingo).  If it is not None, this matches whenever the argument
+        pattern doesn't have any matches.
+        """
+        if content is not None:
+            assert isinstance(content, BasePattern), repr(content)
+        self.content = content
+
+    def match(self, node):
+        # We never match a node in its entirety
+        return False
+
+    def match_seq(self, nodes):
+        # We only match an empty sequence of nodes in its entirety
+        return len(nodes) == 0
+
+    def generate_matches(self, nodes):
+        if self.content is None:
+            # Return a match if there is an empty sequence
+            if len(nodes) == 0:
+                yield 0, {}
+        else:
+            # Return a match if the argument pattern has no matches
+            for c, r in self.content.generate_matches(nodes):
+                return
+            yield 0, {}
+
+
+def generate_matches(patterns, nodes):
+    """
+    Generator yielding matches for a sequence of patterns and nodes.
+
+    Args:
+        patterns: a sequence of patterns
+        nodes: a sequence of nodes
+
+    Yields:
+        (count, results) tuples where:
+        count: the entire sequence of patterns matches nodes[:count];
+        results: dict containing named submatches.
+        """
+    if not patterns:
+        yield 0, {}
+    else:
+        p, rest = patterns[0], patterns[1:]
+        for c0, r0 in p.generate_matches(nodes):
+            if not rest:
+                yield c0, r0
+            else:
+                for c1, r1 in generate_matches(rest, nodes[c0:]):
+                    r = {}
+                    r.update(r0)
+                    r.update(r1)
+                    yield c0 + c1, r
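A short sketch of the module-level generate_matches() (commentary, not part
of the patch; placeholder type numbers):

    pats = [LeafPattern(1, "a"), LeafPattern(1, "b")]
    leaves = [Leaf(1, "a"), Leaf(1, "b"), Leaf(1, "c")]
    for count, r in generate_matches(pats, leaves):
        print count    # 2 -- the pattern sequence consumes leaves[:2]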

Added: sandbox/trunk/refactor_pkg/refactor/refactor.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/refactor.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,515 @@
+#!/usr/bin/env python2.5
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Refactoring framework.
+
+Used as a main program, this can refactor any number of files and/or
+recursively descend down directories.  Imported as a module, this
+provides infrastructure to write your own refactoring tool.
+"""
+
+__author__ = "Guido van Rossum <guido at python.org>"
+
+
+# Python imports
+import os
+import sys
+import difflib
+import logging
+import operator
+from collections import defaultdict
+from itertools import chain
+
+# Local imports
+from .pgen2 import driver
+from .pgen2 import tokenize
+
+from . import pytree
+from . import patcomp
+from . import fixes
+from . import pygram
+
+
+def get_all_fix_names(fixer_pkg, remove_prefix=True):
+    """Return a sorted list of all available fix names in the given package."""
+    pkg = __import__(fixer_pkg, [], [], ["*"])
+    fixer_dir = os.path.dirname(pkg.__file__)
+    fix_names = []
+    for name in sorted(os.listdir(fixer_dir)):
+        if name.startswith("fix_") and name.endswith(".py"):
+            if remove_prefix:
+                name = name[4:]
+            fix_names.append(name[:-3])
+    return fix_names
+
+def get_head_types(pat):
+    """ Accepts a pytree Pattern Node and returns a set
+        of the pattern types which will match first. """
+
+    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
+        # NodePatterns must either have no type and no content
+        #   or a type and content -- so the recursion stops here.
+        # Leaf types are always returned directly.
+        return set([pat.type])
+
+    if isinstance(pat, pytree.NegatedPattern):
+        if pat.content:
+            return get_head_types(pat.content)
+        return set([None]) # Negated Patterns don't have a type
+
+    if isinstance(pat, pytree.WildcardPattern):
+        # Recurse on each node in content
+        r = set()
+        for p in pat.content:
+            for x in p:
+                r.update(get_head_types(x))
+        return r
+
+    raise Exception("Oh no! I don't understand pattern %s" % (pat,))
+
+def get_headnode_dict(fixer_list):
+    """ Accepts a list of fixers and returns a dictionary
+        of head node type --> fixer list.  """
+    head_nodes = defaultdict(list)
+    for fixer in fixer_list:
+        if not fixer.pattern:
+            head_nodes[None].append(fixer)
+            continue
+        for t in get_head_types(fixer.pattern):
+            head_nodes[t].append(fixer)
+    return head_nodes
+
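A sketch of the dispatch table this builds (commentary, not part of the
patch; the fixer and its pattern are hypothetical stand-ins for what
fixer_base provides):

    from refactor import pytree

    class FakeFixer(object):
        pattern = pytree.LeafPattern(1, "x")   # real fixers compile PATTERN

    fixer = FakeFixer()
    heads = get_headnode_dict([fixer])
    assert heads[1] == [fixer]    # only type-1 nodes are tried against it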
+def get_fixers_from_package(pkg_name):
+    """
+    Return the fully qualified names for fixers in the package pkg_name.
+    """
+    return [pkg_name + "." + fix_name
+            for fix_name in get_all_fix_names(pkg_name, False)]
+
+
+class FixerError(Exception):
+    """A fixer could not be loaded."""
+
+
+class RefactoringTool(object):
+
+    _default_options = {"print_function": False}
+
+    CLASS_PREFIX = "Fix" # The prefix for fixer classes
+    FILE_PREFIX = "fix_" # The prefix for modules with a fixer within
+
+    def __init__(self, fixer_names, options=None, explicit=None):
+        """Initializer.
+
+        Args:
+            fixer_names: a list of fixers to import
+            options: a dict of configuration options.
+            explicit: a list of fixers to run even if they are marked explicit.
+        """
+        self.fixers = fixer_names
+        self.explicit = explicit or []
+        self.options = self._default_options.copy()
+        if options is not None:
+            self.options.update(options)
+        self.errors = []
+        self.logger = logging.getLogger("RefactoringTool")
+        self.fixer_log = []
+        self.wrote = False
+        if self.options["print_function"]:
+            del pygram.python_grammar.keywords["print"]
+        self.driver = driver.Driver(pygram.python_grammar,
+                                    convert=pytree.convert,
+                                    logger=self.logger)
+        self.pre_order, self.post_order = self.get_fixers()
+
+        self.pre_order_heads = get_headnode_dict(self.pre_order)
+        self.post_order_heads = get_headnode_dict(self.post_order)
+
+        self.files = []  # List of files that were or should be modified
+
+    def get_fixers(self):
+        """Inspects the options to load the requested patterns and handlers.
+
+        Returns:
+          (pre_order, post_order), where pre_order is the list of fixers that
+          want a pre-order AST traversal, and post_order is the list that want
+          post-order traversal.
+        """
+        pre_order_fixers = []
+        post_order_fixers = []
+        for fix_mod_path in self.fixers:
+            mod = __import__(fix_mod_path, {}, {}, ["*"])
+            fix_name = fix_mod_path.rsplit(".", 1)[-1]
+            if fix_name.startswith(self.FILE_PREFIX):
+                fix_name = fix_name[len(self.FILE_PREFIX):]
+            parts = fix_name.split("_")
+            class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts])
+            try:
+                fix_class = getattr(mod, class_name)
+            except AttributeError:
+                raise FixerError("Can't find %s.%s" % (fix_name, class_name))
+            fixer = fix_class(self.options, self.fixer_log)
+            if fixer.explicit and self.explicit is not True and \
+                    fix_mod_path not in self.explicit:
+                self.log_message("Skipping implicit fixer: %s", fix_name)
+                continue
+
+            self.log_debug("Adding transformation: %s", fix_name)
+            if fixer.order == "pre":
+                pre_order_fixers.append(fixer)
+            elif fixer.order == "post":
+                post_order_fixers.append(fixer)
+            else:
+                raise FixerError("Illegal fixer order: %r" % fixer.order)
+
+        key_func = operator.attrgetter("run_order")
+        pre_order_fixers.sort(key=key_func)
+        post_order_fixers.sort(key=key_func)
+        return (pre_order_fixers, post_order_fixers)
+
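The module-to-class naming convention used above, shown standalone
(commentary, not part of the patch):

    fix_name = "fix_has_key"[len("fix_"):]        # "has_key"
    class_name = "Fix" + "".join(p.title() for p in fix_name.split("_"))
    assert class_name == "FixHasKey"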
+    def log_error(self, msg, *args, **kwds):
+        """Called when an error occurs."""
+        raise
+
+    def log_message(self, msg, *args):
+        """Hook to log a message."""
+        if args:
+            msg = msg % args
+        self.logger.info(msg)
+
+    def log_debug(self, msg, *args):
+        if args:
+            msg = msg % args
+        self.logger.debug(msg)
+
+    def print_output(self, lines):
+        """Called with lines of output to give to the user."""
+        pass
+
+    def refactor(self, items, write=False, doctests_only=False):
+        """Refactor a list of files and directories."""
+        for dir_or_file in items:
+            if os.path.isdir(dir_or_file):
+                self.refactor_dir(dir_or_file, write, doctests_only)
+            else:
+                self.refactor_file(dir_or_file, write, doctests_only)
+
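A minimal driver sketch (commentary, not part of the patch; assumes the
package and its grammar files are importable from the current directory):

    from refactor.refactor import RefactoringTool, get_fixers_from_package

    rt = RefactoringTool(get_fixers_from_package("refactor.fixes.from2"))
    rt.refactor(["example.py"], write=False)   # show diffs, don't rewrite
    rt.summarize()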
+    def refactor_dir(self, dir_name, write=False, doctests_only=False):
+        """Descends down a directory and refactor every Python file found.
+
+        Python files are assumed to have a .py extension.
+
+        Files and subdirectories starting with '.' are skipped.
+        """
+        for dirpath, dirnames, filenames in os.walk(dir_name):
+            self.log_debug("Descending into %s", dirpath)
+            dirnames.sort()
+            filenames.sort()
+            for name in filenames:
+                if not name.startswith(".") and name.endswith(".py"):
+                    fullname = os.path.join(dirpath, name)
+                    self.refactor_file(fullname, write, doctests_only)
+            # Modify dirnames in-place to remove subdirs with leading dots
+            dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")]
+
+    def refactor_file(self, filename, write=False, doctests_only=False):
+        """Refactors a file."""
+        try:
+            f = open(filename)
+        except IOError, err:
+            self.log_error("Can't open %s: %s", filename, err)
+            return
+        try:
+            input = f.read() + "\n" # Silence certain parse errors
+        finally:
+            f.close()
+        if doctests_only:
+            self.log_debug("Refactoring doctests in %s", filename)
+            output = self.refactor_docstring(input, filename)
+            if output != input:
+                self.processed_file(output, filename, input, write=write)
+            else:
+                self.log_debug("No doctest changes in %s", filename)
+        else:
+            tree = self.refactor_string(input, filename)
+            if tree and tree.was_changed:
+                # The [:-1] is to take off the \n we added earlier
+                self.processed_file(str(tree)[:-1], filename, write=write)
+            else:
+                self.log_debug("No changes in %s", filename)
+
+    def refactor_string(self, data, name):
+        """Refactor a given input string.
+
+        Args:
+            data: a string holding the code to be refactored.
+            name: a human-readable name for use in error/log messages.
+
+        Returns:
+            An AST corresponding to the refactored input stream; None if
+            there were errors during the parse.
+        """
+        try:
+            tree = self.driver.parse_string(data)
+        except Exception, err:
+            self.log_error("Can't parse %s: %s: %s",
+                           name, err.__class__.__name__, err)
+            return
+        self.log_debug("Refactoring %s", name)
+        self.refactor_tree(tree, name)
+        return tree
+
+    def refactor_stdin(self, doctests_only=False):
+        input = sys.stdin.read()
+        if doctests_only:
+            self.log_debug("Refactoring doctests in stdin")
+            output = self.refactor_docstring(input, "<stdin>")
+            if output != input:
+                self.processed_file(output, "<stdin>", input)
+            else:
+                self.log_debug("No doctest changes in stdin")
+        else:
+            tree = self.refactor_string(input, "<stdin>")
+            if tree and tree.was_changed:
+                self.processed_file(str(tree), "<stdin>", input)
+            else:
+                self.log_debug("No changes in stdin")
+
+    def refactor_tree(self, tree, name):
+        """Refactors a parse tree (modifying the tree in place).
+
+        Args:
+            tree: a pytree.Node instance representing the root of the tree
+                  to be refactored.
+            name: a human-readable name for this tree.
+
+        Returns:
+            True if the tree was modified, False otherwise.
+        """
+        for fixer in chain(self.pre_order, self.post_order):
+            fixer.start_tree(tree, name)
+
+        self.traverse_by(self.pre_order_heads, tree.pre_order())
+        self.traverse_by(self.post_order_heads, tree.post_order())
+
+        for fixer in chain(self.pre_order, self.post_order):
+            fixer.finish_tree(tree, name)
+        return tree.was_changed
+
+    def traverse_by(self, fixers, traversal):
+        """Traverse an AST, applying a set of fixers to each node.
+
+        This is a helper method for refactor_tree().
+
+        Args:
+            fixers: a list of fixer instances.
+            traversal: a generator that yields AST nodes.
+
+        Returns:
+            None
+        """
+        if not fixers:
+            return
+        for node in traversal:
+            for fixer in fixers[node.type] + fixers[None]:
+                results = fixer.match(node)
+                if results:
+                    new = fixer.transform(node, results)
+                    if new is not None and (new != node or
+                                            str(new) != str(node)):
+                        node.replace(new)
+                        node = new
+
+    def processed_file(self, new_text, filename, old_text=None, write=False):
+        """
+        Called when a file has been refactored, and there are changes.
+        """
+        self.files.append(filename)
+        if old_text is None:
+            try:
+                f = open(filename, "r")
+            except IOError, err:
+                self.log_error("Can't read %s: %s", filename, err)
+                return
+            try:
+                old_text = f.read()
+            finally:
+                f.close()
+        if old_text == new_text:
+            self.log_debug("No changes to %s", filename)
+            return
+        self.print_output(diff_texts(old_text, new_text, filename))
+        if write:
+            self.write_file(new_text, filename, old_text)
+        else:
+            self.log_debug("Not writing changes to %s", filename)
+
+    def write_file(self, new_text, filename, old_text):
+        """Writes a string to a file.
+
+        The caller (processed_file) has already shown a unified diff between
+        the old text and the new text; this method only rewrites the file,
+        and is only invoked when the write option is set.
+        """
+        try:
+            f = open(filename, "w")
+        except os.error, err:
+            self.log_error("Can't create %s: %s", filename, err)
+            return
+        try:
+            f.write(new_text)
+        except os.error, err:
+            self.log_error("Can't write %s: %s", filename, err)
+        finally:
+            f.close()
+        self.log_debug("Wrote changes to %s", filename)
+        self.wrote = True
+
+    PS1 = ">>> "
+    PS2 = "... "
+
+    def refactor_docstring(self, input, filename):
+        """Refactors a docstring, looking for doctests.
+
+        This returns a modified version of the input string.  It looks
+        for doctests, which start with a ">>>" prompt, and may be
+        continued with "..." prompts, as long as the "..." is indented
+        the same as the ">>>".
+
+        (Unfortunately we can't use the doctest module's parser,
+        since, like most parsers, it is not geared towards preserving
+        the original source.)
+        """
+        result = []
+        block = None
+        block_lineno = None
+        indent = None
+        lineno = 0
+        for line in input.splitlines(True):
+            lineno += 1
+            if line.lstrip().startswith(self.PS1):
+                if block is not None:
+                    result.extend(self.refactor_doctest(block, block_lineno,
+                                                        indent, filename))
+                block_lineno = lineno
+                block = [line]
+                i = line.find(self.PS1)
+                indent = line[:i]
+            elif (indent is not None and
+                  (line.startswith(indent + self.PS2) or
+                   line == indent + self.PS2.rstrip() + "\n")):
+                block.append(line)
+            else:
+                if block is not None:
+                    result.extend(self.refactor_doctest(block, block_lineno,
+                                                        indent, filename))
+                block = None
+                indent = None
+                result.append(line)
+        if block is not None:
+            result.extend(self.refactor_doctest(block, block_lineno,
+                                                indent, filename))
+        return "".join(result)
+
+    def refactor_doctest(self, block, lineno, indent, filename):
+        """Refactors one doctest.
+
+        A doctest is given as a block of lines, the first of which starts
+        with ">>>" (possibly indented), while the remaining lines start
+        with "..." (identically indented).
+
+        """
+        try:
+            tree = self.parse_block(block, lineno, indent)
+        except Exception, err:
+            if self.logger.isEnabledFor(logging.DEBUG):
+                for line in block:
+                    self.log_debug("Source: %s", line.rstrip("\n"))
+            self.log_error("Can't parse docstring in %s line %s: %s: %s",
+                           filename, lineno, err.__class__.__name__, err)
+            return block
+        if self.refactor_tree(tree, filename):
+            new = str(tree).splitlines(True)
+            # Undo the adjustment of the line numbers in wrap_toks() below.
+            clipped, new = new[:lineno-1], new[lineno-1:]
+            assert clipped == ["\n"] * (lineno-1), clipped
+            if not new[-1].endswith("\n"):
+                new[-1] += "\n"
+            block = [indent + self.PS1 + new.pop(0)]
+            if new:
+                block += [indent + self.PS2 + line for line in new]
+        return block
+
+    def summarize(self):
+        if self.wrote:
+            were = "were"
+        else:
+            were = "need to be"
+        if not self.files:
+            self.log_message("No files %s modified.", were)
+        else:
+            self.log_message("Files that %s modified:", were)
+            for file in self.files:
+                self.log_message(file)
+        if self.fixer_log:
+            self.log_message("Warnings/messages while refactoring:")
+            for message in self.fixer_log:
+                self.log_message(message)
+        if self.errors:
+            if len(self.errors) == 1:
+                self.log_message("There was 1 error:")
+            else:
+                self.log_message("There were %d errors:", len(self.errors))
+            for msg, args, kwds in self.errors:
+                self.log_message(msg, *args, **kwds)
+
+    def parse_block(self, block, lineno, indent):
+        """Parses a block into a tree.
+
+        This is necessary to get correct line number / offset information
+        in the parser diagnostics and embedded into the parse tree.
+        """
+        return self.driver.parse_tokens(self.wrap_toks(block, lineno, indent))
+
+    def wrap_toks(self, block, lineno, indent):
+        """Wraps a tokenize stream to systematically modify start/end."""
+        tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next)
+        for type, value, (line0, col0), (line1, col1), line_text in tokens:
+            line0 += lineno - 1
+            line1 += lineno - 1
+            # Don't bother updating the columns; this is too complicated
+            # since line_text would also have to be updated and it would
+            # still break for tokens spanning lines.  Let the user guess
+            # that the column numbers for doctests are relative to the
+            # end of the prompt string (PS1 or PS2).
+            yield type, value, (line0, col0), (line1, col1), line_text
+
+
+    def gen_lines(self, block, indent):
+        """Generates lines as expected by tokenize from a list of lines.
+
+        This strips the first len(indent + self.PS1) characters off each line.
+        """
+        prefix1 = indent + self.PS1
+        prefix2 = indent + self.PS2
+        prefix = prefix1
+        for line in block:
+            if line.startswith(prefix):
+                yield line[len(prefix):]
+            elif line == prefix.rstrip() + "\n":
+                yield "\n"
+            else:
+                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
+            prefix = prefix2
+        while True:
+            yield ""
+
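A sketch of what gen_lines() feeds to tokenize (commentary, not part of the
patch; rt is an already-constructed RefactoringTool):

    g = rt.gen_lines(["    >>> x = 1\n", "    ... y = 2\n"], "    ")
    print g.next()   # "x = 1\n"  -- indent + PS1 stripped
    print g.next()   # "y = 2\n"  -- indent + PS2 stripped
    print g.next()   # ""         -- padding, repeated forever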
+
+def diff_texts(a, b, filename):
+    """Return a unified diff of two strings."""
+    a = a.splitlines()
+    b = b.splitlines()
+    return difflib.unified_diff(a, b, filename, filename,
+                                "(original)", "(refactored)",
+                                lineterm="")
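Usage note (commentary, not part of the patch): diff_texts() returns an
iterator of diff lines, so callers print them one by one:

    for line in diff_texts("x = 1\n", "x = 2\n", "example.py"):
        print line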

Added: sandbox/trunk/refactor_pkg/refactor/tests/__init__.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/__init__.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,24 @@
+"""Make tests/ into a package. This allows us to "import tests" and
+have tests.all_tests be a TestSuite representing all test cases
+from all test_*.py files in tests/."""
+# Author: Collin Winter
+
+import os
+import os.path
+import unittest
+import types
+
+from . import support
+
+all_tests = unittest.TestSuite()
+
+tests_dir = os.path.join(os.path.dirname(__file__), '..', 'tests')
+tests = [t[0:-3] for t in os.listdir(tests_dir)
+                        if t.startswith('test_') and t.endswith('.py')]
+
+loader = unittest.TestLoader()
+
+for t in tests:
+    __import__("", globals(), locals(), [t], level=1)
+    mod = globals()[t]
+    all_tests.addTests(loader.loadTestsFromModule(mod))

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/README
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/README	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+In this directory:
+- py2_test_grammar.py -- test file that exercises most/all of Python 2.x's grammar.
+- py3_test_grammar.py -- test file that exercises most/all of Python 3.x's grammar.
+- infinite_recursion.py -- test file that causes refactor's faster recursive pattern matching
+  scheme to fail, but passes when refactor falls back to iterative pattern matching.
+- fixes/ -- for use by test_refactor.py

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/bad_order.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/bad_order.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,5 @@
+from refactor.fixer_base import BaseFix
+
+class FixBadOrder(BaseFix):
+
+    order = "crazy"

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/__init__.py
==============================================================================

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_explicit.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_explicit.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+from refactor.fixer_base import BaseFix
+
+class FixExplicit(BaseFix):
+    explicit = True
+
+    def match(self): return False

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_first.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_first.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+from refactor.fixer_base import BaseFix
+
+class FixFirst(BaseFix):
+    run_order = 1
+
+    def match(self, node): return False

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_last.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_last.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,7 @@
+from refactor.fixer_base import BaseFix
+
+class FixLast(BaseFix):
+
+    run_order = 10
+
+    def match(self, node): return False

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_parrot.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_parrot.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,13 @@
+from refactor.fixer_base import BaseFix
+from refactor.fixer_util import Name
+
+class FixParrot(BaseFix):
+    """
+    Change functions named 'parrot' to 'cheese'.
+    """
+
+    PATTERN = """funcdef < 'def' name='parrot' any* >"""
+
+    def transform(self, node, results):
+        name = results["name"]
+        name.replace(Name("cheese", name.get_prefix()))
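An end-to-end sketch of this fixer under the framework (commentary, not part
of the patch; assumes the myfixes package is on sys.path, as the test suite
arranges, and that the grammar files are available):

    from refactor.refactor import RefactoringTool

    rt = RefactoringTool(["myfixes.fix_parrot"])
    tree = rt.refactor_string("def parrot():\n    pass\n", "<example>")
    print str(tree)    # the function is now named "cheese"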

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_preorder.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/myfixes/fix_preorder.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,6 @@
+from refactor.fixer_base import BaseFix
+
+class FixPreorder(BaseFix):
+    order = "pre"
+
+    def match(self, node): return False

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/no_fixer_cls.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/no_fixer_cls.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1 @@
+# This is empty so trying to fetch the fixer class gives an AttributeError

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/parrot_example.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/fixers/parrot_example.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,2 @@
+def parrot():
+    pass

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/infinite_recursion.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/infinite_recursion.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,2670 @@
+# This file is used to verify that refactor falls back to a slower, iterative
+# pattern matching scheme in the event that the faster recursive system fails
+# due to infinite recursion.
+from ctypes import *
+STRING = c_char_p
+
+
+OSUnknownByteOrder = 0
+UIT_PROMPT = 1
+P_PGID = 2
+P_PID = 1
+UIT_ERROR = 5
+UIT_INFO = 4
+UIT_NONE = 0
+P_ALL = 0
+UIT_VERIFY = 2
+OSBigEndian = 2
+UIT_BOOLEAN = 3
+OSLittleEndian = 1
+__darwin_nl_item = c_int
+__darwin_wctrans_t = c_int
+__darwin_wctype_t = c_ulong
+__int8_t = c_byte
+__uint8_t = c_ubyte
+__int16_t = c_short
+__uint16_t = c_ushort
+__int32_t = c_int
+__uint32_t = c_uint
+__int64_t = c_longlong
+__uint64_t = c_ulonglong
+__darwin_intptr_t = c_long
+__darwin_natural_t = c_uint
+__darwin_ct_rune_t = c_int
+class __mbstate_t(Union):
+    pass
+__mbstate_t._pack_ = 4
+__mbstate_t._fields_ = [
+    ('__mbstate8', c_char * 128),
+    ('_mbstateL', c_longlong),
+]
+assert sizeof(__mbstate_t) == 128, sizeof(__mbstate_t)
+assert alignment(__mbstate_t) == 4, alignment(__mbstate_t)
+__darwin_mbstate_t = __mbstate_t
+__darwin_ptrdiff_t = c_int
+__darwin_size_t = c_ulong
+__darwin_va_list = STRING
+__darwin_wchar_t = c_int
+__darwin_rune_t = __darwin_wchar_t
+__darwin_wint_t = c_int
+__darwin_clock_t = c_ulong
+__darwin_socklen_t = __uint32_t
+__darwin_ssize_t = c_long
+__darwin_time_t = c_long
+sig_atomic_t = c_int
+class sigcontext(Structure):
+    pass
+sigcontext._fields_ = [
+    ('sc_onstack', c_int),
+    ('sc_mask', c_int),
+    ('sc_eax', c_uint),
+    ('sc_ebx', c_uint),
+    ('sc_ecx', c_uint),
+    ('sc_edx', c_uint),
+    ('sc_edi', c_uint),
+    ('sc_esi', c_uint),
+    ('sc_ebp', c_uint),
+    ('sc_esp', c_uint),
+    ('sc_ss', c_uint),
+    ('sc_eflags', c_uint),
+    ('sc_eip', c_uint),
+    ('sc_cs', c_uint),
+    ('sc_ds', c_uint),
+    ('sc_es', c_uint),
+    ('sc_fs', c_uint),
+    ('sc_gs', c_uint),
+]
+assert sizeof(sigcontext) == 72, sizeof(sigcontext)
+assert alignment(sigcontext) == 4, alignment(sigcontext)
+u_int8_t = c_ubyte
+u_int16_t = c_ushort
+u_int32_t = c_uint
+u_int64_t = c_ulonglong
+int32_t = c_int
+register_t = int32_t
+user_addr_t = u_int64_t
+user_size_t = u_int64_t
+int64_t = c_longlong
+user_ssize_t = int64_t
+user_long_t = int64_t
+user_ulong_t = u_int64_t
+user_time_t = int64_t
+syscall_arg_t = u_int64_t
+
+# values for unnamed enumeration
+class aes_key_st(Structure):
+    pass
+aes_key_st._fields_ = [
+    ('rd_key', c_ulong * 60),
+    ('rounds', c_int),
+]
+assert sizeof(aes_key_st) == 244, sizeof(aes_key_st)
+assert alignment(aes_key_st) == 4, alignment(aes_key_st)
+AES_KEY = aes_key_st
+class asn1_ctx_st(Structure):
+    pass
+asn1_ctx_st._fields_ = [
+    ('p', POINTER(c_ubyte)),
+    ('eos', c_int),
+    ('error', c_int),
+    ('inf', c_int),
+    ('tag', c_int),
+    ('xclass', c_int),
+    ('slen', c_long),
+    ('max', POINTER(c_ubyte)),
+    ('q', POINTER(c_ubyte)),
+    ('pp', POINTER(POINTER(c_ubyte))),
+    ('line', c_int),
+]
+assert sizeof(asn1_ctx_st) == 44, sizeof(asn1_ctx_st)
+assert alignment(asn1_ctx_st) == 4, alignment(asn1_ctx_st)
+ASN1_CTX = asn1_ctx_st
+class asn1_object_st(Structure):
+    pass
+asn1_object_st._fields_ = [
+    ('sn', STRING),
+    ('ln', STRING),
+    ('nid', c_int),
+    ('length', c_int),
+    ('data', POINTER(c_ubyte)),
+    ('flags', c_int),
+]
+assert sizeof(asn1_object_st) == 24, sizeof(asn1_object_st)
+assert alignment(asn1_object_st) == 4, alignment(asn1_object_st)
+ASN1_OBJECT = asn1_object_st
+class asn1_string_st(Structure):
+    pass
+asn1_string_st._fields_ = [
+    ('length', c_int),
+    ('type', c_int),
+    ('data', POINTER(c_ubyte)),
+    ('flags', c_long),
+]
+assert sizeof(asn1_string_st) == 16, sizeof(asn1_string_st)
+assert alignment(asn1_string_st) == 4, alignment(asn1_string_st)
+ASN1_STRING = asn1_string_st
+class ASN1_ENCODING_st(Structure):
+    pass
+ASN1_ENCODING_st._fields_ = [
+    ('enc', POINTER(c_ubyte)),
+    ('len', c_long),
+    ('modified', c_int),
+]
+assert sizeof(ASN1_ENCODING_st) == 12, sizeof(ASN1_ENCODING_st)
+assert alignment(ASN1_ENCODING_st) == 4, alignment(ASN1_ENCODING_st)
+ASN1_ENCODING = ASN1_ENCODING_st
+class asn1_string_table_st(Structure):
+    pass
+asn1_string_table_st._fields_ = [
+    ('nid', c_int),
+    ('minsize', c_long),
+    ('maxsize', c_long),
+    ('mask', c_ulong),
+    ('flags', c_ulong),
+]
+assert sizeof(asn1_string_table_st) == 20, sizeof(asn1_string_table_st)
+assert alignment(asn1_string_table_st) == 4, alignment(asn1_string_table_st)
+ASN1_STRING_TABLE = asn1_string_table_st
+class ASN1_TEMPLATE_st(Structure):
+    pass
+ASN1_TEMPLATE_st._fields_ = [
+]
+ASN1_TEMPLATE = ASN1_TEMPLATE_st
+class ASN1_ITEM_st(Structure):
+    pass
+ASN1_ITEM = ASN1_ITEM_st
+ASN1_ITEM_st._fields_ = [
+]
+class ASN1_TLC_st(Structure):
+    pass
+ASN1_TLC = ASN1_TLC_st
+ASN1_TLC_st._fields_ = [
+]
+class ASN1_VALUE_st(Structure):
+    pass
+ASN1_VALUE_st._fields_ = [
+]
+ASN1_VALUE = ASN1_VALUE_st
+ASN1_ITEM_EXP = ASN1_ITEM
+class asn1_type_st(Structure):
+    pass
+class N12asn1_type_st4DOLLAR_11E(Union):
+    pass
+ASN1_BOOLEAN = c_int
+ASN1_INTEGER = asn1_string_st
+ASN1_ENUMERATED = asn1_string_st
+ASN1_BIT_STRING = asn1_string_st
+ASN1_OCTET_STRING = asn1_string_st
+ASN1_PRINTABLESTRING = asn1_string_st
+ASN1_T61STRING = asn1_string_st
+ASN1_IA5STRING = asn1_string_st
+ASN1_GENERALSTRING = asn1_string_st
+ASN1_BMPSTRING = asn1_string_st
+ASN1_UNIVERSALSTRING = asn1_string_st
+ASN1_UTCTIME = asn1_string_st
+ASN1_GENERALIZEDTIME = asn1_string_st
+ASN1_VISIBLESTRING = asn1_string_st
+ASN1_UTF8STRING = asn1_string_st
+N12asn1_type_st4DOLLAR_11E._fields_ = [
+    ('ptr', STRING),
+    ('boolean', ASN1_BOOLEAN),
+    ('asn1_string', POINTER(ASN1_STRING)),
+    ('object', POINTER(ASN1_OBJECT)),
+    ('integer', POINTER(ASN1_INTEGER)),
+    ('enumerated', POINTER(ASN1_ENUMERATED)),
+    ('bit_string', POINTER(ASN1_BIT_STRING)),
+    ('octet_string', POINTER(ASN1_OCTET_STRING)),
+    ('printablestring', POINTER(ASN1_PRINTABLESTRING)),
+    ('t61string', POINTER(ASN1_T61STRING)),
+    ('ia5string', POINTER(ASN1_IA5STRING)),
+    ('generalstring', POINTER(ASN1_GENERALSTRING)),
+    ('bmpstring', POINTER(ASN1_BMPSTRING)),
+    ('universalstring', POINTER(ASN1_UNIVERSALSTRING)),
+    ('utctime', POINTER(ASN1_UTCTIME)),
+    ('generalizedtime', POINTER(ASN1_GENERALIZEDTIME)),
+    ('visiblestring', POINTER(ASN1_VISIBLESTRING)),
+    ('utf8string', POINTER(ASN1_UTF8STRING)),
+    ('set', POINTER(ASN1_STRING)),
+    ('sequence', POINTER(ASN1_STRING)),
+]
+assert sizeof(N12asn1_type_st4DOLLAR_11E) == 4, sizeof(N12asn1_type_st4DOLLAR_11E)
+assert alignment(N12asn1_type_st4DOLLAR_11E) == 4, alignment(N12asn1_type_st4DOLLAR_11E)
+asn1_type_st._fields_ = [
+    ('type', c_int),
+    ('value', N12asn1_type_st4DOLLAR_11E),
+]
+assert sizeof(asn1_type_st) == 8, sizeof(asn1_type_st)
+assert alignment(asn1_type_st) == 4, alignment(asn1_type_st)
+ASN1_TYPE = asn1_type_st
+class asn1_method_st(Structure):
+    pass
+asn1_method_st._fields_ = [
+    ('i2d', CFUNCTYPE(c_int)),
+    ('d2i', CFUNCTYPE(STRING)),
+    ('create', CFUNCTYPE(STRING)),
+    ('destroy', CFUNCTYPE(None)),
+]
+assert sizeof(asn1_method_st) == 16, sizeof(asn1_method_st)
+assert alignment(asn1_method_st) == 4, alignment(asn1_method_st)
+ASN1_METHOD = asn1_method_st
+class asn1_header_st(Structure):
+    pass
+asn1_header_st._fields_ = [
+    ('header', POINTER(ASN1_OCTET_STRING)),
+    ('data', STRING),
+    ('meth', POINTER(ASN1_METHOD)),
+]
+assert sizeof(asn1_header_st) == 12, sizeof(asn1_header_st)
+assert alignment(asn1_header_st) == 4, alignment(asn1_header_st)
+ASN1_HEADER = asn1_header_st
+class BIT_STRING_BITNAME_st(Structure):
+    pass
+BIT_STRING_BITNAME_st._fields_ = [
+    ('bitnum', c_int),
+    ('lname', STRING),
+    ('sname', STRING),
+]
+assert sizeof(BIT_STRING_BITNAME_st) == 12, sizeof(BIT_STRING_BITNAME_st)
+assert alignment(BIT_STRING_BITNAME_st) == 4, alignment(BIT_STRING_BITNAME_st)
+BIT_STRING_BITNAME = BIT_STRING_BITNAME_st
+class bio_st(Structure):
+    pass
+BIO = bio_st
+bio_info_cb = CFUNCTYPE(None, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)
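+# CFUNCTYPE(restype, *argtypes), as in bio_info_cb above, builds a C
+# function-pointer prototype; calling the prototype with a Python callable
+# yields a callback object that can fill any struct field of that type.  A
+# minimal sketch with a hypothetical comparison callback:
+from ctypes import CFUNCTYPE, c_int
+_EXAMPLE_CMP_T = CFUNCTYPE(c_int, c_int, c_int)   # int (*cmp)(int, int)
+def _example_cmp(a, b):
+    # ctypes has already converted both c_int arguments to Python ints here
+    return (a > b) - (a < b)
+_example_cmp_cb = _EXAMPLE_CMP_T(_example_cmp)
+assert _example_cmp_cb(2, 1) == 1  # callable from Python and from C alike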
+class bio_method_st(Structure):
+    pass
+bio_method_st._fields_ = [
+    ('type', c_int),
+    ('name', STRING),
+    ('bwrite', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+    ('bread', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+    ('bputs', CFUNCTYPE(c_int, POINTER(BIO), STRING)),
+    ('bgets', CFUNCTYPE(c_int, POINTER(BIO), STRING, c_int)),
+    ('ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, c_long, c_void_p)),
+    ('create', CFUNCTYPE(c_int, POINTER(BIO))),
+    ('destroy', CFUNCTYPE(c_int, POINTER(BIO))),
+    ('callback_ctrl', CFUNCTYPE(c_long, POINTER(BIO), c_int, POINTER(bio_info_cb))),
+]
+assert sizeof(bio_method_st) == 40, sizeof(bio_method_st)
+assert alignment(bio_method_st) == 4, alignment(bio_method_st)
+BIO_METHOD = bio_method_st
+class crypto_ex_data_st(Structure):
+    pass
+class stack_st(Structure):
+    pass
+STACK = stack_st
+crypto_ex_data_st._fields_ = [
+    ('sk', POINTER(STACK)),
+    ('dummy', c_int),
+]
+assert sizeof(crypto_ex_data_st) == 8, sizeof(crypto_ex_data_st)
+assert alignment(crypto_ex_data_st) == 4, alignment(crypto_ex_data_st)
+CRYPTO_EX_DATA = crypto_ex_data_st
+bio_st._fields_ = [
+    ('method', POINTER(BIO_METHOD)),
+    ('callback', CFUNCTYPE(c_long, POINTER(bio_st), c_int, STRING, c_int, c_long, c_long)),
+    ('cb_arg', STRING),
+    ('init', c_int),
+    ('shutdown', c_int),
+    ('flags', c_int),
+    ('retry_reason', c_int),
+    ('num', c_int),
+    ('ptr', c_void_p),
+    ('next_bio', POINTER(bio_st)),
+    ('prev_bio', POINTER(bio_st)),
+    ('references', c_int),
+    ('num_read', c_ulong),
+    ('num_write', c_ulong),
+    ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(bio_st) == 64, sizeof(bio_st)
+assert alignment(bio_st) == 4, alignment(bio_st)
+class bio_f_buffer_ctx_struct(Structure):
+    pass
+bio_f_buffer_ctx_struct._fields_ = [
+    ('ibuf_size', c_int),
+    ('obuf_size', c_int),
+    ('ibuf', STRING),
+    ('ibuf_len', c_int),
+    ('ibuf_off', c_int),
+    ('obuf', STRING),
+    ('obuf_len', c_int),
+    ('obuf_off', c_int),
+]
+assert sizeof(bio_f_buffer_ctx_struct) == 32, sizeof(bio_f_buffer_ctx_struct)
+assert alignment(bio_f_buffer_ctx_struct) == 4, alignment(bio_f_buffer_ctx_struct)
+BIO_F_BUFFER_CTX = bio_f_buffer_ctx_struct
+class hostent(Structure):
+    pass
+hostent._fields_ = [
+]
+class bf_key_st(Structure):
+    pass
+bf_key_st._fields_ = [
+    ('P', c_uint * 18),
+    ('S', c_uint * 1024),
+]
+assert sizeof(bf_key_st) == 4168, sizeof(bf_key_st)
+assert alignment(bf_key_st) == 4, alignment(bf_key_st)
+BF_KEY = bf_key_st
+class bignum_st(Structure):
+    pass
+bignum_st._fields_ = [
+    ('d', POINTER(c_ulong)),
+    ('top', c_int),
+    ('dmax', c_int),
+    ('neg', c_int),
+    ('flags', c_int),
+]
+assert sizeof(bignum_st) == 20, sizeof(bignum_st)
+assert alignment(bignum_st) == 4, alignment(bignum_st)
+BIGNUM = bignum_st
+class bignum_ctx(Structure):
+    pass
+bignum_ctx._fields_ = [
+]
+BN_CTX = bignum_ctx
+class bn_blinding_st(Structure):
+    pass
+bn_blinding_st._fields_ = [
+    ('init', c_int),
+    ('A', POINTER(BIGNUM)),
+    ('Ai', POINTER(BIGNUM)),
+    ('mod', POINTER(BIGNUM)),
+    ('thread_id', c_ulong),
+]
+assert sizeof(bn_blinding_st) == 20, sizeof(bn_blinding_st)
+assert alignment(bn_blinding_st) == 4, alignment(bn_blinding_st)
+BN_BLINDING = bn_blinding_st
+class bn_mont_ctx_st(Structure):
+    pass
+bn_mont_ctx_st._fields_ = [
+    ('ri', c_int),
+    ('RR', BIGNUM),
+    ('N', BIGNUM),
+    ('Ni', BIGNUM),
+    ('n0', c_ulong),
+    ('flags', c_int),
+]
+assert sizeof(bn_mont_ctx_st) == 72, sizeof(bn_mont_ctx_st)
+assert alignment(bn_mont_ctx_st) == 4, alignment(bn_mont_ctx_st)
+BN_MONT_CTX = bn_mont_ctx_st
+class bn_recp_ctx_st(Structure):
+    pass
+bn_recp_ctx_st._fields_ = [
+    ('N', BIGNUM),
+    ('Nr', BIGNUM),
+    ('num_bits', c_int),
+    ('shift', c_int),
+    ('flags', c_int),
+]
+assert sizeof(bn_recp_ctx_st) == 52, sizeof(bn_recp_ctx_st)
+assert alignment(bn_recp_ctx_st) == 4, alignment(bn_recp_ctx_st)
+BN_RECP_CTX = bn_recp_ctx_st
+class buf_mem_st(Structure):
+    pass
+buf_mem_st._fields_ = [
+    ('length', c_int),
+    ('data', STRING),
+    ('max', c_int),
+]
+assert sizeof(buf_mem_st) == 12, sizeof(buf_mem_st)
+assert alignment(buf_mem_st) == 4, alignment(buf_mem_st)
+BUF_MEM = buf_mem_st
+class cast_key_st(Structure):
+    pass
+cast_key_st._fields_ = [
+    ('data', c_ulong * 32),
+    ('short_key', c_int),
+]
+assert sizeof(cast_key_st) == 132, sizeof(cast_key_st)
+assert alignment(cast_key_st) == 4, alignment(cast_key_st)
+CAST_KEY = cast_key_st
+class comp_method_st(Structure):
+    pass
+comp_method_st._fields_ = [
+    ('type', c_int),
+    ('name', STRING),
+    ('init', CFUNCTYPE(c_int)),
+    ('finish', CFUNCTYPE(None)),
+    ('compress', CFUNCTYPE(c_int)),
+    ('expand', CFUNCTYPE(c_int)),
+    ('ctrl', CFUNCTYPE(c_long)),
+    ('callback_ctrl', CFUNCTYPE(c_long)),
+]
+assert sizeof(comp_method_st) == 32, sizeof(comp_method_st)
+assert alignment(comp_method_st) == 4, alignment(comp_method_st)
+COMP_METHOD = comp_method_st
+class comp_ctx_st(Structure):
+    pass
+comp_ctx_st._fields_ = [
+    ('meth', POINTER(COMP_METHOD)),
+    ('compress_in', c_ulong),
+    ('compress_out', c_ulong),
+    ('expand_in', c_ulong),
+    ('expand_out', c_ulong),
+    ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(comp_ctx_st) == 28, sizeof(comp_ctx_st)
+assert alignment(comp_ctx_st) == 4, alignment(comp_ctx_st)
+COMP_CTX = comp_ctx_st
+class CRYPTO_dynlock_value(Structure):
+    pass
+CRYPTO_dynlock_value._fields_ = [
+]
+class CRYPTO_dynlock(Structure):
+    pass
+CRYPTO_dynlock._fields_ = [
+    ('references', c_int),
+    ('data', POINTER(CRYPTO_dynlock_value)),
+]
+assert sizeof(CRYPTO_dynlock) == 8, sizeof(CRYPTO_dynlock)
+assert alignment(CRYPTO_dynlock) == 4, alignment(CRYPTO_dynlock)
+BIO_dummy = bio_st
+CRYPTO_EX_new = CFUNCTYPE(c_int, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
+CRYPTO_EX_free = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(CRYPTO_EX_DATA), c_int, c_long, c_void_p)
+CRYPTO_EX_dup = CFUNCTYPE(c_int, POINTER(CRYPTO_EX_DATA), POINTER(CRYPTO_EX_DATA), c_void_p, c_int, c_long, c_void_p)
+class crypto_ex_data_func_st(Structure):
+    pass
+crypto_ex_data_func_st._fields_ = [
+    ('argl', c_long),
+    ('argp', c_void_p),
+    ('new_func', POINTER(CRYPTO_EX_new)),
+    ('free_func', POINTER(CRYPTO_EX_free)),
+    ('dup_func', POINTER(CRYPTO_EX_dup)),
+]
+assert sizeof(crypto_ex_data_func_st) == 20, sizeof(crypto_ex_data_func_st)
+assert alignment(crypto_ex_data_func_st) == 4, alignment(crypto_ex_data_func_st)
+CRYPTO_EX_DATA_FUNCS = crypto_ex_data_func_st
+class st_CRYPTO_EX_DATA_IMPL(Structure):
+    pass
+CRYPTO_EX_DATA_IMPL = st_CRYPTO_EX_DATA_IMPL
+st_CRYPTO_EX_DATA_IMPL._fields_ = [
+]
+CRYPTO_MEM_LEAK_CB = CFUNCTYPE(c_void_p, c_ulong, STRING, c_int, c_int, c_void_p)
+DES_cblock = c_ubyte * 8
+const_DES_cblock = c_ubyte * 8
+class DES_ks(Structure):
+    pass
+class N6DES_ks3DOLLAR_9E(Union):
+    pass
+N6DES_ks3DOLLAR_9E._fields_ = [
+    ('cblock', DES_cblock),
+    ('deslong', c_ulong * 2),
+]
+assert sizeof(N6DES_ks3DOLLAR_9E) == 8, sizeof(N6DES_ks3DOLLAR_9E)
+assert alignment(N6DES_ks3DOLLAR_9E) == 4, alignment(N6DES_ks3DOLLAR_9E)
+DES_ks._fields_ = [
+    ('ks', N6DES_ks3DOLLAR_9E * 16),
+]
+assert sizeof(DES_ks) == 128, sizeof(DES_ks)
+assert alignment(DES_ks) == 4, alignment(DES_ks)
+DES_key_schedule = DES_ks
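+# The N6DES_ks3DOLLAR_9E name above is how the code generator labels an
+# anonymous union; a ctypes Union lays all of its fields over the same
+# storage, so the 8-byte cblock and the two-c_ulong view alias one another.
+# A minimal sketch of that aliasing with a hypothetical 4-byte overlay:
+from ctypes import Union, c_ubyte, c_uint
+class _example_overlay(Union):
+    _fields_ = [('bytes', c_ubyte * 4), ('word', c_uint)]
+_example_ov = _example_overlay()
+_example_ov.word = 0x01020304
+# both fields read the same memory; on a little-endian host
+# _example_ov.bytes[0] is 0x04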
+_ossl_old_des_cblock = c_ubyte * 8
+class _ossl_old_des_ks_struct(Structure):
+    pass
+class N23_ossl_old_des_ks_struct4DOLLAR_10E(Union):
+    pass
+N23_ossl_old_des_ks_struct4DOLLAR_10E._fields_ = [
+    ('_', _ossl_old_des_cblock),
+    ('pad', c_ulong * 2),
+]
+assert sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 8, sizeof(N23_ossl_old_des_ks_struct4DOLLAR_10E)
+assert alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E) == 4, alignment(N23_ossl_old_des_ks_struct4DOLLAR_10E)
+_ossl_old_des_ks_struct._fields_ = [
+    ('ks', N23_ossl_old_des_ks_struct4DOLLAR_10E),
+]
+assert sizeof(_ossl_old_des_ks_struct) == 8, sizeof(_ossl_old_des_ks_struct)
+assert alignment(_ossl_old_des_ks_struct) == 4, alignment(_ossl_old_des_ks_struct)
+_ossl_old_des_key_schedule = _ossl_old_des_ks_struct * 16
+class dh_st(Structure):
+    pass
+DH = dh_st
+class dh_method(Structure):
+    pass
+dh_method._fields_ = [
+    ('name', STRING),
+    ('generate_key', CFUNCTYPE(c_int, POINTER(DH))),
+    ('compute_key', CFUNCTYPE(c_int, POINTER(c_ubyte), POINTER(BIGNUM), POINTER(DH))),
+    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DH), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('init', CFUNCTYPE(c_int, POINTER(DH))),
+    ('finish', CFUNCTYPE(c_int, POINTER(DH))),
+    ('flags', c_int),
+    ('app_data', STRING),
+]
+assert sizeof(dh_method) == 32, sizeof(dh_method)
+assert alignment(dh_method) == 4, alignment(dh_method)
+DH_METHOD = dh_method
+class engine_st(Structure):
+    pass
+ENGINE = engine_st
+dh_st._fields_ = [
+    ('pad', c_int),
+    ('version', c_int),
+    ('p', POINTER(BIGNUM)),
+    ('g', POINTER(BIGNUM)),
+    ('length', c_long),
+    ('pub_key', POINTER(BIGNUM)),
+    ('priv_key', POINTER(BIGNUM)),
+    ('flags', c_int),
+    ('method_mont_p', STRING),
+    ('q', POINTER(BIGNUM)),
+    ('j', POINTER(BIGNUM)),
+    ('seed', POINTER(c_ubyte)),
+    ('seedlen', c_int),
+    ('counter', POINTER(BIGNUM)),
+    ('references', c_int),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('meth', POINTER(DH_METHOD)),
+    ('engine', POINTER(ENGINE)),
+]
+assert sizeof(dh_st) == 76, sizeof(dh_st)
+assert alignment(dh_st) == 4, alignment(dh_st)
+class dsa_st(Structure):
+    pass
+DSA = dsa_st
+class DSA_SIG_st(Structure):
+    pass
+DSA_SIG_st._fields_ = [
+    ('r', POINTER(BIGNUM)),
+    ('s', POINTER(BIGNUM)),
+]
+assert sizeof(DSA_SIG_st) == 8, sizeof(DSA_SIG_st)
+assert alignment(DSA_SIG_st) == 4, alignment(DSA_SIG_st)
+DSA_SIG = DSA_SIG_st
+class dsa_method(Structure):
+    pass
+dsa_method._fields_ = [
+    ('name', STRING),
+    ('dsa_do_sign', CFUNCTYPE(POINTER(DSA_SIG), POINTER(c_ubyte), c_int, POINTER(DSA))),
+    ('dsa_sign_setup', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BN_CTX), POINTER(POINTER(BIGNUM)), POINTER(POINTER(BIGNUM)))),
+    ('dsa_do_verify', CFUNCTYPE(c_int, POINTER(c_ubyte), c_int, POINTER(DSA_SIG), POINTER(DSA))),
+    ('dsa_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(DSA), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('init', CFUNCTYPE(c_int, POINTER(DSA))),
+    ('finish', CFUNCTYPE(c_int, POINTER(DSA))),
+    ('flags', c_int),
+    ('app_data', STRING),
+]
+assert sizeof(dsa_method) == 40, sizeof(dsa_method)
+assert alignment(dsa_method) == 4, alignment(dsa_method)
+DSA_METHOD = dsa_method
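+# The POINTER(POINTER(BIGNUM)) slots in dsa_sign_setup above model C
+# out-parameters (BIGNUM **): the callee writes a pointer back through the
+# extra level of indirection.  A minimal sketch of double indirection using
+# a hypothetical c_int in place of a BIGNUM:
+from ctypes import pointer, c_int
+_example_val = c_int(7)
+_example_p = pointer(_example_val)    # c_int *
+_example_pp = pointer(_example_p)     # c_int **
+_example_pp.contents.contents.value = 9   # write through both levels
+assert _example_val.value == 9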
+dsa_st._fields_ = [
+    ('pad', c_int),
+    ('version', c_long),
+    ('write_params', c_int),
+    ('p', POINTER(BIGNUM)),
+    ('q', POINTER(BIGNUM)),
+    ('g', POINTER(BIGNUM)),
+    ('pub_key', POINTER(BIGNUM)),
+    ('priv_key', POINTER(BIGNUM)),
+    ('kinv', POINTER(BIGNUM)),
+    ('r', POINTER(BIGNUM)),
+    ('flags', c_int),
+    ('method_mont_p', STRING),
+    ('references', c_int),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('meth', POINTER(DSA_METHOD)),
+    ('engine', POINTER(ENGINE)),
+]
+assert sizeof(dsa_st) == 68, sizeof(dsa_st)
+assert alignment(dsa_st) == 4, alignment(dsa_st)
+class evp_pkey_st(Structure):
+    pass
+class N11evp_pkey_st4DOLLAR_12E(Union):
+    pass
+class rsa_st(Structure):
+    pass
+N11evp_pkey_st4DOLLAR_12E._fields_ = [
+    ('ptr', STRING),
+    ('rsa', POINTER(rsa_st)),
+    ('dsa', POINTER(dsa_st)),
+    ('dh', POINTER(dh_st)),
+]
+assert sizeof(N11evp_pkey_st4DOLLAR_12E) == 4, sizeof(N11evp_pkey_st4DOLLAR_12E)
+assert alignment(N11evp_pkey_st4DOLLAR_12E) == 4, alignment(N11evp_pkey_st4DOLLAR_12E)
+evp_pkey_st._fields_ = [
+    ('type', c_int),
+    ('save_type', c_int),
+    ('references', c_int),
+    ('pkey', N11evp_pkey_st4DOLLAR_12E),
+    ('save_parameters', c_int),
+    ('attributes', POINTER(STACK)),
+]
+assert sizeof(evp_pkey_st) == 24, sizeof(evp_pkey_st)
+assert alignment(evp_pkey_st) == 4, alignment(evp_pkey_st)
+class env_md_st(Structure):
+    pass
+class env_md_ctx_st(Structure):
+    pass
+EVP_MD_CTX = env_md_ctx_st
+env_md_st._fields_ = [
+    ('type', c_int),
+    ('pkey_type', c_int),
+    ('md_size', c_int),
+    ('flags', c_ulong),
+    ('init', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
+    ('update', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), c_void_p, c_ulong)),
+    ('final', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(c_ubyte))),
+    ('copy', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX), POINTER(EVP_MD_CTX))),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_MD_CTX))),
+    ('sign', CFUNCTYPE(c_int)),
+    ('verify', CFUNCTYPE(c_int)),
+    ('required_pkey_type', c_int * 5),
+    ('block_size', c_int),
+    ('ctx_size', c_int),
+]
+assert sizeof(env_md_st) == 72, sizeof(env_md_st)
+assert alignment(env_md_st) == 4, alignment(env_md_st)
+EVP_MD = env_md_st
+env_md_ctx_st._fields_ = [
+    ('digest', POINTER(EVP_MD)),
+    ('engine', POINTER(ENGINE)),
+    ('flags', c_ulong),
+    ('md_data', c_void_p),
+]
+assert sizeof(env_md_ctx_st) == 16, sizeof(env_md_ctx_st)
+assert alignment(env_md_ctx_st) == 4, alignment(env_md_ctx_st)
+class evp_cipher_st(Structure):
+    pass
+class evp_cipher_ctx_st(Structure):
+    pass
+EVP_CIPHER_CTX = evp_cipher_ctx_st
+evp_cipher_st._fields_ = [
+    ('nid', c_int),
+    ('block_size', c_int),
+    ('key_len', c_int),
+    ('iv_len', c_int),
+    ('flags', c_ulong),
+    ('init', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_int)),
+    ('do_cipher', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(c_ubyte), POINTER(c_ubyte), c_uint)),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX))),
+    ('ctx_size', c_int),
+    ('set_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
+    ('get_asn1_parameters', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), POINTER(ASN1_TYPE))),
+    ('ctrl', CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), c_int, c_int, c_void_p)),
+    ('app_data', c_void_p),
+]
+assert sizeof(evp_cipher_st) == 52, sizeof(evp_cipher_st)
+assert alignment(evp_cipher_st) == 4, alignment(evp_cipher_st)
+class evp_cipher_info_st(Structure):
+    pass
+EVP_CIPHER = evp_cipher_st
+evp_cipher_info_st._fields_ = [
+    ('cipher', POINTER(EVP_CIPHER)),
+    ('iv', c_ubyte * 16),
+]
+assert sizeof(evp_cipher_info_st) == 20, sizeof(evp_cipher_info_st)
+assert alignment(evp_cipher_info_st) == 4, alignment(evp_cipher_info_st)
+EVP_CIPHER_INFO = evp_cipher_info_st
+evp_cipher_ctx_st._fields_ = [
+    ('cipher', POINTER(EVP_CIPHER)),
+    ('engine', POINTER(ENGINE)),
+    ('encrypt', c_int),
+    ('buf_len', c_int),
+    ('oiv', c_ubyte * 16),
+    ('iv', c_ubyte * 16),
+    ('buf', c_ubyte * 32),
+    ('num', c_int),
+    ('app_data', c_void_p),
+    ('key_len', c_int),
+    ('flags', c_ulong),
+    ('cipher_data', c_void_p),
+    ('final_used', c_int),
+    ('block_mask', c_int),
+    ('final', c_ubyte * 32),
+]
+assert sizeof(evp_cipher_ctx_st) == 140, sizeof(evp_cipher_ctx_st)
+assert alignment(evp_cipher_ctx_st) == 4, alignment(evp_cipher_ctx_st)
+class evp_Encode_Ctx_st(Structure):
+    pass
+evp_Encode_Ctx_st._fields_ = [
+    ('num', c_int),
+    ('length', c_int),
+    ('enc_data', c_ubyte * 80),
+    ('line_num', c_int),
+    ('expect_nl', c_int),
+]
+assert sizeof(evp_Encode_Ctx_st) == 96, sizeof(evp_Encode_Ctx_st)
+assert alignment(evp_Encode_Ctx_st) == 4, alignment(evp_Encode_Ctx_st)
+EVP_ENCODE_CTX = evp_Encode_Ctx_st
+EVP_PBE_KEYGEN = CFUNCTYPE(c_int, POINTER(EVP_CIPHER_CTX), STRING, c_int, POINTER(ASN1_TYPE), POINTER(EVP_CIPHER), POINTER(EVP_MD), c_int)
+class lhash_node_st(Structure):
+    pass
+lhash_node_st._fields_ = [
+    ('data', c_void_p),
+    ('next', POINTER(lhash_node_st)),
+    ('hash', c_ulong),
+]
+assert sizeof(lhash_node_st) == 12, sizeof(lhash_node_st)
+assert alignment(lhash_node_st) == 4, alignment(lhash_node_st)
+LHASH_NODE = lhash_node_st
+LHASH_COMP_FN_TYPE = CFUNCTYPE(c_int, c_void_p, c_void_p)
+LHASH_HASH_FN_TYPE = CFUNCTYPE(c_ulong, c_void_p)
+LHASH_DOALL_FN_TYPE = CFUNCTYPE(None, c_void_p)
+LHASH_DOALL_ARG_FN_TYPE = CFUNCTYPE(None, c_void_p, c_void_p)
+class lhash_st(Structure):
+    pass
+lhash_st._fields_ = [
+    ('b', POINTER(POINTER(LHASH_NODE))),
+    ('comp', LHASH_COMP_FN_TYPE),
+    ('hash', LHASH_HASH_FN_TYPE),
+    ('num_nodes', c_uint),
+    ('num_alloc_nodes', c_uint),
+    ('p', c_uint),
+    ('pmax', c_uint),
+    ('up_load', c_ulong),
+    ('down_load', c_ulong),
+    ('num_items', c_ulong),
+    ('num_expands', c_ulong),
+    ('num_expand_reallocs', c_ulong),
+    ('num_contracts', c_ulong),
+    ('num_contract_reallocs', c_ulong),
+    ('num_hash_calls', c_ulong),
+    ('num_comp_calls', c_ulong),
+    ('num_insert', c_ulong),
+    ('num_replace', c_ulong),
+    ('num_delete', c_ulong),
+    ('num_no_delete', c_ulong),
+    ('num_retrieve', c_ulong),
+    ('num_retrieve_miss', c_ulong),
+    ('num_hash_comps', c_ulong),
+    ('error', c_int),
+]
+assert sizeof(lhash_st) == 96, sizeof(lhash_st)
+assert alignment(lhash_st) == 4, alignment(lhash_st)
+LHASH = lhash_st
+class MD2state_st(Structure):
+    pass
+MD2state_st._fields_ = [
+    ('num', c_int),
+    ('data', c_ubyte * 16),
+    ('cksm', c_uint * 16),
+    ('state', c_uint * 16),
+]
+assert sizeof(MD2state_st) == 148, sizeof(MD2state_st)
+assert alignment(MD2state_st) == 4, alignment(MD2state_st)
+MD2_CTX = MD2state_st
+class MD4state_st(Structure):
+    pass
+MD4state_st._fields_ = [
+    ('A', c_uint),
+    ('B', c_uint),
+    ('C', c_uint),
+    ('D', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(MD4state_st) == 92, sizeof(MD4state_st)
+assert alignment(MD4state_st) == 4, alignment(MD4state_st)
+MD4_CTX = MD4state_st
+class MD5state_st(Structure):
+    pass
+MD5state_st._fields_ = [
+    ('A', c_uint),
+    ('B', c_uint),
+    ('C', c_uint),
+    ('D', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(MD5state_st) == 92, sizeof(MD5state_st)
+assert alignment(MD5state_st) == 4, alignment(MD5state_st)
+MD5_CTX = MD5state_st
+class mdc2_ctx_st(Structure):
+    pass
+mdc2_ctx_st._fields_ = [
+    ('num', c_int),
+    ('data', c_ubyte * 8),
+    ('h', DES_cblock),
+    ('hh', DES_cblock),
+    ('pad_type', c_int),
+]
+assert sizeof(mdc2_ctx_st) == 32, sizeof(mdc2_ctx_st)
+assert alignment(mdc2_ctx_st) == 4, alignment(mdc2_ctx_st)
+MDC2_CTX = mdc2_ctx_st
+class obj_name_st(Structure):
+    pass
+obj_name_st._fields_ = [
+    ('type', c_int),
+    ('alias', c_int),
+    ('name', STRING),
+    ('data', STRING),
+]
+assert sizeof(obj_name_st) == 16, sizeof(obj_name_st)
+assert alignment(obj_name_st) == 4, alignment(obj_name_st)
+OBJ_NAME = obj_name_st
+ASN1_TIME = asn1_string_st
+ASN1_NULL = c_int
+EVP_PKEY = evp_pkey_st
+class x509_st(Structure):
+    pass
+X509 = x509_st
+class X509_algor_st(Structure):
+    pass
+X509_ALGOR = X509_algor_st
+class X509_crl_st(Structure):
+    pass
+X509_CRL = X509_crl_st
+class X509_name_st(Structure):
+    pass
+X509_NAME = X509_name_st
+class x509_store_st(Structure):
+    pass
+X509_STORE = x509_store_st
+class x509_store_ctx_st(Structure):
+    pass
+X509_STORE_CTX = x509_store_ctx_st
+engine_st._fields_ = [
+]
+class PEM_Encode_Seal_st(Structure):
+    pass
+PEM_Encode_Seal_st._fields_ = [
+    ('encode', EVP_ENCODE_CTX),
+    ('md', EVP_MD_CTX),
+    ('cipher', EVP_CIPHER_CTX),
+]
+assert sizeof(PEM_Encode_Seal_st) == 252, sizeof(PEM_Encode_Seal_st)
+assert alignment(PEM_Encode_Seal_st) == 4, alignment(PEM_Encode_Seal_st)
+PEM_ENCODE_SEAL_CTX = PEM_Encode_Seal_st
+class pem_recip_st(Structure):
+    pass
+pem_recip_st._fields_ = [
+    ('name', STRING),
+    ('dn', POINTER(X509_NAME)),
+    ('cipher', c_int),
+    ('key_enc', c_int),
+]
+assert sizeof(pem_recip_st) == 16, sizeof(pem_recip_st)
+assert alignment(pem_recip_st) == 4, alignment(pem_recip_st)
+PEM_USER = pem_recip_st
+class pem_ctx_st(Structure):
+    pass
+class N10pem_ctx_st4DOLLAR_16E(Structure):
+    pass
+N10pem_ctx_st4DOLLAR_16E._fields_ = [
+    ('version', c_int),
+    ('mode', c_int),
+]
+assert sizeof(N10pem_ctx_st4DOLLAR_16E) == 8, sizeof(N10pem_ctx_st4DOLLAR_16E)
+assert alignment(N10pem_ctx_st4DOLLAR_16E) == 4, alignment(N10pem_ctx_st4DOLLAR_16E)
+class N10pem_ctx_st4DOLLAR_17E(Structure):
+    pass
+N10pem_ctx_st4DOLLAR_17E._fields_ = [
+    ('cipher', c_int),
+]
+assert sizeof(N10pem_ctx_st4DOLLAR_17E) == 4, sizeof(N10pem_ctx_st4DOLLAR_17E)
+assert alignment(N10pem_ctx_st4DOLLAR_17E) == 4, alignment(N10pem_ctx_st4DOLLAR_17E)
+pem_ctx_st._fields_ = [
+    ('type', c_int),
+    ('proc_type', N10pem_ctx_st4DOLLAR_16E),
+    ('domain', STRING),
+    ('DEK_info', N10pem_ctx_st4DOLLAR_17E),
+    ('originator', POINTER(PEM_USER)),
+    ('num_recipient', c_int),
+    ('recipient', POINTER(POINTER(PEM_USER))),
+    ('x509_chain', POINTER(STACK)),
+    ('md', POINTER(EVP_MD)),
+    ('md_enc', c_int),
+    ('md_len', c_int),
+    ('md_data', STRING),
+    ('dec', POINTER(EVP_CIPHER)),
+    ('key_len', c_int),
+    ('key', POINTER(c_ubyte)),
+    ('data_enc', c_int),
+    ('data_len', c_int),
+    ('data', POINTER(c_ubyte)),
+]
+assert sizeof(pem_ctx_st) == 76, sizeof(pem_ctx_st)
+assert alignment(pem_ctx_st) == 4, alignment(pem_ctx_st)
+PEM_CTX = pem_ctx_st
+pem_password_cb = CFUNCTYPE(c_int, STRING, c_int, c_int, c_void_p)
+class pkcs7_issuer_and_serial_st(Structure):
+    pass
+pkcs7_issuer_and_serial_st._fields_ = [
+    ('issuer', POINTER(X509_NAME)),
+    ('serial', POINTER(ASN1_INTEGER)),
+]
+assert sizeof(pkcs7_issuer_and_serial_st) == 8, sizeof(pkcs7_issuer_and_serial_st)
+assert alignment(pkcs7_issuer_and_serial_st) == 4, alignment(pkcs7_issuer_and_serial_st)
+PKCS7_ISSUER_AND_SERIAL = pkcs7_issuer_and_serial_st
+class pkcs7_signer_info_st(Structure):
+    pass
+pkcs7_signer_info_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
+    ('digest_alg', POINTER(X509_ALGOR)),
+    ('auth_attr', POINTER(STACK)),
+    ('digest_enc_alg', POINTER(X509_ALGOR)),
+    ('enc_digest', POINTER(ASN1_OCTET_STRING)),
+    ('unauth_attr', POINTER(STACK)),
+    ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(pkcs7_signer_info_st) == 32, sizeof(pkcs7_signer_info_st)
+assert alignment(pkcs7_signer_info_st) == 4, alignment(pkcs7_signer_info_st)
+PKCS7_SIGNER_INFO = pkcs7_signer_info_st
+class pkcs7_recip_info_st(Structure):
+    pass
+pkcs7_recip_info_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('issuer_and_serial', POINTER(PKCS7_ISSUER_AND_SERIAL)),
+    ('key_enc_algor', POINTER(X509_ALGOR)),
+    ('enc_key', POINTER(ASN1_OCTET_STRING)),
+    ('cert', POINTER(X509)),
+]
+assert sizeof(pkcs7_recip_info_st) == 20, sizeof(pkcs7_recip_info_st)
+assert alignment(pkcs7_recip_info_st) == 4, alignment(pkcs7_recip_info_st)
+PKCS7_RECIP_INFO = pkcs7_recip_info_st
+class pkcs7_signed_st(Structure):
+    pass
+class pkcs7_st(Structure):
+    pass
+pkcs7_signed_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('md_algs', POINTER(STACK)),
+    ('cert', POINTER(STACK)),
+    ('crl', POINTER(STACK)),
+    ('signer_info', POINTER(STACK)),
+    ('contents', POINTER(pkcs7_st)),
+]
+assert sizeof(pkcs7_signed_st) == 24, sizeof(pkcs7_signed_st)
+assert alignment(pkcs7_signed_st) == 4, alignment(pkcs7_signed_st)
+PKCS7_SIGNED = pkcs7_signed_st
+class pkcs7_enc_content_st(Structure):
+    pass
+pkcs7_enc_content_st._fields_ = [
+    ('content_type', POINTER(ASN1_OBJECT)),
+    ('algorithm', POINTER(X509_ALGOR)),
+    ('enc_data', POINTER(ASN1_OCTET_STRING)),
+    ('cipher', POINTER(EVP_CIPHER)),
+]
+assert sizeof(pkcs7_enc_content_st) == 16, sizeof(pkcs7_enc_content_st)
+assert alignment(pkcs7_enc_content_st) == 4, alignment(pkcs7_enc_content_st)
+PKCS7_ENC_CONTENT = pkcs7_enc_content_st
+class pkcs7_enveloped_st(Structure):
+    pass
+pkcs7_enveloped_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('recipientinfo', POINTER(STACK)),
+    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+]
+assert sizeof(pkcs7_enveloped_st) == 12, sizeof(pkcs7_enveloped_st)
+assert alignment(pkcs7_enveloped_st) == 4, alignment(pkcs7_enveloped_st)
+PKCS7_ENVELOPE = pkcs7_enveloped_st
+class pkcs7_signedandenveloped_st(Structure):
+    pass
+pkcs7_signedandenveloped_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('md_algs', POINTER(STACK)),
+    ('cert', POINTER(STACK)),
+    ('crl', POINTER(STACK)),
+    ('signer_info', POINTER(STACK)),
+    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+    ('recipientinfo', POINTER(STACK)),
+]
+assert sizeof(pkcs7_signedandenveloped_st) == 28, sizeof(pkcs7_signedandenveloped_st)
+assert alignment(pkcs7_signedandenveloped_st) == 4, alignment(pkcs7_signedandenveloped_st)
+PKCS7_SIGN_ENVELOPE = pkcs7_signedandenveloped_st
+class pkcs7_digest_st(Structure):
+    pass
+pkcs7_digest_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('md', POINTER(X509_ALGOR)),
+    ('contents', POINTER(pkcs7_st)),
+    ('digest', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(pkcs7_digest_st) == 16, sizeof(pkcs7_digest_st)
+assert alignment(pkcs7_digest_st) == 4, alignment(pkcs7_digest_st)
+PKCS7_DIGEST = pkcs7_digest_st
+class pkcs7_encrypted_st(Structure):
+    pass
+pkcs7_encrypted_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('enc_data', POINTER(PKCS7_ENC_CONTENT)),
+]
+assert sizeof(pkcs7_encrypted_st) == 8, sizeof(pkcs7_encrypted_st)
+assert alignment(pkcs7_encrypted_st) == 4, alignment(pkcs7_encrypted_st)
+PKCS7_ENCRYPT = pkcs7_encrypted_st
+class N8pkcs7_st4DOLLAR_15E(Union):
+    pass
+N8pkcs7_st4DOLLAR_15E._fields_ = [
+    ('ptr', STRING),
+    ('data', POINTER(ASN1_OCTET_STRING)),
+    ('sign', POINTER(PKCS7_SIGNED)),
+    ('enveloped', POINTER(PKCS7_ENVELOPE)),
+    ('signed_and_enveloped', POINTER(PKCS7_SIGN_ENVELOPE)),
+    ('digest', POINTER(PKCS7_DIGEST)),
+    ('encrypted', POINTER(PKCS7_ENCRYPT)),
+    ('other', POINTER(ASN1_TYPE)),
+]
+assert sizeof(N8pkcs7_st4DOLLAR_15E) == 4, sizeof(N8pkcs7_st4DOLLAR_15E)
+assert alignment(N8pkcs7_st4DOLLAR_15E) == 4, alignment(N8pkcs7_st4DOLLAR_15E)
+pkcs7_st._fields_ = [
+    ('asn1', POINTER(c_ubyte)),
+    ('length', c_long),
+    ('state', c_int),
+    ('detached', c_int),
+    ('type', POINTER(ASN1_OBJECT)),
+    ('d', N8pkcs7_st4DOLLAR_15E),
+]
+assert sizeof(pkcs7_st) == 24, sizeof(pkcs7_st)
+assert alignment(pkcs7_st) == 4, alignment(pkcs7_st)
+PKCS7 = pkcs7_st
+class rc2_key_st(Structure):
+    pass
+rc2_key_st._fields_ = [
+    ('data', c_uint * 64),
+]
+assert sizeof(rc2_key_st) == 256, sizeof(rc2_key_st)
+assert alignment(rc2_key_st) == 4, alignment(rc2_key_st)
+RC2_KEY = rc2_key_st
+class rc4_key_st(Structure):
+    pass
+rc4_key_st._fields_ = [
+    ('x', c_ubyte),
+    ('y', c_ubyte),
+    ('data', c_ubyte * 256),
+]
+assert sizeof(rc4_key_st) == 258, sizeof(rc4_key_st)
+assert alignment(rc4_key_st) == 1, alignment(rc4_key_st)
+RC4_KEY = rc4_key_st
+class rc5_key_st(Structure):
+    pass
+rc5_key_st._fields_ = [
+    ('rounds', c_int),
+    ('data', c_ulong * 34),
+]
+assert sizeof(rc5_key_st) == 140, sizeof(rc5_key_st)
+assert alignment(rc5_key_st) == 4, alignment(rc5_key_st)
+RC5_32_KEY = rc5_key_st
+class RIPEMD160state_st(Structure):
+    pass
+RIPEMD160state_st._fields_ = [
+    ('A', c_uint),
+    ('B', c_uint),
+    ('C', c_uint),
+    ('D', c_uint),
+    ('E', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(RIPEMD160state_st) == 96, sizeof(RIPEMD160state_st)
+assert alignment(RIPEMD160state_st) == 4, alignment(RIPEMD160state_st)
+RIPEMD160_CTX = RIPEMD160state_st
+RSA = rsa_st
+class rsa_meth_st(Structure):
+    pass
+rsa_meth_st._fields_ = [
+    ('name', STRING),
+    ('rsa_pub_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_pub_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_priv_enc', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_priv_dec', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), POINTER(c_ubyte), POINTER(RSA), c_int)),
+    ('rsa_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(RSA))),
+    ('bn_mod_exp', CFUNCTYPE(c_int, POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BIGNUM), POINTER(BN_CTX), POINTER(BN_MONT_CTX))),
+    ('init', CFUNCTYPE(c_int, POINTER(RSA))),
+    ('finish', CFUNCTYPE(c_int, POINTER(RSA))),
+    ('flags', c_int),
+    ('app_data', STRING),
+    ('rsa_sign', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), POINTER(c_uint), POINTER(RSA))),
+    ('rsa_verify', CFUNCTYPE(c_int, c_int, POINTER(c_ubyte), c_uint, POINTER(c_ubyte), c_uint, POINTER(RSA))),
+]
+assert sizeof(rsa_meth_st) == 52, sizeof(rsa_meth_st)
+assert alignment(rsa_meth_st) == 4, alignment(rsa_meth_st)
+RSA_METHOD = rsa_meth_st
+rsa_st._fields_ = [
+    ('pad', c_int),
+    ('version', c_long),
+    ('meth', POINTER(RSA_METHOD)),
+    ('engine', POINTER(ENGINE)),
+    ('n', POINTER(BIGNUM)),
+    ('e', POINTER(BIGNUM)),
+    ('d', POINTER(BIGNUM)),
+    ('p', POINTER(BIGNUM)),
+    ('q', POINTER(BIGNUM)),
+    ('dmp1', POINTER(BIGNUM)),
+    ('dmq1', POINTER(BIGNUM)),
+    ('iqmp', POINTER(BIGNUM)),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('references', c_int),
+    ('flags', c_int),
+    ('_method_mod_n', POINTER(BN_MONT_CTX)),
+    ('_method_mod_p', POINTER(BN_MONT_CTX)),
+    ('_method_mod_q', POINTER(BN_MONT_CTX)),
+    ('bignum_data', STRING),
+    ('blinding', POINTER(BN_BLINDING)),
+]
+assert sizeof(rsa_st) == 84, sizeof(rsa_st)
+assert alignment(rsa_st) == 4, alignment(rsa_st)
+openssl_fptr = CFUNCTYPE(None)
+class SHAstate_st(Structure):
+    pass
+SHAstate_st._fields_ = [
+    ('h0', c_uint),
+    ('h1', c_uint),
+    ('h2', c_uint),
+    ('h3', c_uint),
+    ('h4', c_uint),
+    ('Nl', c_uint),
+    ('Nh', c_uint),
+    ('data', c_uint * 16),
+    ('num', c_int),
+]
+assert sizeof(SHAstate_st) == 96, sizeof(SHAstate_st)
+assert alignment(SHAstate_st) == 4, alignment(SHAstate_st)
+SHA_CTX = SHAstate_st
+class ssl_st(Structure):
+    pass
+ssl_crock_st = POINTER(ssl_st)
+class ssl_cipher_st(Structure):
+    pass
+ssl_cipher_st._fields_ = [
+    ('valid', c_int),
+    ('name', STRING),
+    ('id', c_ulong),
+    ('algorithms', c_ulong),
+    ('algo_strength', c_ulong),
+    ('algorithm2', c_ulong),
+    ('strength_bits', c_int),
+    ('alg_bits', c_int),
+    ('mask', c_ulong),
+    ('mask_strength', c_ulong),
+]
+assert sizeof(ssl_cipher_st) == 40, sizeof(ssl_cipher_st)
+assert alignment(ssl_cipher_st) == 4, alignment(ssl_cipher_st)
+SSL_CIPHER = ssl_cipher_st
+SSL = ssl_st
+class ssl_ctx_st(Structure):
+    pass
+SSL_CTX = ssl_ctx_st
+class ssl_method_st(Structure):
+    pass
+class ssl3_enc_method(Structure):
+    pass
+ssl_method_st._fields_ = [
+    ('version', c_int),
+    ('ssl_new', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_clear', CFUNCTYPE(None, POINTER(SSL))),
+    ('ssl_free', CFUNCTYPE(None, POINTER(SSL))),
+    ('ssl_accept', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_connect', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_read', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+    ('ssl_peek', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+    ('ssl_write', CFUNCTYPE(c_int, POINTER(SSL), c_void_p, c_int)),
+    ('ssl_shutdown', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_renegotiate', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_renegotiate_check', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('ssl_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, c_long, c_void_p)),
+    ('ssl_ctx_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, c_long, c_void_p)),
+    ('get_cipher_by_char', CFUNCTYPE(POINTER(SSL_CIPHER), POINTER(c_ubyte))),
+    ('put_cipher_by_char', CFUNCTYPE(c_int, POINTER(SSL_CIPHER), POINTER(c_ubyte))),
+    ('ssl_pending', CFUNCTYPE(c_int, POINTER(SSL))),
+    ('num_ciphers', CFUNCTYPE(c_int)),
+    ('get_cipher', CFUNCTYPE(POINTER(SSL_CIPHER), c_uint)),
+    ('get_ssl_method', CFUNCTYPE(POINTER(ssl_method_st), c_int)),
+    ('get_timeout', CFUNCTYPE(c_long)),
+    ('ssl3_enc', POINTER(ssl3_enc_method)),
+    ('ssl_version', CFUNCTYPE(c_int)),
+    ('ssl_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL), c_int, CFUNCTYPE(None))),
+    ('ssl_ctx_callback_ctrl', CFUNCTYPE(c_long, POINTER(SSL_CTX), c_int, CFUNCTYPE(None))),
+]
+assert sizeof(ssl_method_st) == 100, sizeof(ssl_method_st)
+assert alignment(ssl_method_st) == 4, alignment(ssl_method_st)
+ssl3_enc_method._fields_ = [
+]
+SSL_METHOD = ssl_method_st
+class ssl_session_st(Structure):
+    pass
+class sess_cert_st(Structure):
+    pass
+ssl_session_st._fields_ = [
+    ('ssl_version', c_int),
+    ('key_arg_length', c_uint),
+    ('key_arg', c_ubyte * 8),
+    ('master_key_length', c_int),
+    ('master_key', c_ubyte * 48),
+    ('session_id_length', c_uint),
+    ('session_id', c_ubyte * 32),
+    ('sid_ctx_length', c_uint),
+    ('sid_ctx', c_ubyte * 32),
+    ('not_resumable', c_int),
+    ('sess_cert', POINTER(sess_cert_st)),
+    ('peer', POINTER(X509)),
+    ('verify_result', c_long),
+    ('references', c_int),
+    ('timeout', c_long),
+    ('time', c_long),
+    ('compress_meth', c_int),
+    ('cipher', POINTER(SSL_CIPHER)),
+    ('cipher_id', c_ulong),
+    ('ciphers', POINTER(STACK)),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('prev', POINTER(ssl_session_st)),
+    ('next', POINTER(ssl_session_st)),
+]
+assert sizeof(ssl_session_st) == 200, sizeof(ssl_session_st)
+assert alignment(ssl_session_st) == 4, alignment(ssl_session_st)
+sess_cert_st._fields_ = [
+]
+SSL_SESSION = ssl_session_st
+GEN_SESSION_CB = CFUNCTYPE(c_int, POINTER(SSL), POINTER(c_ubyte), POINTER(c_uint))
+class ssl_comp_st(Structure):
+    pass
+ssl_comp_st._fields_ = [
+    ('id', c_int),
+    ('name', STRING),
+    ('method', POINTER(COMP_METHOD)),
+]
+assert sizeof(ssl_comp_st) == 12, sizeof(ssl_comp_st)
+assert alignment(ssl_comp_st) == 4, alignment(ssl_comp_st)
+SSL_COMP = ssl_comp_st
+class N10ssl_ctx_st4DOLLAR_18E(Structure):
+    pass
+N10ssl_ctx_st4DOLLAR_18E._fields_ = [
+    ('sess_connect', c_int),
+    ('sess_connect_renegotiate', c_int),
+    ('sess_connect_good', c_int),
+    ('sess_accept', c_int),
+    ('sess_accept_renegotiate', c_int),
+    ('sess_accept_good', c_int),
+    ('sess_miss', c_int),
+    ('sess_timeout', c_int),
+    ('sess_cache_full', c_int),
+    ('sess_hit', c_int),
+    ('sess_cb_hit', c_int),
+]
+assert sizeof(N10ssl_ctx_st4DOLLAR_18E) == 44, sizeof(N10ssl_ctx_st4DOLLAR_18E)
+assert alignment(N10ssl_ctx_st4DOLLAR_18E) == 4, alignment(N10ssl_ctx_st4DOLLAR_18E)
+class cert_st(Structure):
+    pass
+ssl_ctx_st._fields_ = [
+    ('method', POINTER(SSL_METHOD)),
+    ('cipher_list', POINTER(STACK)),
+    ('cipher_list_by_id', POINTER(STACK)),
+    ('cert_store', POINTER(x509_store_st)),
+    ('sessions', POINTER(lhash_st)),
+    ('session_cache_size', c_ulong),
+    ('session_cache_head', POINTER(ssl_session_st)),
+    ('session_cache_tail', POINTER(ssl_session_st)),
+    ('session_cache_mode', c_int),
+    ('session_timeout', c_long),
+    ('new_session_cb', CFUNCTYPE(c_int, POINTER(ssl_st), POINTER(SSL_SESSION))),
+    ('remove_session_cb', CFUNCTYPE(None, POINTER(ssl_ctx_st), POINTER(SSL_SESSION))),
+    ('get_session_cb', CFUNCTYPE(POINTER(SSL_SESSION), POINTER(ssl_st), POINTER(c_ubyte), c_int, POINTER(c_int))),
+    ('stats', N10ssl_ctx_st4DOLLAR_18E),
+    ('references', c_int),
+    ('app_verify_callback', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), c_void_p)),
+    ('app_verify_arg', c_void_p),
+    ('default_passwd_callback', POINTER(pem_password_cb)),
+    ('default_passwd_callback_userdata', c_void_p),
+    ('client_cert_cb', CFUNCTYPE(c_int, POINTER(SSL), POINTER(POINTER(X509)), POINTER(POINTER(EVP_PKEY)))),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('rsa_md5', POINTER(EVP_MD)),
+    ('md5', POINTER(EVP_MD)),
+    ('sha1', POINTER(EVP_MD)),
+    ('extra_certs', POINTER(STACK)),
+    ('comp_methods', POINTER(STACK)),
+    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
+    ('client_CA', POINTER(STACK)),
+    ('options', c_ulong),
+    ('mode', c_ulong),
+    ('max_cert_list', c_long),
+    ('cert', POINTER(cert_st)),
+    ('read_ahead', c_int),
+    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
+    ('msg_callback_arg', c_void_p),
+    ('verify_mode', c_int),
+    ('verify_depth', c_int),
+    ('sid_ctx_length', c_uint),
+    ('sid_ctx', c_ubyte * 32),
+    ('default_verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('generate_session_id', GEN_SESSION_CB),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('quiet_shutdown', c_int),
+]
+assert sizeof(ssl_ctx_st) == 248, sizeof(ssl_ctx_st)
+assert alignment(ssl_ctx_st) == 4, alignment(ssl_ctx_st)
+cert_st._fields_ = [
+]
+class ssl2_state_st(Structure):
+    pass
+class ssl3_state_st(Structure):
+    pass
+ssl_st._fields_ = [
+    ('version', c_int),
+    ('type', c_int),
+    ('method', POINTER(SSL_METHOD)),
+    ('rbio', POINTER(BIO)),
+    ('wbio', POINTER(BIO)),
+    ('bbio', POINTER(BIO)),
+    ('rwstate', c_int),
+    ('in_handshake', c_int),
+    ('handshake_func', CFUNCTYPE(c_int)),
+    ('server', c_int),
+    ('new_session', c_int),
+    ('quiet_shutdown', c_int),
+    ('shutdown', c_int),
+    ('state', c_int),
+    ('rstate', c_int),
+    ('init_buf', POINTER(BUF_MEM)),
+    ('init_msg', c_void_p),
+    ('init_num', c_int),
+    ('init_off', c_int),
+    ('packet', POINTER(c_ubyte)),
+    ('packet_length', c_uint),
+    ('s2', POINTER(ssl2_state_st)),
+    ('s3', POINTER(ssl3_state_st)),
+    ('read_ahead', c_int),
+    ('msg_callback', CFUNCTYPE(None, c_int, c_int, c_int, c_void_p, c_ulong, POINTER(SSL), c_void_p)),
+    ('msg_callback_arg', c_void_p),
+    ('hit', c_int),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('cipher_list', POINTER(STACK)),
+    ('cipher_list_by_id', POINTER(STACK)),
+    ('enc_read_ctx', POINTER(EVP_CIPHER_CTX)),
+    ('read_hash', POINTER(EVP_MD)),
+    ('expand', POINTER(COMP_CTX)),
+    ('enc_write_ctx', POINTER(EVP_CIPHER_CTX)),
+    ('write_hash', POINTER(EVP_MD)),
+    ('compress', POINTER(COMP_CTX)),
+    ('cert', POINTER(cert_st)),
+    ('sid_ctx_length', c_uint),
+    ('sid_ctx', c_ubyte * 32),
+    ('session', POINTER(SSL_SESSION)),
+    ('generate_session_id', GEN_SESSION_CB),
+    ('verify_mode', c_int),
+    ('verify_depth', c_int),
+    ('verify_callback', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('info_callback', CFUNCTYPE(None, POINTER(SSL), c_int, c_int)),
+    ('error', c_int),
+    ('error_code', c_int),
+    ('ctx', POINTER(SSL_CTX)),
+    ('debug', c_int),
+    ('verify_result', c_long),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('client_CA', POINTER(STACK)),
+    ('references', c_int),
+    ('options', c_ulong),
+    ('mode', c_ulong),
+    ('max_cert_list', c_long),
+    ('first_packet', c_int),
+    ('client_version', c_int),
+]
+assert sizeof(ssl_st) == 268, sizeof(ssl_st)
+assert alignment(ssl_st) == 4, alignment(ssl_st)
+class N13ssl2_state_st4DOLLAR_19E(Structure):
+    pass
+N13ssl2_state_st4DOLLAR_19E._fields_ = [
+    ('conn_id_length', c_uint),
+    ('cert_type', c_uint),
+    ('cert_length', c_uint),
+    ('csl', c_uint),
+    ('clear', c_uint),
+    ('enc', c_uint),
+    ('ccl', c_ubyte * 32),
+    ('cipher_spec_length', c_uint),
+    ('session_id_length', c_uint),
+    ('clen', c_uint),
+    ('rlen', c_uint),
+]
+assert sizeof(N13ssl2_state_st4DOLLAR_19E) == 72, sizeof(N13ssl2_state_st4DOLLAR_19E)
+assert alignment(N13ssl2_state_st4DOLLAR_19E) == 4, alignment(N13ssl2_state_st4DOLLAR_19E)
+ssl2_state_st._fields_ = [
+    ('three_byte_header', c_int),
+    ('clear_text', c_int),
+    ('escape', c_int),
+    ('ssl2_rollback', c_int),
+    ('wnum', c_uint),
+    ('wpend_tot', c_int),
+    ('wpend_buf', POINTER(c_ubyte)),
+    ('wpend_off', c_int),
+    ('wpend_len', c_int),
+    ('wpend_ret', c_int),
+    ('rbuf_left', c_int),
+    ('rbuf_offs', c_int),
+    ('rbuf', POINTER(c_ubyte)),
+    ('wbuf', POINTER(c_ubyte)),
+    ('write_ptr', POINTER(c_ubyte)),
+    ('padding', c_uint),
+    ('rlength', c_uint),
+    ('ract_data_length', c_int),
+    ('wlength', c_uint),
+    ('wact_data_length', c_int),
+    ('ract_data', POINTER(c_ubyte)),
+    ('wact_data', POINTER(c_ubyte)),
+    ('mac_data', POINTER(c_ubyte)),
+    ('read_key', POINTER(c_ubyte)),
+    ('write_key', POINTER(c_ubyte)),
+    ('challenge_length', c_uint),
+    ('challenge', c_ubyte * 32),
+    ('conn_id_length', c_uint),
+    ('conn_id', c_ubyte * 16),
+    ('key_material_length', c_uint),
+    ('key_material', c_ubyte * 48),
+    ('read_sequence', c_ulong),
+    ('write_sequence', c_ulong),
+    ('tmp', N13ssl2_state_st4DOLLAR_19E),
+]
+assert sizeof(ssl2_state_st) == 288, sizeof(ssl2_state_st)
+assert alignment(ssl2_state_st) == 4, alignment(ssl2_state_st)
+SSL2_STATE = ssl2_state_st
+class ssl3_record_st(Structure):
+    pass
+ssl3_record_st._fields_ = [
+    ('type', c_int),
+    ('length', c_uint),
+    ('off', c_uint),
+    ('data', POINTER(c_ubyte)),
+    ('input', POINTER(c_ubyte)),
+    ('comp', POINTER(c_ubyte)),
+]
+assert sizeof(ssl3_record_st) == 24, sizeof(ssl3_record_st)
+assert alignment(ssl3_record_st) == 4, alignment(ssl3_record_st)
+SSL3_RECORD = ssl3_record_st
+class ssl3_buffer_st(Structure):
+    pass
+size_t = __darwin_size_t
+ssl3_buffer_st._fields_ = [
+    ('buf', POINTER(c_ubyte)),
+    ('len', size_t),
+    ('offset', c_int),
+    ('left', c_int),
+]
+assert sizeof(ssl3_buffer_st) == 16, sizeof(ssl3_buffer_st)
+assert alignment(ssl3_buffer_st) == 4, alignment(ssl3_buffer_st)
+SSL3_BUFFER = ssl3_buffer_st
+class N13ssl3_state_st4DOLLAR_20E(Structure):
+    pass
+N13ssl3_state_st4DOLLAR_20E._fields_ = [
+    ('cert_verify_md', c_ubyte * 72),
+    ('finish_md', c_ubyte * 72),
+    ('finish_md_len', c_int),
+    ('peer_finish_md', c_ubyte * 72),
+    ('peer_finish_md_len', c_int),
+    ('message_size', c_ulong),
+    ('message_type', c_int),
+    ('new_cipher', POINTER(SSL_CIPHER)),
+    ('dh', POINTER(DH)),
+    ('next_state', c_int),
+    ('reuse_message', c_int),
+    ('cert_req', c_int),
+    ('ctype_num', c_int),
+    ('ctype', c_char * 7),
+    ('ca_names', POINTER(STACK)),
+    ('use_rsa_tmp', c_int),
+    ('key_block_length', c_int),
+    ('key_block', POINTER(c_ubyte)),
+    ('new_sym_enc', POINTER(EVP_CIPHER)),
+    ('new_hash', POINTER(EVP_MD)),
+    ('new_compression', POINTER(SSL_COMP)),
+    ('cert_request', c_int),
+]
+assert sizeof(N13ssl3_state_st4DOLLAR_20E) == 296, sizeof(N13ssl3_state_st4DOLLAR_20E)
+assert alignment(N13ssl3_state_st4DOLLAR_20E) == 4, alignment(N13ssl3_state_st4DOLLAR_20E)
+ssl3_state_st._fields_ = [
+    ('flags', c_long),
+    ('delay_buf_pop_ret', c_int),
+    ('read_sequence', c_ubyte * 8),
+    ('read_mac_secret', c_ubyte * 36),
+    ('write_sequence', c_ubyte * 8),
+    ('write_mac_secret', c_ubyte * 36),
+    ('server_random', c_ubyte * 32),
+    ('client_random', c_ubyte * 32),
+    ('need_empty_fragments', c_int),
+    ('empty_fragment_done', c_int),
+    ('rbuf', SSL3_BUFFER),
+    ('wbuf', SSL3_BUFFER),
+    ('rrec', SSL3_RECORD),
+    ('wrec', SSL3_RECORD),
+    ('alert_fragment', c_ubyte * 2),
+    ('alert_fragment_len', c_uint),
+    ('handshake_fragment', c_ubyte * 4),
+    ('handshake_fragment_len', c_uint),
+    ('wnum', c_uint),
+    ('wpend_tot', c_int),
+    ('wpend_type', c_int),
+    ('wpend_ret', c_int),
+    ('wpend_buf', POINTER(c_ubyte)),
+    ('finish_dgst1', EVP_MD_CTX),
+    ('finish_dgst2', EVP_MD_CTX),
+    ('change_cipher_spec', c_int),
+    ('warn_alert', c_int),
+    ('fatal_alert', c_int),
+    ('alert_dispatch', c_int),
+    ('send_alert', c_ubyte * 2),
+    ('renegotiate', c_int),
+    ('total_renegotiations', c_int),
+    ('num_renegotiations', c_int),
+    ('in_read_app_data', c_int),
+    ('tmp', N13ssl3_state_st4DOLLAR_20E),
+]
+assert sizeof(ssl3_state_st) == 648, sizeof(ssl3_state_st)
+assert alignment(ssl3_state_st) == 4, alignment(ssl3_state_st)
+SSL3_STATE = ssl3_state_st
+stack_st._fields_ = [
+    ('num', c_int),
+    ('data', POINTER(STRING)),
+    ('sorted', c_int),
+    ('num_alloc', c_int),
+    ('comp', CFUNCTYPE(c_int, POINTER(STRING), POINTER(STRING))),
+]
+assert sizeof(stack_st) == 20, sizeof(stack_st)
+assert alignment(stack_st) == 4, alignment(stack_st)
+class ui_st(Structure):
+    pass
+ui_st._fields_ = [
+]
+UI = ui_st
+class ui_method_st(Structure):
+    pass
+ui_method_st._fields_ = [
+]
+UI_METHOD = ui_method_st
+class ui_string_st(Structure):
+    pass
+ui_string_st._fields_ = [
+]
+UI_STRING = ui_string_st
+
+# values for enumeration 'UI_string_types'
+UI_string_types = c_int # enum
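+# ctypes has no dedicated enum type, so the generator maps every C enum to
+# c_int (as with UI_string_types above) and typically emits the enumerators
+# as plain integer constants.  A minimal sketch of that convention with
+# hypothetical values, not the real UI_string_types members:
+_EXAMPLE_UIT_PROMPT = 1
+_EXAMPLE_UIT_VERIFY = 2
+def _example_uit_name(t):
+    # C code would switch on the enum; here we dispatch on the plain int
+    return {_EXAMPLE_UIT_PROMPT: 'prompt',
+            _EXAMPLE_UIT_VERIFY: 'verify'}.get(t, 'unknown')
+assert _example_uit_name(_EXAMPLE_UIT_VERIFY) == 'verify'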
+class X509_objects_st(Structure):
+    pass
+X509_objects_st._fields_ = [
+    ('nid', c_int),
+    ('a2i', CFUNCTYPE(c_int)),
+    ('i2a', CFUNCTYPE(c_int)),
+]
+assert sizeof(X509_objects_st) == 12, sizeof(X509_objects_st)
+assert alignment(X509_objects_st) == 4, alignment(X509_objects_st)
+X509_OBJECTS = X509_objects_st
+X509_algor_st._fields_ = [
+    ('algorithm', POINTER(ASN1_OBJECT)),
+    ('parameter', POINTER(ASN1_TYPE)),
+]
+assert sizeof(X509_algor_st) == 8, sizeof(X509_algor_st)
+assert alignment(X509_algor_st) == 4, alignment(X509_algor_st)
+class X509_val_st(Structure):
+    pass
+X509_val_st._fields_ = [
+    ('notBefore', POINTER(ASN1_TIME)),
+    ('notAfter', POINTER(ASN1_TIME)),
+]
+assert sizeof(X509_val_st) == 8, sizeof(X509_val_st)
+assert alignment(X509_val_st) == 4, alignment(X509_val_st)
+X509_VAL = X509_val_st
+class X509_pubkey_st(Structure):
+    pass
+X509_pubkey_st._fields_ = [
+    ('algor', POINTER(X509_ALGOR)),
+    ('public_key', POINTER(ASN1_BIT_STRING)),
+    ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(X509_pubkey_st) == 12, sizeof(X509_pubkey_st)
+assert alignment(X509_pubkey_st) == 4, alignment(X509_pubkey_st)
+X509_PUBKEY = X509_pubkey_st
+class X509_sig_st(Structure):
+    pass
+X509_sig_st._fields_ = [
+    ('algor', POINTER(X509_ALGOR)),
+    ('digest', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(X509_sig_st) == 8, sizeof(X509_sig_st)
+assert alignment(X509_sig_st) == 4, alignment(X509_sig_st)
+X509_SIG = X509_sig_st
+class X509_name_entry_st(Structure):
+    pass
+X509_name_entry_st._fields_ = [
+    ('object', POINTER(ASN1_OBJECT)),
+    ('value', POINTER(ASN1_STRING)),
+    ('set', c_int),
+    ('size', c_int),
+]
+assert sizeof(X509_name_entry_st) == 16, sizeof(X509_name_entry_st)
+assert alignment(X509_name_entry_st) == 4, alignment(X509_name_entry_st)
+X509_NAME_ENTRY = X509_name_entry_st
+X509_name_st._fields_ = [
+    ('entries', POINTER(STACK)),
+    ('modified', c_int),
+    ('bytes', POINTER(BUF_MEM)),
+    ('hash', c_ulong),
+]
+assert sizeof(X509_name_st) == 16, sizeof(X509_name_st)
+assert alignment(X509_name_st) == 4, alignment(X509_name_st)
+class X509_extension_st(Structure):
+    pass
+X509_extension_st._fields_ = [
+    ('object', POINTER(ASN1_OBJECT)),
+    ('critical', ASN1_BOOLEAN),
+    ('value', POINTER(ASN1_OCTET_STRING)),
+]
+assert sizeof(X509_extension_st) == 12, sizeof(X509_extension_st)
+assert alignment(X509_extension_st) == 4, alignment(X509_extension_st)
+X509_EXTENSION = X509_extension_st
+class x509_attributes_st(Structure):
+    pass
+class N18x509_attributes_st4DOLLAR_13E(Union):
+    pass
+N18x509_attributes_st4DOLLAR_13E._fields_ = [
+    ('ptr', STRING),
+    ('set', POINTER(STACK)),
+    ('single', POINTER(ASN1_TYPE)),
+]
+assert sizeof(N18x509_attributes_st4DOLLAR_13E) == 4, sizeof(N18x509_attributes_st4DOLLAR_13E)
+assert alignment(N18x509_attributes_st4DOLLAR_13E) == 4, alignment(N18x509_attributes_st4DOLLAR_13E)
+x509_attributes_st._fields_ = [
+    ('object', POINTER(ASN1_OBJECT)),
+    ('single', c_int),
+    ('value', N18x509_attributes_st4DOLLAR_13E),
+]
+assert sizeof(x509_attributes_st) == 12, sizeof(x509_attributes_st)
+assert alignment(x509_attributes_st) == 4, alignment(x509_attributes_st)
+X509_ATTRIBUTE = x509_attributes_st
+class X509_req_info_st(Structure):
+    pass
+X509_req_info_st._fields_ = [
+    ('enc', ASN1_ENCODING),
+    ('version', POINTER(ASN1_INTEGER)),
+    ('subject', POINTER(X509_NAME)),
+    ('pubkey', POINTER(X509_PUBKEY)),
+    ('attributes', POINTER(STACK)),
+]
+assert sizeof(X509_req_info_st) == 28, sizeof(X509_req_info_st)
+assert alignment(X509_req_info_st) == 4, alignment(X509_req_info_st)
+X509_REQ_INFO = X509_req_info_st
+class X509_req_st(Structure):
+    pass
+X509_req_st._fields_ = [
+    ('req_info', POINTER(X509_REQ_INFO)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+    ('references', c_int),
+]
+assert sizeof(X509_req_st) == 16, sizeof(X509_req_st)
+assert alignment(X509_req_st) == 4, alignment(X509_req_st)
+X509_REQ = X509_req_st
+class x509_cinf_st(Structure):
+    pass
+x509_cinf_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('serialNumber', POINTER(ASN1_INTEGER)),
+    ('signature', POINTER(X509_ALGOR)),
+    ('issuer', POINTER(X509_NAME)),
+    ('validity', POINTER(X509_VAL)),
+    ('subject', POINTER(X509_NAME)),
+    ('key', POINTER(X509_PUBKEY)),
+    ('issuerUID', POINTER(ASN1_BIT_STRING)),
+    ('subjectUID', POINTER(ASN1_BIT_STRING)),
+    ('extensions', POINTER(STACK)),
+]
+assert sizeof(x509_cinf_st) == 40, sizeof(x509_cinf_st)
+assert alignment(x509_cinf_st) == 4, alignment(x509_cinf_st)
+X509_CINF = x509_cinf_st
+class x509_cert_aux_st(Structure):
+    pass
+x509_cert_aux_st._fields_ = [
+    ('trust', POINTER(STACK)),
+    ('reject', POINTER(STACK)),
+    ('alias', POINTER(ASN1_UTF8STRING)),
+    ('keyid', POINTER(ASN1_OCTET_STRING)),
+    ('other', POINTER(STACK)),
+]
+assert sizeof(x509_cert_aux_st) == 20, sizeof(x509_cert_aux_st)
+assert alignment(x509_cert_aux_st) == 4, alignment(x509_cert_aux_st)
+X509_CERT_AUX = x509_cert_aux_st
+class AUTHORITY_KEYID_st(Structure):
+    pass
+x509_st._fields_ = [
+    ('cert_info', POINTER(X509_CINF)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+    ('valid', c_int),
+    ('references', c_int),
+    ('name', STRING),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('ex_pathlen', c_long),
+    ('ex_flags', c_ulong),
+    ('ex_kusage', c_ulong),
+    ('ex_xkusage', c_ulong),
+    ('ex_nscert', c_ulong),
+    ('skid', POINTER(ASN1_OCTET_STRING)),
+    ('akid', POINTER(AUTHORITY_KEYID_st)),
+    ('sha1_hash', c_ubyte * 20),
+    ('aux', POINTER(X509_CERT_AUX)),
+]
+assert sizeof(x509_st) == 84, sizeof(x509_st)
+assert alignment(x509_st) == 4, alignment(x509_st)
+AUTHORITY_KEYID_st._fields_ = [
+]
+class x509_trust_st(Structure):
+    pass
+x509_trust_st._fields_ = [
+    ('trust', c_int),
+    ('flags', c_int),
+    ('check_trust', CFUNCTYPE(c_int, POINTER(x509_trust_st), POINTER(X509), c_int)),
+    ('name', STRING),
+    ('arg1', c_int),
+    ('arg2', c_void_p),
+]
+assert sizeof(x509_trust_st) == 24, sizeof(x509_trust_st)
+assert alignment(x509_trust_st) == 4, alignment(x509_trust_st)
+X509_TRUST = x509_trust_st
+class X509_revoked_st(Structure):
+    pass
+X509_revoked_st._fields_ = [
+    ('serialNumber', POINTER(ASN1_INTEGER)),
+    ('revocationDate', POINTER(ASN1_TIME)),
+    ('extensions', POINTER(STACK)),
+    ('sequence', c_int),
+]
+assert sizeof(X509_revoked_st) == 16, sizeof(X509_revoked_st)
+assert alignment(X509_revoked_st) == 4, alignment(X509_revoked_st)
+X509_REVOKED = X509_revoked_st
+class X509_crl_info_st(Structure):
+    pass
+X509_crl_info_st._fields_ = [
+    ('version', POINTER(ASN1_INTEGER)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('issuer', POINTER(X509_NAME)),
+    ('lastUpdate', POINTER(ASN1_TIME)),
+    ('nextUpdate', POINTER(ASN1_TIME)),
+    ('revoked', POINTER(STACK)),
+    ('extensions', POINTER(STACK)),
+    ('enc', ASN1_ENCODING),
+]
+assert sizeof(X509_crl_info_st) == 40, sizeof(X509_crl_info_st)
+assert alignment(X509_crl_info_st) == 4, alignment(X509_crl_info_st)
+X509_CRL_INFO = X509_crl_info_st
+X509_crl_st._fields_ = [
+    ('crl', POINTER(X509_CRL_INFO)),
+    ('sig_alg', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+    ('references', c_int),
+]
+assert sizeof(X509_crl_st) == 16, sizeof(X509_crl_st)
+assert alignment(X509_crl_st) == 4, alignment(X509_crl_st)
+class private_key_st(Structure):
+    pass
+private_key_st._fields_ = [
+    ('version', c_int),
+    ('enc_algor', POINTER(X509_ALGOR)),
+    ('enc_pkey', POINTER(ASN1_OCTET_STRING)),
+    ('dec_pkey', POINTER(EVP_PKEY)),
+    ('key_length', c_int),
+    ('key_data', STRING),
+    ('key_free', c_int),
+    ('cipher', EVP_CIPHER_INFO),
+    ('references', c_int),
+]
+assert sizeof(private_key_st) == 52, sizeof(private_key_st)
+assert alignment(private_key_st) == 4, alignment(private_key_st)
+X509_PKEY = private_key_st
+class X509_info_st(Structure):
+    pass
+X509_info_st._fields_ = [
+    ('x509', POINTER(X509)),
+    ('crl', POINTER(X509_CRL)),
+    ('x_pkey', POINTER(X509_PKEY)),
+    ('enc_cipher', EVP_CIPHER_INFO),
+    ('enc_len', c_int),
+    ('enc_data', STRING),
+    ('references', c_int),
+]
+assert sizeof(X509_info_st) == 44, sizeof(X509_info_st)
+assert alignment(X509_info_st) == 4, alignment(X509_info_st)
+X509_INFO = X509_info_st
+class Netscape_spkac_st(Structure):
+    pass
+Netscape_spkac_st._fields_ = [
+    ('pubkey', POINTER(X509_PUBKEY)),
+    ('challenge', POINTER(ASN1_IA5STRING)),
+]
+assert sizeof(Netscape_spkac_st) == 8, sizeof(Netscape_spkac_st)
+assert alignment(Netscape_spkac_st) == 4, alignment(Netscape_spkac_st)
+NETSCAPE_SPKAC = Netscape_spkac_st
+class Netscape_spki_st(Structure):
+    pass
+Netscape_spki_st._fields_ = [
+    ('spkac', POINTER(NETSCAPE_SPKAC)),
+    ('sig_algor', POINTER(X509_ALGOR)),
+    ('signature', POINTER(ASN1_BIT_STRING)),
+]
+assert sizeof(Netscape_spki_st) == 12, sizeof(Netscape_spki_st)
+assert alignment(Netscape_spki_st) == 4, alignment(Netscape_spki_st)
+NETSCAPE_SPKI = Netscape_spki_st
+class Netscape_certificate_sequence(Structure):
+    pass
+Netscape_certificate_sequence._fields_ = [
+    ('type', POINTER(ASN1_OBJECT)),
+    ('certs', POINTER(STACK)),
+]
+assert sizeof(Netscape_certificate_sequence) == 8, sizeof(Netscape_certificate_sequence)
+assert alignment(Netscape_certificate_sequence) == 4, alignment(Netscape_certificate_sequence)
+NETSCAPE_CERT_SEQUENCE = Netscape_certificate_sequence
+class PBEPARAM_st(Structure):
+    pass
+PBEPARAM_st._fields_ = [
+    ('salt', POINTER(ASN1_OCTET_STRING)),
+    ('iter', POINTER(ASN1_INTEGER)),
+]
+assert sizeof(PBEPARAM_st) == 8, sizeof(PBEPARAM_st)
+assert alignment(PBEPARAM_st) == 4, alignment(PBEPARAM_st)
+PBEPARAM = PBEPARAM_st
+class PBE2PARAM_st(Structure):
+    pass
+PBE2PARAM_st._fields_ = [
+    ('keyfunc', POINTER(X509_ALGOR)),
+    ('encryption', POINTER(X509_ALGOR)),
+]
+assert sizeof(PBE2PARAM_st) == 8, sizeof(PBE2PARAM_st)
+assert alignment(PBE2PARAM_st) == 4, alignment(PBE2PARAM_st)
+PBE2PARAM = PBE2PARAM_st
+class PBKDF2PARAM_st(Structure):
+    pass
+PBKDF2PARAM_st._fields_ = [
+    ('salt', POINTER(ASN1_TYPE)),
+    ('iter', POINTER(ASN1_INTEGER)),
+    ('keylength', POINTER(ASN1_INTEGER)),
+    ('prf', POINTER(X509_ALGOR)),
+]
+assert sizeof(PBKDF2PARAM_st) == 16, sizeof(PBKDF2PARAM_st)
+assert alignment(PBKDF2PARAM_st) == 4, alignment(PBKDF2PARAM_st)
+PBKDF2PARAM = PBKDF2PARAM_st
+class pkcs8_priv_key_info_st(Structure):
+    pass
+pkcs8_priv_key_info_st._fields_ = [
+    ('broken', c_int),
+    ('version', POINTER(ASN1_INTEGER)),
+    ('pkeyalg', POINTER(X509_ALGOR)),
+    ('pkey', POINTER(ASN1_TYPE)),
+    ('attributes', POINTER(STACK)),
+]
+assert sizeof(pkcs8_priv_key_info_st) == 20, sizeof(pkcs8_priv_key_info_st)
+assert alignment(pkcs8_priv_key_info_st) == 4, alignment(pkcs8_priv_key_info_st)
+PKCS8_PRIV_KEY_INFO = pkcs8_priv_key_info_st
+class x509_hash_dir_st(Structure):
+    pass
+x509_hash_dir_st._fields_ = [
+    ('num_dirs', c_int),
+    ('dirs', POINTER(STRING)),
+    ('dirs_type', POINTER(c_int)),
+    ('num_dirs_alloced', c_int),
+]
+assert sizeof(x509_hash_dir_st) == 16, sizeof(x509_hash_dir_st)
+assert alignment(x509_hash_dir_st) == 4, alignment(x509_hash_dir_st)
+X509_HASH_DIR_CTX = x509_hash_dir_st
+class x509_file_st(Structure):
+    pass
+x509_file_st._fields_ = [
+    ('num_paths', c_int),
+    ('num_alloced', c_int),
+    ('paths', POINTER(STRING)),
+    ('path_type', POINTER(c_int)),
+]
+assert sizeof(x509_file_st) == 16, sizeof(x509_file_st)
+assert alignment(x509_file_st) == 4, alignment(x509_file_st)
+X509_CERT_FILE_CTX = x509_file_st
+class x509_object_st(Structure):
+    pass
+class N14x509_object_st4DOLLAR_14E(Union):
+    pass
+N14x509_object_st4DOLLAR_14E._fields_ = [
+    ('ptr', STRING),
+    ('x509', POINTER(X509)),
+    ('crl', POINTER(X509_CRL)),
+    ('pkey', POINTER(EVP_PKEY)),
+]
+assert sizeof(N14x509_object_st4DOLLAR_14E) == 4, sizeof(N14x509_object_st4DOLLAR_14E)
+assert alignment(N14x509_object_st4DOLLAR_14E) == 4, alignment(N14x509_object_st4DOLLAR_14E)
+x509_object_st._fields_ = [
+    ('type', c_int),
+    ('data', N14x509_object_st4DOLLAR_14E),
+]
+assert sizeof(x509_object_st) == 8, sizeof(x509_object_st)
+assert alignment(x509_object_st) == 4, alignment(x509_object_st)
+X509_OBJECT = x509_object_st
+class x509_lookup_st(Structure):
+    pass
+X509_LOOKUP = x509_lookup_st
+class x509_lookup_method_st(Structure):
+    pass
+x509_lookup_method_st._fields_ = [
+    ('name', STRING),
+    ('new_item', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+    ('free', CFUNCTYPE(None, POINTER(X509_LOOKUP))),
+    ('init', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+    ('shutdown', CFUNCTYPE(c_int, POINTER(X509_LOOKUP))),
+    ('ctrl', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_long, POINTER(STRING))),
+    ('get_by_subject', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(X509_OBJECT))),
+    ('get_by_issuer_serial', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(X509_NAME), POINTER(ASN1_INTEGER), POINTER(X509_OBJECT))),
+    ('get_by_fingerprint', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, POINTER(c_ubyte), c_int, POINTER(X509_OBJECT))),
+    ('get_by_alias', CFUNCTYPE(c_int, POINTER(X509_LOOKUP), c_int, STRING, c_int, POINTER(X509_OBJECT))),
+]
+assert sizeof(x509_lookup_method_st) == 40, sizeof(x509_lookup_method_st)
+assert alignment(x509_lookup_method_st) == 4, alignment(x509_lookup_method_st)
+X509_LOOKUP_METHOD = x509_lookup_method_st
+x509_store_st._fields_ = [
+    ('cache', c_int),
+    ('objs', POINTER(STACK)),
+    ('get_cert_methods', POINTER(STACK)),
+    ('flags', c_ulong),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
+    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
+    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
+    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
+    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('ex_data', CRYPTO_EX_DATA),
+    ('references', c_int),
+    ('depth', c_int),
+]
+assert sizeof(x509_store_st) == 76, sizeof(x509_store_st)
+assert alignment(x509_store_st) == 4, alignment(x509_store_st)
+x509_lookup_st._fields_ = [
+    ('init', c_int),
+    ('skip', c_int),
+    ('method', POINTER(X509_LOOKUP_METHOD)),
+    ('method_data', STRING),
+    ('store_ctx', POINTER(X509_STORE)),
+]
+assert sizeof(x509_lookup_st) == 20, sizeof(x509_lookup_st)
+assert alignment(x509_lookup_st) == 4, alignment(x509_lookup_st)
+time_t = __darwin_time_t
+x509_store_ctx_st._fields_ = [
+    ('ctx', POINTER(X509_STORE)),
+    ('current_method', c_int),
+    ('cert', POINTER(X509)),
+    ('untrusted', POINTER(STACK)),
+    ('purpose', c_int),
+    ('trust', c_int),
+    ('check_time', time_t),
+    ('flags', c_ulong),
+    ('other_ctx', c_void_p),
+    ('verify', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('verify_cb', CFUNCTYPE(c_int, c_int, POINTER(X509_STORE_CTX))),
+    ('get_issuer', CFUNCTYPE(c_int, POINTER(POINTER(X509)), POINTER(X509_STORE_CTX), POINTER(X509))),
+    ('check_issued', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509), POINTER(X509))),
+    ('check_revocation', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('get_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(POINTER(X509_CRL)), POINTER(X509))),
+    ('check_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL))),
+    ('cert_crl', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX), POINTER(X509_CRL), POINTER(X509))),
+    ('cleanup', CFUNCTYPE(c_int, POINTER(X509_STORE_CTX))),
+    ('depth', c_int),
+    ('valid', c_int),
+    ('last_untrusted', c_int),
+    ('chain', POINTER(STACK)),
+    ('error_depth', c_int),
+    ('error', c_int),
+    ('current_cert', POINTER(X509)),
+    ('current_issuer', POINTER(X509)),
+    ('current_crl', POINTER(X509_CRL)),
+    ('ex_data', CRYPTO_EX_DATA),
+]
+assert sizeof(x509_store_ctx_st) == 116, sizeof(x509_store_ctx_st)
+assert alignment(x509_store_ctx_st) == 4, alignment(x509_store_ctx_st)
+va_list = __darwin_va_list
+__darwin_off_t = __int64_t
+fpos_t = __darwin_off_t
+class __sbuf(Structure):
+    pass
+__sbuf._fields_ = [
+    ('_base', POINTER(c_ubyte)),
+    ('_size', c_int),
+]
+assert sizeof(__sbuf) == 8, sizeof(__sbuf)
+assert alignment(__sbuf) == 4, alignment(__sbuf)
+class __sFILEX(Structure):
+    pass
+__sFILEX._fields_ = [
+]
+class __sFILE(Structure):
+    pass
+__sFILE._pack_ = 4
+__sFILE._fields_ = [
+    ('_p', POINTER(c_ubyte)),
+    ('_r', c_int),
+    ('_w', c_int),
+    ('_flags', c_short),
+    ('_file', c_short),
+    ('_bf', __sbuf),
+    ('_lbfsize', c_int),
+    ('_cookie', c_void_p),
+    ('_close', CFUNCTYPE(c_int, c_void_p)),
+    ('_read', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
+    ('_seek', CFUNCTYPE(fpos_t, c_void_p, c_longlong, c_int)),
+    ('_write', CFUNCTYPE(c_int, c_void_p, STRING, c_int)),
+    ('_ub', __sbuf),
+    ('_extra', POINTER(__sFILEX)),
+    ('_ur', c_int),
+    ('_ubuf', c_ubyte * 3),
+    ('_nbuf', c_ubyte * 1),
+    ('_lb', __sbuf),
+    ('_blksize', c_int),
+    ('_offset', fpos_t),
+]
+assert sizeof(__sFILE) == 88, sizeof(__sFILE)
+assert alignment(__sFILE) == 4, alignment(__sFILE)
+FILE = __sFILE
+ct_rune_t = __darwin_ct_rune_t
+rune_t = __darwin_rune_t
+class div_t(Structure):
+    pass
+div_t._fields_ = [
+    ('quot', c_int),
+    ('rem', c_int),
+]
+assert sizeof(div_t) == 8, sizeof(div_t)
+assert alignment(div_t) == 4, alignment(div_t)
+class ldiv_t(Structure):
+    pass
+ldiv_t._fields_ = [
+    ('quot', c_long),
+    ('rem', c_long),
+]
+assert sizeof(ldiv_t) == 8, sizeof(ldiv_t)
+assert alignment(ldiv_t) == 4, alignment(ldiv_t)
+class lldiv_t(Structure):
+    pass
+lldiv_t._pack_ = 4
+lldiv_t._fields_ = [
+    ('quot', c_longlong),
+    ('rem', c_longlong),
+]
+assert sizeof(lldiv_t) == 16, sizeof(lldiv_t)
+assert alignment(lldiv_t) == 4, alignment(lldiv_t)
+__darwin_dev_t = __int32_t
+dev_t = __darwin_dev_t
+__darwin_mode_t = __uint16_t
+mode_t = __darwin_mode_t
+class mcontext(Structure):
+    pass
+mcontext._fields_ = [
+]
+class mcontext64(Structure):
+    pass
+mcontext64._fields_ = [
+]
+class __darwin_pthread_handler_rec(Structure):
+    pass
+__darwin_pthread_handler_rec._fields_ = [
+    ('__routine', CFUNCTYPE(None, c_void_p)),
+    ('__arg', c_void_p),
+    ('__next', POINTER(__darwin_pthread_handler_rec)),
+]
+assert sizeof(__darwin_pthread_handler_rec) == 12, sizeof(__darwin_pthread_handler_rec)
+assert alignment(__darwin_pthread_handler_rec) == 4, alignment(__darwin_pthread_handler_rec)
+class _opaque_pthread_attr_t(Structure):
+    pass
+_opaque_pthread_attr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 36),
+]
+assert sizeof(_opaque_pthread_attr_t) == 40, sizeof(_opaque_pthread_attr_t)
+assert alignment(_opaque_pthread_attr_t) == 4, alignment(_opaque_pthread_attr_t)
+class _opaque_pthread_cond_t(Structure):
+    pass
+_opaque_pthread_cond_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 24),
+]
+assert sizeof(_opaque_pthread_cond_t) == 28, sizeof(_opaque_pthread_cond_t)
+assert alignment(_opaque_pthread_cond_t) == 4, alignment(_opaque_pthread_cond_t)
+class _opaque_pthread_condattr_t(Structure):
+    pass
+_opaque_pthread_condattr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 4),
+]
+assert sizeof(_opaque_pthread_condattr_t) == 8, sizeof(_opaque_pthread_condattr_t)
+assert alignment(_opaque_pthread_condattr_t) == 4, alignment(_opaque_pthread_condattr_t)
+class _opaque_pthread_mutex_t(Structure):
+    pass
+_opaque_pthread_mutex_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 40),
+]
+assert sizeof(_opaque_pthread_mutex_t) == 44, sizeof(_opaque_pthread_mutex_t)
+assert alignment(_opaque_pthread_mutex_t) == 4, alignment(_opaque_pthread_mutex_t)
+class _opaque_pthread_mutexattr_t(Structure):
+    pass
+_opaque_pthread_mutexattr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 8),
+]
+assert sizeof(_opaque_pthread_mutexattr_t) == 12, sizeof(_opaque_pthread_mutexattr_t)
+assert alignment(_opaque_pthread_mutexattr_t) == 4, alignment(_opaque_pthread_mutexattr_t)
+class _opaque_pthread_once_t(Structure):
+    pass
+_opaque_pthread_once_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 4),
+]
+assert sizeof(_opaque_pthread_once_t) == 8, sizeof(_opaque_pthread_once_t)
+assert alignment(_opaque_pthread_once_t) == 4, alignment(_opaque_pthread_once_t)
+class _opaque_pthread_rwlock_t(Structure):
+    pass
+_opaque_pthread_rwlock_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 124),
+]
+assert sizeof(_opaque_pthread_rwlock_t) == 128, sizeof(_opaque_pthread_rwlock_t)
+assert alignment(_opaque_pthread_rwlock_t) == 4, alignment(_opaque_pthread_rwlock_t)
+class _opaque_pthread_rwlockattr_t(Structure):
+    pass
+_opaque_pthread_rwlockattr_t._fields_ = [
+    ('__sig', c_long),
+    ('__opaque', c_char * 12),
+]
+assert sizeof(_opaque_pthread_rwlockattr_t) == 16, sizeof(_opaque_pthread_rwlockattr_t)
+assert alignment(_opaque_pthread_rwlockattr_t) == 4, alignment(_opaque_pthread_rwlockattr_t)
+class _opaque_pthread_t(Structure):
+    pass
+_opaque_pthread_t._fields_ = [
+    ('__sig', c_long),
+    ('__cleanup_stack', POINTER(__darwin_pthread_handler_rec)),
+    ('__opaque', c_char * 596),
+]
+assert sizeof(_opaque_pthread_t) == 604, sizeof(_opaque_pthread_t)
+assert alignment(_opaque_pthread_t) == 4, alignment(_opaque_pthread_t)
+__darwin_blkcnt_t = __int64_t
+__darwin_blksize_t = __int32_t
+__darwin_fsblkcnt_t = c_uint
+__darwin_fsfilcnt_t = c_uint
+__darwin_gid_t = __uint32_t
+__darwin_id_t = __uint32_t
+__darwin_ino_t = __uint32_t
+__darwin_mach_port_name_t = __darwin_natural_t
+__darwin_mach_port_t = __darwin_mach_port_name_t
+__darwin_mcontext_t = POINTER(mcontext)
+__darwin_mcontext64_t = POINTER(mcontext64)
+__darwin_pid_t = __int32_t
+__darwin_pthread_attr_t = _opaque_pthread_attr_t
+__darwin_pthread_cond_t = _opaque_pthread_cond_t
+__darwin_pthread_condattr_t = _opaque_pthread_condattr_t
+__darwin_pthread_key_t = c_ulong
+__darwin_pthread_mutex_t = _opaque_pthread_mutex_t
+__darwin_pthread_mutexattr_t = _opaque_pthread_mutexattr_t
+__darwin_pthread_once_t = _opaque_pthread_once_t
+__darwin_pthread_rwlock_t = _opaque_pthread_rwlock_t
+__darwin_pthread_rwlockattr_t = _opaque_pthread_rwlockattr_t
+__darwin_pthread_t = POINTER(_opaque_pthread_t)
+__darwin_sigset_t = __uint32_t
+__darwin_suseconds_t = __int32_t
+__darwin_uid_t = __uint32_t
+__darwin_useconds_t = __uint32_t
+__darwin_uuid_t = c_ubyte * 16
+class sigaltstack(Structure):
+    pass
+sigaltstack._fields_ = [
+    ('ss_sp', c_void_p),
+    ('ss_size', __darwin_size_t),
+    ('ss_flags', c_int),
+]
+assert sizeof(sigaltstack) == 12, sizeof(sigaltstack)
+assert alignment(sigaltstack) == 4, alignment(sigaltstack)
+__darwin_stack_t = sigaltstack
+class ucontext(Structure):
+    pass
+ucontext._fields_ = [
+    ('uc_onstack', c_int),
+    ('uc_sigmask', __darwin_sigset_t),
+    ('uc_stack', __darwin_stack_t),
+    ('uc_link', POINTER(ucontext)),
+    ('uc_mcsize', __darwin_size_t),
+    ('uc_mcontext', __darwin_mcontext_t),
+]
+assert sizeof(ucontext) == 32, sizeof(ucontext)
+assert alignment(ucontext) == 4, alignment(ucontext)
+__darwin_ucontext_t = ucontext
+class ucontext64(Structure):
+    pass
+ucontext64._fields_ = [
+    ('uc_onstack', c_int),
+    ('uc_sigmask', __darwin_sigset_t),
+    ('uc_stack', __darwin_stack_t),
+    ('uc_link', POINTER(ucontext64)),
+    ('uc_mcsize', __darwin_size_t),
+    ('uc_mcontext64', __darwin_mcontext64_t),
+]
+assert sizeof(ucontext64) == 32, sizeof(ucontext64)
+assert alignment(ucontext64) == 4, alignment(ucontext64)
+__darwin_ucontext64_t = ucontext64
+class timeval(Structure):
+    pass
+timeval._fields_ = [
+    ('tv_sec', __darwin_time_t),
+    ('tv_usec', __darwin_suseconds_t),
+]
+assert sizeof(timeval) == 8, sizeof(timeval)
+assert alignment(timeval) == 4, alignment(timeval)
+rlim_t = __int64_t
+class rusage(Structure):
+    pass
+rusage._fields_ = [
+    ('ru_utime', timeval),
+    ('ru_stime', timeval),
+    ('ru_maxrss', c_long),
+    ('ru_ixrss', c_long),
+    ('ru_idrss', c_long),
+    ('ru_isrss', c_long),
+    ('ru_minflt', c_long),
+    ('ru_majflt', c_long),
+    ('ru_nswap', c_long),
+    ('ru_inblock', c_long),
+    ('ru_oublock', c_long),
+    ('ru_msgsnd', c_long),
+    ('ru_msgrcv', c_long),
+    ('ru_nsignals', c_long),
+    ('ru_nvcsw', c_long),
+    ('ru_nivcsw', c_long),
+]
+assert sizeof(rusage) == 72, sizeof(rusage)
+assert alignment(rusage) == 4, alignment(rusage)
+class rlimit(Structure):
+    pass
+rlimit._pack_ = 4
+rlimit._fields_ = [
+    ('rlim_cur', rlim_t),
+    ('rlim_max', rlim_t),
+]
+assert sizeof(rlimit) == 16, sizeof(rlimit)
+assert alignment(rlimit) == 4, alignment(rlimit)
+mcontext_t = __darwin_mcontext_t
+mcontext64_t = __darwin_mcontext64_t
+pthread_attr_t = __darwin_pthread_attr_t
+sigset_t = __darwin_sigset_t
+ucontext_t = __darwin_ucontext_t
+ucontext64_t = __darwin_ucontext64_t
+uid_t = __darwin_uid_t
+class sigval(Union):
+    pass
+sigval._fields_ = [
+    ('sival_int', c_int),
+    ('sival_ptr', c_void_p),
+]
+assert sizeof(sigval) == 4, sizeof(sigval)
+assert alignment(sigval) == 4, alignment(sigval)
+class sigevent(Structure):
+    pass
+sigevent._fields_ = [
+    ('sigev_notify', c_int),
+    ('sigev_signo', c_int),
+    ('sigev_value', sigval),
+    ('sigev_notify_function', CFUNCTYPE(None, sigval)),
+    ('sigev_notify_attributes', POINTER(pthread_attr_t)),
+]
+assert sizeof(sigevent) == 20, sizeof(sigevent)
+assert alignment(sigevent) == 4, alignment(sigevent)
+class __siginfo(Structure):
+    pass
+pid_t = __darwin_pid_t
+__siginfo._fields_ = [
+    ('si_signo', c_int),
+    ('si_errno', c_int),
+    ('si_code', c_int),
+    ('si_pid', pid_t),
+    ('si_uid', uid_t),
+    ('si_status', c_int),
+    ('si_addr', c_void_p),
+    ('si_value', sigval),
+    ('si_band', c_long),
+    ('pad', c_ulong * 7),
+]
+assert sizeof(__siginfo) == 64, sizeof(__siginfo)
+assert alignment(__siginfo) == 4, alignment(__siginfo)
+siginfo_t = __siginfo
+class __sigaction_u(Union):
+    pass
+__sigaction_u._fields_ = [
+    ('__sa_handler', CFUNCTYPE(None, c_int)),
+    ('__sa_sigaction', CFUNCTYPE(None, c_int, POINTER(__siginfo), c_void_p)),
+]
+assert sizeof(__sigaction_u) == 4, sizeof(__sigaction_u)
+assert alignment(__sigaction_u) == 4, alignment(__sigaction_u)
+class __sigaction(Structure):
+    pass
+__sigaction._fields_ = [
+    ('__sigaction_u', __sigaction_u),
+    ('sa_tramp', CFUNCTYPE(None, c_void_p, c_int, c_int, POINTER(siginfo_t), c_void_p)),
+    ('sa_mask', sigset_t),
+    ('sa_flags', c_int),
+]
+assert sizeof(__sigaction) == 16, sizeof(__sigaction)
+assert alignment(__sigaction) == 4, alignment(__sigaction)
+class sigaction(Structure):
+    pass
+sigaction._fields_ = [
+    ('__sigaction_u', __sigaction_u),
+    ('sa_mask', sigset_t),
+    ('sa_flags', c_int),
+]
+assert sizeof(sigaction) == 12, sizeof(sigaction)
+assert alignment(sigaction) == 4, alignment(sigaction)
+sig_t = CFUNCTYPE(None, c_int)
+stack_t = __darwin_stack_t
+class sigvec(Structure):
+    pass
+sigvec._fields_ = [
+    ('sv_handler', CFUNCTYPE(None, c_int)),
+    ('sv_mask', c_int),
+    ('sv_flags', c_int),
+]
+assert sizeof(sigvec) == 12, sizeof(sigvec)
+assert alignment(sigvec) == 4, alignment(sigvec)
+class sigstack(Structure):
+    pass
+sigstack._fields_ = [
+    ('ss_sp', STRING),
+    ('ss_onstack', c_int),
+]
+assert sizeof(sigstack) == 8, sizeof(sigstack)
+assert alignment(sigstack) == 4, alignment(sigstack)
+u_char = c_ubyte
+u_short = c_ushort
+u_int = c_uint
+u_long = c_ulong
+ushort = c_ushort
+uint = c_uint
+u_quad_t = u_int64_t
+quad_t = int64_t
+qaddr_t = POINTER(quad_t)
+caddr_t = STRING
+daddr_t = int32_t
+fixpt_t = u_int32_t
+blkcnt_t = __darwin_blkcnt_t
+blksize_t = __darwin_blksize_t
+gid_t = __darwin_gid_t
+in_addr_t = __uint32_t
+in_port_t = __uint16_t
+ino_t = __darwin_ino_t
+key_t = __int32_t
+nlink_t = __uint16_t
+off_t = __darwin_off_t
+segsz_t = int32_t
+swblk_t = int32_t
+clock_t = __darwin_clock_t
+ssize_t = __darwin_ssize_t
+useconds_t = __darwin_useconds_t
+suseconds_t = __darwin_suseconds_t
+fd_mask = __int32_t
+class fd_set(Structure):
+    pass
+fd_set._fields_ = [
+    ('fds_bits', __int32_t * 32),
+]
+assert sizeof(fd_set) == 128, sizeof(fd_set)
+assert alignment(fd_set) == 4, alignment(fd_set)
+pthread_cond_t = __darwin_pthread_cond_t
+pthread_condattr_t = __darwin_pthread_condattr_t
+pthread_mutex_t = __darwin_pthread_mutex_t
+pthread_mutexattr_t = __darwin_pthread_mutexattr_t
+pthread_once_t = __darwin_pthread_once_t
+pthread_rwlock_t = __darwin_pthread_rwlock_t
+pthread_rwlockattr_t = __darwin_pthread_rwlockattr_t
+pthread_t = __darwin_pthread_t
+pthread_key_t = __darwin_pthread_key_t
+fsblkcnt_t = __darwin_fsblkcnt_t
+fsfilcnt_t = __darwin_fsfilcnt_t
+
+# values for enumeration 'idtype_t'
+idtype_t = c_int # enum
+id_t = __darwin_id_t
+class wait(Union):
+    pass
+class N4wait3DOLLAR_3E(Structure):
+    pass
+N4wait3DOLLAR_3E._fields_ = [
+    ('w_Termsig', c_uint, 7),
+    ('w_Coredump', c_uint, 1),
+    ('w_Retcode', c_uint, 8),
+    ('w_Filler', c_uint, 16),
+]
+assert sizeof(N4wait3DOLLAR_3E) == 4, sizeof(N4wait3DOLLAR_3E)
+assert alignment(N4wait3DOLLAR_3E) == 4, alignment(N4wait3DOLLAR_3E)
+class N4wait3DOLLAR_4E(Structure):
+    pass
+N4wait3DOLLAR_4E._fields_ = [
+    ('w_Stopval', c_uint, 8),
+    ('w_Stopsig', c_uint, 8),
+    ('w_Filler', c_uint, 16),
+]
+assert sizeof(N4wait3DOLLAR_4E) == 4, sizeof(N4wait3DOLLAR_4E)
+assert alignment(N4wait3DOLLAR_4E) == 4, alignment(N4wait3DOLLAR_4E)
+wait._fields_ = [
+    ('w_status', c_int),
+    ('w_T', N4wait3DOLLAR_3E),
+    ('w_S', N4wait3DOLLAR_4E),
+]
+assert sizeof(wait) == 4, sizeof(wait)
+assert alignment(wait) == 4, alignment(wait)
+class timespec(Structure):
+    pass
+timespec._fields_ = [
+    ('tv_sec', time_t),
+    ('tv_nsec', c_long),
+]
+assert sizeof(timespec) == 8, sizeof(timespec)
+assert alignment(timespec) == 4, alignment(timespec)
+class tm(Structure):
+    pass
+tm._fields_ = [
+    ('tm_sec', c_int),
+    ('tm_min', c_int),
+    ('tm_hour', c_int),
+    ('tm_mday', c_int),
+    ('tm_mon', c_int),
+    ('tm_year', c_int),
+    ('tm_wday', c_int),
+    ('tm_yday', c_int),
+    ('tm_isdst', c_int),
+    ('tm_gmtoff', c_long),
+    ('tm_zone', STRING),
+]
+assert sizeof(tm) == 44, sizeof(tm)
+assert alignment(tm) == 4, alignment(tm)
+__gnuc_va_list = STRING
+ptrdiff_t = c_int
+int8_t = c_byte
+int16_t = c_short
+uint8_t = c_ubyte
+uint16_t = c_ushort
+uint32_t = c_uint
+uint64_t = c_ulonglong
+int_least8_t = int8_t
+int_least16_t = int16_t
+int_least32_t = int32_t
+int_least64_t = int64_t
+uint_least8_t = uint8_t
+uint_least16_t = uint16_t
+uint_least32_t = uint32_t
+uint_least64_t = uint64_t
+int_fast8_t = int8_t
+int_fast16_t = int16_t
+int_fast32_t = int32_t
+int_fast64_t = int64_t
+uint_fast8_t = uint8_t
+uint_fast16_t = uint16_t
+uint_fast32_t = uint32_t
+uint_fast64_t = uint64_t
+intptr_t = c_long
+uintptr_t = c_ulong
+intmax_t = c_longlong
+uintmax_t = c_ulonglong
+__all__ = ['ENGINE', 'pkcs7_enc_content_st', '__int16_t',
+           'X509_REVOKED', 'SSL_CTX', 'UIT_BOOLEAN',
+           '__darwin_time_t', 'ucontext64_t', 'int_fast32_t',
+           'pem_ctx_st', 'uint8_t', 'fpos_t', 'X509', 'COMP_CTX',
+           'tm', 'N10pem_ctx_st4DOLLAR_17E', 'swblk_t',
+           'ASN1_TEMPLATE', '__darwin_pthread_t', 'fixpt_t',
+           'BIO_METHOD', 'ASN1_PRINTABLESTRING', 'EVP_ENCODE_CTX',
+           'dh_method', 'bio_f_buffer_ctx_struct', 'in_port_t',
+           'X509_SIG', '__darwin_ssize_t', '__darwin_sigset_t',
+           'wait', 'uint_fast16_t', 'N12asn1_type_st4DOLLAR_11E',
+           'uint_least8_t', 'pthread_rwlock_t', 'ASN1_IA5STRING',
+           'fsfilcnt_t', 'ucontext', '__uint64_t', 'timespec',
+           'x509_cinf_st', 'COMP_METHOD', 'MD5_CTX', 'buf_mem_st',
+           'ASN1_ENCODING_st', 'PBEPARAM', 'X509_NAME_ENTRY',
+           '__darwin_va_list', 'ucontext_t', 'lhash_st',
+           'N4wait3DOLLAR_4E', '__darwin_uuid_t',
+           '_ossl_old_des_ks_struct', 'id_t', 'ASN1_BIT_STRING',
+           'va_list', '__darwin_wchar_t', 'pthread_key_t',
+           'pkcs7_signer_info_st', 'ASN1_METHOD', 'DSA_SIG', 'DSA',
+           'UIT_NONE', 'pthread_t', '__darwin_useconds_t',
+           'uint_fast8_t', 'UI_STRING', 'DES_cblock',
+           '__darwin_mcontext64_t', 'rlim_t', 'PEM_Encode_Seal_st',
+           'SHAstate_st', 'u_quad_t', 'openssl_fptr',
+           '_opaque_pthread_rwlockattr_t',
+           'N18x509_attributes_st4DOLLAR_13E',
+           '__darwin_pthread_rwlock_t', 'daddr_t', 'ui_string_st',
+           'x509_file_st', 'X509_req_info_st', 'int_least64_t',
+           'evp_Encode_Ctx_st', 'X509_OBJECTS', 'CRYPTO_EX_DATA',
+           '__int8_t', 'AUTHORITY_KEYID_st', '_opaque_pthread_attr_t',
+           'sigstack', 'EVP_CIPHER_CTX', 'X509_extension_st', 'pid_t',
+           'RSA_METHOD', 'PEM_USER', 'pem_recip_st', 'env_md_ctx_st',
+           'rc5_key_st', 'ui_st', 'X509_PUBKEY', 'u_int8_t',
+           'ASN1_ITEM_st', 'pkcs7_recip_info_st', 'ssl2_state_st',
+           'off_t', 'N10ssl_ctx_st4DOLLAR_18E', 'crypto_ex_data_st',
+           'ui_method_st', '__darwin_pthread_rwlockattr_t',
+           'CRYPTO_EX_dup', '__darwin_ino_t', '__sFILE',
+           'OSUnknownByteOrder', 'BN_MONT_CTX', 'ASN1_NULL', 'time_t',
+           'CRYPTO_EX_new', 'asn1_type_st', 'CRYPTO_EX_DATA_FUNCS',
+           'user_time_t', 'BIGNUM', 'pthread_rwlockattr_t',
+           'ASN1_VALUE_st', 'DH_METHOD', '__darwin_off_t',
+           '_opaque_pthread_t', 'bn_blinding_st', 'RSA', 'ssize_t',
+           'mcontext64_t', 'user_long_t', 'fsblkcnt_t', 'cert_st',
+           '__darwin_pthread_condattr_t', 'X509_PKEY',
+           '__darwin_id_t', '__darwin_nl_item', 'SSL2_STATE', 'FILE',
+           'pthread_mutexattr_t', 'size_t',
+           '_ossl_old_des_key_schedule', 'pkcs7_issuer_and_serial_st',
+           'sigval', 'CRYPTO_MEM_LEAK_CB', 'X509_NAME', 'blkcnt_t',
+           'uint_least16_t', '__darwin_dev_t', 'evp_cipher_info_st',
+           'BN_BLINDING', 'ssl3_state_st', 'uint_least64_t',
+           'user_addr_t', 'DES_key_schedule', 'RIPEMD160_CTX',
+           'u_char', 'X509_algor_st', 'uid_t', 'sess_cert_st',
+           'u_int64_t', 'u_int16_t', 'sigset_t', '__darwin_ptrdiff_t',
+           'ASN1_CTX', 'STACK', '__int32_t', 'UI_METHOD',
+           'NETSCAPE_SPKI', 'UIT_PROMPT', 'st_CRYPTO_EX_DATA_IMPL',
+           'cast_key_st', 'X509_HASH_DIR_CTX', 'sigevent',
+           'user_ssize_t', 'clock_t', 'aes_key_st',
+           '__darwin_socklen_t', '__darwin_intptr_t', 'int_fast64_t',
+           'asn1_string_table_st', 'uint_fast32_t',
+           'ASN1_VISIBLESTRING', 'DSA_SIG_st', 'obj_name_st',
+           'X509_LOOKUP_METHOD', 'u_int32_t', 'EVP_CIPHER_INFO',
+           '__gnuc_va_list', 'AES_KEY', 'PKCS7_ISSUER_AND_SERIAL',
+           'BN_CTX', '__darwin_blkcnt_t', 'key_t', 'SHA_CTX',
+           'pkcs7_signed_st', 'SSL', 'N10pem_ctx_st4DOLLAR_16E',
+           'pthread_attr_t', 'EVP_MD', 'uint', 'ASN1_BOOLEAN',
+           'ino_t', '__darwin_clock_t', 'ASN1_OCTET_STRING',
+           'asn1_ctx_st', 'BIO_F_BUFFER_CTX', 'bn_mont_ctx_st',
+           'X509_REQ_INFO', 'PEM_CTX', 'sigvec',
+           '__darwin_pthread_mutexattr_t', 'x509_attributes_st',
+           'stack_t', '__darwin_mode_t', '__mbstate_t',
+           'asn1_object_st', 'ASN1_ENCODING', '__uint8_t',
+           'LHASH_NODE', 'PKCS7_SIGNER_INFO', 'asn1_method_st',
+           'stack_st', 'bio_info_cb', 'div_t', 'UIT_VERIFY',
+           'PBEPARAM_st', 'N4wait3DOLLAR_3E', 'quad_t', '__siginfo',
+           '__darwin_mbstate_t', 'rsa_st', 'ASN1_UNIVERSALSTRING',
+           'uint64_t', 'ssl_comp_st', 'X509_OBJECT', 'pthread_cond_t',
+           'DH', '__darwin_wctype_t', 'PKCS7_ENVELOPE', 'ASN1_TLC_st',
+           'sig_atomic_t', 'BIO', 'nlink_t', 'BUF_MEM', 'SSL3_RECORD',
+           'bio_method_st', 'timeval', 'UI_string_types', 'BIO_dummy',
+           'ssl_ctx_st', 'NETSCAPE_CERT_SEQUENCE',
+           'BIT_STRING_BITNAME_st', '__darwin_pthread_attr_t',
+           'int8_t', '__darwin_wint_t', 'OBJ_NAME',
+           'PKCS8_PRIV_KEY_INFO', 'PBE2PARAM_st',
+           'LHASH_DOALL_FN_TYPE', 'x509_st', 'X509_VAL', 'dev_t',
+           'ASN1_TEMPLATE_st', 'MD5state_st', '__uint16_t',
+           'LHASH_DOALL_ARG_FN_TYPE', 'mdc2_ctx_st', 'SSL3_STATE',
+           'ssl3_buffer_st', 'ASN1_ITEM_EXP',
+           '_opaque_pthread_condattr_t', 'mode_t', 'ASN1_VALUE',
+           'qaddr_t', '__darwin_gid_t', 'EVP_PKEY', 'CRYPTO_EX_free',
+           '_ossl_old_des_cblock', 'X509_INFO', 'asn1_string_st',
+           'intptr_t', 'UIT_INFO', 'int_fast8_t', 'sigaltstack',
+           'env_md_st', 'LHASH', '__darwin_ucontext_t',
+           'PKCS7_SIGN_ENVELOPE', '__darwin_mcontext_t', 'ct_rune_t',
+           'MD2_CTX', 'pthread_once_t', 'SSL3_BUFFER', 'fd_mask',
+           'ASN1_TYPE', 'PKCS7_SIGNED', 'ssl3_record_st', 'BF_KEY',
+           'MD4state_st', 'MD4_CTX', 'int16_t', 'SSL_CIPHER',
+           'rune_t', 'X509_TRUST', 'siginfo_t', 'X509_STORE',
+           '__sbuf', 'X509_STORE_CTX', '__darwin_blksize_t', 'ldiv_t',
+           'ASN1_TIME', 'SSL_METHOD', 'X509_LOOKUP',
+           'Netscape_spki_st', 'P_PID', 'sigaction', 'sig_t',
+           'hostent', 'x509_cert_aux_st', '_opaque_pthread_cond_t',
+           'segsz_t', 'ushort', '__darwin_ct_rune_t', 'fd_set',
+           'BN_RECP_CTX', 'x509_lookup_st', 'uint16_t', 'pkcs7_st',
+           'asn1_header_st', '__darwin_pthread_key_t',
+           'x509_trust_st', '__darwin_pthread_handler_rec', 'int32_t',
+           'X509_CRL_INFO', 'N11evp_pkey_st4DOLLAR_12E', 'MDC2_CTX',
+           'N23_ossl_old_des_ks_struct4DOLLAR_10E', 'ASN1_HEADER',
+           'X509_crl_info_st', 'LHASH_HASH_FN_TYPE',
+           '_opaque_pthread_mutexattr_t', 'ssl_st',
+           'N8pkcs7_st4DOLLAR_15E', 'evp_pkey_st',
+           'pkcs7_signedandenveloped_st', '__darwin_mach_port_t',
+           'EVP_PBE_KEYGEN', '_opaque_pthread_mutex_t',
+           'ASN1_UTCTIME', 'mcontext', 'crypto_ex_data_func_st',
+           'u_long', 'PBKDF2PARAM_st', 'rc4_key_st', 'DSA_METHOD',
+           'EVP_CIPHER', 'BIT_STRING_BITNAME', 'PKCS7_RECIP_INFO',
+           'ssl3_enc_method', 'X509_CERT_AUX', 'uintmax_t',
+           'int_fast16_t', 'RC5_32_KEY', 'ucontext64', 'ASN1_INTEGER',
+           'u_short', 'N14x509_object_st4DOLLAR_14E', 'mcontext64',
+           'X509_sig_st', 'ASN1_GENERALSTRING', 'PKCS7', '__sFILEX',
+           'X509_name_entry_st', 'ssl_session_st', 'caddr_t',
+           'bignum_st', 'X509_CINF', '__darwin_pthread_cond_t',
+           'ASN1_TLC', 'PKCS7_ENCRYPT', 'NETSCAPE_SPKAC',
+           'Netscape_spkac_st', 'idtype_t', 'UIT_ERROR',
+           'uint_fast64_t', 'in_addr_t', 'pthread_mutex_t',
+           '__int64_t', 'ASN1_BMPSTRING', 'uint32_t',
+           'PEM_ENCODE_SEAL_CTX', 'suseconds_t', 'ASN1_OBJECT',
+           'X509_val_st', 'private_key_st', 'CRYPTO_dynlock',
+           'X509_objects_st', 'CRYPTO_EX_DATA_IMPL',
+           'pthread_condattr_t', 'PKCS7_DIGEST', 'uint_least32_t',
+           'ASN1_STRING', '__uint32_t', 'P_PGID', 'rsa_meth_st',
+           'X509_crl_st', 'RC2_KEY', '__darwin_fsfilcnt_t',
+           'X509_revoked_st', 'PBE2PARAM', 'blksize_t',
+           'Netscape_certificate_sequence', 'ssl_cipher_st',
+           'bignum_ctx', 'register_t', 'ASN1_UTF8STRING',
+           'pkcs7_encrypted_st', 'RC4_KEY', '__darwin_ucontext64_t',
+           'N13ssl2_state_st4DOLLAR_19E', 'bn_recp_ctx_st',
+           'CAST_KEY', 'X509_ATTRIBUTE', '__darwin_suseconds_t',
+           '__sigaction', 'user_ulong_t', 'syscall_arg_t',
+           'evp_cipher_ctx_st', 'X509_ALGOR', 'mcontext_t',
+           'const_DES_cblock', '__darwin_fsblkcnt_t', 'dsa_st',
+           'int_least8_t', 'MD2state_st', 'X509_EXTENSION',
+           'GEN_SESSION_CB', 'int_least16_t', '__darwin_wctrans_t',
+           'PBKDF2PARAM', 'x509_lookup_method_st', 'pem_password_cb',
+           'X509_info_st', 'x509_store_st', '__darwin_natural_t',
+           'X509_pubkey_st', 'pkcs7_digest_st', '__darwin_size_t',
+           'ASN1_STRING_TABLE', 'OSLittleEndian', 'RIPEMD160state_st',
+           'pkcs7_enveloped_st', 'UI', 'ptrdiff_t', 'X509_REQ',
+           'CRYPTO_dynlock_value', 'X509_req_st', 'x509_store_ctx_st',
+           'N13ssl3_state_st4DOLLAR_20E', 'lhash_node_st',
+           '__darwin_pthread_mutex_t', 'LHASH_COMP_FN_TYPE',
+           '__darwin_rune_t', 'rlimit', '__darwin_pthread_once_t',
+           'OSBigEndian', 'uintptr_t', '__darwin_uid_t', 'u_int',
+           'ASN1_T61STRING', 'gid_t', 'ssl_method_st', 'ASN1_ITEM',
+           'ASN1_ENUMERATED', '_opaque_pthread_rwlock_t',
+           'pkcs8_priv_key_info_st', 'intmax_t', 'sigcontext',
+           'X509_CRL', 'rc2_key_st', 'engine_st', 'x509_object_st',
+           '_opaque_pthread_once_t', 'DES_ks', 'SSL_COMP',
+           'dsa_method', 'int64_t', 'bio_st', 'bf_key_st',
+           'ASN1_GENERALIZEDTIME', 'PKCS7_ENC_CONTENT',
+           '__darwin_pid_t', 'lldiv_t', 'comp_method_st',
+           'EVP_MD_CTX', 'evp_cipher_st', 'X509_name_st',
+           'x509_hash_dir_st', '__darwin_mach_port_name_t',
+           'useconds_t', 'user_size_t', 'SSL_SESSION', 'rusage',
+           'ssl_crock_st', 'int_least32_t', '__sigaction_u', 'dh_st',
+           'P_ALL', '__darwin_stack_t', 'N6DES_ks3DOLLAR_9E',
+           'comp_ctx_st', 'X509_CERT_FILE_CTX']
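
The generated bindings above all follow the same two-step ctypes idiom: declare each struct as an empty Structure subclass, then assign its _fields_ list afterwards. Splitting declaration from definition is what lets self-references and forward references resolve (see __darwin_pthread_handler_rec, whose __next member points at its own type, or x509_lookup_st, which is declared early but only receives its _fields_ once X509_STORE exists), and the sizeof()/alignment() asserts pin each layout to the ABI the headers were wrapped on: 32-bit Darwin here, hence the 4-byte pointers. A minimal sketch of the same idiom, with a hypothetical node_st/NODE pair standing in for the generated names:

    from ctypes import Structure, POINTER, c_int, sizeof, alignment

    class node_st(Structure):
        # Declared empty so POINTER(node_st) is usable in its own fields.
        pass

    node_st._fields_ = [
        ('value', c_int),
        ('next', POINTER(node_st)),  # self-reference: legal once the class exists
    ]
    NODE = node_st  # typedef-style alias, mirroring the generated code

    # Layout guards in the style of the generated file.  The numbers assume a
    # 32-bit ABI (4-byte ints and pointers); on a 64-bit build the sizeof
    # assert fails, which is exactly the ABI mismatch such guards exist to catch.
    assert sizeof(node_st) == 8, sizeof(node_st)
    assert alignment(node_st) == 4, alignment(node_st)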

Added: sandbox/trunk/refactor_pkg/refactor/tests/data/py2_test_grammar.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/py2_test_grammar.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,956 @@
+# Python 2's Lib/test/test_grammar.py (r66189)
+
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+# NOTE: When you run this test as a script from the command line, you
+# get warnings about certain hex/oct constants.  Since those are
+# issued by the parser, you can't suppress them by adding a
+# filterwarnings() call to this module.  Therefore, to shut up the
+# regression test, the filterwarnings() call has been added to
+# regrtest.py.
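+#
+# (A sketch, not the actual regrtest code: the filter has to be installed
+# before this module is compiled, i.e. in the test runner, because parser
+# warnings fire at compile/import time rather than when the module body runs.
+# The exact message and category vary by Python version.)
+#
+#   import warnings
+#   warnings.filterwarnings("ignore", message=r"hex/oct constants",
+#                           category=FutureWarning)
+#   import test_module   # hypothetical module; the parser warns at this import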
+
+from test.test_support import run_unittest, check_syntax_error
+import unittest
+import sys
+# testing import *
+from sys import *
+
+class TokenTests(unittest.TestCase):
+
+    def testBackslash(self):
+        # Backslash means line continuation:
+        x = 1 \
+        + 1
+        self.assertEquals(x, 2, 'backslash for line continuation')
+
+        # Backslash does not mean continuation in comments :\
+        x = 0
+        self.assertEquals(x, 0, 'backslash ending comment')
+
+    def testPlainIntegers(self):
+        self.assertEquals(0xff, 255)
+        self.assertEquals(0377, 255)
+        self.assertEquals(2147483647, 017777777777)
+        # "0x" is not a valid literal
+        self.assertRaises(SyntaxError, eval, "0x")
+        from sys import maxint
+        if maxint == 2147483647:
+            self.assertEquals(-2147483647-1, -020000000000)
+            # XXX -2147483648
+            self.assert_(037777777777 > 0)
+            self.assert_(0xffffffff > 0)
+            for s in '2147483648', '040000000000', '0x100000000':
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        elif maxint == 9223372036854775807:
+            self.assertEquals(-9223372036854775807-1, -01000000000000000000000)
+            self.assert_(01777777777777777777777 > 0)
+            self.assert_(0xffffffffffffffff > 0)
+            for s in '9223372036854775808', '02000000000000000000000', \
+                     '0x10000000000000000':
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        else:
+            self.fail('Weird maxint value %r' % maxint)
+
+    def testLongIntegers(self):
+        x = 0L
+        x = 0l
+        x = 0xffffffffffffffffL
+        x = 0xffffffffffffffffl
+        x = 077777777777777777L
+        x = 077777777777777777l
+        x = 123456789012345678901234567890L
+        x = 123456789012345678901234567890l
+
+    def testFloats(self):
+        x = 3.14
+        x = 314.
+        x = 0.314
+        # XXX x = 000.314
+        x = .314
+        x = 3e14
+        x = 3E14
+        x = 3e-14
+        x = 3e+14
+        x = 3.e14
+        x = .3e14
+        x = 3.1e4
+
+    def testStringLiterals(self):
+        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
+        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
+        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
+        x = "doesn't \"shrink\" does it"
+        y = 'doesn\'t "shrink" does it'
+        self.assert_(len(x) == 24 and x == y)
+        x = "does \"shrink\" doesn't it"
+        y = 'does "shrink" doesn\'t it'
+        self.assert_(len(x) == 24 and x == y)
+        x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+        self.assertEquals(x, y)
+        y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''
+        self.assertEquals(x, y)
+        y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"
+        self.assertEquals(x, y)
+        y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'
+        self.assertEquals(x, y)
+
+
+class GrammarTests(unittest.TestCase):
+
+    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+    # XXX can't test in a script -- this rule is only used when interactive
+
+    # file_input: (NEWLINE | stmt)* ENDMARKER
+    # Being tested at this very moment by this very module
+
+    # expr_input: testlist NEWLINE
+    # XXX Hard to test -- used only in calls to input()
+
+    def testEvalInput(self):
+        # testlist ENDMARKER
+        x = eval('1, 0 or 1')
+
+    def testFuncdef(self):
+        ### 'def' NAME parameters ':' suite
+        ### parameters: '(' [varargslist] ')'
+        ### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
+        ###            | ('**'|'*' '*') NAME)
+        ###            | fpdef ['=' test] (',' fpdef ['=' test])* [',']
+        ### fpdef: NAME | '(' fplist ')'
+        ### fplist: fpdef (',' fpdef)* [',']
+        ### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
+        ### argument: [test '='] test   # Really [keyword '='] test
+        def f1(): pass
+        f1()
+        f1(*())
+        f1(*(), **{})
+        def f2(one_argument): pass
+        def f3(two, arguments): pass
+        def f4(two, (compound, (argument, list))): pass
+        def f5((compound, first), two): pass
+        self.assertEquals(f2.func_code.co_varnames, ('one_argument',))
+        self.assertEquals(f3.func_code.co_varnames, ('two', 'arguments'))
+        if sys.platform.startswith('java'):
+            self.assertEquals(f4.func_code.co_varnames,
+                   ('two', '(compound, (argument, list))', 'compound', 'argument',
+                                'list',))
+            self.assertEquals(f5.func_code.co_varnames,
+                   ('(compound, first)', 'two', 'compound', 'first'))
+        else:
+            self.assertEquals(f4.func_code.co_varnames,
+                  ('two', '.1', 'compound', 'argument',  'list'))
+            self.assertEquals(f5.func_code.co_varnames,
+                  ('.0', 'two', 'compound', 'first'))
+        def a1(one_arg,): pass
+        def a2(two, args,): pass
+        def v0(*rest): pass
+        def v1(a, *rest): pass
+        def v2(a, b, *rest): pass
+        def v3(a, (b, c), *rest): return a, b, c, rest
+
+        f1()
+        f2(1)
+        f2(1,)
+        f3(1, 2)
+        f3(1, 2,)
+        f4(1, (2, (3, 4)))
+        v0()
+        v0(1)
+        v0(1,)
+        v0(1,2)
+        v0(1,2,3,4,5,6,7,8,9,0)
+        v1(1)
+        v1(1,)
+        v1(1,2)
+        v1(1,2,3)
+        v1(1,2,3,4,5,6,7,8,9,0)
+        v2(1,2)
+        v2(1,2,3)
+        v2(1,2,3,4)
+        v2(1,2,3,4,5,6,7,8,9,0)
+        v3(1,(2,3))
+        v3(1,(2,3),4)
+        v3(1,(2,3),4,5,6,7,8,9,0)
+
+        # ceval unpacks the formal arguments into the first argcount names;
+        # thus, the names nested inside tuples must appear after these names.
+        if sys.platform.startswith('java'):
+            self.assertEquals(v3.func_code.co_varnames, ('a', '(b, c)', 'rest', 'b', 'c'))
+        else:
+            self.assertEquals(v3.func_code.co_varnames, ('a', '.1', 'rest', 'b', 'c'))
+        self.assertEquals(v3(1, (2, 3), 4), (1, 2, 3, (4,)))
+        def d01(a=1): pass
+        d01()
+        d01(1)
+        d01(*(1,))
+        d01(**{'a':2})
+        def d11(a, b=1): pass
+        d11(1)
+        d11(1, 2)
+        d11(1, **{'b':2})
+        def d21(a, b, c=1): pass
+        d21(1, 2)
+        d21(1, 2, 3)
+        d21(*(1, 2, 3))
+        d21(1, *(2, 3))
+        d21(1, 2, *(3,))
+        d21(1, 2, **{'c':3})
+        def d02(a=1, b=2): pass
+        d02()
+        d02(1)
+        d02(1, 2)
+        d02(*(1, 2))
+        d02(1, *(2,))
+        d02(1, **{'b':2})
+        d02(**{'a': 1, 'b': 2})
+        def d12(a, b=1, c=2): pass
+        d12(1)
+        d12(1, 2)
+        d12(1, 2, 3)
+        def d22(a, b, c=1, d=2): pass
+        d22(1, 2)
+        d22(1, 2, 3)
+        d22(1, 2, 3, 4)
+        def d01v(a=1, *rest): pass
+        d01v()
+        d01v(1)
+        d01v(1, 2)
+        d01v(*(1, 2, 3, 4))
+        d01v(*(1,))
+        d01v(**{'a':2})
+        def d11v(a, b=1, *rest): pass
+        d11v(1)
+        d11v(1, 2)
+        d11v(1, 2, 3)
+        def d21v(a, b, c=1, *rest): pass
+        d21v(1, 2)
+        d21v(1, 2, 3)
+        d21v(1, 2, 3, 4)
+        d21v(*(1, 2, 3, 4))
+        d21v(1, 2, **{'c': 3})
+        def d02v(a=1, b=2, *rest): pass
+        d02v()
+        d02v(1)
+        d02v(1, 2)
+        d02v(1, 2, 3)
+        d02v(1, *(2, 3, 4))
+        d02v(**{'a': 1, 'b': 2})
+        def d12v(a, b=1, c=2, *rest): pass
+        d12v(1)
+        d12v(1, 2)
+        d12v(1, 2, 3)
+        d12v(1, 2, 3, 4)
+        d12v(*(1, 2, 3, 4))
+        d12v(1, 2, *(3, 4, 5))
+        d12v(1, *(2,), **{'c': 3})
+        def d22v(a, b, c=1, d=2, *rest): pass
+        d22v(1, 2)
+        d22v(1, 2, 3)
+        d22v(1, 2, 3, 4)
+        d22v(1, 2, 3, 4, 5)
+        d22v(*(1, 2, 3, 4))
+        d22v(1, 2, *(3, 4, 5))
+        d22v(1, *(2, 3), **{'d': 4})
+        def d31v((x)): pass
+        d31v(1)
+        def d32v((x,)): pass
+        d32v((1,))
+
+        # keyword arguments after *arglist
+        def f(*args, **kwargs):
+            return args, kwargs
+        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
+                                                    {'x':2, 'y':5}))
+        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
+        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
+
+        # Check ast errors in *args and *kwargs
+        check_syntax_error(self, "f(*g(1=2))")
+        check_syntax_error(self, "f(**g(1=2))")
+
+    def testLambdef(self):
+        ### lambdef: 'lambda' [varargslist] ':' test
+        l1 = lambda : 0
+        self.assertEquals(l1(), 0)
+        l2 = lambda : a[d] # XXX just testing the expression
+        l3 = lambda : [2 < x for x in [-1, 3, 0L]]
+        self.assertEquals(l3(), [0, 1, 0])
+        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
+        self.assertEquals(l4(), 1)
+        l5 = lambda x, y, z=2: x + y + z
+        self.assertEquals(l5(1, 2), 5)
+        self.assertEquals(l5(1, 2, 3), 6)
+        check_syntax_error(self, "lambda x: x = 2")
+        check_syntax_error(self, "lambda (None,): None")
+
+    ### stmt: simple_stmt | compound_stmt
+    # Tested below
+
+    def testSimpleStmt(self):
+        ### simple_stmt: small_stmt (';' small_stmt)* [';']
+        x = 1; pass; del x
+        def foo():
+            # verify statements that end with semi-colons
+            x = 1; pass; del x;
+        foo()
+
+    ### small_stmt: expr_stmt | print_stmt  | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
+    # Tested below
+
+    def testExprStmt(self):
+        # (exprlist '=')* exprlist
+        1
+        1, 2, 3
+        x = 1
+        x = 1, 2, 3
+        x = y = z = 1, 2, 3
+        x, y, z = 1, 2, 3
+        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+
+        check_syntax_error(self, "x + 1 = 1")
+        check_syntax_error(self, "a + 1 = b + 2")
+
+    def testPrintStmt(self):
+        # 'print' (test ',')* [test]
+        import StringIO
+
+        # Can't test printing to real stdout without comparing output
+        # which is not available in unittest.
+        save_stdout = sys.stdout
+        sys.stdout = StringIO.StringIO()
+
+        print 1, 2, 3
+        print 1, 2, 3,
+        print
+        print 0 or 1, 0 or 1,
+        print 0 or 1
+
+        # 'print' '>>' test ','
+        print >> sys.stdout, 1, 2, 3
+        print >> sys.stdout, 1, 2, 3,
+        print >> sys.stdout
+        print >> sys.stdout, 0 or 1, 0 or 1,
+        print >> sys.stdout, 0 or 1
+
+        # test printing to an instance
+        class Gulp:
+            def write(self, msg): pass
+
+        gulp = Gulp()
+        print >> gulp, 1, 2, 3
+        print >> gulp, 1, 2, 3,
+        print >> gulp
+        print >> gulp, 0 or 1, 0 or 1,
+        print >> gulp, 0 or 1
+
+        # test print >> None
+        def driver():
+            oldstdout = sys.stdout
+            sys.stdout = Gulp()
+            try:
+                tellme(Gulp())
+                tellme()
+            finally:
+                sys.stdout = oldstdout
+
+        # we should see this once
+        def tellme(file=sys.stdout):
+            print >> file, 'hello world'
+
+        driver()
+
+        # we should not see this at all
+        def tellme(file=None):
+            print >> file, 'goodbye universe'
+
+        driver()
+
+        self.assertEqual(sys.stdout.getvalue(), '''\
+1 2 3
+1 2 3
+1 1 1
+1 2 3
+1 2 3
+1 1 1
+hello world
+''')
+        sys.stdout = save_stdout
+
+        # syntax errors
+        check_syntax_error(self, 'print ,')
+        check_syntax_error(self, 'print >> x,')
+
+    def testDelStmt(self):
+        # 'del' exprlist
+        abc = [1,2,3]
+        x, y, z = abc
+        xyz = x, y, z
+
+        del abc
+        del x, y, (z, xyz)
+
+    def testPassStmt(self):
+        # 'pass'
+        pass
+
+    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
+    # Tested below
+
+    def testBreakStmt(self):
+        # 'break'
+        while 1: break
+
+    def testContinueStmt(self):
+        # 'continue'
+        i = 1
+        while i: i = 0; continue
+
+        msg = ""
+        while not msg:
+            msg = "ok"
+            try:
+                continue
+                msg = "continue failed to continue inside try"
+            except:
+                msg = "continue inside try called except block"
+        if msg != "ok":
+            self.fail(msg)
+
+        msg = ""
+        while not msg:
+            msg = "finally block not called"
+            try:
+                continue
+            finally:
+                msg = "ok"
+        if msg != "ok":
+            self.fail(msg)
+
+    def test_break_continue_loop(self):
+        # This test warrants an explanation. It is a test specifically for SF bugs
+        # #463359 and #462937. The bug is that a 'break' statement executed or
+        # exception raised inside a try/except inside a loop, *after* a continue
+        # statement has been executed in that loop, will cause the wrong number of
+        # arguments to be popped off the stack and the instruction pointer reset to
+        # a very small number (usually 0.) Because of this, the following test
+        # *must* be written as a function, and the tracking vars *must* be function
+        # arguments with default values. Otherwise, the test will loop and loop.
+
+        def test_inner(extra_burning_oil = 1, count=0):
+            big_hippo = 2
+            while big_hippo:
+                count += 1
+                try:
+                    if extra_burning_oil and big_hippo == 1:
+                        extra_burning_oil -= 1
+                        break
+                    big_hippo -= 1
+                    continue
+                except:
+                    raise
+            if count > 2 or big_hippo <> 1:
+                self.fail("continue then break in try/except in loop broken!")
+        test_inner()
+
+    def testReturn(self):
+        # 'return' [testlist]
+        def g1(): return
+        def g2(): return 1
+        g1()
+        x = g2()
+        check_syntax_error(self, "class foo:return 1")
+
+    def testYield(self):
+        check_syntax_error(self, "class foo:yield 1")
+
+    def testRaise(self):
+        # 'raise' test [',' test]
+        try: raise RuntimeError, 'just testing'
+        except RuntimeError: pass
+        try: raise KeyboardInterrupt
+        except KeyboardInterrupt: pass
+
+    def testImport(self):
+        # 'import' dotted_as_names
+        import sys
+        import time, sys
+        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
+        from time import time
+        from time import (time)
+        # not testable inside a function, but already done at top of the module
+        # from sys import *
+        from sys import path, argv
+        from sys import (path, argv)
+        from sys import (path, argv,)
+
+    def testGlobal(self):
+        # 'global' NAME (',' NAME)*
+        global a
+        global a, b
+        global one, two, three, four, five, six, seven, eight, nine, ten
+
+    def testExec(self):
+        # 'exec' expr ['in' expr [',' expr]]
+        z = None
+        del z
+        exec 'z=1+1\n'
+        if z != 2: self.fail('exec \'z=1+1\'\\n')
+        del z
+        exec 'z=1+1'
+        if z != 2: self.fail('exec \'z=1+1\'')
+        z = None
+        del z
+        import types
+        if hasattr(types, "UnicodeType"):
+            exec r"""if 1:
+            exec u'z=1+1\n'
+            if z != 2: self.fail('exec u\'z=1+1\'\\n')
+            del z
+            exec u'z=1+1'
+            if z != 2: self.fail('exec u\'z=1+1\'')"""
+        g = {}
+        exec 'z = 1' in g
+        if g.has_key('__builtins__'): del g['__builtins__']
+        if g != {'z': 1}: self.fail('exec \'z = 1\' in g')
+        g = {}
+        l = {}
+
+        import warnings
+        warnings.filterwarnings("ignore", "global statement", module="<string>")
+        exec 'global a; a = 1; b = 2' in g, l
+        if g.has_key('__builtins__'): del g['__builtins__']
+        if l.has_key('__builtins__'): del l['__builtins__']
+        if (g, l) != ({'a':1}, {'b':2}):
+            self.fail('exec ... in g (%s), l (%s)' %(g,l))
+
+    def testAssert(self):
+        # assert_stmt: 'assert' test [',' test]
+        assert 1
+        assert 1, 1
+        assert lambda x:x
+        assert 1, lambda x:x+1
+        try:
+            assert 0, "msg"
+        except AssertionError, e:
+            self.assertEquals(e.args[0], "msg")
+        else:
+            if __debug__:
+                self.fail("AssertionError not raised by assert 0")
+
+    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+    # Tested below
+
+    def testIf(self):
+        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+        if 1: pass
+        if 1: pass
+        else: pass
+        if 0: pass
+        elif 0: pass
+        if 0: pass
+        elif 0: pass
+        elif 0: pass
+        elif 0: pass
+        else: pass
+
+    def testWhile(self):
+        # 'while' test ':' suite ['else' ':' suite]
+        while 0: pass
+        while 0: pass
+        else: pass
+
+        # Issue1920: "while 0" is optimized away,
+        # ensure that the "else" clause is still present.
+        x = 0
+        while 0:
+            x = 1
+        else:
+            x = 2
+        self.assertEquals(x, 2)
+
+    def testFor(self):
+        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+        for i in 1, 2, 3: pass
+        for i, j, k in (): pass
+        else: pass
+        class Squares:
+            def __init__(self, max):
+                self.max = max
+                self.sofar = []
+            def __len__(self): return len(self.sofar)
+            def __getitem__(self, i):
+                if not 0 <= i < self.max: raise IndexError
+                n = len(self.sofar)
+                while n <= i:
+                    self.sofar.append(n*n)
+                    n = n+1
+                return self.sofar[i]
+        n = 0
+        for x in Squares(10): n = n+x
+        if n != 285:
+            self.fail('for over growing sequence')
+
+        result = []
+        for x, in [(1,), (2,), (3,)]:
+            result.append(x)
+        self.assertEqual(result, [1, 2, 3])
+
+    def testTry(self):
+        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+        ###         | 'try' ':' suite 'finally' ':' suite
+        ### except_clause: 'except' [expr [('as' | ',') expr]]
+        try:
+            1/0
+        except ZeroDivisionError:
+            pass
+        else:
+            pass
+        try: 1/0
+        except EOFError: pass
+        except TypeError as msg: pass
+        except RuntimeError, msg: pass
+        except: pass
+        else: pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError): pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError), msg: pass
+        try: pass
+        finally: pass
+
+    def testSuite(self):
+        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+        if 1: pass
+        if 1:
+            pass
+        if 1:
+            #
+            #
+            #
+            pass
+            pass
+            #
+            pass
+            #
+
+    def testTest(self):
+        ### and_test ('or' and_test)*
+        ### and_test: not_test ('and' not_test)*
+        ### not_test: 'not' not_test | comparison
+        if not 1: pass
+        if 1 and 1: pass
+        if 1 or 1: pass
+        if not not not 1: pass
+        if not 1 and 1 and 1: pass
+        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+    def testComparison(self):
+        ### comparison: expr (comp_op expr)*
+        ### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+        if 1: pass
+        x = (1 == 1)
+        if 1 == 1: pass
+        if 1 != 1: pass
+        if 1 <> 1: pass
+        if 1 < 1: pass
+        if 1 > 1: pass
+        if 1 <= 1: pass
+        if 1 >= 1: pass
+        if 1 is 1: pass
+        if 1 is not 1: pass
+        if 1 in (): pass
+        if 1 not in (): pass
+        if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+    def testBinaryMaskOps(self):
+        x = 1 & 1
+        x = 1 ^ 1
+        x = 1 | 1
+
+    def testShiftOps(self):
+        x = 1 << 1
+        x = 1 >> 1
+        x = 1 << 1 >> 1
+
+    def testAdditiveOps(self):
+        x = 1
+        x = 1 + 1
+        x = 1 - 1 - 1
+        x = 1 - 1 + 1 - 1 + 1
+
+    def testMultiplicativeOps(self):
+        x = 1 * 1
+        x = 1 / 1
+        x = 1 % 1
+        x = 1 / 1 * 1 % 1
+
+    def testUnaryOps(self):
+        x = +1
+        x = -1
+        x = ~1
+        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+        x = -1*1/1 + 1*1 - ---1*1
+
+    def testSelectors(self):
+        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+        ### subscript: expr | [expr] ':' [expr]
+
+        import sys, time
+        c = sys.path[0]
+        x = time.time()
+        x = sys.modules['time'].time()
+        a = '01234'
+        c = a[0]
+        c = a[-1]
+        s = a[0:5]
+        s = a[:5]
+        s = a[0:]
+        s = a[:]
+        s = a[-5:]
+        s = a[:-1]
+        s = a[-4:-3]
+        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
+        # The testing here is fairly incomplete.
+        # Test cases should include: commas with 1 and 2 colons
+        d = {}
+        d[1] = 1
+        d[1,] = 2
+        d[1,2] = 3
+        d[1,2,3] = 4
+        L = list(d)
+        L.sort()
+        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
+
+    def testAtoms(self):
+        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
+        ### dictmaker: test ':' test (',' test ':' test)* [',']
+
+        x = (1)
+        x = (1 or 2 or 3)
+        x = (1 or 2 or 3, 2, 3)
+
+        x = []
+        x = [1]
+        x = [1 or 2 or 3]
+        x = [1 or 2 or 3, 2, 3]
+        x = []
+
+        x = {}
+        x = {'one': 1}
+        x = {'one': 1,}
+        x = {'one' or 'two': 1 or 2}
+        x = {'one': 1, 'two': 2}
+        x = {'one': 1, 'two': 2,}
+        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+        x = `x`
+        x = `1 or 2 or 3`
+        self.assertEqual(`1,2`, '(1, 2)')
+
+        x = x
+        x = 'x'
+        x = 123
+
+    ### exprlist: expr (',' expr)* [',']
+    ### testlist: test (',' test)* [',']
+    # These have been exercised enough above
+
+    def testClassdef(self):
+        # 'class' NAME ['(' [testlist] ')'] ':' suite
+        class B: pass
+        class B2(): pass
+        class C1(B): pass
+        class C2(B): pass
+        class D(C1, C2, B): pass
+        class C:
+            def meth1(self): pass
+            def meth2(self, arg): pass
+            def meth3(self, a1, a2): pass
+        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+        # decorators: decorator+
+        # decorated: decorators (classdef | funcdef)
+        def class_decorator(x):
+            x.decorated = True
+            return x
+        @class_decorator
+        class G:
+            pass
+        self.assertEqual(G.decorated, True)
+
+    def testListcomps(self):
+        # list comprehension tests
+        nums = [1, 2, 3, 4, 5]
+        strs = ["Apple", "Banana", "Coconut"]
+        spcs = ["  Apple", " Banana ", "Coco  nut  "]
+
+        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
+        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
+        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
+        self.assertEqual([(i, s) for i in nums for s in strs],
+                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
+                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
+                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
+                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
+                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
+
+        def test_in_func(l):
+            return [None < x < 3 for x in l if x > 2]
+
+        self.assertEqual(test_in_func(nums), [False, False, False])
+
+        def test_nested_front():
+            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
+                             [[1, 2], [3, 4], [5, 6]])
+
+        test_nested_front()
+
+        check_syntax_error(self, "[i, s for i in nums for s in strs]")
+        check_syntax_error(self, "[x if y]")
+
+        suppliers = [
+          (1, "Boeing"),
+          (2, "Ford"),
+          (3, "Macdonalds")
+        ]
+
+        parts = [
+          (10, "Airliner"),
+          (20, "Engine"),
+          (30, "Cheeseburger")
+        ]
+
+        suppart = [
+          (1, 10), (1, 20), (2, 20), (3, 30)
+        ]
+
+        x = [
+          (sname, pname)
+            for (sno, sname) in suppliers
+              for (pno, pname) in parts
+                for (sp_sno, sp_pno) in suppart
+                  if sno == sp_sno and pno == sp_pno
+        ]
+
+        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
+                             ('Macdonalds', 'Cheeseburger')])
+
+    def testGenexps(self):
+        # generator expression tests
+        g = ([x for x in range(10)] for x in range(1))
+        self.assertEqual(g.next(), [x for x in range(10)])
+        try:
+            g.next()
+            self.fail('should produce StopIteration exception')
+        except StopIteration:
+            pass
+
+        a = 1
+        try:
+            g = (a for d in a)
+            g.next()
+            self.fail('should produce TypeError')
+        except TypeError:
+            pass
+
+        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
+        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
+
+        a = [x for x in range(10)]
+        b = (x for x in (y for y in a))
+        self.assertEqual(sum(b), sum([x for x in range(10)]))
+
+        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
+        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
+        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
+        check_syntax_error(self, "foo(x for x in range(10), 100)")
+        check_syntax_error(self, "foo(100, x for x in range(10))")
+
+    def testComprehensionSpecials(self):
+        # test for outermost iterable precomputation
+        x = 10; g = (i for i in range(x)); x = 5
+        self.assertEqual(len(list(g)), 10)
+
+        # This should hold, since we're only precomputing the outermost iterable.
+        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
+        x = 5; t = True;
+        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
+
+        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
+        # even though it's silly. Make sure it works (ifelse broke this).
+        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
+        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
+
+        # verify unpacking single element tuples in listcomp/genexp.
+        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
+        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
+
+    def testIfElseExpr(self):
+        # Test ifelse expressions in various cases
+        def _checkeval(msg, ret):
+            "helper to check that evaluation of expressions is done correctly"
+            print x
+            return ret
+
+        self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
+        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
+        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
+        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
+        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
+        self.assertEqual((5 and 6 if 0 else 1), 1)
+        self.assertEqual(((5 and 6) if 0 else 1), 1)
+        self.assertEqual((5 and (6 if 1 else 1)), 6)
+        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
+        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
+        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
+        self.assertEqual((not 5 if 1 else 1), False)
+        self.assertEqual((not 5 if 0 else 1), 1)
+        self.assertEqual((6 + 1 if 1 else 2), 7)
+        self.assertEqual((6 - 1 if 1 else 2), 5)
+        self.assertEqual((6 * 2 if 1 else 4), 12)
+        self.assertEqual((6 / 2 if 1 else 3), 3)
+        self.assertEqual((6 < 4 if 0 else 2), 2)
+
+
+def test_main():
+    run_unittest(TokenTests, GrammarTests)
+
+if __name__ == '__main__':
+    test_main()

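The testIfElseExpr assertions above all hinge on how tightly the 2.5 conditional
expression binds: more loosely than 'and', 'or', and 'not', so "A and B if C
else D" groups as "(A and B) if C else D".  A minimal standalone sketch of that
grouping, assuming any Python 2.5 or later (not taken from the diff itself):

    # Conditional expressions bind more loosely than and/or/not.
    assert (5 and 6 if 0 else 1) == 1        # groups as (5 and 6) if 0 else 1
    assert (5 and (6 if 1 else 1)) == 6      # parentheses force the other grouping
    assert (not 5 if 1 else 1) is False      # evaluates (not 5) when the test is true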
Added: sandbox/trunk/refactor_pkg/refactor/tests/data/py3_test_grammar.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/data/py3_test_grammar.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,903 @@
+# Python test set -- part 1, grammar.
+# This just tests whether the parser accepts them all.
+
+# NOTE: When you run this test as a script from the command line, you
+# get warnings about certain hex/oct constants.  Since those are
+# issued by the parser, you can't suppress them by adding a
+# filterwarnings() call to this module.  Therefore, to shut up the
+# regression test, the filterwarnings() call has been added to
+# regrtest.py.
+
+from test.support import run_unittest, check_syntax_error
+import unittest
+import sys
+# testing import *
+from sys import *
+
+class TokenTests(unittest.TestCase):
+
+    def testBackslash(self):
+        # Backslash means line continuation:
+        x = 1 \
+        + 1
+        self.assertEquals(x, 2, 'backslash for line continuation')
+
+        # Backslash does not mean continuation in comments :\
+        x = 0
+        self.assertEquals(x, 0, 'backslash ending comment')
+
+    def testPlainIntegers(self):
+        self.assertEquals(type(000), type(0))
+        self.assertEquals(0xff, 255)
+        self.assertEquals(0o377, 255)
+        self.assertEquals(2147483647, 0o17777777777)
+        self.assertEquals(0b1001, 9)
+        # "0x" is not a valid literal
+        self.assertRaises(SyntaxError, eval, "0x")
+        from sys import maxsize
+        if maxsize == 2147483647:
+            self.assertEquals(-2147483647-1, -0o20000000000)
+            # XXX -2147483648
+            self.assert_(0o37777777777 > 0)
+            self.assert_(0xffffffff > 0)
+            self.assert_(0b1111111111111111111111111111111 > 0)
+            for s in ('2147483648', '0o40000000000', '0x100000000',
+                      '0b10000000000000000000000000000000'):
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        elif maxsize == 9223372036854775807:
+            self.assertEquals(-9223372036854775807-1, -0o1000000000000000000000)
+            self.assert_(0o1777777777777777777777 > 0)
+            self.assert_(0xffffffffffffffff > 0)
+            self.assert_(0b11111111111111111111111111111111111111111111111111111111111111 > 0)
+            for s in '9223372036854775808', '0o2000000000000000000000', \
+                     '0x10000000000000000', \
+                     '0b100000000000000000000000000000000000000000000000000000000000000':
+                try:
+                    x = eval(s)
+                except OverflowError:
+                    self.fail("OverflowError on huge integer literal %r" % s)
+        else:
+            self.fail('Weird maxsize value %r' % maxsize)
+
+    def testLongIntegers(self):
+        x = 0
+        x = 0xffffffffffffffff
+        x = 0Xffffffffffffffff
+        x = 0o77777777777777777
+        x = 0O77777777777777777
+        x = 123456789012345678901234567890
+        x = 0b100000000000000000000000000000000000000000000000000000000000000000000
+        x = 0B111111111111111111111111111111111111111111111111111111111111111111111
+
+    def testFloats(self):
+        x = 3.14
+        x = 314.
+        x = 0.314
+        # XXX x = 000.314
+        x = .314
+        x = 3e14
+        x = 3E14
+        x = 3e-14
+        x = 3e+14
+        x = 3.e14
+        x = .3e14
+        x = 3.1e4
+
+    def testStringLiterals(self):
+        x = ''; y = ""; self.assert_(len(x) == 0 and x == y)
+        x = '\''; y = "'"; self.assert_(len(x) == 1 and x == y and ord(x) == 39)
+        x = '"'; y = "\""; self.assert_(len(x) == 1 and x == y and ord(x) == 34)
+        x = "doesn't \"shrink\" does it"
+        y = 'doesn\'t "shrink" does it'
+        self.assert_(len(x) == 24 and x == y)
+        x = "does \"shrink\" doesn't it"
+        y = 'does "shrink" doesn\'t it'
+        self.assert_(len(x) == 24 and x == y)
+        x = """
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+"""
+        y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
+        self.assertEquals(x, y)
+        y = '''
+The "quick"
+brown fox
+jumps over
+the 'lazy' dog.
+'''
+        self.assertEquals(x, y)
+        y = "\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the 'lazy' dog.\n\
+"
+        self.assertEquals(x, y)
+        y = '\n\
+The \"quick\"\n\
+brown fox\n\
+jumps over\n\
+the \'lazy\' dog.\n\
+'
+        self.assertEquals(x, y)
+
+    def testEllipsis(self):
+        x = ...
+        self.assert_(x is Ellipsis)
+        self.assertRaises(SyntaxError, eval, ".. .")
+
+class GrammarTests(unittest.TestCase):
+
+    # single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+    # XXX can't test in a script -- this rule is only used when interactive
+
+    # file_input: (NEWLINE | stmt)* ENDMARKER
+    # Being tested at this very moment, by this very module
+
+    # expr_input: testlist NEWLINE
+    # XXX Hard to test -- used only in calls to input()
+
+    def testEvalInput(self):
+        # testlist ENDMARKER
+        x = eval('1, 0 or 1')
+
+    def testFuncdef(self):
+        ### [decorators] 'def' NAME parameters ['->' test] ':' suite
+        ### decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+        ### decorators: decorator+
+        ### parameters: '(' [typedargslist] ')'
+        ### typedargslist: ((tfpdef ['=' test] ',')*
+        ###                ('*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
+        ###                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
+        ### tfpdef: NAME [':' test]
+        ### varargslist: ((vfpdef ['=' test] ',')*
+        ###              ('*' [vfpdef] (',' vfpdef ['=' test])*  [',' '**' vfpdef] | '**' vfpdef)
+        ###              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
+        ### vfpdef: NAME
+        def f1(): pass
+        f1()
+        f1(*())
+        f1(*(), **{})
+        def f2(one_argument): pass
+        def f3(two, arguments): pass
+        self.assertEquals(f2.__code__.co_varnames, ('one_argument',))
+        self.assertEquals(f3.__code__.co_varnames, ('two', 'arguments'))
+        def a1(one_arg,): pass
+        def a2(two, args,): pass
+        def v0(*rest): pass
+        def v1(a, *rest): pass
+        def v2(a, b, *rest): pass
+
+        f1()
+        f2(1)
+        f2(1,)
+        f3(1, 2)
+        f3(1, 2,)
+        v0()
+        v0(1)
+        v0(1,)
+        v0(1,2)
+        v0(1,2,3,4,5,6,7,8,9,0)
+        v1(1)
+        v1(1,)
+        v1(1,2)
+        v1(1,2,3)
+        v1(1,2,3,4,5,6,7,8,9,0)
+        v2(1,2)
+        v2(1,2,3)
+        v2(1,2,3,4)
+        v2(1,2,3,4,5,6,7,8,9,0)
+
+        def d01(a=1): pass
+        d01()
+        d01(1)
+        d01(*(1,))
+        d01(**{'a':2})
+        def d11(a, b=1): pass
+        d11(1)
+        d11(1, 2)
+        d11(1, **{'b':2})
+        def d21(a, b, c=1): pass
+        d21(1, 2)
+        d21(1, 2, 3)
+        d21(*(1, 2, 3))
+        d21(1, *(2, 3))
+        d21(1, 2, *(3,))
+        d21(1, 2, **{'c':3})
+        def d02(a=1, b=2): pass
+        d02()
+        d02(1)
+        d02(1, 2)
+        d02(*(1, 2))
+        d02(1, *(2,))
+        d02(1, **{'b':2})
+        d02(**{'a': 1, 'b': 2})
+        def d12(a, b=1, c=2): pass
+        d12(1)
+        d12(1, 2)
+        d12(1, 2, 3)
+        def d22(a, b, c=1, d=2): pass
+        d22(1, 2)
+        d22(1, 2, 3)
+        d22(1, 2, 3, 4)
+        def d01v(a=1, *rest): pass
+        d01v()
+        d01v(1)
+        d01v(1, 2)
+        d01v(*(1, 2, 3, 4))
+        d01v(*(1,))
+        d01v(**{'a':2})
+        def d11v(a, b=1, *rest): pass
+        d11v(1)
+        d11v(1, 2)
+        d11v(1, 2, 3)
+        def d21v(a, b, c=1, *rest): pass
+        d21v(1, 2)
+        d21v(1, 2, 3)
+        d21v(1, 2, 3, 4)
+        d21v(*(1, 2, 3, 4))
+        d21v(1, 2, **{'c': 3})
+        def d02v(a=1, b=2, *rest): pass
+        d02v()
+        d02v(1)
+        d02v(1, 2)
+        d02v(1, 2, 3)
+        d02v(1, *(2, 3, 4))
+        d02v(**{'a': 1, 'b': 2})
+        def d12v(a, b=1, c=2, *rest): pass
+        d12v(1)
+        d12v(1, 2)
+        d12v(1, 2, 3)
+        d12v(1, 2, 3, 4)
+        d12v(*(1, 2, 3, 4))
+        d12v(1, 2, *(3, 4, 5))
+        d12v(1, *(2,), **{'c': 3})
+        def d22v(a, b, c=1, d=2, *rest): pass
+        d22v(1, 2)
+        d22v(1, 2, 3)
+        d22v(1, 2, 3, 4)
+        d22v(1, 2, 3, 4, 5)
+        d22v(*(1, 2, 3, 4))
+        d22v(1, 2, *(3, 4, 5))
+        d22v(1, *(2, 3), **{'d': 4})
+
+        # keyword argument type tests
+        try:
+            str('x', **{b'foo':1 })
+        except TypeError:
+            pass
+        else:
+            self.fail('Bytes should not work as keyword argument names')
+        # keyword only argument tests
+        def pos0key1(*, key): return key
+        pos0key1(key=100)
+        def pos2key2(p1, p2, *, k1, k2=100): return p1,p2,k1,k2
+        pos2key2(1, 2, k1=100)
+        pos2key2(1, 2, k1=100, k2=200)
+        pos2key2(1, 2, k2=100, k1=200)
+        def pos2key2dict(p1, p2, *, k1=100, k2, **kwarg): return p1,p2,k1,k2,kwarg
+        pos2key2dict(1,2,k2=100,tokwarg1=100,tokwarg2=200)
+        pos2key2dict(1,2,tokwarg1=100,tokwarg2=200, k2=100)
+
+        # keyword arguments after *arglist
+        def f(*args, **kwargs):
+            return args, kwargs
+        self.assertEquals(f(1, x=2, *[3, 4], y=5), ((1, 3, 4),
+                                                    {'x':2, 'y':5}))
+        self.assertRaises(SyntaxError, eval, "f(1, *(2,3), 4)")
+        self.assertRaises(SyntaxError, eval, "f(1, x=2, *(3,4), x=5)")
+
+        # argument annotation tests
+        def f(x) -> list: pass
+        self.assertEquals(f.__annotations__, {'return': list})
+        def f(x:int): pass
+        self.assertEquals(f.__annotations__, {'x': int})
+        def f(*x:str): pass
+        self.assertEquals(f.__annotations__, {'x': str})
+        def f(**x:float): pass
+        self.assertEquals(f.__annotations__, {'x': float})
+        def f(x, y:1+2): pass
+        self.assertEquals(f.__annotations__, {'y': 3})
+        def f(a, b:1, c:2, d): pass
+        self.assertEquals(f.__annotations__, {'b': 1, 'c': 2})
+        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6): pass
+        self.assertEquals(f.__annotations__,
+                          {'b': 1, 'c': 2, 'e': 3, 'g': 6})
+        def f(a, b:1, c:2, d, e:3=4, f=5, *g:6, h:7, i=8, j:9=10,
+              **k:11) -> 12: pass
+        self.assertEquals(f.__annotations__,
+                          {'b': 1, 'c': 2, 'e': 3, 'g': 6, 'h': 7, 'j': 9,
+                           'k': 11, 'return': 12})
+        # Check for SF Bug #1697248 - mixing decorators and a return annotation
+        def null(x): return x
+        @null
+        def f(x) -> list: pass
+        self.assertEquals(f.__annotations__, {'return': list})
+
+        # test MAKE_CLOSURE with a variety of oparg's
+        closure = 1
+        def f(): return closure
+        def f(x=1): return closure
+        def f(*, k=1): return closure
+        def f() -> int: return closure
+
+        # Check ast errors in *args and *kwargs
+        check_syntax_error(self, "f(*g(1=2))")
+        check_syntax_error(self, "f(**g(1=2))")
+
+    def testLambdef(self):
+        ### lambdef: 'lambda' [varargslist] ':' test
+        l1 = lambda : 0
+        self.assertEquals(l1(), 0)
+        l2 = lambda : a[d] # XXX just testing the expression
+        l3 = lambda : [2 < x for x in [-1, 3, 0]]
+        self.assertEquals(l3(), [0, 1, 0])
+        l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
+        self.assertEquals(l4(), 1)
+        l5 = lambda x, y, z=2: x + y + z
+        self.assertEquals(l5(1, 2), 5)
+        self.assertEquals(l5(1, 2, 3), 6)
+        check_syntax_error(self, "lambda x: x = 2")
+        check_syntax_error(self, "lambda (None,): None")
+        l6 = lambda x, y, *, k=20: x+y+k
+        self.assertEquals(l6(1,2), 1+2+20)
+        self.assertEquals(l6(1,2,k=10), 1+2+10)
+
+
+    ### stmt: simple_stmt | compound_stmt
+    # Tested below
+
+    def testSimpleStmt(self):
+        ### simple_stmt: small_stmt (';' small_stmt)* [';']
+        x = 1; pass; del x
+        def foo():
+            # verify statements that end with semicolons
+            x = 1; pass; del x;
+        foo()
+
+    ### small_stmt: expr_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt
+    # Tested below
+
+    def testExprStmt(self):
+        # (exprlist '=')* exprlist
+        1
+        1, 2, 3
+        x = 1
+        x = 1, 2, 3
+        x = y = z = 1, 2, 3
+        x, y, z = 1, 2, 3
+        abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
+
+        check_syntax_error(self, "x + 1 = 1")
+        check_syntax_error(self, "a + 1 = b + 2")
+
+    def testDelStmt(self):
+        # 'del' exprlist
+        abc = [1,2,3]
+        x, y, z = abc
+        xyz = x, y, z
+
+        del abc
+        del x, y, (z, xyz)
+
+    def testPassStmt(self):
+        # 'pass'
+        pass
+
+    # flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt
+    # Tested below
+
+    def testBreakStmt(self):
+        # 'break'
+        while 1: break
+
+    def testContinueStmt(self):
+        # 'continue'
+        i = 1
+        while i: i = 0; continue
+
+        msg = ""
+        while not msg:
+            msg = "ok"
+            try:
+                continue
+                msg = "continue failed to continue inside try"
+            except:
+                msg = "continue inside try called except block"
+        if msg != "ok":
+            self.fail(msg)
+
+        msg = ""
+        while not msg:
+            msg = "finally block not called"
+            try:
+                continue
+            finally:
+                msg = "ok"
+        if msg != "ok":
+            self.fail(msg)
+
+    def test_break_continue_loop(self):
+        # This test warrants an explanation. It is a test specifically for SF bugs
+        # #463359 and #462937. The bug is that a 'break' statement executed or
+        # exception raised inside a try/except inside a loop, *after* a continue
+        # statement has been executed in that loop, will cause the wrong number of
+        # arguments to be popped off the stack and the instruction pointer reset to
+        # a very small number (usually 0). Because of this, the following test
+        # *must* be written as a function, and the tracking vars *must* be function
+        # arguments with default values. Otherwise, the test will loop and loop.
+
+        def test_inner(extra_burning_oil = 1, count=0):
+            big_hippo = 2
+            while big_hippo:
+                count += 1
+                try:
+                    if extra_burning_oil and big_hippo == 1:
+                        extra_burning_oil -= 1
+                        break
+                    big_hippo -= 1
+                    continue
+                except:
+                    raise
+            if count > 2 or big_hippo != 1:
+                self.fail("continue then break in try/except in loop broken!")
+        test_inner()
+
+    def testReturn(self):
+        # 'return' [testlist]
+        def g1(): return
+        def g2(): return 1
+        g1()
+        x = g2()
+        check_syntax_error(self, "class foo:return 1")
+
+    def testYield(self):
+        check_syntax_error(self, "class foo:yield 1")
+
+    def testRaise(self):
+        # 'raise' test [',' test]
+        try: raise RuntimeError('just testing')
+        except RuntimeError: pass
+        try: raise KeyboardInterrupt
+        except KeyboardInterrupt: pass
+
+    def testImport(self):
+        # 'import' dotted_as_names
+        import sys
+        import time, sys
+        # 'from' dotted_name 'import' ('*' | '(' import_as_names ')' | import_as_names)
+        from time import time
+        from time import (time)
+        # not testable inside a function, but already done at top of the module
+        # from sys import *
+        from sys import path, argv
+        from sys import (path, argv)
+        from sys import (path, argv,)
+
+    def testGlobal(self):
+        # 'global' NAME (',' NAME)*
+        global a
+        global a, b
+        global one, two, three, four, five, six, seven, eight, nine, ten
+
+    def testNonlocal(self):
+        # 'nonlocal' NAME (',' NAME)*
+        x = 0
+        y = 0
+        def f():
+            nonlocal x
+            nonlocal x, y
+
+    def testAssert(self):
+        # assert_stmt: 'assert' test [',' test]
+        assert 1
+        assert 1, 1
+        assert lambda x:x
+        assert 1, lambda x:x+1
+        try:
+            assert 0, "msg"
+        except AssertionError as e:
+            self.assertEquals(e.args[0], "msg")
+        else:
+            if __debug__:
+                self.fail("AssertionError not raised by assert 0")
+
+    ### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+    # Tested below
+
+    def testIf(self):
+        # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
+        if 1: pass
+        if 1: pass
+        else: pass
+        if 0: pass
+        elif 0: pass
+        if 0: pass
+        elif 0: pass
+        elif 0: pass
+        elif 0: pass
+        else: pass
+
+    def testWhile(self):
+        # 'while' test ':' suite ['else' ':' suite]
+        while 0: pass
+        while 0: pass
+        else: pass
+
+        # Issue1920: "while 0" is optimized away;
+        # ensure that the "else" clause is still present.
+        x = 0
+        while 0:
+            x = 1
+        else:
+            x = 2
+        self.assertEquals(x, 2)
+
+    def testFor(self):
+        # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
+        for i in 1, 2, 3: pass
+        for i, j, k in (): pass
+        else: pass
+        class Squares:
+            def __init__(self, max):
+                self.max = max
+                self.sofar = []
+            def __len__(self): return len(self.sofar)
+            def __getitem__(self, i):
+                if not 0 <= i < self.max: raise IndexError
+                n = len(self.sofar)
+                while n <= i:
+                    self.sofar.append(n*n)
+                    n = n+1
+                return self.sofar[i]
+        n = 0
+        for x in Squares(10): n = n+x
+        if n != 285:
+            self.fail('for over growing sequence')
+
+        result = []
+        for x, in [(1,), (2,), (3,)]:
+            result.append(x)
+        self.assertEqual(result, [1, 2, 3])
+
+    def testTry(self):
+        ### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
+        ###         | 'try' ':' suite 'finally' ':' suite
+        ### except_clause: 'except' [expr ['as' expr]]
+        try:
+            1/0
+        except ZeroDivisionError:
+            pass
+        else:
+            pass
+        try: 1/0
+        except EOFError: pass
+        except TypeError as msg: pass
+        except RuntimeError as msg: pass
+        except: pass
+        else: pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError): pass
+        try: 1/0
+        except (EOFError, TypeError, ZeroDivisionError) as msg: pass
+        try: pass
+        finally: pass
+
+    def testSuite(self):
+        # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
+        if 1: pass
+        if 1:
+            pass
+        if 1:
+            #
+            #
+            #
+            pass
+            pass
+            #
+            pass
+            #
+
+    def testTest(self):
+        ### and_test ('or' and_test)*
+        ### and_test: not_test ('and' not_test)*
+        ### not_test: 'not' not_test | comparison
+        if not 1: pass
+        if 1 and 1: pass
+        if 1 or 1: pass
+        if not not not 1: pass
+        if not 1 and 1 and 1: pass
+        if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
+
+    def testComparison(self):
+        ### comparison: expr (comp_op expr)*
+        ### comp_op: '<'|'>'|'=='|'>='|'<='|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+        if 1: pass
+        x = (1 == 1)
+        if 1 == 1: pass
+        if 1 != 1: pass
+        if 1 < 1: pass
+        if 1 > 1: pass
+        if 1 <= 1: pass
+        if 1 >= 1: pass
+        if 1 is 1: pass
+        if 1 is not 1: pass
+        if 1 in (): pass
+        if 1 not in (): pass
+        if 1 < 1 > 1 == 1 >= 1 <= 1 != 1 in 1 not in 1 is 1 is not 1: pass
+
+    def testBinaryMaskOps(self):
+        x = 1 & 1
+        x = 1 ^ 1
+        x = 1 | 1
+
+    def testShiftOps(self):
+        x = 1 << 1
+        x = 1 >> 1
+        x = 1 << 1 >> 1
+
+    def testAdditiveOps(self):
+        x = 1
+        x = 1 + 1
+        x = 1 - 1 - 1
+        x = 1 - 1 + 1 - 1 + 1
+
+    def testMultiplicativeOps(self):
+        x = 1 * 1
+        x = 1 / 1
+        x = 1 % 1
+        x = 1 / 1 * 1 % 1
+
+    def testUnaryOps(self):
+        x = +1
+        x = -1
+        x = ~1
+        x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
+        x = -1*1/1 + 1*1 - ---1*1
+
+    def testSelectors(self):
+        ### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
+        ### subscript: expr | [expr] ':' [expr]
+
+        import sys, time
+        c = sys.path[0]
+        x = time.time()
+        x = sys.modules['time'].time()
+        a = '01234'
+        c = a[0]
+        c = a[-1]
+        s = a[0:5]
+        s = a[:5]
+        s = a[0:]
+        s = a[:]
+        s = a[-5:]
+        s = a[:-1]
+        s = a[-4:-3]
+        # A rough test of SF bug 1333982.  http://python.org/sf/1333982
+        # The testing here is fairly incomplete.
+        # Test cases should include: commas with 1 and 2 colons
+        d = {}
+        d[1] = 1
+        d[1,] = 2
+        d[1,2] = 3
+        d[1,2,3] = 4
+        L = list(d)
+        L.sort(key=lambda x: x if isinstance(x, tuple) else ())
+        self.assertEquals(str(L), '[1, (1,), (1, 2), (1, 2, 3)]')
+
+    def testAtoms(self):
+        ### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictsetmaker] '}' | NAME | NUMBER | STRING
+        ### dictsetmaker: (test ':' test (',' test ':' test)* [',']) | (test (',' test)* [','])
+
+        x = (1)
+        x = (1 or 2 or 3)
+        x = (1 or 2 or 3, 2, 3)
+
+        x = []
+        x = [1]
+        x = [1 or 2 or 3]
+        x = [1 or 2 or 3, 2, 3]
+        x = []
+
+        x = {}
+        x = {'one': 1}
+        x = {'one': 1,}
+        x = {'one' or 'two': 1 or 2}
+        x = {'one': 1, 'two': 2}
+        x = {'one': 1, 'two': 2,}
+        x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
+
+        x = {'one'}
+        x = {'one', 1,}
+        x = {'one', 'two', 'three'}
+        x = {2, 3, 4,}
+
+        x = x
+        x = 'x'
+        x = 123
+
+    ### exprlist: expr (',' expr)* [',']
+    ### testlist: test (',' test)* [',']
+    # These have been exercised enough above
+
+    def testClassdef(self):
+        # 'class' NAME ['(' [testlist] ')'] ':' suite
+        class B: pass
+        class B2(): pass
+        class C1(B): pass
+        class C2(B): pass
+        class D(C1, C2, B): pass
+        class C:
+            def meth1(self): pass
+            def meth2(self, arg): pass
+            def meth3(self, a1, a2): pass
+
+        # decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
+        # decorators: decorator+
+        # decorated: decorators (classdef | funcdef)
+        def class_decorator(x): return x
+        @class_decorator
+        class G: pass
+
+    def testDictcomps(self):
+        # dictorsetmaker: ( (test ':' test (comp_for |
+        #                                   (',' test ':' test)* [','])) |
+        #                   (test (comp_for | (',' test)* [','])) )
+        nums = [1, 2, 3]
+        self.assertEqual({i:i+1 for i in nums}, {1: 2, 2: 3, 3: 4})
+
+    def testListcomps(self):
+        # list comprehension tests
+        nums = [1, 2, 3, 4, 5]
+        strs = ["Apple", "Banana", "Coconut"]
+        spcs = ["  Apple", " Banana ", "Coco  nut  "]
+
+        self.assertEqual([s.strip() for s in spcs], ['Apple', 'Banana', 'Coco  nut'])
+        self.assertEqual([3 * x for x in nums], [3, 6, 9, 12, 15])
+        self.assertEqual([x for x in nums if x > 2], [3, 4, 5])
+        self.assertEqual([(i, s) for i in nums for s in strs],
+                         [(1, 'Apple'), (1, 'Banana'), (1, 'Coconut'),
+                          (2, 'Apple'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Apple'), (3, 'Banana'), (3, 'Coconut'),
+                          (4, 'Apple'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Apple'), (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(i, s) for i in nums for s in [f for f in strs if "n" in f]],
+                         [(1, 'Banana'), (1, 'Coconut'), (2, 'Banana'), (2, 'Coconut'),
+                          (3, 'Banana'), (3, 'Coconut'), (4, 'Banana'), (4, 'Coconut'),
+                          (5, 'Banana'), (5, 'Coconut')])
+        self.assertEqual([(lambda a:[a**i for i in range(a+1)])(j) for j in range(5)],
+                         [[1], [1, 1], [1, 2, 4], [1, 3, 9, 27], [1, 4, 16, 64, 256]])
+
+        def test_in_func(l):
+            return [0 < x < 3 for x in l if x > 2]
+
+        self.assertEqual(test_in_func(nums), [False, False, False])
+
+        def test_nested_front():
+            self.assertEqual([[y for y in [x, x + 1]] for x in [1,3,5]],
+                             [[1, 2], [3, 4], [5, 6]])
+
+        test_nested_front()
+
+        check_syntax_error(self, "[i, s for i in nums for s in strs]")
+        check_syntax_error(self, "[x if y]")
+
+        suppliers = [
+          (1, "Boeing"),
+          (2, "Ford"),
+          (3, "Macdonalds")
+        ]
+
+        parts = [
+          (10, "Airliner"),
+          (20, "Engine"),
+          (30, "Cheeseburger")
+        ]
+
+        suppart = [
+          (1, 10), (1, 20), (2, 20), (3, 30)
+        ]
+
+        x = [
+          (sname, pname)
+            for (sno, sname) in suppliers
+              for (pno, pname) in parts
+                for (sp_sno, sp_pno) in suppart
+                  if sno == sp_sno and pno == sp_pno
+        ]
+
+        self.assertEqual(x, [('Boeing', 'Airliner'), ('Boeing', 'Engine'), ('Ford', 'Engine'),
+                             ('Macdonalds', 'Cheeseburger')])
+
+    def testGenexps(self):
+        # generator expression tests
+        g = ([x for x in range(10)] for x in range(1))
+        self.assertEqual(next(g), [x for x in range(10)])
+        try:
+            next(g)
+            self.fail('should produce StopIteration exception')
+        except StopIteration:
+            pass
+
+        a = 1
+        try:
+            g = (a for d in a)
+            next(g)
+            self.fail('should produce TypeError')
+        except TypeError:
+            pass
+
+        self.assertEqual(list((x, y) for x in 'abcd' for y in 'abcd'), [(x, y) for x in 'abcd' for y in 'abcd'])
+        self.assertEqual(list((x, y) for x in 'ab' for y in 'xy'), [(x, y) for x in 'ab' for y in 'xy'])
+
+        a = [x for x in range(10)]
+        b = (x for x in (y for y in a))
+        self.assertEqual(sum(b), sum([x for x in range(10)]))
+
+        self.assertEqual(sum(x**2 for x in range(10)), sum([x**2 for x in range(10)]))
+        self.assertEqual(sum(x*x for x in range(10) if x%2), sum([x*x for x in range(10) if x%2]))
+        self.assertEqual(sum(x for x in (y for y in range(10))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10)))), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in [y for y in (z for z in range(10))]), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True)) if True), sum([x for x in range(10)]))
+        self.assertEqual(sum(x for x in (y for y in (z for z in range(10) if True) if False) if True), 0)
+        check_syntax_error(self, "foo(x for x in range(10), 100)")
+        check_syntax_error(self, "foo(100, x for x in range(10))")
+
+    def testComprehensionSpecials(self):
+        # test for outermost iterable precomputation
+        x = 10; g = (i for i in range(x)); x = 5
+        self.assertEqual(len(list(g)), 10)
+
+        # This should hold, since we're only precomputing the outermost iterable.
+        x = 10; t = False; g = ((i,j) for i in range(x) if t for j in range(x))
+        x = 5; t = True;
+        self.assertEqual([(i,j) for i in range(10) for j in range(5)], list(g))
+
+        # Grammar allows multiple adjacent 'if's in listcomps and genexps,
+        # even though it's silly. Make sure it works (ifelse broke this).
+        self.assertEqual([ x for x in range(10) if x % 2 if x % 3 ], [1, 5, 7])
+        self.assertEqual(list(x for x in range(10) if x % 2 if x % 3), [1, 5, 7])
+
+        # verify unpacking single element tuples in listcomp/genexp.
+        self.assertEqual([x for x, in [(4,), (5,), (6,)]], [4, 5, 6])
+        self.assertEqual(list(x for x, in [(7,), (8,), (9,)]), [7, 8, 9])
+
+    def testIfElseExpr(self):
+        # Test ifelse expressions in various cases
+        def _checkeval(msg, ret):
+            "helper to check that evaluation of expressions is done correctly"
+            print(x)
+            return ret
+
+        # the next line is not allowed anymore
+        #self.assertEqual([ x() for x in lambda: True, lambda: False if x() ], [True])
+        self.assertEqual([ x() for x in (lambda: True, lambda: False) if x() ], [True])
+        self.assertEqual([ x(False) for x in (lambda x: False if x else True, lambda x: True if x else False) if x(False) ], [True])
+        self.assertEqual((5 if 1 else _checkeval("check 1", 0)), 5)
+        self.assertEqual((_checkeval("check 2", 0) if 0 else 5), 5)
+        self.assertEqual((5 and 6 if 0 else 1), 1)
+        self.assertEqual(((5 and 6) if 0 else 1), 1)
+        self.assertEqual((5 and (6 if 1 else 1)), 6)
+        self.assertEqual((0 or _checkeval("check 3", 2) if 0 else 3), 3)
+        self.assertEqual((1 or _checkeval("check 4", 2) if 1 else _checkeval("check 5", 3)), 1)
+        self.assertEqual((0 or 5 if 1 else _checkeval("check 6", 3)), 5)
+        self.assertEqual((not 5 if 1 else 1), False)
+        self.assertEqual((not 5 if 0 else 1), 1)
+        self.assertEqual((6 + 1 if 1 else 2), 7)
+        self.assertEqual((6 - 1 if 1 else 2), 5)
+        self.assertEqual((6 * 2 if 1 else 4), 12)
+        self.assertEqual((6 / 2 if 1 else 3), 3)
+        self.assertEqual((6 < 4 if 0 else 2), 2)
+
+
+def test_main():
+    run_unittest(TokenTests, GrammarTests)
+
+if __name__ == '__main__':
+    test_main()

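testComprehensionSpecials above relies on the rule that only the iterable of a
generator expression's outermost for-clause is evaluated when the expression is
created; every inner clause is evaluated lazily, per iteration.  A standalone
sketch of that rule (runs unchanged on Python 2.4+ and 3.x):

    x = 10
    g = (i for i in range(x))    # range(x) is evaluated here, with x == 10
    x = 5                        # rebinding x afterwards does not affect g
    assert len(list(g)) == 10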
Added: sandbox/trunk/refactor_pkg/refactor/tests/pytree_idempotency.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/pytree_idempotency.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,92 @@
+#!/usr/bin/env python2.5
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Main program for testing the infrastructure."""
+
+__author__ = "Guido van Rossum <guido at python.org>"
+
+# Support imports (need to be imported first)
+from . import support
+
+# Python imports
+import os
+import sys
+import logging
+
+# Local imports
+from .. import pytree
+import pgen2
+from pgen2 import driver
+
+logging.basicConfig()
+
+def main():
+    gr = driver.load_grammar("Grammar.txt")
+    dr = driver.Driver(gr, convert=pytree.convert)
+
+    fn = "example.py"
+    tree = dr.parse_file(fn, debug=True)
+    if not diff(fn, tree):
+        print "No diffs."
+    if not sys.argv[1:]:
+        return # Pass a dummy argument to run the complete test suite below
+
+    problems = []
+
+    # Process every imported module
+    for name in sys.modules:
+        mod = sys.modules[name]
+        if mod is None or not hasattr(mod, "__file__"):
+            continue
+        fn = mod.__file__
+        if fn.endswith(".pyc"):
+            fn = fn[:-1]
+        if not fn.endswith(".py"):
+            continue
+        print >>sys.stderr, "Parsing", fn
+        tree = dr.parse_file(fn, debug=True)
+        if diff(fn, tree):
+            problems.append(fn)
+
+    # Process every single module on sys.path (but not in packages)
+    for dir in sys.path:
+        try:
+            names = os.listdir(dir)
+        except os.error:
+            continue
+        print >>sys.stderr, "Scanning", dir, "..."
+        for name in names:
+            if not name.endswith(".py"):
+                continue
+            print >>sys.stderr, "Parsing", name
+            fn = os.path.join(dir, name)
+            try:
+                tree = dr.parse_file(fn, debug=True)
+            except pgen2.parse.ParseError, err:
+                print "ParseError:", err
+            else:
+                if diff(fn, tree):
+                    problems.append(fn)
+
+    # Show summary of problem files
+    if not problems:
+        print "No problems.  Congratulations!"
+    else:
+        print "Problems in following files:"
+        for fn in problems:
+            print "***", fn
+
+def diff(fn, tree):
+    f = open("@", "w")
+    try:
+        f.write(str(tree))
+    finally:
+        f.close()
+    try:
+        return os.system("diff -u %s @" % fn)
+    finally:
+        os.remove("@")
+
+if __name__ == "__main__":
+    main()

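The diff() helper above checks idempotency by dumping str(tree) to a scratch
file (literally named "@") and shelling out to diff -u.  A hypothetical
in-process equivalent, assuming only the driver API used above (parse_file()
returning a tree whose str() should reproduce the source exactly):

    def is_idempotent(fn, dr):
        # Parse fn and check that rendering the tree reproduces the file.
        source = open(fn).read()
        tree = dr.parse_file(fn)
        return str(tree) == source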
Added: sandbox/trunk/refactor_pkg/refactor/tests/support.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/support.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,78 @@
+"""Support code for test_*.py files"""
+# Original Author: Collin Winter
+
+# Python imports
+import unittest
+import sys
+import os
+import os.path
+import re
+from textwrap import dedent
+
+#sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
+
+# Local imports
+from .. import pytree
+from .. import refactor
+from ..pgen2 import driver
+
+test_pkg = "refactor.fixes"
+test_dir = os.path.dirname(__file__)
+proj_dir = os.path.normpath(os.path.join(test_dir, ".."))
+grammar_path = os.path.join(test_dir, "..", "Grammar.txt")
+grammar = driver.load_grammar(grammar_path)
+driver = driver.Driver(grammar, convert=pytree.convert)
+
+def parse_version(version_string):
+    """Returns a version tuple matching input version_string."""
+    if not version_string:
+        return ()
+
+    version_list = []
+    for token in version_string.split('.'):
+        try:
+            version_list.append(int(token))
+        except ValueError:
+            version_list.append(token)
+    return tuple(version_list)
+
+def parse_string(string):
+    return driver.parse_string(reformat(string), debug=True)
+
+# Python 2.3's TestSuite is not iter()-able
+if sys.version_info < (2, 4):
+    def TestSuite_iter(self):
+        return iter(self._tests)
+    unittest.TestSuite.__iter__ = TestSuite_iter
+
+def run_all_tests(test_mod=None, tests=None):
+    if tests is None:
+        tests = unittest.TestLoader().loadTestsFromModule(test_mod)
+    unittest.TextTestRunner(verbosity=2).run(tests)
+
+def reformat(string):
+    return dedent(string) + "\n\n"
+
+def get_refactorer(fixers=None, options=None, pkg_name=None):
+    """
+    A convenience function for creating a RefactoringTool for tests.
+
+    fixers is a list of fixers for the RefactoringTool to use. By default
+    "refactor.fixes.*" is used. options is an optional dictionary of options to
+    be passed to the RefactoringTool.
+    """
+    pkg_name = pkg_name or test_pkg
+    if fixers is not None:
+        fixers = [pkg_name + ".fix_" + fix for fix in fixers]
+    else:
+        fixers = refactor.get_fixers_from_package(pkg_name)
+    options = options or {}
+    return refactor.RefactoringTool(fixers, options, explicit=True)
+
+def all_project_files():
+    for dirpath, dirnames, filenames in os.walk(proj_dir):
+        for filename in filenames:
+            if filename.endswith(".py"):
+                yield os.path.join(dirpath, filename)
+
+TestCase = unittest.TestCase

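A sketch of how these helpers compose, assuming the package layout added by
this commit; the "print" fixer name and the sample input are illustrative only:

    from refactor.tests import support

    tool = support.get_refactorer(fixers=["print"])  # loads <pkg>.fix_print
    src = support.reformat("print 'hello'")          # dedent plus trailing newlines
    tree = tool.refactor_string(src, "<example>")
    # str(tree) would then be expected to read: print('hello')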
Added: sandbox/trunk/refactor_pkg/refactor/tests/test_all_fixers.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/test_all_fixers.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,35 @@
+#!/usr/bin/env python2.5
+"""Tests that run all fixer modules over an input stream.
+
+This has been broken out into its own test module because of its
+running time.
+"""
+# Author: Collin Winter
+
+# Testing imports
+try:
+    from . import support
+except ImportError:
+    import support
+
+# Python imports
+import unittest
+
+# Local imports
+from .. import pytree
+from .. import refactor
+
+class Test_all(support.TestCase):
+    def setUp(self):
+        options = {"print_function" : False}
+        self.refactor = support.get_refactorer(options=options)
+
+    def test_all_project_files(self):
+        for filepath in support.all_project_files():
+            print "Fixing %s..." % filepath
+            self.refactor.refactor_string(open(filepath).read(), filepath)
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

Added: sandbox/trunk/refactor_pkg/refactor/tests/test_fixers.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/test_fixers.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,222 @@
+#!/usr/bin/env python2.5
+""" Test suite for the fixer modules """
+# Original Author: Collin Winter
+
+# Testing imports
+try:
+    from tests import support
+except ImportError:
+    import support
+
+# Python imports
+import os
+import unittest
+from itertools import chain
+from operator import itemgetter
+
+# Local imports
+from .. import pygram, pytree, refactor, fixer_util
+
+class FixerTestCase(support.TestCase):
+    old_version = (3, 0)
+    new_version = (2, 5)
+
+    def setUp(self, fix_list=None):
+        if fix_list is None:
+            fix_list = [self.fixer]
+        options = {"print_function" : False}
+        pkg_name = self.get_pkg_name()
+        self.refactor = support.get_refactorer(fix_list, options,
+                                               pkg_name=pkg_name)
+        self.fixer_log = []
+        self.filename = "<string>"
+
+        for fixer in chain(self.refactor.pre_order,
+                           self.refactor.post_order):
+            fixer.log = self.fixer_log
+
+    def _check(self, versions, ignore_warnings=False):
+        """Verify that a fix matches the expected code before and after.
+
+        versions is a dict mapping version tuples to sample code.
+
+        Example:
+            _check({(3, 0): 'print()',
+                    (2, 3): 'print'})
+            # The same dict applies for 3.x to 2.x and vice versa.
+        """
+        before = self.price_is_right(versions, self.old_version)
+        after = self.price_is_right(versions, self.new_version)
+
+        # Quit now if either before or after failed to win the Price is Right.
+        if before is None or after is None:
+            return
+
+        before = support.reformat(before)
+        after = support.reformat(after)
+
+        tree = self.refactor.refactor_string(before, self.filename)
+        self.failUnlessEqual(after, str(tree))
+        if not ignore_warnings:
+            self.failUnlessEqual(self.fixer_log, [])
+        return tree
+
+
+    def price_is_right(self, versions, target_version):
+        """Return the snippet for the closest version in versions
+        without going over target_version."""
+        snippet = None
+        for version_key in sorted(versions.keys()):
+            if version_key > target_version:
+                break
+            snippet = versions[version_key]
+        return snippet
+
+    def check(self, up, down):
+        if self.old_version > self.new_version:
+            self._check(down)
+        elif self.old_version < self.new_version:
+            self._check(up)
+        else:
+            self._check(down)
+            self._check(up)
+
+    def get_pkg_name(self):
+        if self.old_version >= (3, 0):
+            return 'refactor.fixes.from3'
+        else:
+            return 'refactor.fixes.from2'
+
+    def warns(self, before, after, message, unchanged=False):
+        tree = self._check(before, after)
+        self.failUnless(message in "".join(self.fixer_log))
+        if not unchanged:
+            self.failUnless(tree.was_changed)
+
+    def warns_unchanged(self, before, message):
+        self.warns(before, before, message, unchanged=True)
+
+    def unchanged(self, before, ignore_warnings=False):
+        self._check(before, before)
+        if not ignore_warnings:
+            self.failUnlessEqual(self.fixer_log, [])
+
+    def assert_runs_after(self, *names):
+        fixes = [self.fixer]
+        fixes.extend(names)
+        options = {"print_function" : False}
+        r = support.get_refactorer(fixes, options)
+        (pre, post) = r.get_fixers()
+        n = "fix_" + self.fixer
+        if post and post[-1].__class__.__module__.endswith(n):
+            # We're the last fixer to run
+            return
+        if pre and pre[-1].__class__.__module__.endswith(n) and not post:
+            # We're the last in pre and post is empty
+            return
+        self.fail("Fixer run order (%s) is incorrect; %s should be last."\
+               %(", ".join([x.__class__.__module__ for x in (pre+post)]), n))
+
+class Test_range(FixerTestCase):
+    fixer = "range"
+
+    def test_xrange(self):
+        up = {}
+        down = {
+            (2, 5): """x = xrange(0, 10, 2)""",
+            (3, 0): """x = range(0, 10, 2)""",
+            }
+        self.check(up, down)
+
+    def test_range(self):
+        up = {}
+        down = {
+            (2, 5): """x = list(xrange(0, 10, 2))""",
+            (3, 0): """x = list(range(0, 10, 2))""",
+            }
+        self.check(up, down)
+
+class Test_renames(FixerTestCase):
+    fixer = "renames"
+
+    def test_maxint(self):
+        up = {}
+        down = {
+            (2, 5): """sys.maxint""",
+            (2, 6): """sys.maxsize""",
+            }
+        self.check(up, down)
+
+class Test_print(FixerTestCase):
+    """
+    http://docs.python.org/3.0/whatsnew/3.0.html
+
+    Old: print "The answer is", 2*2
+    New: print("The answer is", 2*2)
+
+    Old: print x,           # Trailing comma suppresses newline
+    New: print(x, end=" ")  # Appends a space instead of a newline
+
+    Old: print              # Prints a newline
+    New: print()            # You must call the function!
+
+    Old: print >>sys.stderr, "fatal error"
+    New: print("fatal error", file=sys.stderr)
+
+    Old: print (x, y)       # prints repr((x, y))
+    New: print((x, y))      # Not the same as print(x, y)!
+    """
+
+    fixer = "print"
+
+    def test_func(self):
+        up = {}
+        down = {
+            (2, 5): """print""",
+            (3, 0): """print()""",
+            }
+        self.check(up, down)
+
+    def test_x(self):
+        up = {}
+        down = {
+            (2, 5): """print x""",
+            (3, 0): """print(x)""",
+            }
+        self.check(up, down)
+
+    def test_str(self):
+        up = {}
+        down = {
+            (2, 5): """print ''""",
+            (3, 0): """print('')""",
+            }
+        self.check(up, down)
+
+    def test_compound(self):
+        up = {}
+        down = {
+            (2, 5): """print "The answer is", 2*2""",
+            (3, 0): """print("The answer is", 2*2)""",
+            }
+        self.check(up, down)
+
+    def test_end(self):
+        up = {}
+        down = {
+            (2, 5): """print x, """,
+            (3, 0): """print(x, end=" ")""",
+            }
+        self.check(up, down)
+
+    def test_stderr(self):
+        up = {}
+        down = {
+            (2, 5): """print >>sys.stderr, 'fatal error'""",
+            (3, 0): """print('fatal error', file=sys.stderr)""",
+            }
+        self.check(up, down)
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

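price_is_right() above implements a "closest without going over" lookup: among
the version keys that do not exceed the target, the highest one wins.  The same
rule as a standalone function, exercised with the sys.maxint/sys.maxsize data
from Test_renames:

    def closest_snippet(versions, target):
        # Highest version key <= target wins; None if every key is too new.
        snippet = None
        for key in sorted(versions):
            if key > target:
                break
            snippet = versions[key]
        return snippet

    down = {(2, 5): "sys.maxint", (2, 6): "sys.maxsize"}
    assert closest_snippet(down, (2, 5)) == "sys.maxint"
    assert closest_snippet(down, (3, 0)) == "sys.maxsize"
    assert closest_snippet(down, (2, 4)) is None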
Added: sandbox/trunk/refactor_pkg/refactor/tests/test_parser.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/test_parser.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,202 @@
+#!/usr/bin/env python2.5
+"""Test suite for refactor's parser and grammar files.
+
+This is the place to add tests for changes to refactor's grammar, such as those
+merging the grammars for Python 2 and 3. In addition to specific tests for
+parts of the grammar we've changed, we also make sure we can parse the
+test_grammar.py files from both Python 2 and Python 3.
+"""
+# Author: Collin Winter
+
+# Testing imports
+from . import support
+from .support import driver, test_dir
+
+# Python imports
+import os
+import os.path
+
+# Local imports
+from ..pgen2.parse import ParseError
+
+
+class GrammarTest(support.TestCase):
+    def validate(self, code):
+        support.parse_string(code)
+
+    def invalid_syntax(self, code):
+        try:
+            self.validate(code)
+        except ParseError:
+            pass
+        else:
+            raise AssertionError("Syntax shouldn't have been valid")
+
+
+class TestRaiseChanges(GrammarTest):
+    def test_2x_style_1(self):
+        self.validate("raise")
+
+    def test_2x_style_2(self):
+        self.validate("raise E, V")
+
+    def test_2x_style_3(self):
+        self.validate("raise E, V, T")
+
+    def test_2x_style_invalid_1(self):
+        self.invalid_syntax("raise E, V, T, Z")
+
+    def test_3x_style(self):
+        self.validate("raise E1 from E2")
+
+    def test_3x_style_invalid_1(self):
+        self.invalid_syntax("raise E, V from E1")
+
+    def test_3x_style_invalid_2(self):
+        self.invalid_syntax("raise E from E1, E2")
+
+    def test_3x_style_invalid_3(self):
+        self.invalid_syntax("raise from E1, E2")
+
+    def test_3x_style_invalid_4(self):
+        self.invalid_syntax("raise E from")
+
+
+# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testFuncdef
+class TestFunctionAnnotations(GrammarTest):
+    def test_1(self):
+        self.validate("""def f(x) -> list: pass""")
+
+    def test_2(self):
+        self.validate("""def f(x:int): pass""")
+
+    def test_3(self):
+        self.validate("""def f(*x:str): pass""")
+
+    def test_4(self):
+        self.validate("""def f(**x:float): pass""")
+
+    def test_5(self):
+        self.validate("""def f(x, y:1+2): pass""")
+
+    def test_6(self):
+        self.validate("""def f(a, (b:1, c:2, d)): pass""")
+
+    def test_7(self):
+        self.validate("""def f(a, (b:1, c:2, d), e:3=4, f=5, *g:6): pass""")
+
+    def test_8(self):
+        s = """def f(a, (b:1, c:2, d), e:3=4, f=5,
+                        *g:6, h:7, i=8, j:9=10, **k:11) -> 12: pass"""
+        self.validate(s)
+
+
+class TestExcept(GrammarTest):
+    def test_new(self):
+        s = """
+            try:
+                x
+            except E as N:
+                y"""
+        self.validate(s)
+
+    def test_old(self):
+        s = """
+            try:
+                x
+            except E, N:
+                y"""
+        self.validate(s)
+
+
+# Adapted from Python 3's Lib/test/test_grammar.py:GrammarTests.testAtoms
+class TestSetLiteral(GrammarTest):
+    def test_1(self):
+        self.validate("""x = {'one'}""")
+
+    def test_2(self):
+        self.validate("""x = {'one', 1,}""")
+
+    def test_3(self):
+        self.validate("""x = {'one', 'two', 'three'}""")
+
+    def test_4(self):
+        self.validate("""x = {2, 3, 4,}""")
+
+
+class TestNumericLiterals(GrammarTest):
+    def test_new_octal_notation(self):
+        self.validate("""0o7777777777777""")
+        self.invalid_syntax("""0o7324528887""")
+
+    def test_new_binary_notation(self):
+        self.validate("""0b101010""")
+        self.invalid_syntax("""0b0101021""")
+
+
+class TestClassDef(GrammarTest):
+    def test_new_syntax(self):
+        self.validate("class B(t=7): pass")
+        self.validate("class B(t, *args): pass")
+        self.validate("class B(t, **kwargs): pass")
+        self.validate("class B(t, *args, **kwargs): pass")
+        self.validate("class B(t, y=9, *args, **kwargs): pass")
+
+
+class TestParserIdempotency(support.TestCase):
+
+    """A cut-down version of pytree_idempotency.py."""
+
+    def test_all_project_files(self):
+        for filepath in support.all_project_files():
+            print "Parsing %s..." % filepath
+            tree = driver.parse_file(filepath, debug=True)
+            if diff(filepath, tree):
+                self.fail("Idempotency failed: %s" % filepath)
+
+
+class TestLiterals(GrammarTest):
+
+    def test_multiline_bytes_literals(self):
+        s = """
+            md5test(b"\xaa" * 80,
+                    (b"Test Using Larger Than Block-Size Key "
+                     b"and Larger Than One Block-Size Data"),
+                    "6f630fad67cda0ee1fb1f562db3aa53e")
+            """
+        self.validate(s)
+
+    def test_multiline_bytes_tripquote_literals(self):
+        s = '''
+            b"""
+            <?xml version="1.0" encoding="UTF-8"?>
+            <!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN">
+            """
+            '''
+        self.validate(s)
+
+    def test_multiline_str_literals(self):
+        s = """
+            md5test("\xaa" * 80,
+                    ("Test Using Larger Than Block-Size Key "
+                     "and Larger Than One Block-Size Data"),
+                    "6f630fad67cda0ee1fb1f562db3aa53e")
+            """
+        self.validate(s)
+
+
+def diff(fn, tree):
+    f = open("@", "w")
+    try:
+        f.write(str(tree))
+    finally:
+        f.close()
+    try:
+        return os.system("diff -u %s @" % fn)
+    finally:
+        os.remove("@")
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)

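GrammarTest.invalid_syntax() above spells out the expected failure with a
try/except/else block; an equivalent sketch using unittest's assertRaises,
built on the same support.parse_string() helper:

    def invalid_syntax(self, code):
        # ParseError must be raised for the snippet to count as invalid.
        self.assertRaises(ParseError, support.parse_string, code)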
Added: sandbox/trunk/refactor_pkg/refactor/tests/test_pytree.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/test_pytree.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,458 @@
+#!/usr/bin/env python2.5
+# Copyright 2006 Google, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement.
+
+"""Unit tests for pytree.py.
+
+NOTE: Please *don't* add doc strings to individual test methods!
+In verbose mode, printing of the module, class and method name is much
+more helpful than printing of (the first line of) the docstring,
+especially when debugging a test.
+"""
+
+# Testing imports
+from . import support
+
+# Local imports (XXX should become a package)
+from .. import pytree
+
+try:
+    sorted
+except NameError:
+    def sorted(lst):
+        l = list(lst)
+        l.sort()
+        return l
+
+class TestNodes(support.TestCase):
+
+    """Unit tests for nodes (Base, Leaf, Node)."""
+
+    def testBaseCantConstruct(self):
+        if __debug__:
+            # Test that instantiating Base() raises an AssertionError
+            self.assertRaises(AssertionError, pytree.Base)
+
+    def testLeaf(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(l1.type, 100)
+        self.assertEqual(l1.value, "foo")
+
+    def testLeafRepr(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(repr(l1), "Leaf(100, 'foo')")
+
+    def testLeafStr(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(str(l1), "foo")
+        l2 = pytree.Leaf(100, "foo", context=(" ", (10, 1)))
+        self.assertEqual(str(l2), " foo")
+
+    def testLeafStrNumericValue(self):
+        # Make sure that the Leaf's value is stringified. Failing to
+        #  do this can cause a TypeError in certain situations.
+        l1 = pytree.Leaf(2, 5)
+        l1.set_prefix("foo_")
+        self.assertEqual(str(l1), "foo_5")
+
+    def testLeafEq(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "foo", context=(" ", (1, 0)))
+        self.assertEqual(l1, l2)
+        l3 = pytree.Leaf(101, "foo")
+        l4 = pytree.Leaf(100, "bar")
+        self.assertNotEqual(l1, l3)
+        self.assertNotEqual(l1, l4)
+
+    def testLeafPrefix(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(l1.get_prefix(), "")
+        self.failIf(l1.was_changed)
+        l1.set_prefix("  ##\n\n")
+        self.assertEqual(l1.get_prefix(), "  ##\n\n")
+        self.failUnless(l1.was_changed)
+
+    def testNode(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(200, "bar")
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(n1.type, 1000)
+        self.assertEqual(n1.children, [l1, l2])
+
+    def testNodeRepr(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(repr(n1),
+                         "Node(1000, [%s, %s])" % (repr(l1), repr(l2)))
+
+    def testNodeStr(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar", context=(" ", (1, 0)))
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(str(n1), "foo bar")
+
+    def testNodePrefix(self):
+        l1 = pytree.Leaf(100, "foo")
+        self.assertEqual(l1.get_prefix(), "")
+        n1 = pytree.Node(1000, [l1])
+        self.assertEqual(n1.get_prefix(), "")
+        n1.set_prefix(" ")
+        self.assertEqual(n1.get_prefix(), " ")
+        self.assertEqual(l1.get_prefix(), " ")
+
+    def testGetSuffix(self):
+        l1 = pytree.Leaf(100, "foo", prefix="a")
+        l2 = pytree.Leaf(100, "bar", prefix="b")
+        n1 = pytree.Node(1000, [l1, l2])
+
+        self.assertEqual(l1.get_suffix(), l2.get_prefix())
+        self.assertEqual(l2.get_suffix(), "")
+        self.assertEqual(n1.get_suffix(), "")
+
+        l3 = pytree.Leaf(100, "bar", prefix="c")
+        n2 = pytree.Node(1000, [n1, l3])
+
+        self.assertEqual(n1.get_suffix(), l3.get_prefix())
+        self.assertEqual(l3.get_suffix(), "")
+        self.assertEqual(n2.get_suffix(), "")
+
+    def testNodeEq(self):
+        n1 = pytree.Node(1000, ())
+        n2 = pytree.Node(1000, [], context=(" ", (1, 0)))
+        self.assertEqual(n1, n2)
+        n3 = pytree.Node(1001, ())
+        self.assertNotEqual(n1, n3)
+
+    def testNodeEqRecursive(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1])
+        n2 = pytree.Node(1000, [l2])
+        self.assertEqual(n1, n2)
+        l3 = pytree.Leaf(100, "bar")
+        n3 = pytree.Node(1000, [l3])
+        self.assertNotEqual(n1, n3)
+
+    def testReplace(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "+")
+        l3 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2, l3])
+        self.assertEqual(n1.children, [l1, l2, l3])
+        self.failUnless(isinstance(n1.children, list))
+        self.failIf(n1.was_changed)
+        l2new = pytree.Leaf(100, "-")
+        l2.replace(l2new)
+        self.assertEqual(n1.children, [l1, l2new, l3])
+        self.failUnless(isinstance(n1.children, list))
+        self.failUnless(n1.was_changed)
+
+    def testReplaceWithList(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "+")
+        l3 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2, l3])
+
+        l2.replace([pytree.Leaf(100, "*"), pytree.Leaf(100, "*")])
+        self.assertEqual(str(n1), "foo**bar")
+        self.failUnless(isinstance(n1.children, list))
+
+    def testPostOrder(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(list(n1.post_order()), [l1, l2, n1])
+
+    def testPreOrder(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2])
+        self.assertEqual(list(n1.pre_order()), [n1, l1, l2])
+
+    def testChangedLeaf(self):
+        l1 = pytree.Leaf(100, "f")
+        self.failIf(l1.was_changed)
+
+        l1.changed()
+        self.failUnless(l1.was_changed)
+
+    def testChangedNode(self):
+        l1 = pytree.Leaf(100, "f")
+        n1 = pytree.Node(1000, [l1])
+        self.failIf(n1.was_changed)
+
+        n1.changed()
+        self.failUnless(n1.was_changed)
+
+    def testChangedRecursive(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "+")
+        l3 = pytree.Leaf(100, "bar")
+        n1 = pytree.Node(1000, [l1, l2, l3])
+        n2 = pytree.Node(1000, [n1])
+        self.failIf(l1.was_changed)
+        self.failIf(n1.was_changed)
+        self.failIf(n2.was_changed)
+
+        n1.changed()
+        self.failUnless(n1.was_changed)
+        self.failUnless(n2.was_changed)
+        self.failIf(l1.was_changed)
+
+    def testLeafConstructorPrefix(self):
+        for prefix in ("xyz_", ""):
+            l1 = pytree.Leaf(100, "self", prefix=prefix)
+            self.assertEqual(str(l1), prefix + "self")
+            self.assertEqual(l1.get_prefix(), prefix)
+
+    def testNodeConstructorPrefix(self):
+        for prefix in ("xyz_", ""):
+            l1 = pytree.Leaf(100, "self")
+            l2 = pytree.Leaf(100, "foo", prefix="_")
+            n1 = pytree.Node(1000, [l1, l2], prefix=prefix)
+            self.assertEqual(str(n1), prefix + "self_foo")
+            self.assertEqual(n1.get_prefix(), prefix)
+            self.assertEqual(l1.get_prefix(), prefix)
+            self.assertEqual(l2.get_prefix(), "_")
+
+    def testRemove(self):
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1, l2])
+        n2 = pytree.Node(1000, [n1])
+
+        self.assertEqual(n1.remove(), 0)
+        self.assertEqual(n2.children, [])
+        self.assertEqual(l1.parent, n1)
+        self.assertEqual(n1.parent, None)
+        self.assertEqual(n2.parent, None)
+        self.failIf(n1.was_changed)
+        self.failUnless(n2.was_changed)
+
+        self.assertEqual(l2.remove(), 1)
+        self.assertEqual(l1.remove(), 0)
+        self.assertEqual(n1.children, [])
+        self.assertEqual(l1.parent, None)
+        self.assertEqual(n1.parent, None)
+        self.assertEqual(n2.parent, None)
+        self.failUnless(n1.was_changed)
+        self.failUnless(n2.was_changed)
+
+    def testRemoveParentless(self):
+        n1 = pytree.Node(1000, [])
+        n1.remove()
+        self.assertEqual(n1.parent, None)
+
+        l1 = pytree.Leaf(100, "foo")
+        l1.remove()
+        self.assertEqual(l1.parent, None)
+
+    def testNodeSetChild(self):
+        l1 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1])
+
+        l2 = pytree.Leaf(100, "bar")
+        n1.set_child(0, l2)
+        self.assertEqual(l1.parent, None)
+        self.assertEqual(l2.parent, n1)
+        self.assertEqual(n1.children, [l2])
+
+        n2 = pytree.Node(1000, [l1])
+        n2.set_child(0, n1)
+        self.assertEqual(l1.parent, None)
+        self.assertEqual(n1.parent, n2)
+        self.assertEqual(n2.parent, None)
+        self.assertEqual(n2.children, [n1])
+
+        self.assertRaises(IndexError, n1.set_child, 4, l2)
+        # I don't care what it raises, so long as it's an exception
+        self.assertRaises(Exception, n1.set_child, 0, list)
+
+    def testNodeInsertChild(self):
+        l1 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1])
+
+        l2 = pytree.Leaf(100, "bar")
+        n1.insert_child(0, l2)
+        self.assertEqual(l2.parent, n1)
+        self.assertEqual(n1.children, [l2, l1])
+
+        l3 = pytree.Leaf(100, "abc")
+        n1.insert_child(2, l3)
+        self.assertEqual(n1.children, [l2, l1, l3])
+
+        # I don't care what it raises, so long as it's an exception
+        self.assertRaises(Exception, n1.insert_child, 0, list)
+
+    def testNodeAppendChild(self):
+        n1 = pytree.Node(1000, [])
+
+        l1 = pytree.Leaf(100, "foo")
+        n1.append_child(l1)
+        self.assertEqual(l1.parent, n1)
+        self.assertEqual(n1.children, [l1])
+
+        l2 = pytree.Leaf(100, "bar")
+        n1.append_child(l2)
+        self.assertEqual(l2.parent, n1)
+        self.assertEqual(n1.children, [l1, l2])
+
+        # I don't care what it raises, so long as it's an exception
+        self.assertRaises(Exception, n1.append_child, list)
+
+    def testNodeNextSibling(self):
+        n1 = pytree.Node(1000, [])
+        n2 = pytree.Node(1000, [])
+        p1 = pytree.Node(1000, [n1, n2])
+
+        self.failUnless(n1.next_sibling is n2)
+        self.assertEqual(n2.next_sibling, None)
+        self.assertEqual(p1.next_sibling, None)
+
+    def testLeafNextSibling(self):
+        l1 = pytree.Leaf(100, "a")
+        l2 = pytree.Leaf(100, "b")
+        p1 = pytree.Node(1000, [l1, l2])
+
+        self.failUnless(l1.next_sibling is l2)
+        self.assertEqual(l2.next_sibling, None)
+        self.assertEqual(p1.next_sibling, None)
+
+    def testNodePrevSibling(self):
+        n1 = pytree.Node(1000, [])
+        n2 = pytree.Node(1000, [])
+        p1 = pytree.Node(1000, [n1, n2])
+
+        self.failUnless(n2.prev_sibling is n1)
+        self.assertEqual(n1.prev_sibling, None)
+        self.assertEqual(p1.prev_sibling, None)
+
+    def testLeafPrevSibling(self):
+        l1 = pytree.Leaf(100, "a")
+        l2 = pytree.Leaf(100, "b")
+        p1 = pytree.Node(1000, [l1, l2])
+
+        self.failUnless(l2.prev_sibling is l1)
+        self.assertEqual(l1.prev_sibling, None)
+        self.assertEqual(p1.prev_sibling, None)
+
+
+class TestPatterns(support.TestCase):
+
+    """Unit tests for tree matching patterns."""
+
+    def testBasicPatterns(self):
+        # Build a tree
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        l3 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1, l2])
+        n2 = pytree.Node(1000, [l3])
+        root = pytree.Node(1000, [n1, n2])
+        # Build a pattern matching a leaf
+        pl = pytree.LeafPattern(100, "foo", name="pl")
+        r = {}
+        self.assertFalse(pl.match(root, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pl.match(n1, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pl.match(n2, results=r))
+        self.assertEqual(r, {})
+        self.assertTrue(pl.match(l1, results=r))
+        self.assertEqual(r, {"pl": l1})
+        r = {}
+        self.assertFalse(pl.match(l2, results=r))
+        self.assertEqual(r, {})
+        # Build a pattern matching a node
+        pn = pytree.NodePattern(1000, [pl], name="pn")
+        self.assertFalse(pn.match(root, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pn.match(n1, results=r))
+        self.assertEqual(r, {})
+        self.assertTrue(pn.match(n2, results=r))
+        self.assertEqual(r, {"pn": n2, "pl": l3})
+        r = {}
+        self.assertFalse(pn.match(l1, results=r))
+        self.assertEqual(r, {})
+        self.assertFalse(pn.match(l2, results=r))
+        self.assertEqual(r, {})
+
+    def testWildcardPatterns(self):
+        # Build a tree for testing
+        l1 = pytree.Leaf(100, "foo")
+        l2 = pytree.Leaf(100, "bar")
+        l3 = pytree.Leaf(100, "foo")
+        n1 = pytree.Node(1000, [l1, l2])
+        n2 = pytree.Node(1000, [l3])
+        root = pytree.Node(1000, [n1, n2])
+        # Build a pattern
+        pl = pytree.LeafPattern(100, "foo", name="pl")
+        pn = pytree.NodePattern(1000, [pl], name="pn")
+        pw = pytree.WildcardPattern([[pn], [pl, pl]], name="pw")
+        r = {}
+        self.assertFalse(pw.match_seq([root], r))
+        self.assertEqual(r, {})
+        self.assertFalse(pw.match_seq([n1], r))
+        self.assertEqual(r, {})
+        self.assertTrue(pw.match_seq([n2], r))
+        # These are easier to debug
+        self.assertEqual(sorted(r.keys()), ["pl", "pn", "pw"])
+        self.assertEqual(r["pl"], l1)
+        self.assertEqual(r["pn"], n2)
+        self.assertEqual(r["pw"], [n2])
+        # But this is equivalent
+        self.assertEqual(r, {"pl": l1, "pn": n2, "pw": [n2]})
+        r = {}
+        self.assertTrue(pw.match_seq([l1, l3], r))
+        self.assertEqual(r, {"pl": l3, "pw": [l1, l3]})
+        self.assert_(r["pl"] is l3)
+        r = {}
+
+    def testGenerateMatches(self):
+        la = pytree.Leaf(1, "a")
+        lb = pytree.Leaf(1, "b")
+        lc = pytree.Leaf(1, "c")
+        ld = pytree.Leaf(1, "d")
+        le = pytree.Leaf(1, "e")
+        lf = pytree.Leaf(1, "f")
+        leaves = [la, lb, lc, ld, le, lf]
+        root = pytree.Node(1000, leaves)
+        pa = pytree.LeafPattern(1, "a", "pa")
+        pb = pytree.LeafPattern(1, "b", "pb")
+        pc = pytree.LeafPattern(1, "c", "pc")
+        pd = pytree.LeafPattern(1, "d", "pd")
+        pe = pytree.LeafPattern(1, "e", "pe")
+        pf = pytree.LeafPattern(1, "f", "pf")
+        pw = pytree.WildcardPattern([[pa, pb, pc], [pd, pe],
+                                     [pa, pb], [pc, pd], [pe, pf]],
+                                    min=1, max=4, name="pw")
+        self.assertEqual([x[0] for x in pw.generate_matches(leaves)],
+                         [3, 5, 2, 4, 6])
+        pr = pytree.NodePattern(type=1000, content=[pw], name="pr")
+        matches = list(pytree.generate_matches([pr], [root]))
+        self.assertEqual(len(matches), 1)
+        c, r = matches[0]
+        self.assertEqual(c, 1)
+        self.assertEqual(str(r["pr"]), "abcdef")
+        self.assertEqual(r["pw"], [la, lb, lc, ld, le, lf])
+        for c in "abcdef":
+            self.assertEqual(r["p" + c], pytree.Leaf(1, c))
+
+    def testHasKeyExample(self):
+        pattern = pytree.NodePattern(331,
+                                     (pytree.LeafPattern(7),
+                                      pytree.WildcardPattern(name="args"),
+                                      pytree.LeafPattern(8)))
+        l1 = pytree.Leaf(7, "(")
+        l2 = pytree.Leaf(3, "x")
+        l3 = pytree.Leaf(8, ")")
+        node = pytree.Node(331, [l1, l2, l3])
+        r = {}
+        self.assert_(pattern.match(node, r))
+        self.assertEqual(r["args"], [l2])
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)
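
For readers skimming the diff, here is a minimal standalone illustration of
the pytree API these tests exercise. It is a sketch assuming the refactor
package is importable; the type numbers 100 and 1000 are arbitrary, exactly
as in the tests:

    from refactor import pytree

    # Build the same two-leaf tree the tests construct; a prefix is the
    # whitespace glued onto the front of a node when it is stringified.
    l1 = pytree.Leaf(100, "foo")
    l2 = pytree.Leaf(100, "bar", prefix=" ")
    n1 = pytree.Node(1000, [l1, l2])
    print str(n1)                 # -> "foo bar"

    # A pattern capturing any leaf of type 100 with value "foo".
    pl = pytree.LeafPattern(100, "foo", name="pl")
    results = {}
    print pl.match(l1, results)   # -> True
    print results                 # -> {'pl': Leaf(100, 'foo')}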

Added: sandbox/trunk/refactor_pkg/refactor/tests/test_refactor.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/test_refactor.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,168 @@
+"""
+Unit tests for refactor.py.
+"""
+
+import sys
+import os
+import operator
+import StringIO
+import tempfile
+import unittest
+
+from .. import refactor, pygram, fixer_base
+
+from . import support
+
+
+FIXER_DIR = os.path.join(os.path.dirname(__file__), "data/fixers")
+
+sys.path.append(FIXER_DIR)
+try:
+    _DEFAULT_FIXERS = refactor.get_fixers_from_package("myfixes")
+finally:
+    sys.path.pop()
+
+class TestRefactoringTool(unittest.TestCase):
+
+    def setUp(self):
+        sys.path.append(FIXER_DIR)
+
+    def tearDown(self):
+        sys.path.pop()
+
+    def check_instances(self, instances, classes):
+        for inst, cls in zip(instances, classes):
+            if not isinstance(inst, cls):
+                self.fail("%s are not instances of %s" % (instances, classes))
+
+    def rt(self, options=None, fixers=_DEFAULT_FIXERS, explicit=None):
+        return refactor.RefactoringTool(fixers, options, explicit)
+
+    def test_print_function_option(self):
+        gram = pygram.python_grammar
+        save = gram.keywords["print"]
+        try:
+            rt = self.rt({"print_function" : True})
+            self.assertRaises(KeyError, operator.itemgetter("print"),
+                              gram.keywords)
+        finally:
+            gram.keywords["print"] = save
+
+    def test_fixer_loading_helpers(self):
+        contents = ["explicit", "first", "last", "parrot", "preorder"]
+        non_prefixed = refactor.get_all_fix_names("myfixes")
+        prefixed = refactor.get_all_fix_names("myfixes", False)
+        full_names = refactor.get_fixers_from_package("myfixes")
+        self.assertEqual(prefixed, ["fix_" + name for name in contents])
+        self.assertEqual(non_prefixed, contents)
+        self.assertEqual(full_names,
+                         ["myfixes.fix_" + name for name in contents])
+
+    def test_get_headnode_dict(self):
+        class NoneFix(fixer_base.BaseFix):
+            PATTERN = None
+
+        class FileInputFix(fixer_base.BaseFix):
+            PATTERN = "file_input< any * >"
+
+        no_head = NoneFix({}, [])
+        with_head = FileInputFix({}, [])
+        d = refactor.get_headnode_dict([no_head, with_head])
+        expected = {None: [no_head],
+                    pygram.python_symbols.file_input : [with_head]}
+        self.assertEqual(d, expected)
+
+    def test_fixer_loading(self):
+        from myfixes.fix_first import FixFirst
+        from myfixes.fix_last import FixLast
+        from myfixes.fix_parrot import FixParrot
+        from myfixes.fix_preorder import FixPreorder
+
+        rt = self.rt()
+        pre, post = rt.get_fixers()
+
+        self.check_instances(pre, [FixPreorder])
+        self.check_instances(post, [FixFirst, FixParrot, FixLast])
+
+    def test_naughty_fixers(self):
+        self.assertRaises(ImportError, self.rt, fixers=["not_here"])
+        self.assertRaises(refactor.FixerError, self.rt, fixers=["no_fixer_cls"])
+        self.assertRaises(refactor.FixerError, self.rt, fixers=["bad_order"])
+
+    def test_refactor_string(self):
+        rt = self.rt()
+        input = "def parrot(): pass\n\n"
+        tree = rt.refactor_string(input, "<test>")
+        self.assertNotEqual(str(tree), input)
+
+        input = "def f(): pass\n\n"
+        tree = rt.refactor_string(input, "<test>")
+        self.assertEqual(str(tree), input)
+
+    def test_refactor_stdin(self):
+
+        class MyRT(refactor.RefactoringTool):
+
+            def print_output(self, lines):
+                diff_lines.extend(lines)
+
+        diff_lines = []
+        rt = MyRT(_DEFAULT_FIXERS)
+        save = sys.stdin
+        sys.stdin = StringIO.StringIO("def parrot(): pass\n\n")
+        try:
+            rt.refactor_stdin()
+        finally:
+            sys.stdin = save
+        expected = """--- <stdin> (original)
++++ <stdin> (refactored)
+@@ -1,2 +1,2 @@
+-def parrot(): pass
++def cheese(): pass""".splitlines()
+        self.assertEqual(diff_lines[:-1], expected)
+
+    def test_refactor_file(self):
+        test_file = os.path.join(FIXER_DIR, "parrot_example.py")
+        old_contents = open(test_file, "r").read()
+        rt = self.rt()
+
+        rt.refactor_file(test_file)
+        self.assertEqual(old_contents, open(test_file, "r").read())
+
+        rt.refactor_file(test_file, True)
+        try:
+            self.assertNotEqual(old_contents, open(test_file, "r").read())
+        finally:
+            open(test_file, "w").write(old_contents)
+
+    def test_refactor_docstring(self):
+        rt = self.rt()
+
+        def example():
+            """
+            >>> example()
+            42
+            """
+        out = rt.refactor_docstring(example.__doc__, "<test>")
+        self.assertEqual(out, example.__doc__)
+
+        def parrot():
+            """
+            >>> def parrot():
+            ...      return 43
+            """
+        out = rt.refactor_docstring(parrot.__doc__, "<test>")
+        self.assertNotEqual(out, parrot.__doc__)
+
+    def test_explicit(self):
+        from myfixes.fix_explicit import FixExplicit
+
+        rt = self.rt(fixers=["myfixes.fix_explicit"])
+        self.assertEqual(len(rt.post_order), 0)
+
+        rt = self.rt(explicit=["myfixes.fix_explicit"])
+        for fix in rt.post_order:
+            if isinstance(fix, FixExplicit):
+                break
+        else:
+            self.fail("explicit fixer not loaded")
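
The tests above drive RefactoringTool through stub fixers in the test data
directory. For orientation, this sketch exercises the same entry points
against the real from2 fixers shipped in this commit; the expected output is
hedged, since it depends on which fixers load:

    from refactor import refactor

    fixers = refactor.get_fixers_from_package("refactor.fixes.from2")
    rt = refactor.RefactoringTool(fixers)

    # refactor_string() parses and transforms but touches no files.
    tree = rt.refactor_string("print 'hello'\n", "<example>")
    print str(tree)    # expected: "print('hello')\n" once fix_print runs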

Added: sandbox/trunk/refactor_pkg/refactor/tests/test_util.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/refactor/tests/test_util.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,559 @@
+#!/usr/bin/env python2.5
+""" Test suite for the code in fixes.util """
+# Author: Collin Winter
+
+# Testing imports
+from . import support
+
+# Python imports
+import os.path
+
+# Local imports
+from .. import pytree
+from .. import fixer_util
+from ..fixer_util import Attr, Name
+
+
+def parse(code, strip_levels=0):
+    # The topmost node is file_input, which we don't care about.
+    # The next-topmost node is a *_stmt node, which we also don't care about.
+    tree = support.parse_string(code)
+    for i in range(strip_levels):
+        tree = tree.children[0]
+    tree.parent = None
+    return tree
+
+class MacroTestCase(support.TestCase):
+    def assertStr(self, node, string):
+        if isinstance(node, (tuple, list)):
+            node = pytree.Node(fixer_util.syms.simple_stmt, node)
+        self.assertEqual(str(node), string)
+
+
+class Test_is_tuple(support.TestCase):
+    def is_tuple(self, string):
+        return fixer_util.is_tuple(parse(string, strip_levels=2))
+
+    def test_valid(self):
+        self.failUnless(self.is_tuple("(a, b)"))
+        self.failUnless(self.is_tuple("(a, (b, c))"))
+        self.failUnless(self.is_tuple("((a, (b, c)),)"))
+        self.failUnless(self.is_tuple("(a,)"))
+        self.failUnless(self.is_tuple("()"))
+
+    def test_invalid(self):
+        self.failIf(self.is_tuple("(a)"))
+        self.failIf(self.is_tuple("('foo') % (b, c)"))
+
+
+class Test_is_list(support.TestCase):
+    def is_list(self, string):
+        return fixer_util.is_list(parse(string, strip_levels=2))
+
+    def test_valid(self):
+        self.failUnless(self.is_list("[]"))
+        self.failUnless(self.is_list("[a]"))
+        self.failUnless(self.is_list("[a, b]"))
+        self.failUnless(self.is_list("[a, [b, c]]"))
+        self.failUnless(self.is_list("[[a, [b, c]],]"))
+
+    def test_invalid(self):
+        self.failIf(self.is_list("[]+[]"))
+
+
+class Test_Attr(MacroTestCase):
+    def test(self):
+        call = parse("foo()", strip_levels=2)
+
+        self.assertStr(Attr(Name("a"), Name("b")), "a.b")
+        self.assertStr(Attr(call, Name("b")), "foo().b")
+
+    def test_returns(self):
+        attr = Attr(Name("a"), Name("b"))
+        self.assertEqual(type(attr), list)
+
+
+class Test_Name(MacroTestCase):
+    def test(self):
+        self.assertStr(Name("a"), "a")
+        self.assertStr(Name("foo.foo().bar"), "foo.foo().bar")
+        self.assertStr(Name("a", prefix="b"), "ba")
+
+
+class Test_does_tree_import(support.TestCase):
+    def _find_bind_rec(self, name, node):
+        # Search a tree for a binding -- used to find the starting
+        # point for these tests.
+        c = fixer_util.find_binding(name, node)
+        if c: return c
+        for child in node.children:
+            c = self._find_bind_rec(name, child)
+            if c: return c
+
+    def does_tree_import(self, package, name, string):
+        node = parse(string)
+        # Find the binding of start -- that's what we'll go from
+        node = self._find_bind_rec('start', node)
+        return fixer_util.does_tree_import(package, name, node)
+
+    def try_with(self, string):
+        failing_tests = (("a", "a", "from a import b"),
+                         ("a.d", "a", "from a.d import b"),
+                         ("d.a", "a", "from d.a import b"),
+                         (None, "a", "import b"),
+                         (None, "a", "import b, c, d"))
+        for package, name, import_ in failing_tests:
+            n = self.does_tree_import(package, name, import_ + "\n" + string)
+            self.failIf(n)
+            n = self.does_tree_import(package, name, string + "\n" + import_)
+            self.failIf(n)
+
+        passing_tests = (("a", "a", "from a import a"),
+                         ("x", "a", "from x import a"),
+                         ("x", "a", "from x import b, c, a, d"),
+                         ("x.b", "a", "from x.b import a"),
+                         ("x.b", "a", "from x.b import b, c, a, d"),
+                         (None, "a", "import a"),
+                         (None, "a", "import b, c, a, d"))
+        for package, name, import_ in passing_tests:
+            n = self.does_tree_import(package, name, import_ + "\n" + string)
+            self.failUnless(n)
+            n = self.does_tree_import(package, name, string + "\n" + import_)
+            self.failUnless(n)
+
+    def test_in_function(self):
+        self.try_with("def foo():\n\tbar.baz()\n\tstart=3")
+
+class Test_find_binding(support.TestCase):
+    def find_binding(self, name, string, package=None):
+        return fixer_util.find_binding(name, parse(string), package)
+
+    def test_simple_assignment(self):
+        self.failUnless(self.find_binding("a", "a = b"))
+        self.failUnless(self.find_binding("a", "a = [b, c, d]"))
+        self.failUnless(self.find_binding("a", "a = foo()"))
+        self.failUnless(self.find_binding("a", "a = foo().foo.foo[6][foo]"))
+        self.failIf(self.find_binding("a", "foo = a"))
+        self.failIf(self.find_binding("a", "foo = (a, b, c)"))
+
+    def test_tuple_assignment(self):
+        self.failUnless(self.find_binding("a", "(a,) = b"))
+        self.failUnless(self.find_binding("a", "(a, b, c) = [b, c, d]"))
+        self.failUnless(self.find_binding("a", "(c, (d, a), b) = foo()"))
+        self.failUnless(self.find_binding("a", "(a, b) = foo().foo[6][foo]"))
+        self.failIf(self.find_binding("a", "(foo, b) = (b, a)"))
+        self.failIf(self.find_binding("a", "(foo, (b, c)) = (a, b, c)"))
+
+    def test_list_assignment(self):
+        self.failUnless(self.find_binding("a", "[a] = b"))
+        self.failUnless(self.find_binding("a", "[a, b, c] = [b, c, d]"))
+        self.failUnless(self.find_binding("a", "[c, [d, a], b] = foo()"))
+        self.failUnless(self.find_binding("a", "[a, b] = foo().foo[a][foo]"))
+        self.failIf(self.find_binding("a", "[foo, b] = (b, a)"))
+        self.failIf(self.find_binding("a", "[foo, [b, c]] = (a, b, c)"))
+
+    def test_invalid_assignments(self):
+        self.failIf(self.find_binding("a", "foo.a = 5"))
+        self.failIf(self.find_binding("a", "foo[a] = 5"))
+        self.failIf(self.find_binding("a", "foo(a) = 5"))
+        self.failIf(self.find_binding("a", "foo(a, b) = 5"))
+
+    def test_simple_import(self):
+        self.failUnless(self.find_binding("a", "import a"))
+        self.failUnless(self.find_binding("a", "import b, c, a, d"))
+        self.failIf(self.find_binding("a", "import b"))
+        self.failIf(self.find_binding("a", "import b, c, d"))
+
+    def test_from_import(self):
+        self.failUnless(self.find_binding("a", "from x import a"))
+        self.failUnless(self.find_binding("a", "from a import a"))
+        self.failUnless(self.find_binding("a", "from x import b, c, a, d"))
+        self.failUnless(self.find_binding("a", "from x.b import a"))
+        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d"))
+        self.failIf(self.find_binding("a", "from a import b"))
+        self.failIf(self.find_binding("a", "from a.d import b"))
+        self.failIf(self.find_binding("a", "from d.a import b"))
+
+    def test_import_as(self):
+        self.failUnless(self.find_binding("a", "import b as a"))
+        self.failUnless(self.find_binding("a", "import b as a, c, a as f, d"))
+        self.failIf(self.find_binding("a", "import a as f"))
+        self.failIf(self.find_binding("a", "import b, c as f, d as e"))
+
+    def test_from_import_as(self):
+        self.failUnless(self.find_binding("a", "from x import b as a"))
+        self.failUnless(self.find_binding("a", "from x import g as a, d as b"))
+        self.failUnless(self.find_binding("a", "from x.b import t as a"))
+        self.failUnless(self.find_binding("a", "from x.b import g as a, d"))
+        self.failIf(self.find_binding("a", "from a import b as t"))
+        self.failIf(self.find_binding("a", "from a.d import b as t"))
+        self.failIf(self.find_binding("a", "from d.a import b as t"))
+
+    def test_simple_import_with_package(self):
+        self.failUnless(self.find_binding("b", "import b"))
+        self.failUnless(self.find_binding("b", "import b, c, d"))
+        self.failIf(self.find_binding("b", "import b", "b"))
+        self.failIf(self.find_binding("b", "import b, c, d", "c"))
+
+    def test_from_import_with_package(self):
+        self.failUnless(self.find_binding("a", "from x import a", "x"))
+        self.failUnless(self.find_binding("a", "from a import a", "a"))
+        self.failUnless(self.find_binding("a", "from x import *", "x"))
+        self.failUnless(self.find_binding("a", "from x import b, c, a, d", "x"))
+        self.failUnless(self.find_binding("a", "from x.b import a", "x.b"))
+        self.failUnless(self.find_binding("a", "from x.b import *", "x.b"))
+        self.failUnless(self.find_binding("a", "from x.b import b, c, a, d", "x.b"))
+        self.failIf(self.find_binding("a", "from a import b", "a"))
+        self.failIf(self.find_binding("a", "from a.d import b", "a.d"))
+        self.failIf(self.find_binding("a", "from d.a import b", "a.d"))
+        self.failIf(self.find_binding("a", "from x.y import *", "a.b"))
+
+    def test_import_as_with_package(self):
+        self.failIf(self.find_binding("a", "import b.c as a", "b.c"))
+        self.failIf(self.find_binding("a", "import a as f", "f"))
+        self.failIf(self.find_binding("a", "import a as f", "a"))
+
+    def test_from_import_as_with_package(self):
+        # Because it would take a lot of special-case code in the fixers
+        # to deal with from foo import bar as baz, we'll simply always
+        # fail if there is a "from ... import ... as ..."
+        self.failIf(self.find_binding("a", "from x import b as a", "x"))
+        self.failIf(self.find_binding("a", "from x import g as a, d as b", "x"))
+        self.failIf(self.find_binding("a", "from x.b import t as a", "x.b"))
+        self.failIf(self.find_binding("a", "from x.b import g as a, d", "x.b"))
+        self.failIf(self.find_binding("a", "from a import b as t", "a"))
+        self.failIf(self.find_binding("a", "from a import b as t", "b"))
+        self.failIf(self.find_binding("a", "from a import b as t", "t"))
+
+    def test_function_def(self):
+        self.failUnless(self.find_binding("a", "def a(): pass"))
+        self.failUnless(self.find_binding("a", "def a(b, c, d): pass"))
+        self.failUnless(self.find_binding("a", "def a(): b = 7"))
+        self.failIf(self.find_binding("a", "def d(b, (c, a), e): pass"))
+        self.failIf(self.find_binding("a", "def d(a=7): pass"))
+        self.failIf(self.find_binding("a", "def d(a): pass"))
+        self.failIf(self.find_binding("a", "def d(): a = 7"))
+
+        s = """
+            def d():
+                def a():
+                    pass"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_class_def(self):
+        self.failUnless(self.find_binding("a", "class a: pass"))
+        self.failUnless(self.find_binding("a", "class a(): pass"))
+        self.failUnless(self.find_binding("a", "class a(b): pass"))
+        self.failUnless(self.find_binding("a", "class a(b, c=8): pass"))
+        self.failIf(self.find_binding("a", "class d: pass"))
+        self.failIf(self.find_binding("a", "class d(a): pass"))
+        self.failIf(self.find_binding("a", "class d(b, a=7): pass"))
+        self.failIf(self.find_binding("a", "class d(b, *a): pass"))
+        self.failIf(self.find_binding("a", "class d(b, **a): pass"))
+        self.failIf(self.find_binding("a", "class d: a = 7"))
+
+        s = """
+            class d():
+                class a():
+                    pass"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_for(self):
+        self.failUnless(self.find_binding("a", "for a in r: pass"))
+        self.failUnless(self.find_binding("a", "for a, b in r: pass"))
+        self.failUnless(self.find_binding("a", "for (a, b) in r: pass"))
+        self.failUnless(self.find_binding("a", "for c, (a,) in r: pass"))
+        self.failUnless(self.find_binding("a", "for c, (a, b) in r: pass"))
+        self.failUnless(self.find_binding("a", "for c in r: a = c"))
+        self.failIf(self.find_binding("a", "for c in a: pass"))
+
+    def test_for_nested(self):
+        s = """
+            for b in r:
+                for a in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for a, c in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for (a, c) in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for (a,) in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c, (a, d) in b:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c in b:
+                    a = 7"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c in b:
+                    d = a"""
+        self.failIf(self.find_binding("a", s))
+
+        s = """
+            for b in r:
+                for c in a:
+                    d = 7"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_if(self):
+        self.failUnless(self.find_binding("a", "if b in r: a = c"))
+        self.failIf(self.find_binding("a", "if a in r: d = e"))
+
+    def test_if_nested(self):
+        s = """
+            if b in r:
+                if c in d:
+                    a = c"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            if b in r:
+                if c in d:
+                    c = a"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_while(self):
+        self.failUnless(self.find_binding("a", "while b in r: a = c"))
+        self.failIf(self.find_binding("a", "while a in r: d = e"))
+
+    def test_while_nested(self):
+        s = """
+            while b in r:
+                while c in d:
+                    a = c"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            while b in r:
+                while c in d:
+                    c = a"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except(self):
+        s = """
+            try:
+                a = 6
+            except:
+                b = 8"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except KeyError:
+                pass
+            except:
+                a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except_nested(self):
+        s = """
+            try:
+                try:
+                    a = 6
+                except:
+                    pass
+            except:
+                b = 8"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                try:
+                    a = 6
+                except:
+                    pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                try:
+                    pass
+                except:
+                    a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                try:
+                    b = 8
+                except KeyError:
+                    pass
+                except:
+                    a = 6
+            except:
+                pass"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                pass
+            except:
+                try:
+                    b = 8
+                except KeyError:
+                    pass
+                except:
+                    a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+        s = """
+            try:
+                try:
+                    b = 8
+                except:
+                    c = d
+            except:
+                try:
+                    b = 6
+                except:
+                    t = 8
+                except:
+                    o = y"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except_finally(self):
+        s = """
+            try:
+                c = 6
+            except:
+                b = 8
+            finally:
+                a = 9"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            except:
+                b = 9
+            finally:
+                b = 6"""
+        self.failIf(self.find_binding("a", s))
+
+    def test_try_except_finally_nested(self):
+        s = """
+            try:
+                c = 6
+            except:
+                b = 8
+            finally:
+                try:
+                    a = 9
+                except:
+                    b = 9
+                finally:
+                    c = 9"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                try:
+                    pass
+                finally:
+                    a = 6"""
+        self.failUnless(self.find_binding("a", s))
+
+        s = """
+            try:
+                b = 8
+            finally:
+                try:
+                    b = 6
+                finally:
+                    b = 7"""
+        self.failIf(self.find_binding("a", s))
+
+class Test_touch_import(support.TestCase):
+
+    def test_after_docstring(self):
+        node = parse('"""foo"""\nbar()')
+        fixer_util.touch_import(None, "foo", node)
+        self.assertEqual(str(node), '"""foo"""\nimport foo\nbar()\n\n')
+
+    def test_after_imports(self):
+        node = parse('"""foo"""\nimport bar\nbar()')
+        fixer_util.touch_import(None, "foo", node)
+        self.assertEqual(str(node), '"""foo"""\nimport bar\nimport foo\nbar()\n\n')
+
+    def test_beginning(self):
+        node = parse('bar()')
+        fixer_util.touch_import(None, "foo", node)
+        self.assertEqual(str(node), 'import foo\nbar()\n\n')
+
+    def test_from_import(self):
+        node = parse('bar()')
+        fixer_util.touch_import("cgi", "escape", node)
+        self.assertEqual(str(node), 'from cgi import escape\nbar()\n\n')
+
+    def test_name_import(self):
+        node = parse('bar()')
+        fixer_util.touch_import(None, "cgi", node)
+        self.assertEqual(str(node), 'import cgi\nbar()\n\n')
+
+
+if __name__ == "__main__":
+    import __main__
+    support.run_all_tests(__main__)
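
A compact illustration of the two helpers these tests pin down,
find_binding() and touch_import(), assuming refactor.tests.support is
importable the same way the test suite imports it:

    from refactor import fixer_util
    from refactor.tests import support

    node = support.parse_string("import bar\nbar()\n")

    # find_binding() returns the node binding the name, or None.
    print fixer_util.find_binding("bar", node) is not None   # -> True

    # touch_import() inserts an import unless one is already present.
    fixer_util.touch_import(None, "foo", node)
    print str(node)    # the tree now begins "import bar\nimport foo"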

Added: sandbox/trunk/refactor_pkg/scripts/benchmark.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/scripts/benchmark.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,58 @@
+#!/usr/bin/env python2.5
+"""
+This is a benchmarking script to test the speed of 2to3's pattern matching
+system. It's equivalent to "refactor.py -f all" for every Python module
+in sys.modules, but without engaging the actual transformations.
+"""
+
+__author__ = "Collin Winter <collinw at gmail.com>"
+
+# Python imports
+import os.path
+import sys
+from time import time
+
+# Test imports
+from .support import adjust_path
+adjust_path()
+
+# Local imports
+from .. import refactor
+
+### Mock code for refactor.py and the fixers
+###############################################################################
+class Options:
+    def __init__(self, **kwargs):
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+        self.verbose = False
+
+def dummy_transform(*args, **kwargs):
+    pass
+
+### Collect list of modules to match against
+###############################################################################
+files = []
+for mod in sys.modules.values():
+    if mod is None or not hasattr(mod, '__file__'):
+        continue
+    f = mod.__file__
+    if f.endswith('.pyc'):
+        f = f[:-1]
+    if f.endswith('.py'):
+        files.append(f)
+
+### Set up refactor and run the benchmark
+###############################################################################
+options = Options(fix=["all"], print_function=False, doctests_only=False)
+refactor = refactor.RefactoringTool(options)
+for fixer in refactor.fixers:
+    # We don't want them to actually fix the tree, just match against it.
+    fixer.transform = dummy_transform
+
+t = time()
+for f in files:
+    print "Matching", f
+    refactor.refactor_file(f)
+print "%d seconds to match %d files" % (time() - t, len(sys.modules))

Added: sandbox/trunk/refactor_pkg/scripts/find_pattern.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/scripts/find_pattern.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+"""Script that makes determining PATTERN for a new fix much easier.
+
+Figuring out exactly what PATTERN I want for a given fixer class is
+getting tedious. This script will step through each possible subtree
+for a given string, allowing you to select which one you want. It will
+then try to figure out an appropriate pattern to match that tree. This
+pattern will require some editing (it will be overly restrictive) but
+should provide a solid base to work with and handle the tricky parts.
+
+Usage:
+
+    python find_pattern.py "g.throw(E, V, T)"
+
+This will step through each subtree in the parse. To reject a
+candidate subtree, hit enter; to accept a candidate, hit "y" and
+enter. The pattern will be spit out to stdout.
+
+For example, the above will yield a succession of possible snippets,
+skipping all leaf-only trees. I accept
+
+'g.throw(E, V, T)'
+
+This causes find_pattern to spit out
+
+power< 'g' trailer< '.' 'throw' >
+           trailer< '(' arglist< 'E' ',' 'V' ',' 'T' > ')' > >
+
+
+Some minor tweaks later, I'm left with
+
+power< any trailer< '.' 'throw' >
+       trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > >
+
+which is exactly what I was after.
+
+Larger snippets can be placed in a file (as opposed to a command-line
+arg) and processed with the -f option.
+"""
+
+__author__ = "Collin Winter <collinw at gmail.com>"
+
+# Python imports
+import optparse
+import sys
+from StringIO import StringIO
+
+# Local imports
+from refactor import pytree
+from refactor import pgen2
+from refactor.pygram import python_symbols, python_grammar
+
+def main(args):
+    parser = optparse.OptionParser(usage="find_pattern.py [options] [string]")
+    parser.add_option("-f", "--file", action="store",
+                      help="Read a code snippet from the specified file")
+    parser.add_option("-p", "--print-function", action="store_true",
+                      help="Modify the grammar so that print() is a function")
+
+    # Parse command line arguments
+    options, args = parser.parse_args(args)
+
+    if options.print_function:
+        del python_grammar.keywords["print"]
+
+    driver = pgen2.driver.Driver(python_grammar, convert=pytree.convert)
+    if options.file:
+        tree = driver.parse_file(options.file)
+    elif len(args) > 1:
+        tree = driver.parse_stream(StringIO(args[1] + "\n"))
+    else:
+        print >>sys.stderr, "You must specify an input file or an input string"
+        return 1
+
+    examine_tree(tree)
+    return 0
+
+def examine_tree(tree):
+    for node in tree.post_order():
+        if isinstance(node, pytree.Leaf):
+            continue
+        print repr(str(node))
+        verdict = raw_input()
+        if verdict.strip():
+            print find_pattern(node)
+            return
+
+def find_pattern(node):
+    if isinstance(node, pytree.Leaf):
+        return repr(node.value)
+
+    return find_symbol(node.type) + \
+           "< " + " ".join(find_pattern(n) for n in node.children) + " >"
+
+def find_symbol(sym):
+    for n, v in python_symbols.__dict__.items():
+        if v == sym:
+            return n
+
+if __name__ == "__main__":
+    sys.exit(main(sys.argv))
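
The pattern this script emits ends up as the PATTERN attribute of a fixer
class. As a sketch of where the docstring's g.throw() example would land,
modeled on the from2 fixers in this commit (the class body is illustrative,
not the shipped fix_throw):

    from refactor import fixer_base

    class FixThrowSketch(fixer_base.BaseFix):
        PATTERN = """
        power< any trailer< '.' 'throw' >
               trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > >
        """

        def transform(self, node, results):
            # "results" maps the pattern's named captures:
            # args, exc, val and (optionally) tb.
            pass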

Added: sandbox/trunk/refactor_pkg/setup.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/setup.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,15 @@
+from distutils.core import setup
+
+setup(
+   name="2to3",
+   packages=['lib2to3','lib2to3.fixes','lib2to3.pgen2'],
+   package_data={'lib2to3':['Grammar.txt','PatternGrammar.txt']},
+   scripts=["2to3"]
+)
+
+setup(
+   name="refactor",
+   packages=['refactor','refactor.fixes','refactor.fixes.from2','refactor.fixes.from3','refactor.pgen2'],
+   package_data={'refactor':['Grammar.txt','PatternGrammar.txt']},
+   scripts=["3to2"]
+)

Added: sandbox/trunk/refactor_pkg/test.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/refactor_pkg/test.py	Wed Apr  1 21:02:05 2009
@@ -0,0 +1,68 @@
+#!/usr/bin/env python2.5
+
+"""Main test file for refactor (2to3 and back again).
+
+Running "python test.py" will run all tests in tests/test_*.py.
+"""
+# Original Author: Collin Winter
+
+import unittest
+from optparse import OptionParser, OptionGroup
+from sys import exit
+
+# Note more imports below, based on optparse output.
+
+usage = "usage: %prog [options] arg"
+usage += "\n\narg can be:\n"
+usage += "test suite: run tests in refactor/tests/<test suite>\n"
+usage += "test class: run tests in <test suite>.<test class>\n"
+usage += "(default: run all tests in refactor/tests/test_*.py)"
+
+parser = OptionParser(usage=usage)
+parser.add_option("--source",
+                  dest="source",
+                  help="source version of Python to refactor")
+parser.add_option("--target",
+                  dest="target",
+                  help="target version of Python")
+parser.add_option("--base",
+                  dest="base", default="refactor",
+                  help="base package, e.g. lib2to3 or refactor")
+
+(options, args) = parser.parse_args()
+
+# It's too late at night to figure out why __import__ is failing.
+exec "from %s import tests" % options.base
+exec "from %s.tests import support" % options.base
+exec "from %s.tests.test_fixers import FixerTestCase as Fixer" % options.base
+
+old_version = support.parse_version(options.source)
+new_version = support.parse_version(options.target)
+
+if old_version:
+    Fixer.old_version = old_version
+if new_version:
+    Fixer.new_version = new_version
+
+if len(args) > 0:
+    arg = args[0]
+    mod = tests
+    for m in arg.split("."):
+        mod = getattr(mod, m, None)
+        if not mod:
+            print "Error importing %s" %(m)
+            exit(1)
+
+    if arg.find(".") == -1:
+        # Just the module was specified, load all the tests
+        suite = unittest.TestLoader().loadTestsFromModule(mod)
+    else:
+        # A class was specified, load that
+        suite = unittest.makeSuite(mod)
+else:
+    suite = tests.all_tests
+
+try:
+    tests.support.run_all_tests(tests=suite)
+except KeyboardInterrupt:
+    pass
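
For what it's worth, the exec-based imports above can be written with
__import__ by passing a non-empty fromlist, which makes __import__ return
the leaf module rather than the top-level package. A sketch, assuming
options.base is "refactor":

    base = "refactor"
    tests = __import__(base + ".tests", {}, {}, ["tests"])
    support = __import__(base + ".tests.support", {}, {}, ["support"])
    Fixer = __import__(base + ".tests.test_fixers",
                       {}, {}, ["test_fixers"]).FixerTestCase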

