[Python-checkins] r53758 - in sandbox/trunk/2to3: fixes/fix_apply.py fixes/fix_dict.py fixes/fix_dict2.py fixes/fix_except.py fixes/fix_exec.py fixes/fix_has_key.py fixes/fix_intern.py fixes/fix_long.py fixes/fix_ne.py fixes/fix_print.py fixes/fix_raise.py fixes/fix_repr.py fixes/fix_sysexcinfo.py fixes/fix_throw.py fixes/fix_xrange.py fixes/macros.py patcomp.py pgen2/conv.py pgen2/driver.py pgen2/grammar.py pgen2/parse.py pgen2/pgen.py pgen2/token.py pgen2/tokenize.py pygram.py refactor.py tokenize.py
collin.winter
python-checkins at python.org
Tue Feb 13 00:59:47 CET 2007
Author: collin.winter
Date: Tue Feb 13 00:59:44 2007
New Revision: 53758
Added:
sandbox/trunk/2to3/pgen2/token.py (contents, props changed)
sandbox/trunk/2to3/pgen2/tokenize.py
- copied, changed from r53757, sandbox/trunk/2to3/tokenize.py
Removed:
sandbox/trunk/2to3/tokenize.py
Modified:
sandbox/trunk/2to3/fixes/fix_apply.py
sandbox/trunk/2to3/fixes/fix_dict.py
sandbox/trunk/2to3/fixes/fix_dict2.py
sandbox/trunk/2to3/fixes/fix_except.py
sandbox/trunk/2to3/fixes/fix_exec.py
sandbox/trunk/2to3/fixes/fix_has_key.py
sandbox/trunk/2to3/fixes/fix_intern.py
sandbox/trunk/2to3/fixes/fix_long.py
sandbox/trunk/2to3/fixes/fix_ne.py
sandbox/trunk/2to3/fixes/fix_print.py
sandbox/trunk/2to3/fixes/fix_raise.py
sandbox/trunk/2to3/fixes/fix_repr.py
sandbox/trunk/2to3/fixes/fix_sysexcinfo.py
sandbox/trunk/2to3/fixes/fix_throw.py
sandbox/trunk/2to3/fixes/fix_xrange.py
sandbox/trunk/2to3/fixes/macros.py
sandbox/trunk/2to3/patcomp.py
sandbox/trunk/2to3/pgen2/conv.py
sandbox/trunk/2to3/pgen2/driver.py
sandbox/trunk/2to3/pgen2/grammar.py
sandbox/trunk/2to3/pgen2/parse.py
sandbox/trunk/2to3/pgen2/pgen.py
sandbox/trunk/2to3/pygram.py
sandbox/trunk/2to3/refactor.py
Log:
Move token and tokenize into pgen2
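For client code this is a mechanical import rewrite. A minimal before/after
sketch (assuming the 2to3 root directory is on sys.path, as the diffs below
suggest):

    # Before this revision: top-level modules.
    import token
    import tokenize

    # After this revision: both modules live in the pgen2 package.
    from pgen2 import token
    from pgen2 import tokenize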
Modified: sandbox/trunk/2to3/fixes/fix_apply.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_apply.py (original)
+++ sandbox/trunk/2to3/fixes/fix_apply.py Tue Feb 13 00:59:44 2007
@@ -3,11 +3,9 @@
"""Fixer for apply()."""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Call, Comma
Modified: sandbox/trunk/2to3/fixes/fix_dict.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_dict.py (original)
+++ sandbox/trunk/2to3/fixes/fix_dict.py Tue Feb 13 00:59:44 2007
@@ -21,12 +21,10 @@
as an argument to a function that introspects the argument).
"""
-# Python imports
-import token
-
# Local imports
import pytree
import patcomp
+from pgen2 import token
from fixes import basefix
from fixes import macros
Modified: sandbox/trunk/2to3/fixes/fix_dict2.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_dict2.py (original)
+++ sandbox/trunk/2to3/fixes/fix_dict2.py Tue Feb 13 00:59:44 2007
@@ -9,11 +9,9 @@
.itervalues -> .values
"""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes import macros
Modified: sandbox/trunk/2to3/fixes/fix_except.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_except.py (original)
+++ sandbox/trunk/2to3/fixes/fix_except.py Tue Feb 13 00:59:44 2007
@@ -1,11 +1,9 @@
"""Fixer for except statements with named exceptions."""
# Author: Collin Winter
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Assign, Attr, Name
Modified: sandbox/trunk/2to3/fixes/fix_exec.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_exec.py (original)
+++ sandbox/trunk/2to3/fixes/fix_exec.py Tue Feb 13 00:59:44 2007
@@ -3,11 +3,9 @@
"""Fixer for exec."""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Comma, Name, Call
Modified: sandbox/trunk/2to3/fixes/fix_has_key.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_has_key.py (original)
+++ sandbox/trunk/2to3/fixes/fix_has_key.py Tue Feb 13 00:59:44 2007
@@ -3,11 +3,9 @@
"""Fixer for has_key()."""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Name
Modified: sandbox/trunk/2to3/fixes/fix_intern.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_intern.py (original)
+++ sandbox/trunk/2to3/fixes/fix_intern.py Tue Feb 13 00:59:44 2007
@@ -3,11 +3,9 @@
"""Fixer for intern()."""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Name, Attr
Modified: sandbox/trunk/2to3/fixes/fix_long.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_long.py (original)
+++ sandbox/trunk/2to3/fixes/fix_long.py Tue Feb 13 00:59:44 2007
@@ -6,11 +6,9 @@
This also strips the trailing 'L' or 'l' from long literals.
"""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Name
Modified: sandbox/trunk/2to3/fixes/fix_ne.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_ne.py (original)
+++ sandbox/trunk/2to3/fixes/fix_ne.py Tue Feb 13 00:59:44 2007
@@ -6,11 +6,9 @@
This is so simple that we don't need the pattern compiler.
"""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
Modified: sandbox/trunk/2to3/fixes/fix_print.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_print.py (original)
+++ sandbox/trunk/2to3/fixes/fix_print.py Tue Feb 13 00:59:44 2007
@@ -10,11 +10,9 @@
'print >>x, ...' into 'print(..., file=x)'
"""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Name, Call, Comma
Modified: sandbox/trunk/2to3/fixes/fix_raise.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_raise.py (original)
+++ sandbox/trunk/2to3/fixes/fix_raise.py Tue Feb 13 00:59:44 2007
@@ -1,11 +1,9 @@
"""Fixer for 'raise E, V, T'"""
# Author: Collin Winter
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Name, Call, Assign, Newline, Attr, is_tuple
Modified: sandbox/trunk/2to3/fixes/fix_repr.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_repr.py (original)
+++ sandbox/trunk/2to3/fixes/fix_repr.py Tue Feb 13 00:59:44 2007
@@ -3,11 +3,9 @@
"""Fixer that transforms `xyzzy` into repr(xyzzy)."""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Call, Name
Modified: sandbox/trunk/2to3/fixes/fix_sysexcinfo.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_sysexcinfo.py (original)
+++ sandbox/trunk/2to3/fixes/fix_sysexcinfo.py Tue Feb 13 00:59:44 2007
@@ -1,10 +1,8 @@
"""Fixer/warner for sys.exc_{info,value,type,traceback}"""
# Author: Collin Winter
-# Python imports
-import token
-
# Local imports
+from pgen2 import token
from pytree import Leaf
from fixes import basefix
Modified: sandbox/trunk/2to3/fixes/fix_throw.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_throw.py (original)
+++ sandbox/trunk/2to3/fixes/fix_throw.py Tue Feb 13 00:59:44 2007
@@ -1,11 +1,9 @@
"""Fixer for generator.throw(E, V, T)"""
# Author: Collin Winter
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes.macros import Name, Call, Assign, Newline, Attr
Modified: sandbox/trunk/2to3/fixes/fix_xrange.py
==============================================================================
--- sandbox/trunk/2to3/fixes/fix_xrange.py (original)
+++ sandbox/trunk/2to3/fixes/fix_xrange.py Tue Feb 13 00:59:44 2007
@@ -2,11 +2,9 @@
"""Fixer that changes xrange(...) into range(...)."""
-# Python imports
-import token
-
# Local imports
import pytree
+from pgen2 import token
from fixes import basefix
from fixes import macros
Modified: sandbox/trunk/2to3/fixes/macros.py
==============================================================================
--- sandbox/trunk/2to3/fixes/macros.py (original)
+++ sandbox/trunk/2to3/fixes/macros.py Tue Feb 13 00:59:44 2007
@@ -1,10 +1,8 @@
"""Abstract away often-used node construction routines."""
# Author: Collin Winter
-# Python imports
-import token
-
# Local imports
+from pgen2 import token
from pytree import Leaf, Node
from pygram import python_symbols as syms
Modified: sandbox/trunk/2to3/patcomp.py
==============================================================================
--- sandbox/trunk/2to3/patcomp.py (original)
+++ sandbox/trunk/2to3/patcomp.py Tue Feb 13 00:59:44 2007
@@ -12,12 +12,12 @@
# Python imports
import os
-import token
-import tokenize
# Fairly local imports
from pgen2 import driver
from pgen2 import literals
+from pgen2 import token
+from pgen2 import tokenize
# Really local imports
import pytree
Modified: sandbox/trunk/2to3/pgen2/conv.py
==============================================================================
--- sandbox/trunk/2to3/pgen2/conv.py (original)
+++ sandbox/trunk/2to3/pgen2/conv.py Tue Feb 13 00:59:44 2007
@@ -28,9 +28,10 @@
# Python imports
import re
-import token
-from pgen2 import grammar
+# Local imports
+from pgen2 import grammar, token
+
class Converter(grammar.Grammar):
"""Grammar subclass that reads classic pgen output files.
Modified: sandbox/trunk/2to3/pgen2/driver.py
==============================================================================
--- sandbox/trunk/2to3/pgen2/driver.py (original)
+++ sandbox/trunk/2to3/pgen2/driver.py Tue Feb 13 00:59:44 2007
@@ -17,13 +17,10 @@
# Python imports
import os
-import token
import logging
-import tokenize
# Pgen imports
-from pgen2 import parse
-from pgen2 import grammar
+from pgen2 import grammar, parse, token, tokenize
class Driver(object):
Modified: sandbox/trunk/2to3/pgen2/grammar.py
==============================================================================
--- sandbox/trunk/2to3/pgen2/grammar.py (original)
+++ sandbox/trunk/2to3/pgen2/grammar.py Tue Feb 13 00:59:44 2007
@@ -13,9 +13,12 @@
"""
# Python imports
-import token, tokenize
import cPickle as pickle
+# Local imports
+from pgen2 import token, tokenize
+
+
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Modified: sandbox/trunk/2to3/pgen2/parse.py
==============================================================================
--- sandbox/trunk/2to3/pgen2/parse.py (original)
+++ sandbox/trunk/2to3/pgen2/parse.py Tue Feb 13 00:59:44 2007
@@ -10,8 +10,8 @@
"""
-# Python imports
-import token
+# Local imports
+from pgen2 import token
class ParseError(Exception):
"""Exception to signal the parser is stuck."""
Modified: sandbox/trunk/2to3/pgen2/pgen.py
==============================================================================
--- sandbox/trunk/2to3/pgen2/pgen.py (original)
+++ sandbox/trunk/2to3/pgen2/pgen.py Tue Feb 13 00:59:44 2007
@@ -1,12 +1,8 @@
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
-# Python imports
-import token
-import tokenize
-
# Pgen imports
-from pgen2 import grammar
+from pgen2 import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
pass
Added: sandbox/trunk/2to3/pgen2/token.py
==============================================================================
--- (empty file)
+++ sandbox/trunk/2to3/pgen2/token.py Tue Feb 13 00:59:44 2007
@@ -0,0 +1,82 @@
+#! /usr/bin/env python
+
+"""Token constants (from "token.h")."""
+
+# Taken from Python (r53757) and modified to include some tokens
+# originally monkeypatched in by pgen2.tokenize
+
+#--start constants--
+ENDMARKER = 0
+NAME = 1
+NUMBER = 2
+STRING = 3
+NEWLINE = 4
+INDENT = 5
+DEDENT = 6
+LPAR = 7
+RPAR = 8
+LSQB = 9
+RSQB = 10
+COLON = 11
+COMMA = 12
+SEMI = 13
+PLUS = 14
+MINUS = 15
+STAR = 16
+SLASH = 17
+VBAR = 18
+AMPER = 19
+LESS = 20
+GREATER = 21
+EQUAL = 22
+DOT = 23
+PERCENT = 24
+BACKQUOTE = 25
+LBRACE = 26
+RBRACE = 27
+EQEQUAL = 28
+NOTEQUAL = 29
+LESSEQUAL = 30
+GREATEREQUAL = 31
+TILDE = 32
+CIRCUMFLEX = 33
+LEFTSHIFT = 34
+RIGHTSHIFT = 35
+DOUBLESTAR = 36
+PLUSEQUAL = 37
+MINEQUAL = 38
+STAREQUAL = 39
+SLASHEQUAL = 40
+PERCENTEQUAL = 41
+AMPEREQUAL = 42
+VBAREQUAL = 43
+CIRCUMFLEXEQUAL = 44
+LEFTSHIFTEQUAL = 45
+RIGHTSHIFTEQUAL = 46
+DOUBLESTAREQUAL = 47
+DOUBLESLASH = 48
+DOUBLESLASHEQUAL = 49
+AT = 50
+OP = 51
+COMMENT = 52
+NL = 53
+RARROW = 54
+ERRORTOKEN = 55
+N_TOKENS = 56
+NT_OFFSET = 256
+#--end constants--
+
+tok_name = {}
+for _name, _value in globals().items():
+ if type(_value) is type(0):
+ tok_name[_value] = _name
+
+
+def ISTERMINAL(x):
+ return x < NT_OFFSET
+
+def ISNONTERMINAL(x):
+ return x >= NT_OFFSET
+
+def ISEOF(x):
+ return x == ENDMARKER
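A minimal usage sketch of the new module, based only on the definitions
above (NAME = 1, NT_OFFSET = 256, and the tok_name reverse mapping):

    from pgen2 import token

    # tok_name maps numeric token types back to their names.
    assert token.tok_name[token.NAME] == 'NAME'

    # Terminals sit below NT_OFFSET; grammar symbols start at NT_OFFSET.
    assert token.ISTERMINAL(token.NAME)
    assert token.ISNONTERMINAL(token.NT_OFFSET)
    assert token.ISEOF(token.ENDMARKER)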
Copied: sandbox/trunk/2to3/pgen2/tokenize.py (from r53757, sandbox/trunk/2to3/tokenize.py)
==============================================================================
--- sandbox/trunk/2to3/tokenize.py (original)
+++ sandbox/trunk/2to3/pgen2/tokenize.py Tue Feb 13 00:59:44 2007
@@ -30,23 +30,12 @@
'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
import string, re
-from token import *
+from pgen2.token import *
-import token
-__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
- "generate_tokens", "NL", "untokenize"]
-del x
-
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
-RARROW = N_TOKENS + 2
-token.RARROW = RARROW
-tok_name[RARROW] = 'RARROW'
-N_TOKENS += 3
-
-del token
+from pgen2 import token
+__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
+ "generate_tokens", "untokenize"]
+del x, token
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
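The hunk above removes the import-time monkeypatching of COMMENT, NL, and
RARROW; those three are now ordinary constants defined in pgen2/token.py
(values 52, 53, and 54 in the file added above), so a sketch like this
should hold:

    from pgen2 import token

    assert token.COMMENT == 52 and token.NL == 53 and token.RARROW == 54
    assert token.tok_name[token.RARROW] == 'RARROW'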
Modified: sandbox/trunk/2to3/pygram.py
==============================================================================
--- sandbox/trunk/2to3/pygram.py (original)
+++ sandbox/trunk/2to3/pygram.py Tue Feb 13 00:59:44 2007
@@ -5,10 +5,10 @@
# Python imports
import os
-import token
# Local imports
import pytree
+from pgen2 import token
from pgen2 import driver
# The grammar file
Modified: sandbox/trunk/2to3/refactor.py
==============================================================================
--- sandbox/trunk/2to3/refactor.py (original)
+++ sandbox/trunk/2to3/refactor.py Tue Feb 13 00:59:44 2007
@@ -20,10 +20,10 @@
import logging
# Local imports
-import tokenize
import pytree
import patcomp
from pgen2 import driver
+from pgen2 import tokenize
import fixes
import fixes.macros
import pygram
Deleted: /sandbox/trunk/2to3/tokenize.py
==============================================================================
--- /sandbox/trunk/2to3/tokenize.py Tue Feb 13 00:59:44 2007
+++ (empty file)
@@ -1,399 +0,0 @@
-# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
-# All rights reserved.
-
-"""Tokenization help for Python programs.
-
-generate_tokens(readline) is a generator that breaks a stream of
-text into Python tokens. It accepts a readline-like method which is called
-repeatedly to get the next line of input (or "" for EOF). It generates
-5-tuples with these members:
-
- the token type (see token.py)
- the token (a string)
- the starting (row, column) indices of the token (a 2-tuple of ints)
- the ending (row, column) indices of the token (a 2-tuple of ints)
- the original line (string)
-
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators.
-
-Older entry points
- tokenize_loop(readline, tokeneater)
- tokenize(readline, tokeneater=printtoken)
-are the same, except instead of generating tokens, tokeneater is a callback
-function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found."""
-
-__author__ = 'Ka-Ping Yee <ping at lfw.org>'
-__credits__ = \
- 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
-
-import string, re
-from token import *
-
-import token
-__all__ = [x for x in dir(token) if x[0] != '_'] + ["COMMENT", "tokenize",
- "generate_tokens", "NL", "untokenize"]
-del x
-
-COMMENT = N_TOKENS
-tok_name[COMMENT] = 'COMMENT'
-NL = N_TOKENS + 1
-tok_name[NL] = 'NL'
-RARROW = N_TOKENS + 2
-token.RARROW = RARROW
-tok_name[RARROW] = 'RARROW'
-N_TOKENS += 3
-
-del token
-
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
-
-Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
-Octnumber = r'0[0-7]*[lL]?'
-Decnumber = r'[1-9]\d*[lL]?'
-Intnumber = group(Hexnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?\d+'
-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
-Expfloat = r'\d+' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-
-# Tail end of ' string.
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
-# Tail end of " string.
-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
-# Tail end of ''' string.
-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
-# Tail end of """ string.
-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[uU]?[rR]?'''", '[uU]?[rR]?"""')
-# Single-line ' or " string.
-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
-
-# Because of leftmost-then-longest match semantics, be sure to put the
-# longest operators first (e.g., if = came before ==, == would get
-# recognized as two instances of =).
-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
- r"//=?", r"->",
- r"[+\-*/%&|^=<>]=?",
- r"~")
-
-Bracket = '[][(){}]'
-Special = group(r'\r?\n', r'[:;.,`@]')
-Funny = group(Operator, Bracket, Special)
-
-PlainToken = group(Number, Funny, String, Name)
-Token = Ignore + PlainToken
-
-# First (or only) line of ' or " string.
-ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
- group("'", r'\\\r?\n'),
- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
- group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-
-tokenprog, pseudoprog, single3prog, double3prog = map(
- re.compile, (Token, PseudoToken, Single3, Double3))
-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
- "'''": single3prog, '"""': double3prog,
- "r'''": single3prog, 'r"""': double3prog,
- "u'''": single3prog, 'u"""': double3prog,
- "ur'''": single3prog, 'ur"""': double3prog,
- "R'''": single3prog, 'R"""': double3prog,
- "U'''": single3prog, 'U"""': double3prog,
- "uR'''": single3prog, 'uR"""': double3prog,
- "Ur'''": single3prog, 'Ur"""': double3prog,
- "UR'''": single3prog, 'UR"""': double3prog,
- 'r': None, 'R': None, 'u': None, 'U': None}
-
-triple_quoted = {}
-for t in ("'''", '"""',
- "r'''", 'r"""', "R'''", 'R"""',
- "u'''", 'u"""', "U'''", 'U"""',
- "ur'''", 'ur"""', "Ur'''", 'Ur"""',
- "uR'''", 'uR"""', "UR'''", 'UR"""'):
- triple_quoted[t] = t
-single_quoted = {}
-for t in ("'", '"',
- "r'", 'r"', "R'", 'R"',
- "u'", 'u"', "U'", 'U"',
- "ur'", 'ur"', "Ur'", 'Ur"',
- "uR'", 'uR"', "UR'", 'UR"' ):
- single_quoted[t] = t
-
-tabsize = 8
-
-class TokenError(Exception): pass
-
-class StopTokenizing(Exception): pass
-
-def printtoken(type, token, (srow, scol), (erow, ecol), line): # for testing
- print "%d,%d-%d,%d:\t%s\t%s" % \
- (srow, scol, erow, ecol, tok_name[type], repr(token))
-
-def tokenize(readline, tokeneater=printtoken):
- """
- The tokenize() function accepts two parameters: one representing the
- input stream, and one providing an output mechanism for tokenize().
-
- The first parameter, readline, must be a callable object which provides
- the same interface as the readline() method of built-in file objects.
- Each call to the function should return one line of input as a string.
-
- The second parameter, tokeneater, must also be a callable object. It is
- called once for each token, with five arguments, corresponding to the
- tuples generated by generate_tokens().
- """
- try:
- tokenize_loop(readline, tokeneater)
- except StopTokenizing:
- pass
-
-# backwards compatible interface
-def tokenize_loop(readline, tokeneater):
- for token_info in generate_tokens(readline):
- tokeneater(*token_info)
-
-class Untokenizer:
-
- def __init__(self):
- self.tokens = []
- self.prev_row = 1
- self.prev_col = 0
-
- def add_whitespace(self, start):
- row, col = start
- assert row <= self.prev_row
- col_offset = col - self.prev_col
- if col_offset:
- self.tokens.append(" " * col_offset)
-
- def untokenize(self, iterable):
- for t in iterable:
- if len(t) == 2:
- self.compat(t, iterable)
- break
- tok_type, token, start, end, line = t
- self.add_whitespace(start)
- self.tokens.append(token)
- self.prev_row, self.prev_col = end
- if tok_type in (NEWLINE, NL):
- self.prev_row += 1
- self.prev_col = 0
- return "".join(self.tokens)
-
- def compat(self, token, iterable):
- startline = False
- indents = []
- toks_append = self.tokens.append
- toknum, tokval = token
- if toknum in (NAME, NUMBER):
- tokval += ' '
- if toknum in (NEWLINE, NL):
- startline = True
- for tok in iterable:
- toknum, tokval = tok[:2]
-
- if toknum in (NAME, NUMBER):
- tokval += ' '
-
- if toknum == INDENT:
- indents.append(tokval)
- continue
- elif toknum == DEDENT:
- indents.pop()
- continue
- elif toknum in (NEWLINE, NL):
- startline = True
- elif startline and indents:
- toks_append(indents[-1])
- startline = False
- toks_append(tokval)
-
-def untokenize(iterable):
- """Transform tokens back into Python source code.
-
- Each element returned by the iterable must be a token sequence
- with at least two elements, a token number and token value. If
- only two tokens are passed, the resulting output is poor.
-
- Round-trip invariant for full input:
- Untokenized source will match input source exactly
-
- Round-trip invariant for limited input:
- # Output text will tokenize back to the input
- t1 = [tok[:2] for tok in generate_tokens(f.readline)]
- newcode = untokenize(t1)
- readline = iter(newcode.splitlines(1)).next
- t2 = [tok[:2] for tok in generate_tokens(readline)]
- assert t1 == t2
- """
- ut = Untokenizer()
- return ut.untokenize(iterable)
-
-def generate_tokens(readline):
- """
- The generate_tokens() generator requires one argument, readline, which
- must be a callable object which provides the same interface as the
- readline() method of built-in file objects. Each call to the function
- should return one line of input as a string. Alternately, readline
- can be a callable function terminating with StopIteration:
- readline = open(myfile).next # Example of alternate readline
-
- The generator produces 5-tuples with these members: the token type; the
- token string; a 2-tuple (srow, scol) of ints specifying the row and
- column where the token begins in the source; a 2-tuple (erow, ecol) of
- ints specifying the row and column where the token ends in the source;
- and the line on which the token was found. The line passed is the
- logical line; continuation lines are included.
- """
- lnum = parenlev = continued = 0
- namechars, numchars = string.ascii_letters + '_', '0123456789'
- contstr, needcont = '', 0
- contline = None
- indents = [0]
-
- while 1: # loop over lines in stream
- try:
- line = readline()
- except StopIteration:
- line = ''
- lnum = lnum + 1
- pos, max = 0, len(line)
-
- if contstr: # continued string
- if not line:
- raise TokenError, ("EOF in multi-line string", strstart)
- endmatch = endprog.match(line)
- if endmatch:
- pos = end = endmatch.end(0)
- yield (STRING, contstr + line[:end],
- strstart, (lnum, end), contline + line)
- contstr, needcont = '', 0
- contline = None
- elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
- yield (ERRORTOKEN, contstr + line,
- strstart, (lnum, len(line)), contline)
- contstr = ''
- contline = None
- continue
- else:
- contstr = contstr + line
- contline = contline + line
- continue
-
- elif parenlev == 0 and not continued: # new statement
- if not line: break
- column = 0
- while pos < max: # measure leading whitespace
- if line[pos] == ' ': column = column + 1
- elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
- elif line[pos] == '\f': column = 0
- else: break
- pos = pos + 1
- if pos == max: break
-
- if line[pos] in '#\r\n': # skip comments or blank lines
- if line[pos] == '#':
- comment_token = line[pos:].rstrip('\r\n')
- nl_pos = pos + len(comment_token)
- yield (COMMENT, comment_token,
- (lnum, pos), (lnum, pos + len(comment_token)), line)
- yield (NL, line[nl_pos:],
- (lnum, nl_pos), (lnum, len(line)), line)
- else:
- yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
- (lnum, pos), (lnum, len(line)), line)
- continue
-
- if column > indents[-1]: # count indents or dedents
- indents.append(column)
- yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
- while column < indents[-1]:
- if column not in indents:
- raise IndentationError(
- "unindent does not match any outer indentation level",
- ("<tokenize>", lnum, pos, line))
- indents = indents[:-1]
- yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
-
- else: # continued statement
- if not line:
- raise TokenError, ("EOF in multi-line statement", (lnum, 0))
- continued = 0
-
- while pos < max:
- pseudomatch = pseudoprog.match(line, pos)
- if pseudomatch: # scan for tokens
- start, end = pseudomatch.span(1)
- spos, epos, pos = (lnum, start), (lnum, end), end
- token, initial = line[start:end], line[start]
-
- if initial in numchars or \
- (initial == '.' and token != '.'): # ordinary number
- yield (NUMBER, token, spos, epos, line)
- elif initial in '\r\n':
- yield (NL if parenlev > 0 else NEWLINE,
- token, spos, epos, line)
- elif initial == '#':
- assert not token.endswith("\n")
- yield (COMMENT, token, spos, epos, line)
- elif token in triple_quoted:
- endprog = endprogs[token]
- endmatch = endprog.match(line, pos)
- if endmatch: # all on one line
- pos = endmatch.end(0)
- token = line[start:pos]
- yield (STRING, token, spos, (lnum, pos), line)
- else:
- strstart = (lnum, start) # multiple lines
- contstr = line[start:]
- contline = line
- break
- elif initial in single_quoted or \
- token[:2] in single_quoted or \
- token[:3] in single_quoted:
- if token[-1] == '\n': # continued string
- strstart = (lnum, start)
- endprog = (endprogs[initial] or endprogs[token[1]] or
- endprogs[token[2]])
- contstr, needcont = line[start:], 1
- contline = line
- break
- else: # ordinary string
- yield (STRING, token, spos, epos, line)
- elif initial in namechars: # ordinary name
- yield (NAME, token, spos, epos, line)
- elif initial == '\\': # continued stmt
- # This yield is new; needed for better idempotency:
- yield (NL, token, spos, (lnum, pos), line)
- continued = 1
- else:
- if initial in '([{': parenlev = parenlev + 1
- elif initial in ')]}': parenlev = parenlev - 1
- yield (OP, token, spos, epos, line)
- else:
- yield (ERRORTOKEN, line[pos],
- (lnum, pos), (lnum, pos+1), line)
- pos = pos + 1
-
- for indent in indents[1:]: # pop remaining indent levels
- yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
- yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
-
-if __name__ == '__main__': # testing
- import sys
- if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
- else: tokenize(sys.stdin.readline)
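The module deleted above survives (modulo the import changes shown earlier)
as pgen2/tokenize.py, so its documented API is unchanged. A minimal
round-trip sketch under that assumption, using the 5-tuple format and the
full-input invariant described in the docstrings:

    from StringIO import StringIO
    from pgen2 import tokenize

    source = "x = 1\nif x:\n    y = 2\n"
    readline = StringIO(source).readline

    # generate_tokens() yields (type, string, (srow, scol), (erow, ecol), line).
    tokens = list(tokenize.generate_tokens(readline))

    # Full 5-tuples round-trip exactly through untokenize().
    assert tokenize.untokenize(tokens) == source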