[pypy-svn] r47977 - in pypy/branch/dist-future-fixing/pypy/interpreter/pyparser: . test

jacob at codespeak.net
Thu Oct 25 20:00:02 CEST 2007


Author: jacob
Date: Thu Oct 25 20:00:02 2007
New Revision: 47977

Modified:
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonparse.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py
   pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/tuplebuilder.py
Log:
Refactored the parser bits to reduce the dependency mesh. This is only halfway done, but it should work in its present state.

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/astbuilder.py	Thu Oct 25 20:00:02 2007
@@ -1099,10 +1099,11 @@
         self.d = len(rule_stack)
 
 class AstBuilder(BaseGrammarBuilder):
-    """A builder that directly produce the AST"""
+    """A builder that directly produces the AST"""
 
     def __init__(self, parser, grammar_version, debug=0, space=None):
-        BaseGrammarBuilder.__init__(self, parser, debug)
+        BaseGrammarBuilder.__init__(self, debug)
+        self.parser = parser
         self.rule_stack = []
         self.space = space
         self.source_encoding = None

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfgrammar.py	Thu Oct 25 20:00:02 2007
@@ -27,7 +27,7 @@
       group: '(' alternative ')' star?
     """
     p = GRAMMAR_GRAMMAR
-    p.add_token('EOF','EOF')
+    p.add_token(Token(p, 'EOF', 'EOF'))
 
     # star: '*' | '+'
     star          = p.Alternative_n( "star", [p.Token_n('TOK_STAR', '*'), p.Token_n('TOK_ADD', '+')] )

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/ebnfparse.py	Thu Oct 25 20:00:02 2007
@@ -1,4 +1,4 @@
-from grammar import Token, GrammarProxy
+from grammar import Token, EmptyToken, GrammarProxy
 from grammar import AbstractBuilder, AbstractContext
 
 
@@ -72,7 +72,7 @@
         """
         if not isinstance(other, Token):
             raise RuntimeError("Unexpected token type")
-        if other is self.parser.EmptyToken:
+        if other is EmptyToken:
             return False
         if other.codename != self.codename:
             return False
@@ -93,7 +93,7 @@
 class EBNFBuilder(AbstractBuilder):
     """Build a grammar tree"""
     def __init__(self, gram_parser, dest_parser):
-        AbstractBuilder.__init__(self, dest_parser)
+        AbstractBuilder.__init__(self)
         self.gram = gram_parser
         self.rule_stack = []
         self.seqcounts = [] # number of items in the current sequence
@@ -105,10 +105,13 @@
         self.current_rule_name = ""
         self.tokens = {}
         self.keywords = []
-        NAME = dest_parser.add_token('NAME')
+        NAME = dest_parser.add_token(Token(dest_parser, 'NAME'))
         # NAME = dest_parser.tokens['NAME']
         self.tokens[NAME] = NameToken(dest_parser, keywords=self.keywords)
 
+        # XXX Temporary. We should be able to get rid of it later
+        self.parser = dest_parser
+
     def context(self):
         return EBNFBuilderContext(len(self.rule_stack), self.seqcounts, self.altcounts)
 

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/grammar.py	Thu Oct 25 20:00:02 2007
@@ -7,13 +7,13 @@
 KleeneStar   : as in S -> A* or S -> A+
 Token       : a lexer token
 """
-try:
-    from pypy.interpreter.baseobjspace import Wrappable
-    from pypy.interpreter.pyparser.pytoken import NULLTOKEN
-except ImportError:
-    # allows standalone testing
-    Wrappable = object
-    NULLTOKEN = -1 # None
+
+from pypy.interpreter.baseobjspace import Wrappable
+from pypy.interpreter.pyparser.pytoken import NULLTOKEN
+#except ImportError:
+#    # allows standalone testing
+#    Wrappable = object
+#    NULLTOKEN = -1 # None
 
 
 from syntaxtree import SyntaxNode, TempSyntaxNode, TokenNode
@@ -105,13 +105,12 @@
 
 class AbstractBuilder(Wrappable):
     """Abstract base class for builder objects"""
-    def __init__(self, parser, debug=0 ):
+    def __init__(self, debug=0 ):
         # This attribute is here for convenience
         self.debug = debug
         # the parser that represents the grammar used
         # Commented the assert: this eases the testing
         #assert isinstance( parser, Parser )
-        self.parser = parser
 
     def context(self):
         """Return an opaque context object"""
@@ -145,8 +144,8 @@
     """Base/default class for a builder"""
     # XXX (adim): this is trunk's keyword management
     keywords = None
-    def __init__(self, parser, debug=0 ):
-        AbstractBuilder.__init__(self, parser, debug )
+    def __init__(self, debug=0 ):
+        AbstractBuilder.__init__(self, debug )
         # stacks contain different objects depending on the builder class
         # to be RPython they should not be defined in the base class
         self.stack = []
@@ -209,10 +208,8 @@
 
     symbols = {} # dirty trick to provide a symbols mapping while printing (and not putting it in every object)
 
-    def __init__(self, parser, codename):
+    def __init__(self, codename):
         # the rule name
-        assert isinstance(parser, Parser)
-        self.parser = parser
         # integer mapping to either a token value or rule symbol value
         self.codename = codename 
         self.args = []
@@ -249,7 +246,7 @@
             pos1 = source.get_pos()
         in_first_set = self.match_first_set(builder, token)
         if not in_first_set: # and not EmptyToken in self.first_set:
-            if self.parser.EmptyToken in self.first_set:
+            if EmptyToken in self.first_set:
                 ret = builder.sequence(self, source, 0 )
                 if self._trace:
                     self._debug_display(token, level, 'eee' )
@@ -364,7 +361,8 @@
 
 class GrammarProxy(GrammarElement):
     def __init__(self, parser, rule_name, codename=-1 ):
-        GrammarElement.__init__(self, parser, codename )
+        GrammarElement.__init__(self, codename )
+        self.parser = parser
         self.rule_name = rule_name
         self.object = None
 
@@ -372,10 +370,10 @@
         """Helper function used to represent the grammar.
         Mostly used for debugging the grammar itself."""
         name = self.parser.symbol_repr(self.codename)
-        repr = "Proxy("+name
+        repr = 'Proxy(' + name
         if self.object:
-            repr+=","+self.object.display(1)
-        repr += ")"
+            repr += ',' + self.object.display(1)
+        repr += ')'
         return repr
 
 
@@ -383,7 +381,8 @@
 class Alternative(GrammarElement):
     """Represents an alternative in a grammar rule (as in S -> A | B | C)"""
     def __init__(self, parser, name, args):
-        GrammarElement.__init__(self, parser, name )
+        GrammarElement.__init__(self, name )
+        self.parser = parser
         self.args = args
         self._reordered = False
         for i in self.args:
@@ -401,7 +400,7 @@
         # to see if this solve our problems with infinite recursion
         for rule in self.args:
             if USE_LOOKAHEAD:
-                if not rule.match_first_set(builder, tok) and self.parser.EmptyToken not in rule.first_set:
+                if not rule.match_first_set(builder, tok) and EmptyToken not in rule.first_set:
                     if self._trace:
                         print "Skipping impossible rule: %s" % (rule,)
                     continue
@@ -443,7 +442,7 @@
         # <tokens> is only needed for warning / debugging purposes
         tokens_set = []
         for rule in self.args:
-            if self.parser.EmptyToken in rule.first_set:
+            if EmptyToken in rule.first_set:
                 empty_set.append(rule)
             else:
                 not_empty_set.append(rule)
@@ -452,7 +451,7 @@
                 # It will check if a token is part of several first sets of
                 # a same alternative
                 for token in rule.first_set:
-                    if token is not self.parser.EmptyToken and token in tokens_set:
+                    if token is not EmptyToken and token in tokens_set:
                         print "Warning, token %s in\n\t%s's first set is " \
                             " part of a previous rule's first set in " \
                             " alternative\n\t%s" % (token, rule, self)
@@ -485,7 +484,8 @@
 class Sequence(GrammarElement):
     """Reprensents a Sequence in a grammar rule (as in S -> A B C)"""
     def __init__(self, parser, name, args):
-        GrammarElement.__init__(self, parser, name )
+        GrammarElement.__init__(self, name )
+        self.parser = parser
         self.args = args
         for i in self.args:
             assert isinstance( i, GrammarElement )
@@ -531,16 +531,16 @@
         for rule in self.args:
             if not rule.first_set:
                 break
-            if self.parser.EmptyToken in self.first_set:
-                self.first_set.remove( self.parser.EmptyToken )
+            if EmptyToken in self.first_set:
+                self.first_set.remove(EmptyToken)
 
-                # del self.first_set[self.parser.EmptyToken]
+                # del self.first_set[EmptyToken]
             # while we're in this loop, keep aggregating possible tokens
             for t in rule.first_set:
                 if t not in self.first_set:
                     self.first_set.append(t)
                 # self.first_set[t] = 1
-            if self.parser.EmptyToken not in rule.first_set:
+            if EmptyToken not in rule.first_set:
                 break
 
     def validate( self, syntax_node ):
@@ -561,7 +561,8 @@
 class KleeneStar(GrammarElement):
     """Represents a KleeneStar in a grammar rule as in (S -> A+) or (S -> A*)"""
     def __init__(self, parser, name, _min = 0, _max = -1, rule=None):
-        GrammarElement.__init__( self, parser, name )
+        GrammarElement.__init__( self, name )
+        self.parser = parser
         self.args = [rule]
         self.min = _min
         if _max == 0:
@@ -569,7 +570,7 @@
         self.max = _max
         self.star = "x"
         if self.min == 0:
-            self.first_set.append( self.parser.EmptyToken )
+            self.first_set.append(EmptyToken)
             # self.first_set[self.parser.EmptyToken] = 1
 
     def _match(self, source, builder, level=0):
@@ -626,15 +627,15 @@
     def calc_first_set(self):
         """returns the list of possible next tokens
         if S -> A*:
-            LAH(S) = Union( LAH(A), self.parser.EmptyToken )
+            LAH(S) = Union( LAH(A), EmptyToken )
         if S -> A+:
             LAH(S) = LAH(A)
         """
         rule = self.args[0]
         self.first_set = rule.first_set[:]
         # self.first_set = dict(rule.first_set)
-        if self.min == 0 and self.parser.EmptyToken not in self.first_set:
-            self.first_set.append(self.parser.EmptyToken)
+        if self.min == 0 and EmptyToken not in self.first_set:
+            self.first_set.append(EmptyToken)
             # self.first_set[self.parser.EmptyToken] = 1
 
     def validate( self, syntax_node ):
@@ -655,7 +656,8 @@
 class Token(GrammarElement):
     """Represents a Token in a grammar rule (a lexer token)"""
     def __init__(self, parser, codename, value=None):
-        GrammarElement.__init__(self, parser, codename)
+        GrammarElement.__init__(self, codename)
+        self.parser = parser
         self.value = value
         self.first_set = [self]
         # self.first_set = {self: 1}
@@ -707,9 +709,9 @@
         """
         if not isinstance(other, Token):
             raise RuntimeError("Unexpected token type")
-        if other is self.parser.EmptyToken:
+        if other is EmptyToken:
             return False
         # XXX (adim): this is trunk's keyword management
         # if (self.value is not None and builder.keywords is not None
         #     and self.value not in builder.keywords):
         #     return False
@@ -736,6 +738,7 @@
         return False
 
 
+EmptyToken = Token(None, -1, None)
 
 class Parser(object):
     def __init__(self):
@@ -745,7 +748,6 @@
         self.sym_name = {}  # mapping symbol code -> symbol name
         self.symbols = {}   # mapping symbol name -> symbol code
         self.tokens = { 'NULLTOKEN' : -1 }
-        self.EmptyToken = Token( self, -1, None )
         self.tok_name = {}
         self.tok_values = {}
         self.tok_rvalues = {}
@@ -781,20 +783,22 @@
             return val
         return self.symbols[ sym ]
 
-    def add_token( self, tok, value = None ):
+    def add_token(self, token):
+        name = token.codename
+        value = token.value
         # assert isinstance( tok, str )
-        if not tok in self.tokens:
-            val = self._sym_count
+        if not name in self.tokens:
+            number = self._sym_count
             self._sym_count += 1
-            self.tokens[tok] = val
-            self.tok_name[val] = tok
+            self.tokens[name] = number
+            self.tok_name[number] = name
             if value is not None:
-                self.tok_values[value] = val
+                self.tok_values[value] = number
                 # XXX : this reverse mapping seemed only to be used
                 # because of pycodegen visitAugAssign
-                self.tok_rvalues[val] = value
-            return val
-        return self.tokens[ tok ]
+                self.tok_rvalues[number] = value
+            return number
+        return self.tokens[name]
 
     def load_symbols( self, symbols ):
         for _value, _name in symbols.items():
@@ -868,7 +872,11 @@
     def Token_n(self, name, value = None ):
         # assert isinstance( name, str)
         # assert value is None or isinstance( value, str)
-        name_id = self.add_token(name, value)
+
+        # XXX What is the significance of the name_id? Needs to be found
+        # out for full refactoring of this code.
+        t = Token(self, name, value)
+        name_id = self.add_token(t)
         return Token(self, name_id, value)
 
     # Debugging functions

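A quick usage sketch of the new add_token() API above, assuming the pypy tree is importable (names taken from this diff): callers now pass a Token instance instead of a name/value pair, and still get back the numeric token code.

    from pypy.interpreter.pyparser.grammar import Parser, Token, EmptyToken

    p = Parser()
    # old style: ne = p.add_token('NOTEQUAL', '!=')
    ne = p.add_token(Token(p, 'NOTEQUAL', '!='))  # returns the numeric code
    assert p.tokens['NOTEQUAL'] == ne
    # EmptyToken is now a module-level singleton rather than p.EmptyToken
    assert EmptyToken.codename == -1
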
Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonparse.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonparse.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pythonparse.py	Thu Oct 25 20:00:02 2007
@@ -11,7 +11,8 @@
 from pypy.interpreter.pyparser.error import SyntaxError
 from pypy.interpreter.pyparser.pythonlexer import Source, match_encoding_declaration
 from pypy.interpreter.astcompiler.consts import CO_FUTURE_WITH_STATEMENT
-import pypy.interpreter.pyparser.pysymbol as pysymbol
+# XXX seems dead
+#import pypy.interpreter.pyparser.pysymbol as pysymbol
 import pypy.interpreter.pyparser.pytoken as pytoken
 import pypy.interpreter.pyparser.ebnfparse as ebnfparse
 from pypy.interpreter.pyparser.ebnflexer import GrammarSource
@@ -190,6 +191,7 @@
 ##     grammar.build_first_sets(ebnfbuilder.all_rules)
 ##     return space.wrap( ebnfbuilder.root_rules )
 
+# XXX Unused?
 def grammar_rules( space ):
     w_rules = space.newdict()
     parser = make_pyparser(space.config.objspace.pyversion)

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/pytoken.py	Thu Oct 25 20:00:02 2007
@@ -13,69 +13,66 @@
 
 # tok_rpunct = {}
 
-def setup_tokens( parser ):
+def setup_tokens(parser):
     # global tok_rpunct
 # For compatibility, this produces the same constant values as Python 2.4.
-    parser.add_token( 'ENDMARKER' )
-    parser.add_token( 'NAME' )
-    parser.add_token( 'NUMBER' )
-    parser.add_token( 'STRING' )
-    parser.add_token( 'NEWLINE' )
-    parser.add_token( 'INDENT' )
-    parser.add_token( 'DEDENT' )
-    parser.add_token( 'LPAR',            "(" )
-    parser.add_token( 'RPAR',            ")" )
-    parser.add_token( 'LSQB',            "[" )
-    parser.add_token( 'RSQB',            "]" )
-    parser.add_token( 'COLON',           ":" )
-    parser.add_token( 'COMMA',           "," )
-    parser.add_token( 'SEMI',            ";" )
-    parser.add_token( 'PLUS',            "+" )
-    parser.add_token( 'MINUS',           "-" )
-    parser.add_token( 'STAR',            "*" )
-    parser.add_token( 'SLASH',           "/" )
-    parser.add_token( 'VBAR',            "|" )
-    parser.add_token( 'AMPER',           "&" )
-    parser.add_token( 'LESS',            "<" )
-    parser.add_token( 'GREATER',         ">" )
-    parser.add_token( 'EQUAL',           "=" )
-    parser.add_token( 'DOT',             "." )
-    parser.add_token( 'PERCENT',         "%" )
-    parser.add_token( 'BACKQUOTE',       "`" )
-    parser.add_token( 'LBRACE',          "{" )
-    parser.add_token( 'RBRACE',          "}" )
-    parser.add_token( 'EQEQUAL',         "==" )
-    ne = parser.add_token( 'NOTEQUAL',   "!=" )
+    from grammar import Token
+    parser.add_token(Token(parser, 'ENDMARKER' ))
+    parser.add_token(Token(parser, 'NAME' ))
+    parser.add_token(Token(parser, 'NUMBER' ))
+    parser.add_token(Token(parser, 'STRING' ))
+    parser.add_token(Token(parser, 'NEWLINE' ))
+    parser.add_token(Token(parser, 'INDENT' ))
+    parser.add_token(Token(parser, 'DEDENT' ))
+    parser.add_token(Token(parser, 'LPAR',            "(" ))
+    parser.add_token(Token(parser, 'RPAR',            ")" ))
+    parser.add_token(Token(parser, 'LSQB',            "[" ))
+    parser.add_token(Token(parser, 'RSQB',            "]" ))
+    parser.add_token(Token(parser, 'COLON',           ":" ))
+    parser.add_token(Token(parser, 'COMMA',           "," ))
+    parser.add_token(Token(parser, 'SEMI',            ";" ))
+    parser.add_token(Token(parser, 'PLUS',            "+" ))
+    parser.add_token(Token(parser, 'MINUS',           "-" ))
+    parser.add_token(Token(parser, 'STAR',            "*" ))
+    parser.add_token(Token(parser, 'SLASH',           "/" ))
+    parser.add_token(Token(parser, 'VBAR',            "|" ))
+    parser.add_token(Token(parser, 'AMPER',           "&" ))
+    parser.add_token(Token(parser, 'LESS',            "<" ))
+    parser.add_token(Token(parser, 'GREATER',         ">" ))
+    parser.add_token(Token(parser, 'EQUAL',           "=" ))
+    parser.add_token(Token(parser, 'DOT',             "." ))
+    parser.add_token(Token(parser, 'PERCENT',         "%" ))
+    parser.add_token(Token(parser, 'BACKQUOTE',       "`" ))
+    parser.add_token(Token(parser, 'LBRACE',          "{" ))
+    parser.add_token(Token(parser, 'RBRACE',          "}" ))
+    parser.add_token(Token(parser, 'EQEQUAL',         "==" ))
+    ne = parser.add_token(Token(parser, 'NOTEQUAL',   "!=" ))
     parser.tok_values["<>"] = ne
-    parser.add_token( 'LESSEQUAL',       "<=" )
-    parser.add_token( 'GREATEREQUAL',    ">=" )
-    parser.add_token( 'TILDE',           "~" )
-    parser.add_token( 'CIRCUMFLEX',      "^" )
-    parser.add_token( 'LEFTSHIFT',       "<<" )
-    parser.add_token( 'RIGHTSHIFT',      ">>" )
-    parser.add_token( 'DOUBLESTAR',      "**" )
-    parser.add_token( 'PLUSEQUAL',       "+=" )
-    parser.add_token( 'MINEQUAL',        "-=" )
-    parser.add_token( 'STAREQUAL',       "*=" )
-    parser.add_token( 'SLASHEQUAL',      "/=" )
-    parser.add_token( 'PERCENTEQUAL',    "%=" )
-    parser.add_token( 'AMPEREQUAL',      "&=" )
-    parser.add_token( 'VBAREQUAL',       "|=" )
-    parser.add_token( 'CIRCUMFLEXEQUAL', "^=" )
-    parser.add_token( 'LEFTSHIFTEQUAL',  "<<=" )
-    parser.add_token( 'RIGHTSHIFTEQUAL', ">>=" )
-    parser.add_token( 'DOUBLESTAREQUAL', "**=" )
-    parser.add_token( 'DOUBLESLASH',     "//" )
-    parser.add_token( 'DOUBLESLASHEQUAL',"//=" )
-    parser.add_token( 'AT',              "@" )
-    parser.add_token( 'OP' )
-    parser.add_token( 'ERRORTOKEN' )
+    parser.add_token(Token(parser, 'LESSEQUAL',       "<=" ))
+    parser.add_token(Token(parser, 'GREATEREQUAL',    ">=" ))
+    parser.add_token(Token(parser, 'TILDE',           "~" ))
+    parser.add_token(Token(parser, 'CIRCUMFLEX',      "^" ))
+    parser.add_token(Token(parser, 'LEFTSHIFT',       "<<" ))
+    parser.add_token(Token(parser, 'RIGHTSHIFT',      ">>" ))
+    parser.add_token(Token(parser, 'DOUBLESTAR',      "**" ))
+    parser.add_token(Token(parser, 'PLUSEQUAL',       "+=" ))
+    parser.add_token(Token(parser, 'MINEQUAL',        "-=" ))
+    parser.add_token(Token(parser, 'STAREQUAL',       "*=" ))
+    parser.add_token(Token(parser, 'SLASHEQUAL',      "/=" ))
+    parser.add_token(Token(parser, 'PERCENTEQUAL',    "%=" ))
+    parser.add_token(Token(parser, 'AMPEREQUAL',      "&=" ))
+    parser.add_token(Token(parser, 'VBAREQUAL',       "|=" ))
+    parser.add_token(Token(parser, 'CIRCUMFLEXEQUAL', "^=" ))
+    parser.add_token(Token(parser, 'LEFTSHIFTEQUAL',  "<<=" ))
+    parser.add_token(Token(parser, 'RIGHTSHIFTEQUAL', ">>=" ))
+    parser.add_token(Token(parser, 'DOUBLESTAREQUAL', "**=" ))
+    parser.add_token(Token(parser, 'DOUBLESLASH',     "//" ))
+    parser.add_token(Token(parser, 'DOUBLESLASHEQUAL',"//=" ))
+    parser.add_token(Token(parser, 'AT',              "@" ))
+    parser.add_token(Token(parser, 'OP' ))
+    parser.add_token(Token(parser, 'ERRORTOKEN' ))
 
 # extra PyPy-specific tokens
-    parser.add_token( "COMMENT" )
-    parser.add_token( "NL" )
+    parser.add_token(Token(parser, "COMMENT" ))
+    parser.add_token(Token(parser, "NL" ))
 
-    # tok_rpunct = parser.tok_values.copy()
-    # for _name, _value in parser.tokens.items():
-    # globals()[_name] = _value
-    # setattr(parser, _name, _value)

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_lookahead.py	Thu Oct 25 20:00:02 2007
@@ -1,7 +1,7 @@
-from pypy.interpreter.pyparser.grammar import Alternative, Sequence, KleeneStar, \
-     Token, Parser
+from pypy.interpreter.pyparser.grammar import Alternative,\
+     Sequence, KleeneStar, Token, EmptyToken, Parser
 
-class TestLookAheadBasics:
+class TestLookAheadBasics(object):
 
     def setup_method(self, method):
         self.parser = Parser()
@@ -31,7 +31,7 @@
         kstar2 = self.parser.KleeneStar_n("k2", 0, 3, tok1)
         self.parser.build_first_sets()
         assert kstar1.first_set == [tok1]
-        assert kstar2.first_set == [tok1, self.parser.EmptyToken]
+        assert kstar2.first_set == [tok1, EmptyToken]
 
 
     def test_maybe_empty_sequence(self):
@@ -43,7 +43,7 @@
         k2 = self.parser.KleeneStar_n("k2", 0, 2, tok2)
         seq = self.parser.Sequence_n( "seq", [k1, k2])
         self.parser.build_first_sets()
-        assert seq.first_set == [tok1, tok2, self.parser.EmptyToken]
+        assert seq.first_set == [tok1, tok2, EmptyToken]
 
 
     def test_not_empty_sequence(self):
@@ -68,7 +68,7 @@
 
 
 
-class TestLookAhead:
+class TestLookAhead(object):
 
      def setup_method(self, method):
          p = self.parser = Parser()
@@ -86,7 +86,7 @@
          p = self.parser
          LOW = p.tokens['LOW']
          CAP = p.tokens['CAP']
-         for s in  [Token(p, LOW, 'low'), p.EmptyToken, Token(p, CAP, 'cap')]:
+         for s in  [Token(p, LOW, 'low'), EmptyToken, Token(p, CAP, 'cap')]:
              assert s in self.A.first_set
              assert s in self.B.first_set
              assert s in self.C.first_set

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/test/test_parser.py	Thu Oct 25 20:00:02 2007
@@ -1,5 +1,5 @@
 from pypy.interpreter.pyparser.asthelper import get_atoms
-from pypy.interpreter.pyparser.grammar import Parser
+from pypy.interpreter.pyparser.grammar import Parser, Token
 from pypy.interpreter.pyparser import error
 from fakes import FakeSpace
 
@@ -7,7 +7,7 @@
 def test_symbols():
     p = Parser()
     x1 = p.add_symbol('sym')
-    x2 = p.add_token('tok')
+    x2 = p.add_token(Token(p, 'tok'))
     x3 = p.add_anon_symbol(':sym')
     x4 = p.add_anon_symbol(':sym1')
     # test basic numbering assumption
@@ -20,7 +20,7 @@
     assert x3 < 0
     y1 = p.add_symbol('sym')
     assert y1 == x1
-    y2 = p.add_token('tok')
+    y2 = p.add_token(Token(p, 'tok'))
     assert y2 == x2
     y3 = p.add_symbol(':sym')
     assert y3 == x3

Modified: pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/tuplebuilder.py
==============================================================================
--- pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/tuplebuilder.py	(original)
+++ pypy/branch/dist-future-fixing/pypy/interpreter/pyparser/tuplebuilder.py	Thu Oct 25 20:00:02 2007
@@ -57,13 +57,17 @@
     """A builder that directly produce the AST"""
 
     def __init__(self, parser, debug=0, lineno=True):
-        AbstractBuilder.__init__(self, parser, debug)
+        AbstractBuilder.__init__(self, debug)
         # This attribute is here for convenience
         self.source_encoding = None
         self.lineno = lineno
         self.stack = []
-        self.space_token = ( self.parser.tokens['NEWLINE'], self.parser.tokens['INDENT'],
-                             self.parser.tokens['DEDENT'], self.parser.tokens['ENDMARKER'] )
+        self.space_token = (parser.tokens['NEWLINE'],
+                            parser.tokens['INDENT'],
+                            parser.tokens['DEDENT'],
+                            parser.tokens['ENDMARKER'] )
+
+        self.parser = parser
 
     def context(self):
         """Returns the state of the builder to be restored later"""


