[pypy-svn] r13444 - pypy/branch/pycompiler/module/recparser

adim at codespeak.net
Wed Jun 15 16:28:37 CEST 2005


Author: adim
Date: Wed Jun 15 16:28:35 2005
New Revision: 13444

Modified:
   pypy/branch/pycompiler/module/recparser/grammar.py
   pypy/branch/pycompiler/module/recparser/pythonlexer.py
Log:
- made the annotator's life easier (pre-initialize locals that are only
  assigned conditionally, pass both arguments to the dummy automata.DFA,
  and skip the no-op TokenSource.__init__ call)
- use the local versions of automata.py / pytokenize.py in pythonlexer.py
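
For context: RPython's annotator requires every local variable to be bound,
with a single consistent type, on all paths that can reach a read of it,
which is why pos1/pos2 get dummy initializers in the grammar.py hunk below.
A minimal self-contained sketch of the pattern (the Source class and the
trace flag here are illustrative stand-ins, not the real grammar.py objects):

    def match(source, trace):
        # RPython: pos1/pos2 must be bound on every path before any read,
        # so give them dummy int values up front.
        pos1 = -1
        pos2 = -1
        if trace:
            pos1 = source.get_pos()   # the only "real" assignment
        # later code may read pos1/pos2 even on the untraced path
        return pos1, pos2

    class Source:
        def get_pos(self):
            return 0

    assert match(Source(), False) == (-1, -1)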



Modified: pypy/branch/pycompiler/module/recparser/grammar.py
==============================================================================
--- pypy/branch/pycompiler/module/recparser/grammar.py	(original)
+++ pypy/branch/pycompiler/module/recparser/grammar.py	Wed Jun 15 16:28:35 2005
@@ -164,7 +164,8 @@
         """
         if not USE_LOOKAHEAD:
             return self._match(source, builder, level)
-        
+        pos1 = -1 # XXX make the annotator happy
+        pos2 = -1 # XXX make the annotator happy
         token = source.peek()
         if self._trace:
             pos1 = source.get_pos()

Modified: pypy/branch/pycompiler/module/recparser/pythonlexer.py
==============================================================================
--- pypy/branch/pycompiler/module/recparser/pythonlexer.py	(original)
+++ pypy/branch/pycompiler/module/recparser/pythonlexer.py	Wed Jun 15 16:28:35 2005
@@ -41,8 +41,7 @@
     if encoding != '':
         return encoding
     return None
-
-
+    
 def _normalize_encoding(encoding):
     """returns normalized name for <encoding>
 
@@ -63,9 +62,9 @@
 
 ################################################################################
 import token as tokenmod
-from pypy.module.parser.pytokenize import tabsize, \
-     whiteSpaceDFA, triple_quoted, endDFAs, single_quoted, pseudoDFA
-from pypy.module.parser import automata
+from pytokenize import tabsize, whiteSpaceDFA, triple_quoted, endDFAs, \
+     single_quoted, pseudoDFA 
+import automata
 
 # adopt pytokenize notations / values
 tokenmod.COMMENT = tokenmod.N_TOKENS 
@@ -112,7 +111,7 @@
     strstart = (0, 0)
 
     lines.append('') # XXX HACK probably not needed
-    endDFA = automata.DFA([]) # XXX Make the translator happy
+    endDFA = automata.DFA([], []) # XXX Make the translator happy
     line = ''                 # XXX Make the translator happy
     for line in lines:
         lnum = lnum + 1
@@ -298,7 +297,7 @@
 class PythonSource(TokenSource):
     """This source uses Jonathan's tokenizer"""
     def __init__(self, inpstring):
-        TokenSource.__init__(self)
+        # TokenSource.__init__(self)
         tokens, encoding = generate_tokens(inpstring.splitlines(True))
         self.token_stack = tokens
         self.encoding = encoding
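
Two smaller points from the pythonlexer.py hunks, both inferred from the
diff rather than stated in the log: the local automata.DFA constructor now
takes two lists (presumably the state table and the accepting-state flags,
though that naming is an assumption here), and the TokenSource.__init__
call is commented out, presumably because the base class carries no state
the annotator needs to see. A hedged stand-in showing the assumed
two-argument signature:

    class DFA:
        # assumed signature, mirrored from the patched call site
        def __init__(self, states, accepts):
            self.states = states      # transition table (assumed meaning)
            self.accepts = accepts    # accepting-state flags (assumed meaning)

    # dummy instance whose only purpose, as the XXX comment says, is to
    # keep the translator happy until the loop rebinds endDFA
    endDFA = DFA([], [])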


