[pypy-svn] r14998 - pypy/branch/pypy-translation-snapshot/interpreter/pyparser

arigo at codespeak.net
Mon Jul 25 11:47:26 CEST 2005


Author: arigo
Date: Mon Jul 25 11:47:25 2005
New Revision: 14998

Modified:
   pypy/branch/pypy-translation-snapshot/interpreter/pyparser/grammar.py
   pypy/branch/pypy-translation-snapshot/interpreter/pyparser/pythonlexer.py
   pypy/branch/pypy-translation-snapshot/interpreter/pyparser/tuplebuilder.py
Log:
Merged revisions 14996 and 14997 from the trunk.


Modified: pypy/branch/pypy-translation-snapshot/interpreter/pyparser/grammar.py
==============================================================================
--- pypy/branch/pypy-translation-snapshot/interpreter/pyparser/grammar.py	(original)
+++ pypy/branch/pypy-translation-snapshot/interpreter/pyparser/grammar.py	Mon Jul 25 11:47:25 2005
@@ -651,7 +651,7 @@
             raise RuntimeError("Unexpected token type %r" % other)
         if other is EmptyToken:
             return False
-        res = other.codename == self.codename and self.value in (None, other.value)
+        res = other.codename == self.codename and self.value in [None, other.value]
         #print "matching", self, other, res
         return res
     

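A quick illustration of the one-character change above: the membership test keeps the same behaviour, only the container literal changes from a tuple to a list. The log does not state the motivation; reading it as a tweak for the RPython translator on this translation-snapshot branch is an assumption. The helper below uses hypothetical names and is not code from grammar.py:

    # self.value matches if it is a wildcard (None) or equal to other.value;
    # `x in [None, y]` and `x in (None, y)` return the same result.
    def value_matches(self_value, other_value):
        return self_value in [None, other_value]

    assert value_matches(None, "print") == (None in (None, "print"))
    assert value_matches("print", "print") == ("print" in (None, "print"))
    assert value_matches("else", "print") == ("else" in (None, "print"))
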
Modified: pypy/branch/pypy-translation-snapshot/interpreter/pyparser/pythonlexer.py
==============================================================================
--- pypy/branch/pypy-translation-snapshot/interpreter/pyparser/pythonlexer.py	(original)
+++ pypy/branch/pypy-translation-snapshot/interpreter/pyparser/pythonlexer.py	Mon Jul 25 11:47:25 2005
@@ -126,7 +126,7 @@
                 raise TokenError("EOF in multi-line string", line,
                                  (lnum, 0), token_list)
             endmatch = endDFA.recognize(line)
-            if -1 != endmatch:
+            if endmatch >= 0:
                 pos = end = endmatch
                 tok = Token(pytoken.STRING, contstr + line[:end])
                 token_list.append((tok, line, lnum, pos))
@@ -135,7 +135,8 @@
                 #                    strstart, (lnum, end), contline + line))
                 contstr, needcont = '', 0
                 contline = None
-            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
+            elif (needcont and not line.endswith('\\\n') and
+                               not line.endswith('\\\r\n')):
                 tok = Token(pytoken.ERRORTOKEN, contstr + line)
                 token_list.append((tok, line, lnum, pos))
                 last_comment = ''
@@ -193,10 +194,10 @@
 
         while pos < max:
             pseudomatch = pseudoDFA.recognize(line, pos)
-            if -1 != pseudomatch:                            # scan for tokens
+            if pseudomatch >= 0:                            # scan for tokens
                 # JDR: Modified
                 start = whiteSpaceDFA.recognize(line, pos)
-                if -1 == start:
+                if start < 0:
                     start = pos
                 end = pseudomatch
 
@@ -235,7 +236,7 @@
                 elif token in triple_quoted:
                     endDFA = endDFAs[token]
                     endmatch = endDFA.recognize(line, pos)
-                    if -1 != endmatch:                     # all on one line
+                    if endmatch >= 0:                     # all on one line
                         pos = endmatch
                         token = line[start:pos]
                         tok = Token(pytoken.STRING, token)

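The hunks above make two behaviour-preserving rewrites: `-1 != endmatch` becomes `endmatch >= 0`, and the negative-slice comparisons on `line` become `endswith()` calls. Below is a minimal standalone sketch of both, assuming recognize() only ever returns -1 for "no match" and a nonnegative end position otherwise (the sentinel convention visible in this file); the helper names are hypothetical:

    def has_match(endmatch):
        # old: -1 != endmatch    new: endmatch >= 0
        return endmatch >= 0

    def line_is_continued(line):
        # old negative-slice form: line[-2:] == '\\\n' or line[-3:] == '\\\r\n'
        return line.endswith('\\\n') or line.endswith('\\\r\n')

    assert has_match(-1) is False and has_match(0) is True
    assert line_is_continued('s = "abc" + \\\n') is True
    assert line_is_continued('s = "abc"\n') is False
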
Modified: pypy/branch/pypy-translation-snapshot/interpreter/pyparser/tuplebuilder.py
==============================================================================
--- pypy/branch/pypy-translation-snapshot/interpreter/pyparser/tuplebuilder.py	(original)
+++ pypy/branch/pypy-translation-snapshot/interpreter/pyparser/tuplebuilder.py	Mon Jul 25 11:47:25 2005
@@ -73,11 +73,12 @@
         num = rule.codename
         node = [rule.codename]
         if elts_number > 0:
-            sequence_elements = self.stack[-elts_number:]
+            sequence_elements = [self.stack.pop() for i in range(elts_number)]
+            sequence_elements.reverse()
             nodes = expand_nodes( sequence_elements )
-            self.stack[-elts_number:] = [NonTerminal(num, nodes)]
         else:
-            self.stack.append( NonTerminal(num, []) )
+            nodes = []
+        self.stack.append( NonTerminal(num, nodes) )
         return True
 
     def token(self, codename, value, source):

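The tuplebuilder.py hunk replaces the negative-slice read and slice assignment on self.stack with an explicit pop loop, a reverse, and a single append shared by both branches. A standalone sketch of why the two forms collect the same elements in the same order (hypothetical data; a plain tuple stands in for NonTerminal(num, nodes), and the guess that slice assignment is being avoided for the RPython translator's sake is an assumption):

    stack = ['a', 'b', 'c', 'd', 'e']
    elts_number = 3

    old_style = stack[-elts_number:]                    # ['c', 'd', 'e'] (read only)

    popped = [stack.pop() for i in range(elts_number)]  # ['e', 'd', 'c']
    popped.reverse()                                    # ['c', 'd', 'e']
    assert popped == old_style

    # one append replaces both the old slice assignment and the else-branch append
    stack.append(('NonTerminal', popped))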

