[Python-checkins] cpython (2.7): Issue #8478: Untokenizer.compat now processes first token from iterator input.

terry.reedy python-checkins at python.org
Tue Feb 18 05:17:23 CET 2014


http://hg.python.org/cpython/rev/c2517a37c13a
changeset:   89240:c2517a37c13a
branch:      2.7
parent:      89232:c896d292080a
user:        Terry Jan Reedy <tjreedy at udel.edu>
date:        Mon Feb 17 23:12:07 2014 -0500
summary:
  Issue #8478: Untokenizer.compat now processes first token from iterator input.
Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.
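For context, a minimal sketch of the behavior this change enables (it mirrors
the new test added below and assumes the patch is applied on the 2.7 branch):

    from token import NAME
    from tokenize import untokenize

    # A stream of bare 2-tuple tokens passed as an iterator: Untokenizer.compat
    # now receives the first token instead of losing it, so the round trip works.
    assert untokenize(iter([(NAME, 'Hello')])) == 'Hello '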

files:
  Lib/test/test_tokenize.py |  10 +++++++++-
  Lib/tokenize.py           |  16 +++++++---------
  Misc/NEWS                 |   3 +++
  3 files changed, 19 insertions(+), 10 deletions(-)


diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -627,9 +627,17 @@
                 'start (1,3) precedes previous end (2,2)')
         self.assertRaises(ValueError, u.add_whitespace, (2,1))
 
+    def test_iter_compat(self):
+        u = Untokenizer()
+        token = (NAME, 'Hello')
+        u.compat(token, iter([]))
+        self.assertEqual(u.tokens, ["Hello "])
+        u = Untokenizer()
+        self.assertEqual(u.untokenize(iter([token])), 'Hello ')
+
+
 __test__ = {"doctests" : doctests, 'decistmt': decistmt}
 
-
 def test_main():
     from test import test_tokenize
     test_support.run_doctest(test_tokenize, True)
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -26,6 +26,7 @@
 __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
                'Skip Montanaro, Raymond Hettinger')
 
+from itertools import chain
 import string, re
 from token import *
 
@@ -192,9 +193,10 @@
             self.tokens.append(" " * col_offset)
 
     def untokenize(self, iterable):
-        for t in iterable:
+        it = iter(iterable)
+        for t in it:
             if len(t) == 2:
-                self.compat(t, iterable)
+                self.compat(t, it)
                 break
             tok_type, token, start, end, line = t
             self.add_whitespace(start)
@@ -206,16 +208,12 @@
         return "".join(self.tokens)
 
     def compat(self, token, iterable):
-        startline = False
         indents = []
         toks_append = self.tokens.append
-        toknum, tokval = token
-        if toknum in (NAME, NUMBER):
-            tokval += ' '
-        if toknum in (NEWLINE, NL):
-            startline = True
+        startline = token[0] in (NEWLINE, NL)
         prevstring = False
-        for tok in iterable:
+        
+        for tok in chain([token], iterable):
             toknum, tokval = tok[:2]
 
             if toknum in (NAME, NUMBER):
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -42,6 +42,9 @@
 - Issue #17671: Fixed a crash when use non-initialized io.BufferedRWPair.
   Based on patch by Stephen Tu.
 
+- Issue #8478: Untokenizer.compat processes first token from iterator input.
+  Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.  
+
 - Issue #20594: Avoid name clash with the libc function posix_close.
 
 - Issue #19856: shutil.move() failed to move a directory to other directory

-- 
Repository URL: http://hg.python.org/cpython

