[Python-checkins] cpython: Issue #15054: Fix incorrect tokenization of 'b' string literals.

meador.inge python-checkins at python.org
Sun Jun 17 04:50:58 CEST 2012


http://hg.python.org/cpython/rev/115b0cb52c6c
changeset:   77475:115b0cb52c6c
parent:      77473:3680b3423aa3
user:        Meador Inge <meadori at gmail.com>
date:        Sat Jun 16 21:49:08 2012 -0500
summary:
  Issue #15054: Fix incorrect tokenization of 'b' string literals.

Patch by Serhiy Storchaka.

files:
  Lib/test/test_tokenize.py |  76 +++++++++++++++++++++++++++
  Lib/tokenize.py           |   2 +-
  Misc/NEWS                 |   4 +
  3 files changed, 81 insertions(+), 1 deletions(-)


diff --git a/Lib/test/test_tokenize.py b/Lib/test/test_tokenize.py
--- a/Lib/test/test_tokenize.py
+++ b/Lib/test/test_tokenize.py
@@ -289,6 +289,82 @@
     OP         '+'           (1, 29) (1, 30)
     STRING     'R"ABC"'      (1, 31) (1, 37)
 
+    >>> dump_tokens("u'abc' + U'abc'")
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     "u'abc'"      (1, 0) (1, 6)
+    OP         '+'           (1, 7) (1, 8)
+    STRING     "U'abc'"      (1, 9) (1, 15)
+    >>> dump_tokens('u"abc" + U"abc"')
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     'u"abc"'      (1, 0) (1, 6)
+    OP         '+'           (1, 7) (1, 8)
+    STRING     'U"abc"'      (1, 9) (1, 15)
+    >>> dump_tokens("ur'abc' + uR'abc' + Ur'abc' + UR'abc'")
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     "ur'abc'"     (1, 0) (1, 7)
+    OP         '+'           (1, 8) (1, 9)
+    STRING     "uR'abc'"     (1, 10) (1, 17)
+    OP         '+'           (1, 18) (1, 19)
+    STRING     "Ur'abc'"     (1, 20) (1, 27)
+    OP         '+'           (1, 28) (1, 29)
+    STRING     "UR'abc'"     (1, 30) (1, 37)
+    >>> dump_tokens('ur"abc" + uR"abc" + Ur"abc" + UR"abc"')
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     'ur"abc"'     (1, 0) (1, 7)
+    OP         '+'           (1, 8) (1, 9)
+    STRING     'uR"abc"'     (1, 10) (1, 17)
+    OP         '+'           (1, 18) (1, 19)
+    STRING     'Ur"abc"'     (1, 20) (1, 27)
+    OP         '+'           (1, 28) (1, 29)
+    STRING     'UR"abc"'     (1, 30) (1, 37)
+
+    >>> dump_tokens("b'abc' + B'abc'")
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     "b'abc'"      (1, 0) (1, 6)
+    OP         '+'           (1, 7) (1, 8)
+    STRING     "B'abc'"      (1, 9) (1, 15)
+    >>> dump_tokens('b"abc" + B"abc"')
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     'b"abc"'      (1, 0) (1, 6)
+    OP         '+'           (1, 7) (1, 8)
+    STRING     'B"abc"'      (1, 9) (1, 15)
+    >>> dump_tokens("br'abc' + bR'abc' + Br'abc' + BR'abc'")
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     "br'abc'"     (1, 0) (1, 7)
+    OP         '+'           (1, 8) (1, 9)
+    STRING     "bR'abc'"     (1, 10) (1, 17)
+    OP         '+'           (1, 18) (1, 19)
+    STRING     "Br'abc'"     (1, 20) (1, 27)
+    OP         '+'           (1, 28) (1, 29)
+    STRING     "BR'abc'"     (1, 30) (1, 37)
+    >>> dump_tokens('br"abc" + bR"abc" + Br"abc" + BR"abc"')
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     'br"abc"'     (1, 0) (1, 7)
+    OP         '+'           (1, 8) (1, 9)
+    STRING     'bR"abc"'     (1, 10) (1, 17)
+    OP         '+'           (1, 18) (1, 19)
+    STRING     'Br"abc"'     (1, 20) (1, 27)
+    OP         '+'           (1, 28) (1, 29)
+    STRING     'BR"abc"'     (1, 30) (1, 37)
+    >>> dump_tokens("rb'abc' + rB'abc' + Rb'abc' + RB'abc'")
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     "rb'abc'"     (1, 0) (1, 7)
+    OP         '+'           (1, 8) (1, 9)
+    STRING     "rB'abc'"     (1, 10) (1, 17)
+    OP         '+'           (1, 18) (1, 19)
+    STRING     "Rb'abc'"     (1, 20) (1, 27)
+    OP         '+'           (1, 28) (1, 29)
+    STRING     "RB'abc'"     (1, 30) (1, 37)
+    >>> dump_tokens('rb"abc" + rB"abc" + Rb"abc" + RB"abc"')
+    ENCODING   'utf-8'       (0, 0) (0, 0)
+    STRING     'rb"abc"'     (1, 0) (1, 7)
+    OP         '+'           (1, 8) (1, 9)
+    STRING     'rB"abc"'     (1, 10) (1, 17)
+    OP         '+'           (1, 18) (1, 19)
+    STRING     'Rb"abc"'     (1, 20) (1, 27)
+    OP         '+'           (1, 28) (1, 29)
+    STRING     'RB"abc"'     (1, 30) (1, 37)
+
 Operators
 
     >>> dump_tokens("def d22(a, b, c=2, d=2, *k): pass")
diff --git a/Lib/tokenize.py b/Lib/tokenize.py
--- a/Lib/tokenize.py
+++ b/Lib/tokenize.py
@@ -127,7 +127,7 @@
 Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
 Number = group(Imagnumber, Floatnumber, Intnumber)
 
-StringPrefix = r'(?:[uU][rR]?|[bB][rR]|[rR][bB]|[rR]|[uU])?'
+StringPrefix = r'(?:[uUbB][rR]?|[rR][bB]?)?'
 
 # Tail end of ' string.
 Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
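
The root cause is visible in the StringPrefix change above: the old pattern
only accepted 'b'/'B' when immediately followed by 'r'/'R' (the [bB][rR]
alternative), so a bare b-prefix was never consumed and b'abc' was split into
a NAME token 'b' followed by a STRING token "'abc'". The rewritten pattern
folds the redundant alternatives into [uUbB][rR]? and [rR][bB]?, covering
every legal prefix (u, b, r, br, rb, in any case) in two alternatives. A small
standalone sketch of the two patterns (the prefix_len helper is illustrative,
not part of the patch):

    import re

    # Prefix patterns copied from Lib/tokenize.py, before and after this change.
    OLD_PREFIX = r'(?:[uU][rR]?|[bB][rR]|[rR][bB]|[rR]|[uU])?'
    NEW_PREFIX = r'(?:[uUbB][rR]?|[rR][bB]?)?'

    def prefix_len(pattern, source):
        # The prefix group is optional, so re.match always succeeds;
        # .end() reports how many characters the prefix consumed.
        return re.match(pattern, source).end()

    print(prefix_len(OLD_PREFIX, "b'abc'"))   # 0 -- bare 'b' not recognized
    print(prefix_len(NEW_PREFIX, "b'abc'"))   # 1 -- 'b' consumed as a prefix
    print(prefix_len(OLD_PREFIX, "br'abc'"))  # 2 -- 'br' already worked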
diff --git a/Misc/NEWS b/Misc/NEWS
--- a/Misc/NEWS
+++ b/Misc/NEWS
@@ -27,6 +27,10 @@
 Library
 -------
 
+- Issue #15054: A bug in tokenize.tokenize that caused string literals
+  with 'b' prefixes to be incorrectly tokenized has been fixed.
+  Patch by Serhiy Storchaka.
+
 - Issue #15006: Allow equality comparison between naive and aware
   time or datetime objects.
 

-- 
Repository URL: http://hg.python.org/cpython

