[Python-checkins] r78603 - in python/trunk: Lib/test/test_pep263.py Misc/NEWS Parser/tokenizer.c

victor.stinner python-checkins at python.org
Wed Mar 3 00:20:02 CET 2010


Author: victor.stinner
Date: Wed Mar  3 00:20:02 2010
New Revision: 78603

Log:
Issue #7820: The parser tokenizer restores all bytes in the right order if
the BOM check fails.

Fix an assertion failure in pydebug mode.
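
For illustration, here is a toy sketch of the ordering requirement; PushbackStream and its get_char()/unget_char() methods are made up for this example and are not CPython's tokenizer API. The pushback buffer behaves like a stack, so the probed bytes have to be pushed back in the reverse order they were read for the stream to yield them in their original order again:

    # Toy illustration only -- not the real tokenizer.
    class PushbackStream(object):
        def __init__(self, data):
            self.data = list(data)
            self.pushed = []               # LIFO pushback buffer

        def get_char(self):
            if self.pushed:
                return self.pushed.pop()
            return self.data.pop(0) if self.data else ''

        def unget_char(self, ch):
            self.pushed.append(ch)

    stream = PushbackStream('\xef\xbb\x20print 1')
    ch1 = stream.get_char()                # '\xef': first byte of the UTF-8 BOM
    ch2 = stream.get_char()                # '\xbb': second byte matches too
    ch3 = stream.get_char()                # ' '  : not a complete BOM after all
    for ch in (ch3, ch2, ch1):             # push back in reverse read order
        stream.unget_char(ch)
    assert stream.get_char() == '\xef'     # original byte order is restored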


Modified:
   python/trunk/Lib/test/test_pep263.py
   python/trunk/Misc/NEWS
   python/trunk/Parser/tokenizer.c

Modified: python/trunk/Lib/test/test_pep263.py
==============================================================================
--- python/trunk/Lib/test/test_pep263.py	(original)
+++ python/trunk/Lib/test/test_pep263.py	Wed Mar  3 00:20:02 2010
@@ -30,6 +30,17 @@
         self.assertEqual(d['a'], d['b'])
         self.assertEqual(len(d['a']), len(d['b']))
 
+    def test_issue7820(self):
+        # Ensure that check_bom() restores all bytes in the right order
+        # when the BOM check fails in pydebug mode: the buffer starts with
+        # the first byte of a valid BOM, but the next bytes are different.
+
+        # one byte in common with the UTF-16-LE BOM
+        self.assertRaises(SyntaxError, eval, '\xff\x20')
+
+        # two bytes in common with the UTF-8 BOM
+        self.assertRaises(SyntaxError, eval, '\xef\xbb\x20')
+
 def test_main():
     test_support.run_unittest(PEP263Test)
 
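As a quick manual check (assuming a Python 2 interpreter built from this revision), both inputs used by the new test begin like a BOM but never complete one, so compiling them must push the probed bytes back and fail with a plain SyntaxError instead of tripping the pydebug assertion:

    # Both sources start with a partial BOM and must be rejected cleanly.
    for src in ('\xff\x20', '\xef\xbb\x20'):
        try:
            eval(src)
        except SyntaxError, exc:
            print '%r rejected: %s' % (src, exc)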

Modified: python/trunk/Misc/NEWS
==============================================================================
--- python/trunk/Misc/NEWS	(original)
+++ python/trunk/Misc/NEWS	Wed Mar  3 00:20:02 2010
@@ -12,6 +12,9 @@
 Core and Builtins
 -----------------
 
+- Issue #7820: The parser tokenizer restores all bytes in the right order
+  if the BOM check fails.
+
 - Issue #7309: Fix unchecked attribute access when converting
   UnicodeEncodeError, UnicodeDecodeError, and UnicodeTranslateError to
   strings.

Modified: python/trunk/Parser/tokenizer.c
==============================================================================
--- python/trunk/Parser/tokenizer.c	(original)
+++ python/trunk/Parser/tokenizer.c	Wed Mar  3 00:20:02 2010
@@ -312,47 +312,57 @@
 	  int set_readline(struct tok_state *, const char *),
 	  struct tok_state *tok)
 {
-	int ch = get_char(tok);
+	int ch1, ch2, ch3;
+	ch1 = get_char(tok);
 	tok->decoding_state = 1;
-	if (ch == EOF) {
+	if (ch1 == EOF) {
 		return 1;
-	} else if (ch == 0xEF) {
-		ch = get_char(tok);
-		if (ch != 0xBB)
-			goto NON_BOM;
-		ch = get_char(tok);
-		if (ch != 0xBF)
-			goto NON_BOM;
+	} else if (ch1 == 0xEF) {
+		ch2 = get_char(tok);
+		if (ch2 != 0xBB) {
+			unget_char(ch2, tok);
+			unget_char(ch1, tok);
+			return 1;
+		}
+		ch3 = get_char(tok);
+		if (ch3 != 0xBF) {
+			unget_char(ch3, tok);
+			unget_char(ch2, tok);
+			unget_char(ch1, tok);
+			return 1;
+		}
 #if 0
 	/* Disable support for UTF-16 BOMs until a decision
 	   is made whether this needs to be supported.  */
-	} else if (ch == 0xFE) {
-		ch = get_char(tok);
-		if (ch != 0xFF)
-			goto NON_BOM;
+	} else if (ch1 == 0xFE) {
+		ch2 = get_char(tok);
+		if (ch2 != 0xFF) {
+			unget_char(ch2, tok);
+			unget_char(ch1, tok);
+			return 1;
+		}
 		if (!set_readline(tok, "utf-16-be"))
 			return 0;
 		tok->decoding_state = -1;
-	} else if (ch == 0xFF) {
-		ch = get_char(tok);
-		if (ch != 0xFE)
-			goto NON_BOM;
+	} else if (ch1 == 0xFF) {
+		ch2 = get_char(tok);
+		if (ch2 != 0xFE) {
+			unget_char(ch2, tok);
+			unget_char(ch1, tok);
+			return 1;
+		}
 		if (!set_readline(tok, "utf-16-le"))
 			return 0;
 		tok->decoding_state = -1;
 #endif
 	} else {
-		unget_char(ch, tok);
+		unget_char(ch1, tok);
 		return 1;
 	}
 	if (tok->encoding != NULL)
 		PyMem_FREE(tok->encoding);
 	tok->encoding = new_string("utf-8", 5);	/* resulting is in utf-8 */
 	return 1;
-  NON_BOM:
-	/* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */
-	unget_char(0xFF, tok);	/* XXX this will cause a syntax error */
-	return 1;
 }
 
 /* Read a line of text from TOK into S, using the stream in TOK.

