[pypy-svn] r49081 - in pypy/dist/pypy/interpreter: . pyparser pyparser/data pyparser/test
ac at codespeak.net
Sun Nov 25 15:45:03 CET 2007
Author: ac
Date: Sun Nov 25 15:45:01 2007
New Revision: 49081
Removed:
pypy/dist/pypy/interpreter/pyparser/test/test_astbuilder_future.py
Modified:
pypy/dist/pypy/interpreter/pycompiler.py
pypy/dist/pypy/interpreter/pyparser/astbuilder.py
pypy/dist/pypy/interpreter/pyparser/data/Grammar2.5a
pypy/dist/pypy/interpreter/pyparser/ebnfparse.py
pypy/dist/pypy/interpreter/pyparser/future.py
pypy/dist/pypy/interpreter/pyparser/grammar.py
pypy/dist/pypy/interpreter/pyparser/pythonlexer.py
pypy/dist/pypy/interpreter/pyparser/pythonparse.py
pypy/dist/pypy/interpreter/pyparser/test/test_futureautomaton.py
pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py
pypy/dist/pypy/interpreter/pyparser/test/test_parser.py
pypy/dist/pypy/interpreter/pyparser/test/test_pytokenizer.py
Log:
Merge the __future__ mending from the dist-future-fixing branch, but skip the parser refactorings.
Modified: pypy/dist/pypy/interpreter/pycompiler.py
==============================================================================
--- pypy/dist/pypy/interpreter/pycompiler.py (original)
+++ pypy/dist/pypy/interpreter/pycompiler.py Sun Nov 25 15:45:01 2007
@@ -2,10 +2,12 @@
General classes for bytecode compilers.
Compiler instances are stored into 'space.getexecutioncontext().compiler'.
"""
+
+import sys
from codeop import PyCF_DONT_IMPLY_DEDENT
from pypy.interpreter.error import OperationError
-class AbstractCompiler:
+class AbstractCompiler(object):
"""Abstract base class for a bytecode compiler."""
# The idea is to grow more methods here over the time,
@@ -78,42 +80,53 @@
# faked compiler
import warnings
-from pypy.tool import stdlib___future__
-compiler_flags = 0
-compiler_features = {}
-for fname in stdlib___future__.all_feature_names:
- flag = getattr(stdlib___future__, fname).compiler_flag
- compiler_flags |= flag
- compiler_features[fname] = flag
-allowed_flags = compiler_flags | PyCF_DONT_IMPLY_DEDENT
-
-def get_flag_names(space, flags):
- if flags & ~allowed_flags:
- raise OperationError(space.w_ValueError,
- space.wrap("compile(): unrecognized flags"))
- flag_names = []
- for name, value in compiler_features.items():
- if flags & value:
- flag_names.append( name )
- return flag_names
+
+## from pypy.tool import stdlib___future__
+## compiler_flags = 0
+## compiler_features = {}
+## for fname in stdlib___future__.all_feature_names:
+## flag = getattr(stdlib___future__, fname).compiler_flag
+## compiler_flags |= flag
+## compiler_features[fname] = flag
+## allowed_flags = compiler_flags | PyCF_DONT_IMPLY_DEDENT
+
+## def get_flag_names(space, flags):
+## if flags & ~allowed_flags:
+## raise OperationError(space.w_ValueError,
+## space.wrap("compile(): unrecognized flags"))
+## flag_names = []
+## for name, value in compiler_features.items():
+## if flags & value:
+## flag_names.append( name )
+## return flag_names
class PyCodeCompiler(AbstractCompiler):
"""Base class for compilers producing PyCode objects."""
def getcodeflags(self, code):
+ """Return the __future__ compiler flags that were used to compile
+ the given code object."""
from pypy.interpreter.pycode import PyCode
if isinstance(code, PyCode):
- return code.co_flags & compiler_flags
+ return code.co_flags & self.compiler_flags
else:
return 0
+from pypy.interpreter.pyparser import future
class CPythonCompiler(PyCodeCompiler):
"""Faked implementation of a compiler, using the underlying compile()."""
+ def __init__(self, space):
+ self.space = space
+ self.w_compile_hook = space.w_None
+ if sys.version_info >= (2.5):
+ self.compiler_flags = future.futureFlags_2_5.allowed_flags
+ else:
+ self.compiler_flags = future.futureFlags_2_4.allowed_flags
+
def compile(self, source, filename, mode, flags):
- flags |= stdlib___future__.generators.compiler_flag # always on (2.2 compat)
space = self.space
try:
old = self.setup_warn_explicit(warnings)
@@ -191,6 +204,7 @@
########
+
class PythonAstCompiler(PyCodeCompiler):
"""Uses the stdlib's python implementation of compiler
@@ -206,6 +220,11 @@
self.grammar_version = override_version or space.config.objspace.pyversion
self.parser = make_pyparser(self.grammar_version)
self.additional_rules = {}
+ if self.grammar_version >= '2.5':
+ self.futureFlags = future.futureFlags_2_5
+ else:
+ self.futureFlags = future.futureFlags_2_4
+ self.compiler_flags = self.futureFlags.allowed_flags
def compile(self, source, filename, mode, flags):
from pyparser.error import SyntaxError
@@ -218,13 +237,16 @@
from pypy.interpreter.pycode import PyCode
from pypy.interpreter.function import Function
- flags |= stdlib___future__.generators.compiler_flag # always on (2.2 compat)
+ from pypy.interpreter.pyparser.future import getFutures
+
+## flags |= stdlib___future__.generators.compiler_flag # always on (2.2 compat)
space = self.space
try:
builder = AstBuilder(self.parser, self.grammar_version, space=space)
for rulename, buildfunc in self.additional_rules.iteritems():
assert isinstance(buildfunc, Function)
builder.user_build_rules[rulename] = buildfunc
+ flags |= getFutures(self.futureFlags, source)
self.parser.parse_source(source, mode, builder, flags)
ast_tree = builder.rule_stack[-1]
encoding = builder.source_encoding
@@ -244,7 +266,7 @@
raise
try:
astcompiler.misc.set_filename(filename, ast_tree)
- flag_names = get_flag_names(space, flags)
+ flag_names = self.futureFlags.get_flag_names(space, flags)
if mode == 'exec':
codegenerator = ModuleCodeGenerator(space, ast_tree, flag_names)
elif mode == 'single':
@@ -257,7 +279,7 @@
e.wrap_info(space, filename))
except (ValueError, TypeError), e:
raise OperationError(space.w_SystemError, space.wrap(str(e)))
- assert isinstance(c,PyCode)
+ assert isinstance(c, PyCode)
return c
Modified: pypy/dist/pypy/interpreter/pyparser/astbuilder.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/astbuilder.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/astbuilder.py Sun Nov 25 15:45:01 2007
@@ -872,52 +872,9 @@
names.append((name, as_name))
if index < l: # case ','
index += 1
-## if from_name == '__future__':
-## for name, asname in names:
-## if name == 'with_statement':
-## # found from __future__ import with_statement
-## if not builder.with_enabled:
-## builder.enable_with()
-## #raise pythonparse.AlternateGrammarException()
builder.push(ast.From(from_name, names, atoms[0].lineno))
-def build_future_import_feature(builder, nb):
- """
- future_import_feature: NAME [('as'|NAME) NAME]
-
- Enables python language future imports. Called once per feature imported,
- no matter how you got to this one particular feature.
- """
-
- atoms = peek_atoms(builder, nb)
-
- feature_name = atoms[0].get_value()
- assert type(feature_name) is str
- space = builder.space
- feature_code = space.unwrap(space.appexec([space.wrap(feature_name)],
- """(feature):
- import __future__ as f
- feature = getattr(f, feature, None)
- return feature and feature.compiler_flag or 0
- """))
-
- # We will call a method on the parser (the method exists only in unit
- # tests).
- if feature_code == consts.CO_FUTURE_WITH_STATEMENT:
- rules = """
- compound_stmt: (if_stmt | while_stmt | for_stmt | try_stmt |
- funcdef | classdef | with_stmt)
- with_stmt: 'with' test [ 'as' expr ] ':' suite
- """
- builder.insert_grammar_rule(rules, {
- 'with_stmt': build_with_stmt})
-
- # We need to keep the rule on the stack so we can share atoms
- # with a later rule
- return True
-
-
def build_yield_stmt(builder, nb):
atoms = get_atoms(builder, nb)
builder.push(ast.Yield(atoms[1], atoms[0].lineno))
@@ -1089,6 +1046,7 @@
'exprlist' : build_exprlist,
'decorator' : build_decorator,
'eval_input' : build_eval_input,
+ 'with_stmt' : build_with_stmt,
}
@@ -1099,28 +1057,15 @@
self.d = len(rule_stack)
class AstBuilder(BaseGrammarBuilder):
- """A builder that directly produce the AST"""
+ """A builder that directly produces the AST"""
def __init__(self, parser, grammar_version, debug=0, space=None):
BaseGrammarBuilder.__init__(self, parser, debug)
self.rule_stack = []
self.space = space
self.source_encoding = None
-## self.with_enabled = False
self.build_rules = ASTRULES_Template
self.user_build_rules = {}
- if grammar_version >= "2.5":
- self.build_rules.update({
- 'future_import_feature': build_future_import_feature,
- 'import_from_future': build_import_from,
- })
-
-## def enable_with(self):
-## if self.with_enabled:
-## return
-## self.with_enabled = True
-## # XXX
-## # self.keywords.update({'with':None, 'as': None})
def context(self):
return AstBuilderContext(self.rule_stack)
@@ -1156,7 +1101,9 @@
self.push(astnode)
else:
builder_func = self.build_rules.get(rulename, None)
- if not builder_func or builder_func(self, 1):
+ if builder_func:
+ builder_func(self, 1)
+ else:
self.push_rule(rule.codename, 1, source)
else:
self.push_rule(rule.codename, 1, source)
@@ -1176,7 +1123,9 @@
self.push(astnode)
else:
builder_func = self.build_rules.get(rulename, None)
- if not builder_func or builder_func(self, elts_number):
+ if builder_func:
+ builder_func(self, elts_number)
+ else:
self.push_rule(rule.codename, elts_number, source)
else:
self.push_rule(rule.codename, elts_number, source)
@@ -1227,14 +1176,6 @@
else:
return None
- def insert_grammar_rule(self, rule, buildfuncs):
- """Inserts new grammar rules for the builder
- This allows to change the rules during the parsing
- """
- self.parser.insert_rule(rule)
- self.build_rules.update(buildfuncs)
-
-
def show_stack(before, after):
"""debugging helper function"""
size1 = len(before)
Modified: pypy/dist/pypy/interpreter/pyparser/data/Grammar2.5a
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/data/Grammar2.5a (original)
+++ pypy/dist/pypy/interpreter/pyparser/data/Grammar2.5a Sun Nov 25 15:45:01 2007
@@ -24,16 +24,10 @@
# file_input is a module or sequence of commands read from an input file;
# eval_input is the input for the eval() and input() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
-
-single_input: NEWLINE | future_import_list [';'] | [future_import_list ';'] simple_stmt | compound_stmt NEWLINE
-file_input: (future_import_list [';'] NEWLINE)* (NEWLINE | stmt)* ENDMARKER
+single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
+file_input: (NEWLINE | stmt)* ENDMARKER
eval_input: testlist NEWLINE* ENDMARKER
-future_import_list: import_from_future (';' import_from_future)*
-import_from_future: 'from' '__future__' 'import' ('(' future_import_as_names [','] ')' | future_import_as_names)
-future_import_feature: NAME [('as'|NAME) NAME]
-future_import_as_names: future_import_feature (',' future_import_feature)*
-
decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
decorators: decorator+
funcdef: [decorators] 'def' NAME parameters ':' suite
@@ -69,12 +63,13 @@
exec_stmt: 'exec' expr ['in' test [',' test]]
assert_stmt: 'assert' test [',' test]
-compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
+compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
while_stmt: 'while' test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite (except_clause ':' suite)+ #diagram:break
['else' ':' suite] | 'try' ':' suite 'finally' ':' suite)
+with_stmt: 'with' test [ 'as' expr ] ':' suite
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test [',' test]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
Modified: pypy/dist/pypy/interpreter/pyparser/ebnfparse.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/ebnfparse.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/ebnfparse.py Sun Nov 25 15:45:01 2007
@@ -40,9 +40,8 @@
class NameToken(Token):
"""A token that is not a keyword"""
- def __init__(self, parser, keywords=None):
+ def __init__(self, parser):
Token.__init__(self, parser, parser.tokens['NAME'])
- self.keywords = keywords
def match(self, source, builder, level=0):
"""Matches a token.
@@ -60,7 +59,7 @@
if tk.codename == self.codename:
# XXX (adim): this is trunk's keyword management
# if tk.value not in builder.keywords:
- if tk.value not in self.keywords:
+ if not tk.isKeyword:
ret = builder.token( tk.codename, tk.value, source )
return ret
source.restore( ctx )
@@ -78,7 +77,7 @@
return False
# XXX (adim): this is trunk's keyword management
# if other.value in builder.keywords:
- if other.value in self.keywords:
+ if other.isKeyword:
return False
return True
@@ -107,7 +106,7 @@
self.keywords = []
NAME = dest_parser.add_token('NAME')
# NAME = dest_parser.tokens['NAME']
- self.tokens[NAME] = NameToken(dest_parser, keywords=self.keywords)
+ self.tokens[NAME] = NameToken(dest_parser)
def context(self):
return EBNFBuilderContext(len(self.rule_stack), self.seqcounts, self.altcounts)
Modified: pypy/dist/pypy/interpreter/pyparser/future.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/future.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/future.py Sun Nov 25 15:45:01 2007
@@ -27,8 +27,8 @@
from pypy.interpreter.astcompiler.consts import CO_GENERATOR_ALLOWED, \
CO_FUTURE_DIVISION, CO_FUTURE_WITH_STATEMENT
-def getFutures(source):
- futures = FutureAutomaton(source)
+def getFutures(futureFlags, source):
+ futures = FutureAutomaton(futureFlags, source)
try:
futures.start()
except (IndexError, DoneException), e:
@@ -62,9 +62,9 @@
precede a future statement.
"""
- def __init__(self, string):
+ def __init__(self, futureFlags, string):
+ self.futureFlags = futureFlags
self.s = string
- self.end = len(string)
self.pos = 0
self.docstringConsumed = False
self.flags = 0
@@ -237,10 +237,40 @@
self.getMore(parenList=parenList)
def setFlag(self, feature):
- if feature == "division":
- self.flags |= CO_FUTURE_DIVISION
- elif feature == "generators":
- self.flags |= CO_GENERATOR_ALLOWED
- elif feature == "with_statement":
- self.flags |= CO_FUTURE_WITH_STATEMENT
+ try:
+ self.flags |= self.futureFlags.compiler_features[feature]
+ except KeyError:
+ pass
+
+from codeop import PyCF_DONT_IMPLY_DEDENT
+from pypy.interpreter.error import OperationError
+
+from pypy.tool import stdlib___future__ as future
+
+class FutureFlags(object):
+ def __init__(self, version):
+ compiler_flags = 0
+ self.compiler_features = {}
+ self.mandatory_flags = 0
+ for fname in future.all_feature_names:
+ feature = getattr(future, fname)
+ if version >= feature.getOptionalRelease():
+ flag = feature.compiler_flag
+ compiler_flags |= flag
+ self.compiler_features[fname] = flag
+ if version >= feature.getMandatoryRelease():
+ self.mandatory_flags |= feature.compiler_flag
+ self.allowed_flags = compiler_flags | PyCF_DONT_IMPLY_DEDENT
+
+ def get_flag_names(self, space, flags):
+ if flags & ~self.allowed_flags:
+ raise OperationError(space.w_ValueError,
+ space.wrap("compile(): unrecognized flags"))
+ flag_names = []
+ for name, value in self.compiler_features.items():
+ if flags & value:
+ flag_names.append(name)
+ return flag_names
+futureFlags_2_4 = FutureFlags((2, 4, 4, 'final', 0))
+futureFlags_2_5 = FutureFlags((2, 5, 0, 'final', 0))
Modified: pypy/dist/pypy/interpreter/pyparser/grammar.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/grammar.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/grammar.py Sun Nov 25 15:45:01 2007
@@ -656,6 +656,7 @@
class Token(GrammarElement):
"""Represents a Token in a grammar rule (a lexer token)"""
+ isKeyword = True
def __init__(self, parser, codename, value=None):
GrammarElement.__init__(self, parser, codename)
self.value = value
@@ -680,7 +681,7 @@
ctx = source.context()
tk = source.next()
- if tk.codename == self.codename:
+ if tk.codename == self.codename and tk.isKeyword:
if self.value is None:
ret = builder.token( tk.codename, tk.value, source )
return ret
@@ -711,12 +712,7 @@
raise RuntimeError("Unexpected token type")
if other is self.parser.EmptyToken:
return False
- # XXX (adim): this is trunk's keyword management
- # if (self.value is not None and builder.keywords is not None
- # and self.value not in builder.keywords):
- # return False
- res = other.codename == self.codename and self.value in [None, other.value]
- #print "matching", self, other, res
+ res = other.isKeyword and other.codename == self.codename and self.value in [None, other.value]
return res
def __eq__(self, other):
Modified: pypy/dist/pypy/interpreter/pyparser/pythonlexer.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/pythonlexer.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/pythonlexer.py Sun Nov 25 15:45:01 2007
@@ -62,7 +62,7 @@
SyntaxError.__init__(self, msg, lineno, offset, line)
self.token_stack = token_stack
-def generate_tokens( parser, lines, flags):
+def generate_tokens( parser, lines, flags, keywords):
"""
This is a rewrite of pypy.module.parser.pytokenize.generate_tokens since
the original function is not RPYTHON (uses yield)
@@ -246,6 +246,8 @@
last_comment = ''
elif initial in namechars: # ordinary name
tok = Token(parser, parser.tokens['NAME'], token)
+ if token not in keywords:
+ tok.isKeyword = False
token_list.append((tok, line, lnum, pos))
last_comment = ''
elif initial == '\\': # continued stmt
@@ -302,12 +304,12 @@
class PythonSource(TokenSource):
"""This source uses Jonathan's tokenizer"""
- def __init__(self, parser, strings, flags=0):
+ def __init__(self, parser, strings, keywords, flags=0):
# TokenSource.__init__(self)
#self.parser = parser
self.input = strings
- tokens = generate_tokens( parser, strings, flags)
+ tokens = generate_tokens( parser, strings, flags, keywords)
self.token_stack = tokens
self._current_line = '' # the current line (as a string)
self._lineno = -1
@@ -361,13 +363,6 @@
def get_pos(self):
return self.stack_pos
-# Old code. Can't work, as self.stack_pos returns an int and the else branch
-# returns a tuple.
-## if self.stack_pos >= len(self.token_stack):
-## return self.stack_pos
-## else:
-## token, line, lnum, pos = self.token_stack[self.stack_pos]
-## return lnum, pos
def get_source_text(self, p0, p1):
"We get passed two token stack positions."
@@ -376,21 +371,6 @@
def debug(self):
"""return context for debug information"""
return (self._current_line, self._lineno)
- # return 'line %s : %s' % ('XXX', self._current_line)
-
-#NONE_LIST = [pytoken.ENDMARKER, pytoken.INDENT, pytoken.DEDENT]
-#NAMED_LIST = [pytoken.OP]
Source = PythonSource
-def tokenize_file(filename):
- f = file(filename).read()
- src = Source(f)
- token = src.next()
- while token != ("ENDMARKER", None) and token != (None, None):
- print token
- token = src.next()
-
-if __name__ == '__main__':
- import sys
- tokenize_file(sys.argv[1])
Modified: pypy/dist/pypy/interpreter/pyparser/pythonparse.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/pythonparse.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/pythonparse.py Sun Nov 25 15:45:01 2007
@@ -4,26 +4,17 @@
helper functions are provided that use the grammar to parse
using file_input, single_input and eval_input targets
"""
-import sys
-import os
-from pypy.interpreter.error import OperationError, debug_print
from pypy.interpreter import gateway
from pypy.interpreter.pyparser.error import SyntaxError
from pypy.interpreter.pyparser.pythonlexer import Source, match_encoding_declaration
from pypy.interpreter.astcompiler.consts import CO_FUTURE_WITH_STATEMENT
-import pypy.interpreter.pyparser.pysymbol as pysymbol
import pypy.interpreter.pyparser.pytoken as pytoken
import pypy.interpreter.pyparser.ebnfparse as ebnfparse
from pypy.interpreter.pyparser.ebnflexer import GrammarSource
from pypy.interpreter.pyparser.ebnfgrammar import GRAMMAR_GRAMMAR
import pypy.interpreter.pyparser.grammar as grammar
-from pypy.interpreter.pyparser.pythonutil import build_parser_for_version, build_parser
-
-# try:
+from pypy.interpreter.pyparser.pythonutil import build_parser_for_version
from pypy.interpreter.pyparser import symbol
-# except ImportError:
-# # for standalone testing
-# import symbol
from codeop import PyCF_DONT_IMPLY_DEDENT
@@ -123,12 +114,16 @@
def parse_lines(self, lines, goal, builder, flags=0):
- # builder.keywords = self.keywords.copy()
- # if flags & CO_FUTURE_WITH_STATEMENT:
- # builder.enable_with()
goalnumber = self.symbols[goal]
target = self.root_rules[goalnumber]
- src = Source(self, lines, flags)
+ keywords = {} # dict.fromkeys(self.keywords)
+ disable_with = not (flags & CO_FUTURE_WITH_STATEMENT)
+ for keyword in self.keywords:
+ if disable_with and keyword in ('with', 'as'):
+ continue
+ keywords[keyword] = None
+ src = Source(self, lines, keywords, flags)
+
if not target.match(src, builder):
line, lineno = src.debug()
# XXX needs better error messages
@@ -190,6 +185,7 @@
## grammar.build_first_sets(ebnfbuilder.all_rules)
## return space.wrap( ebnfbuilder.root_rules )
+# XXX Unused?
def grammar_rules( space ):
w_rules = space.newdict()
parser = make_pyparser(space.config.objspace.pyversion)
Modified: pypy/dist/pypy/interpreter/pyparser/test/test_futureautomaton.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/test/test_futureautomaton.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/test/test_futureautomaton.py Sun Nov 25 15:45:01 2007
@@ -1,10 +1,9 @@
import py
import pypy.interpreter.pyparser.future as future
-from pypy.interpreter.astcompiler.consts import CO_GENERATOR_ALLOWED, \
- CO_FUTURE_DIVISION, CO_FUTURE_WITH_STATEMENT
+from pypy.tool import stdlib___future__ as fut
def run(s):
- f = future.FutureAutomaton(s)
+ f = future.FutureAutomaton(future.futureFlags_2_5, s)
try:
f.start()
except IndexError, future.DoneException:
@@ -15,7 +14,7 @@
s = '"Docstring\\" "\nfrom __future__ import division\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == CO_FUTURE_DIVISION
+ assert f.flags == fut.CO_FUTURE_DIVISION
def test_comment():
s = '# A comment about nothing ;\n'
@@ -49,88 +48,97 @@
s = 'from __future__ import division\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == CO_FUTURE_DIVISION
+ assert f.flags == fut.CO_FUTURE_DIVISION
def test_froms():
s = 'from __future__ import division, generators, with_statement\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED |
- CO_FUTURE_WITH_STATEMENT)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_from_as():
s = 'from __future__ import division as b\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == CO_FUTURE_DIVISION
+ assert f.flags == fut.CO_FUTURE_DIVISION
def test_froms_as():
s = 'from __future__ import division as b, generators as c\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_from_paren():
s = 'from __future__ import (division)\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == CO_FUTURE_DIVISION
+ assert f.flags == fut.CO_FUTURE_DIVISION
def test_froms_paren():
s = 'from __future__ import (division, generators)\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_froms_paren_as():
s = 'from __future__ import (division as b, generators,)\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED)
def test_multiline():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,)\nfrom __future__ import with_statement\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED |
- CO_FUTURE_WITH_STATEMENT)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_windows_style_lineendings():
s = '"abc" #def\r\n #ghi\r\nfrom __future__ import (division as b, generators,)\r\nfrom __future__ import with_statement\r\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED |
- CO_FUTURE_WITH_STATEMENT)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_mac_style_lineendings():
s = '"abc" #def\r #ghi\rfrom __future__ import (division as b, generators,)\rfrom __future__ import with_statement\r'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED |
- CO_FUTURE_WITH_STATEMENT)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_semicolon():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,); from __future__ import with_statement\n'
f = run(s)
assert f.pos == len(s)
- assert f.flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED |
- CO_FUTURE_WITH_STATEMENT)
+ assert f.flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_full_chain():
s = '"abc" #def\n #ghi\nfrom __future__ import (division as b, generators,); from __future__ import with_statement\n'
- flags = future.getFutures(s)
- assert flags == (CO_FUTURE_DIVISION |
- CO_GENERATOR_ALLOWED |
- CO_FUTURE_WITH_STATEMENT)
+ flags = future.getFutures(future.futureFlags_2_5, s)
+ assert flags == (fut.CO_FUTURE_DIVISION |
+ fut.CO_GENERATOR_ALLOWED |
+ fut.CO_FUTURE_WITH_STATEMENT)
def test_intervening_code():
s = 'from __future__ import (division as b, generators,)\nfrom sys import modules\nfrom __future__ import with_statement\n'
- flags = future.getFutures(s)
- assert flags & CO_FUTURE_WITH_STATEMENT == 0
+ flags = future.getFutures(future.futureFlags_2_5, s)
+ assert flags & fut.CO_FUTURE_WITH_STATEMENT == 0
+
+
+def test_nonexisting():
+ s = 'from __future__ import non_existing_feature\n'
+ f = run(s)
+ assert f.pos == len(s)
+ assert f.flags == 0
+
+
Modified: pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/test/test_lookahead.py Sun Nov 25 15:45:01 2007
@@ -1,5 +1,5 @@
-from pypy.interpreter.pyparser.grammar import Alternative, Sequence, KleeneStar, \
- Token, Parser
+from pypy.interpreter.pyparser.grammar import Alternative, \
+ Sequence, KleeneStar, Token, Parser
class TestLookAheadBasics:
Modified: pypy/dist/pypy/interpreter/pyparser/test/test_parser.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/test/test_parser.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/test/test_parser.py Sun Nov 25 15:45:01 2007
@@ -46,123 +46,3 @@
assert v == 9
-class RuleTracer(dict):
-
- def __init__(self, *args, **kw):
- self.trace = []
- self.exclude_rules = [
- 'dotted_name', 'dotted_as_name', 'dotted_as_names',
- 'import_stmt', 'small_stmt', 'simple_stmt', 'stmt',
- 'single_input', 'file_input', 'future_import_list',
- 'import_from_future', 'future_import_as_names']
-
- def __getitem__(self, attr):
- if attr in self.exclude_rules:
- return None
-
- def record_trace(builder, number):
- result = [t.value for t in get_atoms(builder, number)]
- self.trace.append((attr, result))
- return record_trace
-
- def get(self, attr, default):
- return self.__getitem__(attr)
-
-from pypy.interpreter.pyparser.astbuilder import AstBuilder
-class MockBuilder(AstBuilder):
-
- def __init__(self, *args, **kw):
- AstBuilder.__init__(self, *args, **kw)
- self.build_rules = RuleTracer()
-
-
-class TestFuture(object):
-
- _grammar_ver = '2.5a'
-
- def setup_class(self):
- from pypy.interpreter.pyparser.pythonparse import make_pyparser
- self.parser = make_pyparser(self._grammar_ver)
-
- def setup_method(self, method):
- self.builder = MockBuilder(self.parser, self._grammar_ver,
- space=FakeSpace())
-
- def check_parse_mode(self, tst, expected, mode):
- self.parser.parse_source(tst, mode, self.builder)
- assert self.builder.build_rules.trace == expected
-
- def check_parse(self, tst, expected):
- self.check_parse_mode(tst, expected, 'exec')
- self.builder.build_rules.trace = []
- self.check_parse_mode(tst, expected, 'single')
-
-
- def test_single_future_import(self):
- tst = 'from __future__ import na\n'
- expected = [('future_import_feature', ['na'])]
- self.check_parse(tst, expected)
-
- def test_double_future_import(self):
- tst = 'from __future__ import na, xx\n'
- expected = [('future_import_feature', ['na']),
- ('future_import_feature', ['xx'])]
- self.check_parse(tst, expected)
-
- def test_two_future_imports(self):
- tst = 'from __future__ import na;from __future__ import xx\n'
- expected = [('future_import_feature', ['na']),
- ('future_import_feature', ['xx'])]
- self.check_parse(tst, expected)
-
- def test_future_imports_nl(self):
- tst = '''
-from __future__ import na
-from __future__ import xx;
-from __future__ import yy
-'''
- expected = [('future_import_feature', ['na']),
- ('future_import_feature', ['xx']),
- ('future_import_feature', ['yy'])]
- self.check_parse_mode(tst, expected,'exec')
-
- def test_single_future_as(self):
- tst = 'from __future__ import na as x\n'
- expected = [('future_import_feature', ['na', 'as', 'x'])]
- self.check_parse(tst, expected)
-
- def test_single_future_as(self):
- tst = 'import sys;from __future__ import na as x\n'
- expected = []
- try:
- self.check_parse_mode(tst, expected,'exec')
- assert False == 'An import before a future import should throw an error.'
- except error.SyntaxError:
- pass
-
- def test_regular_import(self):
- tst = 'import sys'
- expected = [('import_name', ['import', 'sys'])]
- self.check_parse(tst, expected)
-
- def test_future_import(self):
- tst = 'import __future__'
- expected = [('import_name', ['import', '__future__'])]
- self.check_parse(tst, expected)
-
- def test_future_import_atoms(self):
- self.builder.build_rules.exclude_rules.remove('import_from_future')
- self.builder.build_rules.exclude_rules.append('future_import_feature')
- tst = 'from __future__ import na as x\n'
- expected = [('import_from_future',
- ['from', '__future__', 'import', 'na', 'as', 'x'])]
- self.check_parse(tst, expected)
-
- def test_regular_from(self):
- self.builder.build_rules.exclude_rules.extend([
- 'import_as_name', 'import_as_names'])
-
- tst = 'from stuff import na as x\n'
- expected = [('import_from',
- ['from', 'stuff', 'import', 'na', 'as', 'x'])]
- self.check_parse(tst, expected)
Modified: pypy/dist/pypy/interpreter/pyparser/test/test_pytokenizer.py
==============================================================================
--- pypy/dist/pypy/interpreter/pyparser/test/test_pytokenizer.py (original)
+++ pypy/dist/pypy/interpreter/pyparser/test/test_pytokenizer.py Sun Nov 25 15:45:01 2007
@@ -18,7 +18,7 @@
def parse_source(source):
"""returns list of parsed tokens"""
- lexer = Source( P, source.splitlines(True))
+ lexer = Source( P, source.splitlines(True), {})
tokens = []
last_token = Token( P, NULLTOKEN, None)
while last_token.codename != ENDMARKER:
More information about the Pypy-commit
mailing list