[pypy-commit] pypy tealet: hg merge default

arigo noreply at buildbot.pypy.org
Wed Jul 6 20:28:58 CEST 2011


Author: Armin Rigo <arigo at tunes.org>
Branch: tealet
Changeset: r45390:6404b8a8b2ff
Date: 2011-07-06 16:00 +0200
http://bitbucket.org/pypy/pypy/changeset/6404b8a8b2ff/

Log:	hg merge default

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -1,6 +1,7 @@
 syntax: glob
 *.py[co]
 *~
+.*.swp
 
 syntax: regexp
 ^testresult$
@@ -38,6 +39,8 @@
 ^pypy/translator/benchmark/shootout_benchmarks$
 ^pypy/translator/goal/pypy-translation-snapshot$
 ^pypy/translator/goal/pypy-c
+^pypy/translator/goal/pypy-jvm
+^pypy/translator/goal/pypy-jvm.jar
 ^pypy/translator/goal/.+\.exe$
 ^pypy/translator/goal/.+\.dll$
 ^pypy/translator/goal/target.+-c$
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.0.3'
+__version__ = '2.1.0.dev4'
diff --git a/_pytest/assertion.py b/_pytest/assertion.py
deleted file mode 100644
--- a/_pytest/assertion.py
+++ /dev/null
@@ -1,177 +0,0 @@
-"""
-support for presented detailed information in failing assertions.
-"""
-import py
-import sys
-from _pytest.monkeypatch import monkeypatch
-
-def pytest_addoption(parser):
-    group = parser.getgroup("debugconfig")
-    group._addoption('--no-assert', action="store_true", default=False,
-        dest="noassert",
-        help="disable python assert expression reinterpretation."),
-
-def pytest_configure(config):
-    # The _reprcompare attribute on the py.code module is used by
-    # py._code._assertionnew to detect this plugin was loaded and in
-    # turn call the hooks defined here as part of the
-    # DebugInterpreter.
-    m = monkeypatch()
-    config._cleanup.append(m.undo)
-    warn_about_missing_assertion()
-    if not config.getvalue("noassert") and not config.getvalue("nomagic"):
-        def callbinrepr(op, left, right):
-            hook_result = config.hook.pytest_assertrepr_compare(
-                config=config, op=op, left=left, right=right)
-            for new_expl in hook_result:
-                if new_expl:
-                    return '\n~'.join(new_expl)
-        m.setattr(py.builtin.builtins,
-                  'AssertionError', py.code._AssertionError)
-        m.setattr(py.code, '_reprcompare', callbinrepr)
-
-def warn_about_missing_assertion():
-    try:
-        assert False
-    except AssertionError:
-        pass
-    else:
-        sys.stderr.write("WARNING: failing tests may report as passing because "
-        "assertions are turned off!  (are you using python -O?)\n")
-
-# Provide basestring in python3
-try:
-    basestring = basestring
-except NameError:
-    basestring = str
-
-
-def pytest_assertrepr_compare(op, left, right):
-    """return specialised explanations for some operators/operands"""
-    width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
-    left_repr = py.io.saferepr(left, maxsize=int(width/2))
-    right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
-    summary = '%s %s %s' % (left_repr, op, right_repr)
-
-    issequence = lambda x: isinstance(x, (list, tuple))
-    istext = lambda x: isinstance(x, basestring)
-    isdict = lambda x: isinstance(x, dict)
-    isset = lambda x: isinstance(x, set)
-
-    explanation = None
-    try:
-        if op == '==':
-            if istext(left) and istext(right):
-                explanation = _diff_text(left, right)
-            elif issequence(left) and issequence(right):
-                explanation = _compare_eq_sequence(left, right)
-            elif isset(left) and isset(right):
-                explanation = _compare_eq_set(left, right)
-            elif isdict(left) and isdict(right):
-                explanation = _diff_text(py.std.pprint.pformat(left),
-                                         py.std.pprint.pformat(right))
-        elif op == 'not in':
-            if istext(left) and istext(right):
-                explanation = _notin_text(left, right)
-    except py.builtin._sysex:
-        raise
-    except:
-        excinfo = py.code.ExceptionInfo()
-        explanation = ['(pytest_assertion plugin: representation of '
-            'details failed. Probably an object has a faulty __repr__.)',
-            str(excinfo)
-            ]
-
-
-    if not explanation:
-        return None
-
-    # Don't include pageloads of data, should be configurable
-    if len(''.join(explanation)) > 80*8:
-        explanation = ['Detailed information too verbose, truncated']
-
-    return [summary] + explanation
-
-
-def _diff_text(left, right):
-    """Return the explanation for the diff between text
-
-    This will skip leading and trailing characters which are
-    identical to keep the diff minimal.
-    """
-    explanation = []
-    i = 0 # just in case left or right has zero length
-    for i in range(min(len(left), len(right))):
-        if left[i] != right[i]:
-            break
-    if i > 42:
-        i -= 10                 # Provide some context
-        explanation = ['Skipping %s identical '
-                       'leading characters in diff' % i]
-        left = left[i:]
-        right = right[i:]
-    if len(left) == len(right):
-        for i in range(len(left)):
-            if left[-i] != right[-i]:
-                break
-        if i > 42:
-            i -= 10     # Provide some context
-            explanation += ['Skipping %s identical '
-                            'trailing characters in diff' % i]
-            left = left[:-i]
-            right = right[:-i]
-    explanation += [line.strip('\n')
-                    for line in py.std.difflib.ndiff(left.splitlines(),
-                                                     right.splitlines())]
-    return explanation
-
-
-def _compare_eq_sequence(left, right):
-    explanation = []
-    for i in range(min(len(left), len(right))):
-        if left[i] != right[i]:
-            explanation += ['At index %s diff: %r != %r' %
-                            (i, left[i], right[i])]
-            break
-    if len(left) > len(right):
-        explanation += ['Left contains more items, '
-            'first extra item: %s' % py.io.saferepr(left[len(right)],)]
-    elif len(left) < len(right):
-        explanation += ['Right contains more items, '
-            'first extra item: %s' % py.io.saferepr(right[len(left)],)]
-    return explanation # + _diff_text(py.std.pprint.pformat(left),
-                       #             py.std.pprint.pformat(right))
-
-
-def _compare_eq_set(left, right):
-    explanation = []
-    diff_left = left - right
-    diff_right = right - left
-    if diff_left:
-        explanation.append('Extra items in the left set:')
-        for item in diff_left:
-            explanation.append(py.io.saferepr(item))
-    if diff_right:
-        explanation.append('Extra items in the right set:')
-        for item in diff_right:
-            explanation.append(py.io.saferepr(item))
-    return explanation
-
-
-def _notin_text(term, text):
-    index = text.find(term)
-    head = text[:index]
-    tail = text[index+len(term):]
-    correct_text = head + tail
-    diff = _diff_text(correct_text, text)
-    newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
-    for line in diff:
-        if line.startswith('Skipping'):
-            continue
-        if line.startswith('- '):
-            continue
-        if line.startswith('+ '):
-            newdiff.append('  ' + line[2:])
-        else:
-            newdiff.append(line)
-    return newdiff
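
The comparison helpers removed here feed the pytest_assertrepr_compare hook; their default implementation moves to _pytest/assertion/util.py below (see the last line of the new __init__.py). A project conftest.py can supply extra comparisons through the same hook. A minimal, hypothetical conftest sketch (the set-specific wording is made up for illustration):

    # conftest.py -- hook implementations may accept a subset of the spec's arguments
    def pytest_assertrepr_compare(op, left, right):
        if op == "==" and isinstance(left, set) and isinstance(right, set):
            return ["comparing sets:",
                    "  only in left:  %r" % (left - right,),
                    "  only in right: %r" % (right - left,)]
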
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
new file mode 100644
--- /dev/null
+++ b/_pytest/assertion/__init__.py
@@ -0,0 +1,128 @@
+"""
+support for presenting detailed information in failing assertions.
+"""
+import py
+import imp
+import marshal
+import struct
+import sys
+import pytest
+from _pytest.monkeypatch import monkeypatch
+from _pytest.assertion import reinterpret, util
+
+try:
+    from _pytest.assertion.rewrite import rewrite_asserts
+except ImportError:
+    rewrite_asserts = None
+else:
+    import ast
+
+def pytest_addoption(parser):
+    group = parser.getgroup("debugconfig")
+    group.addoption('--assertmode', action="store", dest="assertmode",
+                    choices=("on", "old", "off", "default"), default="default",
+                    metavar="on|old|off",
+                    help="""control assertion debugging tools.
+'off' performs no assertion debugging.
+'old' reinterprets the expressions in asserts to glean information.
+'on' (the default) rewrites the assert statements in test modules to provide
+sub-expression results.""")
+    group.addoption('--no-assert', action="store_true", default=False,
+        dest="noassert", help="DEPRECATED equivalent to --assertmode=off")
+    group.addoption('--nomagic', action="store_true", default=False,
+        dest="nomagic", help="DEPRECATED equivalent to --assertmode=off")
+
+class AssertionState:
+    """State for the assertion plugin."""
+
+    def __init__(self, config, mode):
+        self.mode = mode
+        self.trace = config.trace.root.get("assertion")
+
+def pytest_configure(config):
+    warn_about_missing_assertion()
+    mode = config.getvalue("assertmode")
+    if config.getvalue("noassert") or config.getvalue("nomagic"):
+        if mode not in ("off", "default"):
+            raise pytest.UsageError("assertion options conflict")
+        mode = "off"
+    elif mode == "default":
+        mode = "on"
+    if mode != "off":
+        def callbinrepr(op, left, right):
+            hook_result = config.hook.pytest_assertrepr_compare(
+                config=config, op=op, left=left, right=right)
+            for new_expl in hook_result:
+                if new_expl:
+                    return '\n~'.join(new_expl)
+        m = monkeypatch()
+        config._cleanup.append(m.undo)
+        m.setattr(py.builtin.builtins, 'AssertionError',
+                  reinterpret.AssertionError)
+        m.setattr(util, '_reprcompare', callbinrepr)
+    if mode == "on" and rewrite_asserts is None:
+        mode = "old"
+    config._assertstate = AssertionState(config, mode)
+    config._assertstate.trace("configured with mode set to %r" % (mode,))
+
+def _write_pyc(co, source_path):
+    if hasattr(imp, "cache_from_source"):
+        # Handle PEP 3147 pycs.
+        pyc = py.path.local(imp.cache_from_source(str(source_path)))
+        pyc.ensure()
+    else:
+        pyc = source_path + "c"
+    mtime = int(source_path.mtime())
+    fp = pyc.open("wb")
+    try:
+        fp.write(imp.get_magic())
+        fp.write(struct.pack("<l", mtime))
+        marshal.dump(co, fp)
+    finally:
+        fp.close()
+    return pyc
+
+def before_module_import(mod):
+    if mod.config._assertstate.mode != "on":
+        return
+    # Some deep magic: load the source, rewrite the asserts, and write a
+    # fake pyc, so that it'll be loaded when the module is imported.
+    source = mod.fspath.read()
+    try:
+        tree = ast.parse(source)
+    except SyntaxError:
+        # Let this pop up again in the real import.
+        mod.config._assertstate.trace("failed to parse: %r" % (mod.fspath,))
+        return
+    rewrite_asserts(tree)
+    try:
+        co = compile(tree, str(mod.fspath), "exec")
+    except SyntaxError:
+        # It's possible that this error is from some bug in the assertion
+        # rewriting, but I don't know of a fast way to tell.
+        mod.config._assertstate.trace("failed to compile: %r" % (mod.fspath,))
+        return
+    mod._pyc = _write_pyc(co, mod.fspath)
+    mod.config._assertstate.trace("wrote pyc: %r" % (mod._pyc,))
+
+def after_module_import(mod):
+    if not hasattr(mod, "_pyc"):
+        return
+    state = mod.config._assertstate
+    try:
+        mod._pyc.remove()
+    except py.error.ENOENT:
+        state.trace("couldn't find pyc: %r" % (mod._pyc,))
+    else:
+        state.trace("removed pyc: %r" % (mod._pyc,))
+
+def warn_about_missing_assertion():
+    try:
+        assert False
+    except AssertionError:
+        pass
+    else:
+        sys.stderr.write("WARNING: failing tests may report as passing because "
+        "assertions are turned off!  (are you using python -O?)\n")
+
+pytest_assertrepr_compare = util.assertrepr_compare
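
For readers unfamiliar with the pyc layout that _write_pyc() above relies on, here is a minimal stand-alone sketch, not part of the changeset. The helper name and the plain "source path + c" target are assumptions for the example; the real code also handles PEP 3147 cache locations via imp.cache_from_source().

    import imp
    import marshal
    import os
    import struct

    def write_plain_pyc(path):
        # Compile the source and write magic + 32-bit mtime + marshalled code
        # object, the same header layout that _write_pyc() produces.
        source = open(path).read()
        co = compile(source, path, "exec")
        mtime = int(os.path.getmtime(path))
        fp = open(path + "c", "wb")
        try:
            fp.write(imp.get_magic())
            fp.write(struct.pack("<l", mtime))
            marshal.dump(co, fp)
        finally:
            fp.close()
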
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
new file mode 100644
--- /dev/null
+++ b/_pytest/assertion/newinterpret.py
@@ -0,0 +1,333 @@
+"""
+Find intermediate evaluation results in assert statements through builtin AST.
+This should replace oldinterpret.py eventually.
+"""
+
+import sys
+import ast
+
+import py
+from _pytest.assertion import util
+from _pytest.assertion.reinterpret import BuiltinAssertionError
+
+
+if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+    # See http://bugs.jython.org/issue1497
+    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+              "List", "Tuple")
+    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+    _expr_nodes = set(getattr(ast, name) for name in _exprs)
+    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+    def _is_ast_expr(node):
+        return node.__class__ in _expr_nodes
+    def _is_ast_stmt(node):
+        return node.__class__ in _stmt_nodes
+else:
+    def _is_ast_expr(node):
+        return isinstance(node, ast.expr)
+    def _is_ast_stmt(node):
+        return isinstance(node, ast.stmt)
+
+
+class Failure(Exception):
+    """Error found while interpreting AST."""
+
+    def __init__(self, explanation=""):
+        self.cause = sys.exc_info()
+        self.explanation = explanation
+
+
+def interpret(source, frame, should_fail=False):
+    mod = ast.parse(source)
+    visitor = DebugInterpreter(frame)
+    try:
+        visitor.visit(mod)
+    except Failure:
+        failure = sys.exc_info()[1]
+        return getfailure(failure)
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --no-assert)")
+
+def run(offending_line, frame=None):
+    if frame is None:
+        frame = py.code.Frame(sys._getframe(1))
+    return interpret(offending_line, frame)
+
+def getfailure(e):
+    explanation = util.format_explanation(e.explanation)
+    value = e.cause[1]
+    if str(value):
+        lines = explanation.split('\n')
+        lines[0] += "  << %s" % (value,)
+        explanation = '\n'.join(lines)
+    text = "%s: %s" % (e.cause[0].__name__, explanation)
+    if text.startswith('AssertionError: assert '):
+        text = text[16:]
+    return text
+
+operator_map = {
+    ast.BitOr : "|",
+    ast.BitXor : "^",
+    ast.BitAnd : "&",
+    ast.LShift : "<<",
+    ast.RShift : ">>",
+    ast.Add : "+",
+    ast.Sub : "-",
+    ast.Mult : "*",
+    ast.Div : "/",
+    ast.FloorDiv : "//",
+    ast.Mod : "%",
+    ast.Eq : "==",
+    ast.NotEq : "!=",
+    ast.Lt : "<",
+    ast.LtE : "<=",
+    ast.Gt : ">",
+    ast.GtE : ">=",
+    ast.Pow : "**",
+    ast.Is : "is",
+    ast.IsNot : "is not",
+    ast.In : "in",
+    ast.NotIn : "not in"
+}
+
+unary_map = {
+    ast.Not : "not %s",
+    ast.Invert : "~%s",
+    ast.USub : "-%s",
+    ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+    """Interpret AST nodes to gleam useful debugging information. """
+
+    def __init__(self, frame):
+        self.frame = frame
+
+    def generic_visit(self, node):
+        # Fallback when we don't have a special implementation.
+        if _is_ast_expr(node):
+            mod = ast.Expression(node)
+            co = self._compile(mod)
+            try:
+                result = self.frame.eval(co)
+            except Exception:
+                raise Failure()
+            explanation = self.frame.repr(result)
+            return explanation, result
+        elif _is_ast_stmt(node):
+            mod = ast.Module([node])
+            co = self._compile(mod, "exec")
+            try:
+                self.frame.exec_(co)
+            except Exception:
+                raise Failure()
+            return None, None
+        else:
+            raise AssertionError("can't handle %s" %(node,))
+
+    def _compile(self, source, mode="eval"):
+        return compile(source, "<assertion interpretation>", mode)
+
+    def visit_Expr(self, expr):
+        return self.visit(expr.value)
+
+    def visit_Module(self, mod):
+        for stmt in mod.body:
+            self.visit(stmt)
+
+    def visit_Name(self, name):
+        explanation, result = self.generic_visit(name)
+        # See if the name is local.
+        source = "%r in locals() is not globals()" % (name.id,)
+        co = self._compile(source)
+        try:
+            local = self.frame.eval(co)
+        except Exception:
+            # have to assume it isn't
+            local = None
+        if local is None or not self.frame.is_true(local):
+            return name.id, result
+        return explanation, result
+
+    def visit_Compare(self, comp):
+        left = comp.left
+        left_explanation, left_result = self.visit(left)
+        for op, next_op in zip(comp.ops, comp.comparators):
+            next_explanation, next_result = self.visit(next_op)
+            op_symbol = operator_map[op.__class__]
+            explanation = "%s %s %s" % (left_explanation, op_symbol,
+                                        next_explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+            co = self._compile(source)
+            try:
+                result = self.frame.eval(co, __exprinfo_left=left_result,
+                                         __exprinfo_right=next_result)
+            except Exception:
+                raise Failure(explanation)
+            try:
+                if not self.frame.is_true(result):
+                    break
+            except KeyboardInterrupt:
+                raise
+            except:
+                break
+            left_explanation, left_result = next_explanation, next_result
+
+        if util._reprcompare is not None:
+            res = util._reprcompare(op_symbol, left_result, next_result)
+            if res:
+                explanation = res
+        return explanation, result
+
+    def visit_BoolOp(self, boolop):
+        is_or = isinstance(boolop.op, ast.Or)
+        explanations = []
+        for operand in boolop.values:
+            explanation, result = self.visit(operand)
+            explanations.append(explanation)
+            if result == is_or:
+                break
+        name = is_or and " or " or " and "
+        explanation = "(" + name.join(explanations) + ")"
+        return explanation, result
+
+    def visit_UnaryOp(self, unary):
+        pattern = unary_map[unary.op.__class__]
+        operand_explanation, operand_result = self.visit(unary.operand)
+        explanation = pattern % (operand_explanation,)
+        co = self._compile(pattern % ("__exprinfo_expr",))
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=operand_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_BinOp(self, binop):
+        left_explanation, left_result = self.visit(binop.left)
+        right_explanation, right_result = self.visit(binop.right)
+        symbol = operator_map[binop.op.__class__]
+        explanation = "(%s %s %s)" % (left_explanation, symbol,
+                                      right_explanation)
+        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_left=left_result,
+                                     __exprinfo_right=right_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_Call(self, call):
+        func_explanation, func = self.visit(call.func)
+        arg_explanations = []
+        ns = {"__exprinfo_func" : func}
+        arguments = []
+        for arg in call.args:
+            arg_explanation, arg_result = self.visit(arg)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            arguments.append(arg_name)
+            arg_explanations.append(arg_explanation)
+        for keyword in call.keywords:
+            arg_explanation, arg_result = self.visit(keyword.value)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            keyword_source = "%s=%%s" % (keyword.arg)
+            arguments.append(keyword_source % (arg_name,))
+            arg_explanations.append(keyword_source % (arg_explanation,))
+        if call.starargs:
+            arg_explanation, arg_result = self.visit(call.starargs)
+            arg_name = "__exprinfo_star"
+            ns[arg_name] = arg_result
+            arguments.append("*%s" % (arg_name,))
+            arg_explanations.append("*%s" % (arg_explanation,))
+        if call.kwargs:
+            arg_explanation, arg_result = self.visit(call.kwargs)
+            arg_name = "__exprinfo_kwds"
+            ns[arg_name] = arg_result
+            arguments.append("**%s" % (arg_name,))
+            arg_explanations.append("**%s" % (arg_explanation,))
+        args_explained = ", ".join(arg_explanations)
+        explanation = "%s(%s)" % (func_explanation, args_explained)
+        args = ", ".join(arguments)
+        source = "__exprinfo_func(%s)" % (args,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, **ns)
+        except Exception:
+            raise Failure(explanation)
+        pattern = "%s\n{%s = %s\n}"
+        rep = self.frame.repr(result)
+        explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def _is_builtin_name(self, name):
+        pattern = "%r not in globals() and %r not in locals()"
+        source = pattern % (name.id, name.id)
+        co = self._compile(source)
+        try:
+            return self.frame.eval(co)
+        except Exception:
+            return False
+
+    def visit_Attribute(self, attr):
+        if not isinstance(attr.ctx, ast.Load):
+            return self.generic_visit(attr)
+        source_explanation, source_result = self.visit(attr.value)
+        explanation = "%s.%s" % (source_explanation, attr.attr)
+        source = "__exprinfo_expr.%s" % (attr.attr,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            raise Failure(explanation)
+        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+                                              self.frame.repr(result),
+                                              source_explanation, attr.attr)
+        # Check if the attr is from an instance.
+        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+        source = source % (attr.attr,)
+        co = self._compile(source)
+        try:
+            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            from_instance = None
+        if from_instance is None or self.frame.is_true(from_instance):
+            rep = self.frame.repr(result)
+            pattern = "%s\n{%s = %s\n}"
+            explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def visit_Assert(self, assrt):
+        test_explanation, test_result = self.visit(assrt.test)
+        explanation = "assert %s" % (test_explanation,)
+        if not self.frame.is_true(test_result):
+            try:
+                raise BuiltinAssertionError
+            except Exception:
+                raise Failure(explanation)
+        return explanation, test_result
+
+    def visit_Assign(self, assign):
+        value_explanation, value_result = self.visit(assign.value)
+        explanation = "... = %s" % (value_explanation,)
+        name = ast.Name("__exprinfo_expr", ast.Load(),
+                        lineno=assign.value.lineno,
+                        col_offset=assign.value.col_offset)
+        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+                                col_offset=assign.col_offset)
+        mod = ast.Module([new_assign])
+        co = self._compile(mod, "exec")
+        try:
+            self.frame.exec_(co, __exprinfo_expr=value_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, value_result
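
To show how this interpreter is meant to be driven, a small usage sketch follows. It is not part of the changeset; the locals x and y and the failing expression are made up:

    import sys
    import py
    from _pytest.assertion import newinterpret

    def explain_failure():
        x, y = 1, 2
        frame = py.code.Frame(sys._getframe())
        # Re-evaluate the failing assert inside this frame to collect
        # intermediate values; the return value is the explanation text.
        return newinterpret.interpret("assert x + 1 == y * 2", frame)
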
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
new file mode 100644
--- /dev/null
+++ b/_pytest/assertion/oldinterpret.py
@@ -0,0 +1,552 @@
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from _pytest.assertion.util import format_explanation
+from _pytest.assertion.reinterpret import BuiltinAssertionError
+
+passthroughex = py.builtin._sysex
+
+class Failure:
+    def __init__(self, node):
+        self.exc, self.value, self.tb = sys.exc_info()
+        self.node = node
+
+class View(object):
+    """View base class.
+
+    If C is a subclass of View, then C(x) creates a proxy object around
+    the object x.  The actual class of the proxy is not C in general,
+    but a *subclass* of C determined by the rules below.  To avoid confusion
+    we call view class the class of the proxy (a subclass of C, so of View)
+    and object class the class of x.
+
+    Attributes and methods not found in the proxy are automatically read on x.
+    Other operations like setting attributes are performed on the proxy, as
+    determined by its view class.  The object x is available from the proxy
+    as its __obj__ attribute.
+
+    The view class selection is determined by the __view__ tuples and the
+    optional __viewkey__ method.  By default, the selected view class is the
+    most specific subclass of C whose __view__ mentions the class of x.
+    If no such subclass is found, the search proceeds with the parent
+    object classes.  For example, C(True) will first look for a subclass
+    of C with __view__ = (..., bool, ...) and only if it doesn't find any
+    look for one with __view__ = (..., int, ...), and then ..., object,...
+    If everything fails the class C itself is considered to be the default.
+
+    Alternatively, the view class selection can be driven by another aspect
+    of the object x, instead of the class of x, by overriding __viewkey__.
+    See last example at the end of this module.
+    """
+
+    _viewcache = {}
+    __view__ = ()
+
+    def __new__(rootclass, obj, *args, **kwds):
+        self = object.__new__(rootclass)
+        self.__obj__ = obj
+        self.__rootclass__ = rootclass
+        key = self.__viewkey__()
+        try:
+            self.__class__ = self._viewcache[key]
+        except KeyError:
+            self.__class__ = self._selectsubclass(key)
+        return self
+
+    def __getattr__(self, attr):
+        # attributes not found in the normal hierarchy rooted on View
+        # are looked up in the object's real class
+        return getattr(self.__obj__, attr)
+
+    def __viewkey__(self):
+        return self.__obj__.__class__
+
+    def __matchkey__(self, key, subclasses):
+        if inspect.isclass(key):
+            keys = inspect.getmro(key)
+        else:
+            keys = [key]
+        for key in keys:
+            result = [C for C in subclasses if key in C.__view__]
+            if result:
+                return result
+        return []
+
+    def _selectsubclass(self, key):
+        subclasses = list(enumsubclasses(self.__rootclass__))
+        for C in subclasses:
+            if not isinstance(C.__view__, tuple):
+                C.__view__ = (C.__view__,)
+        choices = self.__matchkey__(key, subclasses)
+        if not choices:
+            return self.__rootclass__
+        elif len(choices) == 1:
+            return choices[0]
+        else:
+            # combine the multiple choices
+            return type('?', tuple(choices), {})
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
+def enumsubclasses(cls):
+    for subcls in cls.__subclasses__():
+        for subsubclass in enumsubclasses(subcls):
+            yield subsubclass
+    yield cls
+
+
+class Interpretable(View):
+    """A parse tree node with a few extra methods."""
+    explanation = None
+
+    def is_builtin(self, frame):
+        return False
+
+    def eval(self, frame):
+        # fall-back for unknown expression nodes
+        try:
+            expr = ast.Expression(self.__obj__)
+            expr.filename = '<eval>'
+            self.__obj__.filename = '<eval>'
+            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
+            result = frame.eval(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.result = result
+        self.explanation = self.explanation or frame.repr(self.result)
+
+    def run(self, frame):
+        # fall-back for unknown statement nodes
+        try:
+            expr = ast.Module(None, ast.Stmt([self.__obj__]))
+            expr.filename = '<run>'
+            co = pycodegen.ModuleCodeGenerator(expr).getCode()
+            frame.exec_(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+    def nice_explanation(self):
+        return format_explanation(self.explanation)
+
+
+class Name(Interpretable):
+    __view__ = ast.Name
+
+    def is_local(self, frame):
+        source = '%r in locals() is not globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_global(self, frame):
+        source = '%r in globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_builtin(self, frame):
+        source = '%r not in locals() and %r not in globals()' % (
+            self.name, self.name)
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        super(Name, self).eval(frame)
+        if not self.is_local(frame):
+            self.explanation = self.name
+
+class Compare(Interpretable):
+    __view__ = ast.Compare
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        for operation, expr2 in self.ops:
+            if hasattr(self, 'result'):
+                # shortcutting in chained expressions
+                if not frame.is_true(self.result):
+                    break
+            expr2 = Interpretable(expr2)
+            expr2.eval(frame)
+            self.explanation = "%s %s %s" % (
+                expr.explanation, operation, expr2.explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % operation
+            try:
+                self.result = frame.eval(source,
+                                         __exprinfo_left=expr.result,
+                                         __exprinfo_right=expr2.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+            expr = expr2
+
+class And(Interpretable):
+    __view__ = ast.And
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if not frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' and '.join(explanations) + ')'
+
+class Or(Interpretable):
+    __view__ = ast.Or
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' or '.join(explanations) + ')'
+
+
+# == Unary operations ==
+keepalive = []
+for astclass, astpattern in {
+    ast.Not    : 'not __exprinfo_expr',
+    ast.Invert : '(~__exprinfo_expr)',
+    }.items():
+
+    class UnaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            expr = Interpretable(self.expr)
+            expr.eval(frame)
+            self.explanation = astpattern.replace('__exprinfo_expr',
+                                                  expr.explanation)
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_expr=expr.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(UnaryArith)
+
+# == Binary operations ==
+for astclass, astpattern in {
+    ast.Add    : '(__exprinfo_left + __exprinfo_right)',
+    ast.Sub    : '(__exprinfo_left - __exprinfo_right)',
+    ast.Mul    : '(__exprinfo_left * __exprinfo_right)',
+    ast.Div    : '(__exprinfo_left / __exprinfo_right)',
+    ast.Mod    : '(__exprinfo_left % __exprinfo_right)',
+    ast.Power  : '(__exprinfo_left ** __exprinfo_right)',
+    }.items():
+
+    class BinaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            left = Interpretable(self.left)
+            left.eval(frame)
+            right = Interpretable(self.right)
+            right.eval(frame)
+            self.explanation = (astpattern
+                                .replace('__exprinfo_left',  left .explanation)
+                                .replace('__exprinfo_right', right.explanation))
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_left=left.result,
+                                         __exprinfo_right=right.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(BinaryArith)
+
+
+class CallFunc(Interpretable):
+    __view__ = ast.CallFunc
+
+    def is_bool(self, frame):
+        source = 'isinstance(__exprinfo_value, bool)'
+        try:
+            return frame.is_true(frame.eval(source,
+                                            __exprinfo_value=self.result))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        node = Interpretable(self.node)
+        node.eval(frame)
+        explanations = []
+        vars = {'__exprinfo_fn': node.result}
+        source = '__exprinfo_fn('
+        for a in self.args:
+            if isinstance(a, ast.Keyword):
+                keyword = a.name
+                a = a.expr
+            else:
+                keyword = None
+            a = Interpretable(a)
+            a.eval(frame)
+            argname = '__exprinfo_%d' % len(vars)
+            vars[argname] = a.result
+            if keyword is None:
+                source += argname + ','
+                explanations.append(a.explanation)
+            else:
+                source += '%s=%s,' % (keyword, argname)
+                explanations.append('%s=%s' % (keyword, a.explanation))
+        if self.star_args:
+            star_args = Interpretable(self.star_args)
+            star_args.eval(frame)
+            argname = '__exprinfo_star'
+            vars[argname] = star_args.result
+            source += '*' + argname + ','
+            explanations.append('*' + star_args.explanation)
+        if self.dstar_args:
+            dstar_args = Interpretable(self.dstar_args)
+            dstar_args.eval(frame)
+            argname = '__exprinfo_kwds'
+            vars[argname] = dstar_args.result
+            source += '**' + argname + ','
+            explanations.append('**' + dstar_args.explanation)
+        self.explanation = "%s(%s)" % (
+            node.explanation, ', '.join(explanations))
+        if source.endswith(','):
+            source = source[:-1]
+        source += ')'
+        try:
+            self.result = frame.eval(source, **vars)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        if not node.is_builtin(frame) or not self.is_bool(frame):
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+class Getattr(Interpretable):
+    __view__ = ast.Getattr
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        source = '__exprinfo_expr.%s' % self.attrname
+        try:
+            self.result = frame.eval(source, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
+        # if the attribute comes from the instance, its value is interesting
+        source = ('hasattr(__exprinfo_expr, "__dict__") and '
+                  '%r in __exprinfo_expr.__dict__' % self.attrname)
+        try:
+            from_instance = frame.is_true(
+                frame.eval(source, __exprinfo_expr=expr.result))
+        except passthroughex:
+            raise
+        except:
+            from_instance = True
+        if from_instance:
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
+class Assert(Interpretable):
+    __view__ = ast.Assert
+
+    def run(self, frame):
+        test = Interpretable(self.test)
+        test.eval(frame)
+        # print the result as  'assert <explanation>'
+        self.result = test.result
+        self.explanation = 'assert ' + test.explanation
+        if not frame.is_true(test.result):
+            try:
+                raise BuiltinAssertionError
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+class Assign(Interpretable):
+    __view__ = ast.Assign
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = '... = ' + expr.explanation
+        # fall-back-run the rest of the assignment
+        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
+        mod = ast.Module(None, ast.Stmt([ass]))
+        mod.filename = '<run>'
+        co = pycodegen.ModuleCodeGenerator(mod).getCode()
+        try:
+            frame.exec_(co, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+class Discard(Interpretable):
+    __view__ = ast.Discard
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = expr.explanation
+
+class Stmt(Interpretable):
+    __view__ = ast.Stmt
+
+    def run(self, frame):
+        for stmt in self.nodes:
+            stmt = Interpretable(stmt)
+            stmt.run(frame)
+
+
+def report_failure(e):
+    explanation = e.node.nice_explanation()
+    if explanation:
+        explanation = ", in: " + explanation
+    else:
+        explanation = ""
+    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
+
+def check(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    expr = parse(s, 'eval')
+    assert isinstance(expr, ast.Expression)
+    node = Interpretable(expr.node)
+    try:
+        node.eval(frame)
+    except passthroughex:
+        raise
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+    else:
+        if not frame.is_true(node.result):
+            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
+def interpret(source, frame, should_fail=False):
+    module = Interpretable(parse(source, 'exec').node)
+    #print "got module", module
+    if isinstance(frame, py.std.types.FrameType):
+        frame = py.code.Frame(frame)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        return getfailure(e)
+    except passthroughex:
+        raise
+    except:
+        import traceback
+        traceback.print_exc()
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --nomagic)")
+    else:
+        return None
+
+def getmsg(excinfo):
+    if isinstance(excinfo, tuple):
+        excinfo = py.code.ExceptionInfo(excinfo)
+    #frame, line = gettbline(tb)
+    #frame = py.code.Frame(frame)
+    #return interpret(line, frame)
+
+    tb = excinfo.traceback[-1]
+    source = str(tb.statement).strip()
+    x = interpret(source, tb.frame, should_fail=True)
+    if not isinstance(x, str):
+        raise TypeError("interpret returned non-string %r" % (x,))
+    return x
+
+def getfailure(e):
+    explanation = e.node.nice_explanation()
+    if str(e.value):
+        lines = explanation.split('\n')
+        lines[0] += "  << %s" % (e.value,)
+        explanation = '\n'.join(lines)
+    text = "%s: %s" % (e.exc.__name__, explanation)
+    if text.startswith('AssertionError: assert '):
+        text = text[16:]
+    return text
+
+def run(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    module = Interpretable(parse(s, 'exec').node)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+
+
+if __name__ == '__main__':
+    # example:
+    def f():
+        return 5
+    def g():
+        return 3
+    def h(x):
+        return 'never'
+    check("f() * g() == 5")
+    check("not f()")
+    check("not (f() and g() or 0)")
+    check("f() == g()")
+    i = 4
+    check("i == f()")
+    check("len(f()) == 0")
+    check("isinstance(2+3+4, float)")
+
+    run("x = i")
+    check("x == 5")
+
+    run("assert not f(), 'oops'")
+    run("a, b, c = 1, 2")
+    run("a, b, c = f()")
+
+    check("max([f(),g()]) == 4")
+    check("'hello'[g()] == 'h'")
+    run("'guk%d' % h(f())")
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
new file mode 100644
--- /dev/null
+++ b/_pytest/assertion/reinterpret.py
@@ -0,0 +1,48 @@
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+class AssertionError(BuiltinAssertionError):
+    def __init__(self, *args):
+        BuiltinAssertionError.__init__(self, *args)
+        if args:
+            try:
+                self.msg = str(args[0])
+            except py.builtin._sysex:
+                raise
+            except:
+                self.msg = "<[broken __repr__] %s at %0xd>" %(
+                    args[0].__class__, id(args[0]))
+        else:
+            f = py.code.Frame(sys._getframe(1))
+            try:
+                source = f.code.fullsource
+                if source is not None:
+                    try:
+                        source = source.getstatement(f.lineno, assertion=True)
+                    except IndexError:
+                        source = None
+                    else:
+                        source = str(source.deindent()).strip()
+            except py.error.ENOENT:
+                source = None
+                # this can also occur during reinterpretation, when the
+                # co_filename is set to "<run>".
+            if source:
+                self.msg = reinterpret(source, f, should_fail=True)
+            else:
+                self.msg = "<could not determine information>"
+            if not self.args:
+                self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+    AssertionError.__module__ = "builtins"
+    reinterpret_old = "old reinterpretation not available for py3"
+else:
+    from _pytest.assertion.oldinterpret import interpret as reinterpret_old
+if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
+    from _pytest.assertion.newinterpret import interpret as reinterpret
+else:
+    reinterpret = reinterpret_old
+
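
What the monkeypatching in pytest_configure() does with this class can be seen in isolation with the sketch below. It is not part of the changeset, it has to run from a real source file so the failing line can be re-read, and the printed text is only indicative:

    import sys
    import py
    from _pytest.assertion import reinterpret

    py.builtin.builtins.AssertionError = reinterpret.AssertionError
    try:
        x = 1
        assert x == 2
    except AssertionError:
        e = sys.exc_info()[1]
        sys.stderr.write(e.msg + "\n")   # something like "assert 1 == 2"
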
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
new file mode 100644
--- /dev/null
+++ b/_pytest/assertion/rewrite.py
@@ -0,0 +1,340 @@
+"""Rewrite assertion AST to produce nice error messages"""
+
+import ast
+import collections
+import itertools
+import sys
+
+import py
+from _pytest.assertion import util
+
+
+def rewrite_asserts(mod):
+    """Rewrite the assert statements in mod."""
+    AssertionRewriter().run(mod)
+
+
+_saferepr = py.io.saferepr
+from _pytest.assertion.util import format_explanation as _format_explanation
+
+def _format_boolop(operands, explanations, is_or):
+    show_explanations = []
+    for operand, expl in zip(operands, explanations):
+        show_explanations.append(expl)
+        if operand == is_or:
+            break
+    return "(" + (is_or and " or " or " and ").join(show_explanations) + ")"
+
+def _call_reprcompare(ops, results, expls, each_obj):
+    for i, res, expl in zip(range(len(ops)), results, expls):
+        try:
+            done = not res
+        except Exception:
+            done = True
+        if done:
+            break
+    if util._reprcompare is not None:
+        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
+        if custom is not None:
+            return custom
+    return expl
+
+
+unary_map = {
+    ast.Not : "not %s",
+    ast.Invert : "~%s",
+    ast.USub : "-%s",
+    ast.UAdd : "+%s"
+}
+
+binop_map = {
+    ast.BitOr : "|",
+    ast.BitXor : "^",
+    ast.BitAnd : "&",
+    ast.LShift : "<<",
+    ast.RShift : ">>",
+    ast.Add : "+",
+    ast.Sub : "-",
+    ast.Mult : "*",
+    ast.Div : "/",
+    ast.FloorDiv : "//",
+    ast.Mod : "%",
+    ast.Eq : "==",
+    ast.NotEq : "!=",
+    ast.Lt : "<",
+    ast.LtE : "<=",
+    ast.Gt : ">",
+    ast.GtE : ">=",
+    ast.Pow : "**",
+    ast.Is : "is",
+    ast.IsNot : "is not",
+    ast.In : "in",
+    ast.NotIn : "not in"
+}
+
+
+def set_location(node, lineno, col_offset):
+    """Set node location information recursively."""
+    def _fix(node, lineno, col_offset):
+        if "lineno" in node._attributes:
+            node.lineno = lineno
+        if "col_offset" in node._attributes:
+            node.col_offset = col_offset
+        for child in ast.iter_child_nodes(node):
+            _fix(child, lineno, col_offset)
+    _fix(node, lineno, col_offset)
+    return node
+
+
+class AssertionRewriter(ast.NodeVisitor):
+
+    def run(self, mod):
+        """Find all assert statements in *mod* and rewrite them."""
+        if not mod.body:
+            # Nothing to do.
+            return
+        # Insert some special imports at the top of the module but after any
+        # docstrings and __future__ imports.
+        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
+                   ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
+        expect_docstring = True
+        pos = 0
+        lineno = 0
+        for item in mod.body:
+            if (expect_docstring and isinstance(item, ast.Expr) and
+                isinstance(item.value, ast.Str)):
+                doc = item.value.s
+                if "PYTEST_DONT_REWRITE" in doc:
+                    # The module has disabled assertion rewriting.
+                    return
+                lineno += len(doc) - 1
+                expect_docstring = False
+            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 and
+                  item.identifier != "__future__"):
+                lineno = item.lineno
+                break
+            pos += 1
+        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
+                   for alias in aliases]
+        mod.body[pos:pos] = imports
+        # Collect asserts.
+        nodes = collections.deque([mod])
+        while nodes:
+            node = nodes.popleft()
+            for name, field in ast.iter_fields(node):
+                if isinstance(field, list):
+                    new = []
+                    for i, child in enumerate(field):
+                        if isinstance(child, ast.Assert):
+                            # Transform assert.
+                            new.extend(self.visit(child))
+                        else:
+                            new.append(child)
+                            if isinstance(child, ast.AST):
+                                nodes.append(child)
+                    setattr(node, name, new)
+                elif (isinstance(field, ast.AST) and
+                      # Don't recurse into expressions as they can't contain
+                      # asserts.
+                      not isinstance(field, ast.expr)):
+                    nodes.append(field)
+
+    def variable(self):
+        """Get a new variable."""
+        # Use a character invalid in python identifiers to avoid clashing.
+        name = "@py_assert" + str(next(self.variable_counter))
+        self.variables.add(name)
+        return name
+
+    def assign(self, expr):
+        """Give *expr* a name."""
+        name = self.variable()
+        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
+        return ast.Name(name, ast.Load())
+
+    def display(self, expr):
+        """Call py.io.saferepr on the expression."""
+        return self.helper("saferepr", expr)
+
+    def helper(self, name, *args):
+        """Call a helper in this module."""
+        py_name = ast.Name("@pytest_ar", ast.Load())
+        attr = ast.Attribute(py_name, "_" + name, ast.Load())
+        return ast.Call(attr, list(args), [], None, None)
+
+    def builtin(self, name):
+        """Return the builtin called *name*."""
+        builtin_name = ast.Name("@py_builtins", ast.Load())
+        return ast.Attribute(builtin_name, name, ast.Load())
+
+    def explanation_param(self, expr):
+        specifier = "py" + str(next(self.variable_counter))
+        self.explanation_specifiers[specifier] = expr
+        return "%(" + specifier + ")s"
+
+    def push_format_context(self):
+        self.explanation_specifiers = {}
+        self.stack.append(self.explanation_specifiers)
+
+    def pop_format_context(self, expl_expr):
+        current = self.stack.pop()
+        if self.stack:
+            self.explanation_specifiers = self.stack[-1]
+        keys = [ast.Str(key) for key in current.keys()]
+        format_dict = ast.Dict(keys, list(current.values()))
+        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
+        name = "@py_format" + str(next(self.variable_counter))
+        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
+        return ast.Name(name, ast.Load())
+
+    def generic_visit(self, node):
+        """Handle expressions we don't have custom code for."""
+        assert isinstance(node, ast.expr)
+        res = self.assign(node)
+        return res, self.explanation_param(self.display(res))
+
+    def visit_Assert(self, assert_):
+        if assert_.msg:
+            # There's already a message. Don't mess with it.
+            return [assert_]
+        self.statements = []
+        self.variables = set()
+        self.variable_counter = itertools.count()
+        self.stack = []
+        self.on_failure = []
+        self.push_format_context()
+        # Rewrite assert into a bunch of statements.
+        top_condition, explanation = self.visit(assert_.test)
+        # Create failure message.
+        body = self.on_failure
+        negation = ast.UnaryOp(ast.Not(), top_condition)
+        self.statements.append(ast.If(negation, body, []))
+        explanation = "assert " + explanation
+        template = ast.Str(explanation)
+        msg = self.pop_format_context(template)
+        fmt = self.helper("format_explanation", msg)
+        err_name = ast.Name("AssertionError", ast.Load())
+        exc = ast.Call(err_name, [fmt], [], None, None)
+        if sys.version_info[0] >= 3:
+            raise_ = ast.Raise(exc, None)
+        else:
+            raise_ = ast.Raise(exc, None, None)
+        body.append(raise_)
+        # Delete temporary variables.
+        names = [ast.Name(name, ast.Del()) for name in self.variables]
+        if names:
+            delete = ast.Delete(names)
+            self.statements.append(delete)
+        # Fix line numbers.
+        for stmt in self.statements:
+            set_location(stmt, assert_.lineno, assert_.col_offset)
+        return self.statements
+
+    def visit_Name(self, name):
+        # Check if the name is local or not.
+        locs = ast.Call(self.builtin("locals"), [], [], None, None)
+        globs = ast.Call(self.builtin("globals"), [], [], None, None)
+        ops = [ast.In(), ast.IsNot()]
+        test = ast.Compare(ast.Str(name.id), ops, [locs, globs])
+        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
+        return name, self.explanation_param(expr)
+
+    def visit_BoolOp(self, boolop):
+        operands = []
+        explanations = []
+        self.push_format_context()
+        for operand in boolop.values:
+            res, explanation = self.visit(operand)
+            operands.append(res)
+            explanations.append(explanation)
+        expls = ast.Tuple([ast.Str(expl) for expl in explanations], ast.Load())
+        is_or = ast.Num(isinstance(boolop.op, ast.Or))
+        expl_template = self.helper("format_boolop",
+                                    ast.Tuple(operands, ast.Load()), expls,
+                                    is_or)
+        expl = self.pop_format_context(expl_template)
+        res = self.assign(ast.BoolOp(boolop.op, operands))
+        return res, self.explanation_param(expl)
+
+    def visit_UnaryOp(self, unary):
+        pattern = unary_map[unary.op.__class__]
+        operand_res, operand_expl = self.visit(unary.operand)
+        res = self.assign(ast.UnaryOp(unary.op, operand_res))
+        return res, pattern % (operand_expl,)
+
+    def visit_BinOp(self, binop):
+        symbol = binop_map[binop.op.__class__]
+        left_expr, left_expl = self.visit(binop.left)
+        right_expr, right_expl = self.visit(binop.right)
+        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
+        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
+        return res, explanation
+
+    def visit_Call(self, call):
+        new_func, func_expl = self.visit(call.func)
+        arg_expls = []
+        new_args = []
+        new_kwargs = []
+        new_star = new_kwarg = None
+        for arg in call.args:
+            res, expl = self.visit(arg)
+            new_args.append(res)
+            arg_expls.append(expl)
+        for keyword in call.keywords:
+            res, expl = self.visit(keyword.value)
+            new_kwargs.append(ast.keyword(keyword.arg, res))
+            arg_expls.append(keyword.arg + "=" + expl)
+        if call.starargs:
+            new_star, expl = self.visit(call.starargs)
+            arg_expls.append("*" + expl)
+        if call.kwargs:
+            new_kwarg, expl = self.visit(call.kwargs)
+            arg_expls.append("**" + expl)
+        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+        new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
+        res = self.assign(new_call)
+        res_expl = self.explanation_param(self.display(res))
+        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+        return res, outer_expl
+
+    def visit_Attribute(self, attr):
+        if not isinstance(attr.ctx, ast.Load):
+            return self.generic_visit(attr)
+        value, value_expl = self.visit(attr.value)
+        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
+        res_expl = self.explanation_param(self.display(res))
+        pat = "%s\n{%s = %s.%s\n}"
+        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
+        return res, expl
+
+    def visit_Compare(self, comp):
+        self.push_format_context()
+        left_res, left_expl = self.visit(comp.left)
+        res_variables = [self.variable() for i in range(len(comp.ops))]
+        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
+        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
+        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
+        expls = []
+        syms = []
+        results = [left_res]
+        for i, op, next_operand in it:
+            next_res, next_expl = self.visit(next_operand)
+            results.append(next_res)
+            sym = binop_map[op.__class__]
+            syms.append(ast.Str(sym))
+            expl = "%s %s %s" % (left_expl, sym, next_expl)
+            expls.append(ast.Str(expl))
+            res_expr = ast.Compare(left_res, [op], [next_res])
+            self.statements.append(ast.Assign([store_names[i]], res_expr))
+            left_res, left_expl = next_res, next_expl
+        # Use the util._reprcompare hook if it's available.
+        expl_call = self.helper("call_reprcompare",
+                                ast.Tuple(syms, ast.Load()),
+                                ast.Tuple(load_names, ast.Load()),
+                                ast.Tuple(expls, ast.Load()),
+                                ast.Tuple(results, ast.Load()))
+        if len(comp.ops) > 1:
+            res = ast.BoolOp(ast.And(), load_names)
+        else:
+            res = load_names[0]
+        return res, self.explanation_param(self.pop_format_context(expl_call))
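
A minimal sketch (not part of this changeset) of the AST shape that
visit_Compare above works with: a chained comparison carries one operator per
comparator, and the rewriter allocates one temporary per operator so every
intermediate result can be reported.  The names below are illustrative only.

    import ast

    tree = ast.parse("a < b <= c", mode="eval")
    comp = tree.body                      # an ast.Compare node
    assert isinstance(comp, ast.Compare)
    # One temporary (self.variable() in the rewriter) per entry in comp.ops.
    print(len(comp.ops), len(comp.comparators))    # -> 2 2
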
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
new file mode 100644
--- /dev/null
+++ b/_pytest/assertion/util.py
@@ -0,0 +1,213 @@
+"""Utilities for assertion debugging"""
+
+import py
+
+
+# The _reprcompare attribute on the util module is used by the new assertion
+# interpretation code and assertion rewriter to detect this plugin was
+# loaded and in turn call the hooks defined here as part of the
+# DebugInterpreter.
+_reprcompare = None
+
+def format_explanation(explanation):
+    """This formats an explanation
+
+    Normally all embedded newlines are escaped; however, there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended to
+    cover nested explanations; see the function and attribute explanations
+    for examples (visit_Call(), visit_Attribute()).  The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    # simplify 'assert False where False = ...'
+    where = 0
+    while True:
+        start = where = explanation.find("False\n{False = ", where)
+        if where == -1:
+            break
+        level = 0
+        for i, c in enumerate(explanation[start:]):
+            if c == "{":
+                level += 1
+            elif c == "}":
+                level -= 1
+                if not level:
+                    break
+        else:
+            raise AssertionError("unbalanced braces: %r" % (explanation,))
+        end = start + i
+        where = end
+        if explanation[end - 1] == '\n':
+            explanation = (explanation[:start] + explanation[start+15:end-1] +
+                           explanation[end+1:])
+            where -= 17
+    raw_lines = (explanation or '').split('\n')
+    # escape newlines not followed by {, } and ~
+    lines = [raw_lines[0]]
+    for l in raw_lines[1:]:
+        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+            lines.append(l)
+        else:
+            lines[-1] += '\\n' + l
+
+    result = lines[:1]
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith('{'):
+            if stackcnt[-1]:
+                s = 'and   '
+            else:
+                s = 'where '
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
+        elif line.startswith('}'):
+            assert line.startswith('}')
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line.startswith('~')
+            result.append('  '*len(stack) + line[1:])
+    assert len(stack) == 1
+    return '\n'.join(result)
+
+
+# Provide basestring in python3
+try:
+    basestring = basestring
+except NameError:
+    basestring = str
+
+
+def assertrepr_compare(op, left, right):
+    """return specialised explanations for some operators/operands"""
+    width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+    left_repr = py.io.saferepr(left, maxsize=int(width/2))
+    right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
+    summary = '%s %s %s' % (left_repr, op, right_repr)
+
+    issequence = lambda x: isinstance(x, (list, tuple))
+    istext = lambda x: isinstance(x, basestring)
+    isdict = lambda x: isinstance(x, dict)
+    isset = lambda x: isinstance(x, set)
+
+    explanation = None
+    try:
+        if op == '==':
+            if istext(left) and istext(right):
+                explanation = _diff_text(left, right)
+            elif issequence(left) and issequence(right):
+                explanation = _compare_eq_sequence(left, right)
+            elif isset(left) and isset(right):
+                explanation = _compare_eq_set(left, right)
+            elif isdict(left) and isdict(right):
+                explanation = _diff_text(py.std.pprint.pformat(left),
+                                         py.std.pprint.pformat(right))
+        elif op == 'not in':
+            if istext(left) and istext(right):
+                explanation = _notin_text(left, right)
+    except py.builtin._sysex:
+        raise
+    except:
+        excinfo = py.code.ExceptionInfo()
+        explanation = ['(pytest_assertion plugin: representation of '
+            'details failed. Probably an object has a faulty __repr__.)',
+            str(excinfo)
+            ]
+
+
+    if not explanation:
+        return None
+
+    # Don't include pageloads of data, should be configurable
+    if len(''.join(explanation)) > 80*8:
+        explanation = ['Detailed information too verbose, truncated']
+
+    return [summary] + explanation
+
+
+def _diff_text(left, right):
+    """Return the explanation for the diff between text
+
+    This will skip leading and trailing characters which are
+    identical to keep the diff minimal.
+    """
+    explanation = []
+    i = 0 # just in case left or right has zero length
+    for i in range(min(len(left), len(right))):
+        if left[i] != right[i]:
+            break
+    if i > 42:
+        i -= 10                 # Provide some context
+        explanation = ['Skipping %s identical '
+                       'leading characters in diff' % i]
+        left = left[i:]
+        right = right[i:]
+    if len(left) == len(right):
+        for i in range(len(left)):
+            if left[-i] != right[-i]:
+                break
+        if i > 42:
+            i -= 10     # Provide some context
+            explanation += ['Skipping %s identical '
+                            'trailing characters in diff' % i]
+            left = left[:-i]
+            right = right[:-i]
+    explanation += [line.strip('\n')
+                    for line in py.std.difflib.ndiff(left.splitlines(),
+                                                     right.splitlines())]
+    return explanation
+
+
+def _compare_eq_sequence(left, right):
+    explanation = []
+    for i in range(min(len(left), len(right))):
+        if left[i] != right[i]:
+            explanation += ['At index %s diff: %r != %r' %
+                            (i, left[i], right[i])]
+            break
+    if len(left) > len(right):
+        explanation += ['Left contains more items, '
+            'first extra item: %s' % py.io.saferepr(left[len(right)],)]
+    elif len(left) < len(right):
+        explanation += ['Right contains more items, '
+            'first extra item: %s' % py.io.saferepr(right[len(left)],)]
+    return explanation # + _diff_text(py.std.pprint.pformat(left),
+                       #             py.std.pprint.pformat(right))
+
+
+def _compare_eq_set(left, right):
+    explanation = []
+    diff_left = left - right
+    diff_right = right - left
+    if diff_left:
+        explanation.append('Extra items in the left set:')
+        for item in diff_left:
+            explanation.append(py.io.saferepr(item))
+    if diff_right:
+        explanation.append('Extra items in the right set:')
+        for item in diff_right:
+            explanation.append(py.io.saferepr(item))
+    return explanation
+
+
+def _notin_text(term, text):
+    index = text.find(term)
+    head = text[:index]
+    tail = text[index+len(term):]
+    correct_text = head + tail
+    diff = _diff_text(correct_text, text)
+    newdiff = ['%s is contained here:' % py.io.saferepr(term, maxsize=42)]
+    for line in diff:
+        if line.startswith('Skipping'):
+            continue
+        if line.startswith('- '):
+            continue
+        if line.startswith('+ '):
+            newdiff.append('  ' + line[2:])
+        else:
+            newdiff.append(line)
+    return newdiff
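
A usage sketch (not part of this changeset) of the \n{ / \n} / \n~ convention
described in format_explanation() above, assuming the new
_pytest.assertion.util module is importable; the explanation string is a
hand-written stand-in for what the rewriter emits:

    from _pytest.assertion.util import format_explanation

    expl = "x\n{x = f(1)\n} == 4"        # nested sub-explanation for f(1)
    print(format_explanation(expl))
    # x == 4
    #  +  where x = f(1)
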
diff --git a/_pytest/doctest.py b/_pytest/doctest.py
--- a/_pytest/doctest.py
+++ b/_pytest/doctest.py
@@ -59,7 +59,7 @@
                 inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
                 lines += ["UNEXPECTED EXCEPTION: %s" %
                             repr(inner_excinfo.value)]
-
+                lines += py.std.traceback.format_exception(*excinfo.value.exc_info)
             return ReprFailDoctest(reprlocation, lines)
         else:
             return super(DoctestItem, self).repr_failure(excinfo)
diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py
--- a/_pytest/helpconfig.py
+++ b/_pytest/helpconfig.py
@@ -16,9 +16,6 @@
     group.addoption('--traceconfig',
                action="store_true", dest="traceconfig", default=False,
                help="trace considerations of conftest.py files."),
-    group._addoption('--nomagic',
-               action="store_true", dest="nomagic", default=False,
-               help="don't reinterpret asserts, no traceback cutting. ")
     group.addoption('--debug',
                action="store_true", dest="debug", default=False,
                help="generate and show internal debugging information.")
diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py
--- a/_pytest/junitxml.py
+++ b/_pytest/junitxml.py
@@ -65,7 +65,8 @@
 
 class LogXML(object):
     def __init__(self, logfile, prefix):
-        self.logfile = logfile
+        logfile = os.path.expanduser(os.path.expandvars(logfile))
+        self.logfile = os.path.normpath(logfile)
         self.prefix = prefix
         self.test_logs = []
         self.passed = self.skipped = 0
@@ -76,7 +77,7 @@
         names = report.nodeid.split("::")
         names[0] = names[0].replace("/", '.')
         names = tuple(names)
-        d = {'time': self._durations.pop(names, "0")}
+        d = {'time': self._durations.pop(report.nodeid, "0")}
         names = [x.replace(".py", "") for x in names if x != "()"]
         classnames = names[:-1]
         if self.prefix:
@@ -170,12 +171,11 @@
             self.append_skipped(report)
 
     def pytest_runtest_call(self, item, __multicall__):
-        names = tuple(item.listnames())
         start = time.time()
         try:
             return __multicall__.execute()
         finally:
-            self._durations[names] = time.time() - start
+            self._durations[item.nodeid] = time.time() - start
 
     def pytest_collectreport(self, report):
         if not report.passed:
diff --git a/_pytest/main.py b/_pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -46,23 +46,25 @@
 
 
 def pytest_namespace():
-    return dict(collect=dict(Item=Item, Collector=Collector, File=File))
+    collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
+    return dict(collect=collect)
         
 def pytest_configure(config):
     py.test.config = config # compatibility
     if config.option.exitfirst:
         config.option.maxfail = 1
 
-def pytest_cmdline_main(config):
-    """ default command line protocol for initialization, session,
-    running tests and reporting. """
+def wrap_session(config, doit):
+    """Skeleton command line program"""
     session = Session(config)
     session.exitstatus = EXIT_OK
+    initstate = 0
     try:
         config.pluginmanager.do_configure(config)
+        initstate = 1
         config.hook.pytest_sessionstart(session=session)
-        config.hook.pytest_collection(session=session)
-        config.hook.pytest_runtestloop(session=session)
+        initstate = 2
+        doit(config, session)
     except pytest.UsageError:
         raise
     except KeyboardInterrupt:
@@ -77,18 +79,24 @@
             sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
     if not session.exitstatus and session._testsfailed:
         session.exitstatus = EXIT_TESTSFAILED
-    config.hook.pytest_sessionfinish(session=session,
-        exitstatus=session.exitstatus)
-    config.pluginmanager.do_unconfigure(config)
+    if initstate >= 2:
+        config.hook.pytest_sessionfinish(session=session,
+            exitstatus=session.exitstatus)
+    if initstate >= 1:
+        config.pluginmanager.do_unconfigure(config)
     return session.exitstatus
 
+def pytest_cmdline_main(config):
+    return wrap_session(config, _main)
+
+def _main(config, session):
+    """ default command line protocol for initialization, session,
+    running tests and reporting. """
+    config.hook.pytest_collection(session=session)
+    config.hook.pytest_runtestloop(session=session)
+
 def pytest_collection(session):
-    session.perform_collect()
-    hook = session.config.hook
-    hook.pytest_collection_modifyitems(session=session,
-        config=session.config, items=session.items)
-    hook.pytest_collection_finish(session=session)
-    return True
+    return session.perform_collect()
 
 def pytest_runtestloop(session):
     if session.config.option.collectonly:
@@ -374,6 +382,16 @@
         return HookProxy(fspath, self.config)
 
     def perform_collect(self, args=None, genitems=True):
+        hook = self.config.hook
+        try:
+            items = self._perform_collect(args, genitems)
+            hook.pytest_collection_modifyitems(session=self,
+                config=self.config, items=items)
+        finally:
+            hook.pytest_collection_finish(session=self)
+        return items
+
+    def _perform_collect(self, args, genitems):
         if args is None:
             args = self.config.args
         self.trace("perform_collect", self, args)
diff --git a/_pytest/mark.py b/_pytest/mark.py
--- a/_pytest/mark.py
+++ b/_pytest/mark.py
@@ -153,7 +153,7 @@
 
     def __repr__(self):
         return "<MarkInfo %r args=%r kwargs=%r>" % (
-                self._name, self.args, self.kwargs)
+                self.name, self.args, self.kwargs)
 
 def pytest_itemcollected(item):
     if not isinstance(item, pytest.Function):
diff --git a/_pytest/pytester.py b/_pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -6,7 +6,7 @@
 import inspect
 import time
 from fnmatch import fnmatch
-from _pytest.main import Session
+from _pytest.main import Session, EXIT_OK
 from py.builtin import print_
 from _pytest.core import HookRelay
 
@@ -292,13 +292,19 @@
         assert '::' not in str(arg)
         p = py.path.local(arg)
         x = session.fspath.bestrelpath(p)
-        return session.perform_collect([x], genitems=False)[0]
+        config.hook.pytest_sessionstart(session=session)
+        res = session.perform_collect([x], genitems=False)[0]
+        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+        return res
 
     def getpathnode(self, path):
-        config = self.parseconfig(path)
+        config = self.parseconfigure(path)
         session = Session(config)
         x = session.fspath.bestrelpath(path)
-        return session.perform_collect([x], genitems=False)[0]
+        config.hook.pytest_sessionstart(session=session)
+        res = session.perform_collect([x], genitems=False)[0]
+        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+        return res
 
     def genitems(self, colitems):
         session = colitems[0].session
@@ -312,7 +318,9 @@
         config = self.parseconfigure(*args)
         rec = self.getreportrecorder(config)
         session = Session(config)
+        config.hook.pytest_sessionstart(session=session)
         session.perform_collect()
+        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
         return session.items, rec
 
     def runitem(self, source):
@@ -382,6 +390,8 @@
             c.basetemp = py.path.local.make_numbered_dir(prefix="reparse",
                 keep=0, rootdir=self.tmpdir, lock_timeout=None)
             c.parse(args)
+            c.pluginmanager.do_configure(c)
+            self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c))
             return c
         finally:
             py.test.config = oldconfig
diff --git a/_pytest/python.py b/_pytest/python.py
--- a/_pytest/python.py
+++ b/_pytest/python.py
@@ -226,8 +226,13 @@
 
     def _importtestmodule(self):
         # we assume we are only called once per module
+        from _pytest import assertion
+        assertion.before_module_import(self)
         try:
-            mod = self.fspath.pyimport(ensuresyspath=True)
+            try:
+                mod = self.fspath.pyimport(ensuresyspath=True)
+            finally:
+                assertion.after_module_import(self)
         except SyntaxError:
             excinfo = py.code.ExceptionInfo()
             raise self.CollectError(excinfo.getrepr(style="short"))
@@ -374,7 +379,7 @@
         # test generators are seen as collectors but they also
         # invoke setup/teardown on popular request
         # (induced by the common "test_*" naming shared with normal tests)
-        self.config._setupstate.prepare(self)
+        self.session._setupstate.prepare(self)
         # see FunctionMixin.setup and test_setupstate_is_preserved_134
         self._preservedparent = self.parent.obj
         l = []
@@ -721,7 +726,7 @@
 
     def _addfinalizer(self, finalizer, scope):
         colitem = self._getscopeitem(scope)
-        self.config._setupstate.addfinalizer(
+        self._pyfuncitem.session._setupstate.addfinalizer(
             finalizer=finalizer, colitem=colitem)
 
     def __repr__(self):
@@ -742,8 +747,10 @@
         raise self.LookupError(msg)
 
 def showfuncargs(config):
-    from _pytest.main import Session
-    session = Session(config)
+    from _pytest.main import wrap_session
+    return wrap_session(config, _showfuncargs_main)
+
+def _showfuncargs_main(config, session):
     session.perform_collect()
     if session.items:
         plugins = session.items[0].getplugins()
diff --git a/_pytest/runner.py b/_pytest/runner.py
--- a/_pytest/runner.py
+++ b/_pytest/runner.py
@@ -14,17 +14,15 @@
 #
 # pytest plugin hooks
 
-# XXX move to pytest_sessionstart and fix py.test owns tests
-def pytest_configure(config):
-    config._setupstate = SetupState()
+def pytest_sessionstart(session):
+    session._setupstate = SetupState()
 
 def pytest_sessionfinish(session, exitstatus):
-    if hasattr(session.config, '_setupstate'):
-        hook = session.config.hook
-        rep = hook.pytest__teardown_final(session=session)
-        if rep:
-            hook.pytest__teardown_final_logerror(session=session, report=rep)
-            session.exitstatus = 1
+    hook = session.config.hook
+    rep = hook.pytest__teardown_final(session=session)
+    if rep:
+        hook.pytest__teardown_final_logerror(session=session, report=rep)
+        session.exitstatus = 1
 
 class NodeInfo:
     def __init__(self, location):
@@ -46,16 +44,16 @@
     return reports
 
 def pytest_runtest_setup(item):
-    item.config._setupstate.prepare(item)
+    item.session._setupstate.prepare(item)
 
 def pytest_runtest_call(item):
     item.runtest()
 
 def pytest_runtest_teardown(item):
-    item.config._setupstate.teardown_exact(item)
+    item.session._setupstate.teardown_exact(item)
 
 def pytest__teardown_final(session):
-    call = CallInfo(session.config._setupstate.teardown_all, when="teardown")
+    call = CallInfo(session._setupstate.teardown_all, when="teardown")
     if call.excinfo:
         ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
         call.excinfo.traceback = ntraceback.filter()
diff --git a/lib-python/modified-2.7/distutils/cygwinccompiler.py b/lib-python/modified-2.7/distutils/cygwinccompiler.py
--- a/lib-python/modified-2.7/distutils/cygwinccompiler.py
+++ b/lib-python/modified-2.7/distutils/cygwinccompiler.py
@@ -75,6 +75,9 @@
         elif msc_ver == '1500':
             # VS2008 / MSVC 9.0
             return ['msvcr90']
+        elif msc_ver == '1600':
+            # VS2010 / MSVC 10.0
+            return ['msvcr100']
         else:
             raise ValueError("Unknown MS Compiler version %s " % msc_ver)
 
diff --git a/lib-python/modified-2.7/opcode.py b/lib-python/modified-2.7/opcode.py
--- a/lib-python/modified-2.7/opcode.py
+++ b/lib-python/modified-2.7/opcode.py
@@ -189,7 +189,6 @@
 def_op('MAP_ADD', 147)
 
 # pypy modification, experimental bytecode
-def_op('CALL_LIKELY_BUILTIN', 200)    # #args + (#kwargs << 8)
 def_op('LOOKUP_METHOD', 201)          # Index in name list
 hasname.append(201)
 def_op('CALL_METHOD', 202)            # #args not including 'self'
diff --git a/lib-python/modified-2.7/pickle.py b/lib-python/modified-2.7/pickle.py
--- a/lib-python/modified-2.7/pickle.py
+++ b/lib-python/modified-2.7/pickle.py
@@ -873,7 +873,7 @@
 
 # Unpickling machinery
 
-class Unpickler:
+class Unpickler(object):
 
     def __init__(self, file):
         """This takes a file-like object for reading a pickle data stream.
diff --git a/lib-python/modified-2.7/test/test_descr.py b/lib-python/modified-2.7/test/test_descr.py
--- a/lib-python/modified-2.7/test/test_descr.py
+++ b/lib-python/modified-2.7/test/test_descr.py
@@ -4399,13 +4399,10 @@
         self.assertTrue(l.__add__ != [5].__add__)
         self.assertTrue(l.__add__ != l.__mul__)
         self.assertTrue(l.__add__.__name__ == '__add__')
-        if hasattr(l.__add__, '__self__'):
-            # CPython
-            self.assertTrue(l.__add__.__self__ is l)
+        self.assertTrue(l.__add__.__self__ is l)
+        if hasattr(l.__add__, '__objclass__'):   # CPython
             self.assertTrue(l.__add__.__objclass__ is list)
-        else:
-            # Python implementations where [].__add__ is a normal bound method
-            self.assertTrue(l.__add__.im_self is l)
+        else:                                    # PyPy
             self.assertTrue(l.__add__.im_class is list)
         self.assertEqual(l.__add__.__doc__, list.__add__.__doc__)
         try:
diff --git a/lib-python/modified-2.7/test/test_dis.py b/lib-python/modified-2.7/test/test_dis.py
deleted file mode 100644
--- a/lib-python/modified-2.7/test/test_dis.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Minimal tests for dis module
-
-from test.test_support import run_unittest
-import unittest
-import sys
-import dis
-import StringIO
-
-
-def _f(a):
-    print a
-    return 1
-
-dis_f = """\
- %-4d         0 LOAD_FAST                0 (a)
-              3 PRINT_ITEM
-              4 PRINT_NEWLINE
-
- %-4d         5 LOAD_CONST               1 (1)
-              8 RETURN_VALUE
-"""%(_f.func_code.co_firstlineno + 1,
-     _f.func_code.co_firstlineno + 2)
-
-
-# we "call" rangexxx() instead of range() to disable the
-# pypy optimization that turns it into CALL_LIKELY_BUILTIN.
-def bug708901():
-    for res in rangexxx(1,
-                        10):
-        pass
-
-dis_bug708901 = """\
- %-4d         0 SETUP_LOOP              23 (to 26)
-              3 LOAD_GLOBAL              0 (rangexxx)
-              6 LOAD_CONST               1 (1)
-
- %-4d         9 LOAD_CONST               2 (10)
-             12 CALL_FUNCTION            2
-             15 GET_ITER
-        >>   16 FOR_ITER                 6 (to 25)
-             19 STORE_FAST               0 (res)
-
- %-4d        22 JUMP_ABSOLUTE           16
-        >>   25 POP_BLOCK
-        >>   26 LOAD_CONST               0 (None)
-             29 RETURN_VALUE
-"""%(bug708901.func_code.co_firstlineno + 1,
-     bug708901.func_code.co_firstlineno + 2,
-     bug708901.func_code.co_firstlineno + 3)
-
-
-def bug1333982(x=[]):
-    assert 0, ([s for s in x] +
-              1)
-    pass
-
-dis_bug1333982 = """\
- %-4d         0 LOAD_CONST               1 (0)
-              3 POP_JUMP_IF_TRUE        38
-              6 LOAD_GLOBAL              0 (AssertionError)
-              9 BUILD_LIST               0
-             12 LOAD_FAST                0 (x)
-             15 GET_ITER
-        >>   16 FOR_ITER                12 (to 31)
-             19 STORE_FAST               1 (s)
-             22 LOAD_FAST                1 (s)
-             25 LIST_APPEND              2
-             28 JUMP_ABSOLUTE           16
-
- %-4d   >>   31 LOAD_CONST               2 (1)
-             34 BINARY_ADD
-             35 RAISE_VARARGS            2
-
- %-4d   >>   38 LOAD_CONST               0 (None)
-             41 RETURN_VALUE
-"""%(bug1333982.func_code.co_firstlineno + 1,
-     bug1333982.func_code.co_firstlineno + 2,
-     bug1333982.func_code.co_firstlineno + 3)
-
-_BIG_LINENO_FORMAT = """\
-%3d           0 LOAD_GLOBAL              0 (spam)
-              3 POP_TOP
-              4 LOAD_CONST               0 (None)
-              7 RETURN_VALUE
-"""
-
-class DisTests(unittest.TestCase):
-    def do_disassembly_test(self, func, expected):
-        s = StringIO.StringIO()
-        save_stdout = sys.stdout
-        sys.stdout = s
-        dis.dis(func)
-        sys.stdout = save_stdout
-        got = s.getvalue()
-        # Trim trailing blanks (if any).
-        lines = got.split('\n')
-        lines = [line.rstrip() for line in lines]
-        expected = expected.split("\n")
-        import difflib
-        if expected != lines:
-            self.fail(
-                "events did not match expectation:\n" +
-                "\n".join(difflib.ndiff(expected,
-                                        lines)))
-
-    def test_opmap(self):
-        self.assertEqual(dis.opmap["STOP_CODE"], 0)
-        self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst)
-        self.assertIn(dis.opmap["STORE_NAME"], dis.hasname)
-
-    def test_opname(self):
-        self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST")
-
-    def test_boundaries(self):
-        self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG)
-        self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT)
-
-    def test_dis(self):
-        self.do_disassembly_test(_f, dis_f)
-
-    def test_bug_708901(self):
-        self.do_disassembly_test(bug708901, dis_bug708901)
-
-    def test_bug_1333982(self):
-        # This one is checking bytecodes generated for an `assert` statement,
-        # so fails if the tests are run with -O.  Skip this test then.
-        if __debug__:
-            self.do_disassembly_test(bug1333982, dis_bug1333982)
-
-    def test_big_linenos(self):
-        def func(count):
-            namespace = {}
-            func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"])
-            exec func in namespace
-            return namespace['foo']
-
-        # Test all small ranges
-        for i in xrange(1, 300):
-            expected = _BIG_LINENO_FORMAT % (i + 2)
-            self.do_disassembly_test(func(i), expected)
-
-        # Test some larger ranges too
-        for i in xrange(300, 5000, 10):
-            expected = _BIG_LINENO_FORMAT % (i + 2)
-            self.do_disassembly_test(func(i), expected)
-
-def test_main():
-    run_unittest(DisTests)
-
-
-if __name__ == "__main__":
-    test_main()
diff --git a/lib-python/modified-2.7/test/test_extcall.py b/lib-python/modified-2.7/test/test_extcall.py
--- a/lib-python/modified-2.7/test/test_extcall.py
+++ b/lib-python/modified-2.7/test/test_extcall.py
@@ -299,7 +299,7 @@
         def f(a):
             return a
         self.assertEqual(f(**{u'a': 4}), 4)
-        self.assertRaises(TypeError, lambda: f(**{u'stören': 4}))
+        self.assertRaises(TypeError, f, **{u'stören': 4})
         self.assertRaises(TypeError, f, **{u'someLongString':2})
         try:
             f(a=4, **{u'a': 4})
diff --git a/lib-python/modified-2.7/test/test_weakref.py b/lib-python/modified-2.7/test/test_weakref.py
--- a/lib-python/modified-2.7/test/test_weakref.py
+++ b/lib-python/modified-2.7/test/test_weakref.py
@@ -993,13 +993,13 @@
         self.assertTrue(len(weakdict) == 2)
         k, v = weakdict.popitem()
         self.assertTrue(len(weakdict) == 1)
-        if k is key1:
+        if k == key1:
             self.assertTrue(v is value1)
         else:
             self.assertTrue(v is value2)
         k, v = weakdict.popitem()
         self.assertTrue(len(weakdict) == 0)
-        if k is key1:
+        if k == key1:
             self.assertTrue(v is value1)
         else:
             self.assertTrue(v is value2)
diff --git a/lib_pypy/_ctypes/__init__.py b/lib_pypy/_ctypes/__init__.py
--- a/lib_pypy/_ctypes/__init__.py
+++ b/lib_pypy/_ctypes/__init__.py
@@ -18,7 +18,16 @@
 if _os.name in ("nt", "ce"):
     from _rawffi import FormatError
     from _rawffi import check_HRESULT as _check_HRESULT
-    CopyComPointer = None # XXX
+
+    def CopyComPointer(src, dst):
+        from ctypes import c_void_p, cast
+        if src:
+            hr = src[0][0].AddRef(src)
+            if hr & 0x80000000:
+                return hr
+        dst[0] = cast(src, c_void_p).value
+        return 0
+
     LoadLibrary = dlopen
 
 from _rawffi import FUNCFLAG_STDCALL, FUNCFLAG_CDECL, FUNCFLAG_PYTHONAPI
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -139,7 +139,10 @@
         return buffer(self._buffer)
 
     def _get_b_base(self):
-        return self._base
+        try:
+            return self._base
+        except AttributeError:
+            return None
     _b_base_ = property(_get_b_base)
     _b_needsfree_ = False
 
@@ -218,5 +221,7 @@
     'z' : _ffi.types.void_p,
     'O' : _ffi.types.void_p,
     'Z' : _ffi.types.void_p,
+    'X' : _ffi.types.void_p,
+    'v' : _ffi.types.sshort,
     }
 
diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
--- a/lib_pypy/_ctypes/function.py
+++ b/lib_pypy/_ctypes/function.py
@@ -322,20 +322,18 @@
                           RuntimeWarning, stacklevel=2)
 
         if self._com_index:
-            assert False, 'TODO2'
             from ctypes import cast, c_void_p, POINTER
             if not args:
                 raise ValueError(
                     "native COM method call without 'this' parameter"
                     )
-            thisarg = cast(args[0], POINTER(POINTER(c_void_p))).contents
-            argtypes = [c_void_p] + list(argtypes)
-            args = list(args)
-            args[0] = args[0].value
+            thisarg = cast(args[0], POINTER(POINTER(c_void_p)))
+            newargs, argtypes, outargs = self._convert_args(argtypes, args[1:], kwargs)
+            newargs.insert(0, args[0].value)
+            argtypes.insert(0, c_void_p)
         else:
             thisarg = None
-            
-        newargs, argtypes, outargs = self._convert_args(argtypes, args, kwargs)
+            newargs, argtypes, outargs = self._convert_args(argtypes, args, kwargs)
 
         funcptr = self._getfuncptr(argtypes, self._restype_, thisarg)
         result = self._call_funcptr(funcptr, *newargs)
@@ -343,6 +341,11 @@
 
         if not outargs:
             return result
+
+        simple_cdata = type(c_void_p()).__bases__[0]
+        outargs = [x.value if type(x).__bases__[0] is simple_cdata else x
+                   for x in outargs]
+
         if len(outargs) == 1:
             return outargs[0]
         return tuple(outargs)
@@ -398,10 +401,10 @@
             # extract the address from the object's virtual table
             if not thisarg:
                 raise ValueError("COM method call without VTable")
-            ptr = thisarg[self._com_index - 0x1000]
-            argshapes = [arg._ffiargshape for arg in argtypes]
-            resshape = restype._ffiargshape
-            return _rawffi.FuncPtr(ptr, argshapes, resshape, self._flags_)
+            ptr = thisarg[0][self._com_index - 0x1000]
+            ffiargs = [argtype.get_ffi_argtype() for argtype in argtypes]
+            ffires = restype.get_ffi_argtype()
+            return _ffi.FuncPtr.fromaddr(ptr, '', ffiargs, ffires)
         
         cdll = self.dll._handle
         try:
@@ -468,11 +471,7 @@
         newargtypes = []
         total = len(args)
         paramflags = self._paramflags
-
-        if self._com_index:
-            inargs_idx = 1
-        else:
-            inargs_idx = 0
+        inargs_idx = 0
 
         if not paramflags and total < len(argtypes):
             raise TypeError("not enough arguments")
@@ -587,13 +586,7 @@
 
         retval = None
 
-        if self._com_index:
-            if resbuffer[0] & 0x80000000:
-                raise get_com_error(resbuffer[0],
-                                    self._com_iid, argsandobjs[0])
-            else:
-                retval = int(resbuffer[0])
-        elif restype is not None:
+        if restype is not None:
             checker = getattr(self.restype, '_check_retval_', None)
             if checker:
                 val = restype(result)
@@ -601,7 +594,13 @@
                 # classes defining a new type, and their subclasses
                 if '_type_' in restype.__dict__:
                     val = val.value
-                retval = checker(val)
+                # XXX Raise a COMError when restype is HRESULT and
+                # checker(val) fails.  How to check for restype == HRESULT?
+                if self._com_index:
+                    if result & 0x80000000:
+                        raise get_com_error(result, None, None)
+                else:
+                    retval = checker(val)
             elif not isinstance(restype, _CDataMeta):
                 retval = restype(result)
             else:
diff --git a/lib_pypy/_ctypes/primitive.py b/lib_pypy/_ctypes/primitive.py
--- a/lib_pypy/_ctypes/primitive.py
+++ b/lib_pypy/_ctypes/primitive.py
@@ -216,10 +216,15 @@
             result.value = property(_getvalue, _setvalue)
 
         elif tp == 'X':
-            from ctypes import windll
-            SysAllocStringLen = windll.oleaut32.SysAllocStringLen
-            SysStringLen = windll.oleaut32.SysStringLen
-            SysFreeString = windll.oleaut32.SysFreeString
+            from ctypes import WinDLL
+            # Use WinDLL("oleaut32") instead of windll.oleaut32
+            # because the latter is a shared (cached) object, and
+            # other code may set its own restypes on it.  We need our
+            # own restype here.
+            oleaut32 = WinDLL("oleaut32")
+            SysAllocStringLen = oleaut32.SysAllocStringLen
+            SysStringLen = oleaut32.SysStringLen
+            SysFreeString = oleaut32.SysFreeString
             def _getvalue(self):
                 addr = self._buffer[0]
                 if addr == 0:
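
The WinDLL("oleaut32") comment above is about prototype isolation:
windll.oleaut32 is a shared, cached loader object, so configuring restype on
its functions would leak into unrelated callers.  A Windows-only sketch (not
part of this changeset) of the distinction:

    from ctypes import WinDLL, windll, c_void_p   # Windows only

    private = WinDLL("oleaut32")                  # a fresh, private instance
    private.SysAllocStringLen.restype = c_void_p  # affects only this instance
    # windll.oleaut32.SysAllocStringLen keeps its default restype for any
    # other code that goes through the shared loader cache.
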
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -275,7 +275,8 @@
     return unicode(x, 'utf-8')
 
 class Connection(object):
-    def __init__(self, database, isolation_level="", detect_types=0, timeout=None, cached_statements=None, factory=None):
+    def __init__(self, database, timeout=5.0, detect_types=0, isolation_level="",
+                 check_same_thread=True, factory=None, cached_statements=100):
         self.db = c_void_p()
         if sqlite.sqlite3_open(database, byref(self.db)) != SQLITE_OK:
             raise OperationalError("Could not open database")
@@ -308,7 +309,8 @@
         self._aggregates = {}
         self.aggregate_instances = {}
         self._collations = {}
-        self.thread_ident = thread_get_ident()
+        if check_same_thread:
+            self.thread_ident = thread_get_ident()
 
     def _get_exception(self, error_code = None):
         if error_code is None:
diff --git a/lib_pypy/pwd.py b/lib_pypy/pwd.py
--- a/lib_pypy/pwd.py
+++ b/lib_pypy/pwd.py
@@ -16,6 +16,7 @@
 
 from ctypes_support import standard_c_lib as libc
 from ctypes import Structure, POINTER, c_int, c_char_p, c_long
+from _structseq import structseqtype, structseqfield
 
 try: from __pypy__ import builtinify
 except ImportError: builtinify = lambda f: f
@@ -68,7 +69,7 @@
             yield self.pw_dir
             yield self.pw_shell
 
-class struct_passwd(tuple):
+class struct_passwd:
     """
     pwd.struct_passwd: Results from getpw*() routines.
 
@@ -76,15 +77,15 @@
       (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
     or via the object attributes as named in the above tuple.
     """
-    def __init__(self, passwd):
-        self.pw_name = passwd.pw_name
-        self.pw_passwd = passwd.pw_passwd
-        self.pw_uid = passwd.pw_uid
-        self.pw_gid = passwd.pw_gid
-        self.pw_gecos = passwd.pw_gecos
-        self.pw_dir = passwd.pw_dir
-        self.pw_shell = passwd.pw_shell
-        tuple.__init__(self, passwd)
+    __metaclass__ = structseqtype
+    name = "pwd.struct_passwd"
+    pw_name = structseqfield(0)
+    pw_passwd = structseqfield(1)
+    pw_uid = structseqfield(2)
+    pw_gid = structseqfield(3)
+    pw_gecos = structseqfield(4)
+    pw_dir = structseqfield(5)
+    pw_shell = structseqfield(6)
 
 passwd_p = POINTER(passwd)
 
diff --git a/lib_pypy/resource.py b/lib_pypy/resource.py
--- a/lib_pypy/resource.py
+++ b/lib_pypy/resource.py
@@ -7,7 +7,7 @@
 
 from ctypes_support import standard_c_lib as libc
 from ctypes_support import get_errno
-from ctypes import Structure, c_int, c_long, byref, sizeof
+from ctypes import Structure, c_int, c_long, byref, sizeof, POINTER
 from errno import EINVAL, EPERM
 import _structseq
 
@@ -25,6 +25,8 @@
 _setrlimit = libc.setrlimit
 try:
     _getpagesize = libc.getpagesize
+    _getpagesize.argtypes = ()
+    _getpagesize.restype = c_int
 except AttributeError:
     from os import sysconf
     _getpagesize = None
@@ -61,6 +63,10 @@
         ("ru_nivcsw", c_long),
     )
 
+_getrusage.argtypes = (c_int, POINTER(_struct_rusage))
+_getrusage.restype = c_int
+
+
 class struct_rusage:
     __metaclass__ = _structseq.structseqtype
 
@@ -94,6 +100,12 @@
         ("rlim_max", rlim_t),
     )
 
+_getrlimit.argtypes = (c_int, POINTER(rlimit))
+_getrlimit.restype = c_int
+_setrlimit.argtypes = (c_int, POINTER(rlimit))
+_setrlimit.restype = c_int
+
+
 @builtinify
 def getrusage(who):
     ru = _struct_rusage()
diff --git a/py/__init__.py b/py/__init__.py
--- a/py/__init__.py
+++ b/py/__init__.py
@@ -8,7 +8,7 @@
 
 (c) Holger Krekel and others, 2004-2010
 """
-__version__ = '1.4.3'
+__version__ = '1.4.4.dev1'
 
 from py import _apipkg
 
@@ -70,10 +70,6 @@
         'getrawcode'        : '._code.code:getrawcode',
         'patch_builtins'    : '._code.code:patch_builtins',
         'unpatch_builtins'  : '._code.code:unpatch_builtins',
-        '_AssertionError'   : '._code.assertion:AssertionError',
-        '_reinterpret_old'  : '._code.assertion:reinterpret_old',
-        '_reinterpret'      : '._code.assertion:reinterpret',
-        '_reprcompare'      : '._code.assertion:_reprcompare',
     },
 
     # backports and additions of builtins
diff --git a/py/_code/_assertionnew.py b/py/_code/_assertionnew.py
deleted file mode 100644
--- a/py/_code/_assertionnew.py
+++ /dev/null
@@ -1,339 +0,0 @@
-"""
-Find intermediate evalutation results in assert statements through builtin AST.
-This should replace _assertionold.py eventually.
-"""
-
-import sys
-import ast
-
-import py
-from py._code.assertion import _format_explanation, BuiltinAssertionError
-
-
-if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
-    # See http://bugs.jython.org/issue1497
-    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
-              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
-              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
-              "List", "Tuple")
-    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
-              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
-              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
-              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
-    _expr_nodes = set(getattr(ast, name) for name in _exprs)
-    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
-    def _is_ast_expr(node):
-        return node.__class__ in _expr_nodes
-    def _is_ast_stmt(node):
-        return node.__class__ in _stmt_nodes
-else:
-    def _is_ast_expr(node):
-        return isinstance(node, ast.expr)
-    def _is_ast_stmt(node):
-        return isinstance(node, ast.stmt)
-
-
-class Failure(Exception):
-    """Error found while interpreting AST."""
-
-    def __init__(self, explanation=""):
-        self.cause = sys.exc_info()
-        self.explanation = explanation
-
-
-def interpret(source, frame, should_fail=False):
-    mod = ast.parse(source)
-    visitor = DebugInterpreter(frame)
-    try:
-        visitor.visit(mod)
-    except Failure:
-        failure = sys.exc_info()[1]
-        return getfailure(failure)
-    if should_fail:
-        return ("(assertion failed, but when it was re-run for "
-                "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --no-assert)")
-
-def run(offending_line, frame=None):
-    if frame is None:
-        frame = py.code.Frame(sys._getframe(1))
-    return interpret(offending_line, frame)
-
-def getfailure(failure):
-    explanation = _format_explanation(failure.explanation)
-    value = failure.cause[1]
-    if str(value):
-        lines = explanation.splitlines()
-        if not lines:
-            lines.append("")
-        lines[0] += " << %s" % (value,)
-        explanation = "\n".join(lines)
-    text = "%s: %s" % (failure.cause[0].__name__, explanation)
-    if text.startswith("AssertionError: assert "):
-        text = text[16:]
-    return text
-
-
-operator_map = {
-    ast.BitOr : "|",
-    ast.BitXor : "^",
-    ast.BitAnd : "&",
-    ast.LShift : "<<",
-    ast.RShift : ">>",
-    ast.Add : "+",
-    ast.Sub : "-",
-    ast.Mult : "*",
-    ast.Div : "/",
-    ast.FloorDiv : "//",
-    ast.Mod : "%",
-    ast.Eq : "==",
-    ast.NotEq : "!=",
-    ast.Lt : "<",
-    ast.LtE : "<=",
-    ast.Gt : ">",
-    ast.GtE : ">=",
-    ast.Pow : "**",
-    ast.Is : "is",
-    ast.IsNot : "is not",
-    ast.In : "in",
-    ast.NotIn : "not in"
-}
-
-unary_map = {
-    ast.Not : "not %s",
-    ast.Invert : "~%s",
-    ast.USub : "-%s",
-    ast.UAdd : "+%s"
-}
-
-
-class DebugInterpreter(ast.NodeVisitor):
-    """Interpret AST nodes to gleam useful debugging information. """
-
-    def __init__(self, frame):
-        self.frame = frame
-
-    def generic_visit(self, node):
-        # Fallback when we don't have a special implementation.
-        if _is_ast_expr(node):
-            mod = ast.Expression(node)
-            co = self._compile(mod)
-            try:
-                result = self.frame.eval(co)
-            except Exception:
-                raise Failure()
-            explanation = self.frame.repr(result)
-            return explanation, result
-        elif _is_ast_stmt(node):
-            mod = ast.Module([node])
-            co = self._compile(mod, "exec")
-            try:
-                self.frame.exec_(co)
-            except Exception:
-                raise Failure()
-            return None, None
-        else:
-            raise AssertionError("can't handle %s" %(node,))
-
-    def _compile(self, source, mode="eval"):
-        return compile(source, "<assertion interpretation>", mode)
-
-    def visit_Expr(self, expr):
-        return self.visit(expr.value)
-
-    def visit_Module(self, mod):
-        for stmt in mod.body:
-            self.visit(stmt)
-
-    def visit_Name(self, name):
-        explanation, result = self.generic_visit(name)
-        # See if the name is local.
-        source = "%r in locals() is not globals()" % (name.id,)
-        co = self._compile(source)
-        try:
-            local = self.frame.eval(co)
-        except Exception:
-            # have to assume it isn't
-            local = False
-        if not local:
-            return name.id, result
-        return explanation, result
-
-    def visit_Compare(self, comp):
-        left = comp.left
-        left_explanation, left_result = self.visit(left)
-        for op, next_op in zip(comp.ops, comp.comparators):
-            next_explanation, next_result = self.visit(next_op)
-            op_symbol = operator_map[op.__class__]
-            explanation = "%s %s %s" % (left_explanation, op_symbol,
-                                        next_explanation)
-            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
-            co = self._compile(source)
-            try:
-                result = self.frame.eval(co, __exprinfo_left=left_result,
-                                         __exprinfo_right=next_result)
-            except Exception:
-                raise Failure(explanation)
-            try:
-                if not result:
-                    break
-            except KeyboardInterrupt:
-                raise
-            except:
-                break
-            left_explanation, left_result = next_explanation, next_result
-
-        rcomp = py.code._reprcompare
-        if rcomp:
-            res = rcomp(op_symbol, left_result, next_result)
-            if res:
-                explanation = res
-        return explanation, result
-
-    def visit_BoolOp(self, boolop):
-        is_or = isinstance(boolop.op, ast.Or)
-        explanations = []
-        for operand in boolop.values:
-            explanation, result = self.visit(operand)
-            explanations.append(explanation)
-            if result == is_or:
-                break
-        name = is_or and " or " or " and "
-        explanation = "(" + name.join(explanations) + ")"
-        return explanation, result
-
-    def visit_UnaryOp(self, unary):
-        pattern = unary_map[unary.op.__class__]
-        operand_explanation, operand_result = self.visit(unary.operand)
-        explanation = pattern % (operand_explanation,)
-        co = self._compile(pattern % ("__exprinfo_expr",))
-        try:
-            result = self.frame.eval(co, __exprinfo_expr=operand_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, result
-
-    def visit_BinOp(self, binop):
-        left_explanation, left_result = self.visit(binop.left)
-        right_explanation, right_result = self.visit(binop.right)
-        symbol = operator_map[binop.op.__class__]
-        explanation = "(%s %s %s)" % (left_explanation, symbol,
-                                      right_explanation)
-        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, __exprinfo_left=left_result,
-                                     __exprinfo_right=right_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, result
-
-    def visit_Call(self, call):
-        func_explanation, func = self.visit(call.func)
-        arg_explanations = []
-        ns = {"__exprinfo_func" : func}
-        arguments = []
-        for arg in call.args:
-            arg_explanation, arg_result = self.visit(arg)
-            arg_name = "__exprinfo_%s" % (len(ns),)
-            ns[arg_name] = arg_result
-            arguments.append(arg_name)
-            arg_explanations.append(arg_explanation)
-        for keyword in call.keywords:
-            arg_explanation, arg_result = self.visit(keyword.value)
-            arg_name = "__exprinfo_%s" % (len(ns),)
-            ns[arg_name] = arg_result
-            keyword_source = "%s=%%s" % (keyword.arg)
-            arguments.append(keyword_source % (arg_name,))
-            arg_explanations.append(keyword_source % (arg_explanation,))
-        if call.starargs:
-            arg_explanation, arg_result = self.visit(call.starargs)
-            arg_name = "__exprinfo_star"
-            ns[arg_name] = arg_result
-            arguments.append("*%s" % (arg_name,))
-            arg_explanations.append("*%s" % (arg_explanation,))
-        if call.kwargs:
-            arg_explanation, arg_result = self.visit(call.kwargs)
-            arg_name = "__exprinfo_kwds"
-            ns[arg_name] = arg_result
-            arguments.append("**%s" % (arg_name,))
-            arg_explanations.append("**%s" % (arg_explanation,))
-        args_explained = ", ".join(arg_explanations)
-        explanation = "%s(%s)" % (func_explanation, args_explained)
-        args = ", ".join(arguments)
-        source = "__exprinfo_func(%s)" % (args,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, **ns)
-        except Exception:
-            raise Failure(explanation)
-        pattern = "%s\n{%s = %s\n}"
-        rep = self.frame.repr(result)
-        explanation = pattern % (rep, rep, explanation)
-        return explanation, result
-
-    def _is_builtin_name(self, name):
-        pattern = "%r not in globals() and %r not in locals()"
-        source = pattern % (name.id, name.id)
-        co = self._compile(source)
-        try:
-            return self.frame.eval(co)
-        except Exception:
-            return False
-
-    def visit_Attribute(self, attr):
-        if not isinstance(attr.ctx, ast.Load):
-            return self.generic_visit(attr)
-        source_explanation, source_result = self.visit(attr.value)
-        explanation = "%s.%s" % (source_explanation, attr.attr)
-        source = "__exprinfo_expr.%s" % (attr.attr,)
-        co = self._compile(source)
-        try:
-            result = self.frame.eval(co, __exprinfo_expr=source_result)
-        except Exception:
-            raise Failure(explanation)
-        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
-                                              self.frame.repr(result),
-                                              source_explanation, attr.attr)
-        # Check if the attr is from an instance.
-        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
-        source = source % (attr.attr,)
-        co = self._compile(source)
-        try:
-            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
-        except Exception:
-            from_instance = True
-        if from_instance:
-            rep = self.frame.repr(result)
-            pattern = "%s\n{%s = %s\n}"
-            explanation = pattern % (rep, rep, explanation)
-        return explanation, result
-
-    def visit_Assert(self, assrt):
-        test_explanation, test_result = self.visit(assrt.test)
-        if test_explanation.startswith("False\n{False =") and \
-                test_explanation.endswith("\n"):
-            test_explanation = test_explanation[15:-2]
-        explanation = "assert %s" % (test_explanation,)
-        if not test_result:
-            try:
-                raise BuiltinAssertionError
-            except Exception:
-                raise Failure(explanation)
-        return explanation, test_result
-
-    def visit_Assign(self, assign):
-        value_explanation, value_result = self.visit(assign.value)
-        explanation = "... = %s" % (value_explanation,)
-        name = ast.Name("__exprinfo_expr", ast.Load(),
-                        lineno=assign.value.lineno,
-                        col_offset=assign.value.col_offset)
-        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
-                                col_offset=assign.col_offset)
-        mod = ast.Module([new_assign])
-        co = self._compile(mod, "exec")
-        try:
-            self.frame.exec_(co, __exprinfo_expr=value_result)
-        except Exception:
-            raise Failure(explanation)
-        return explanation, value_result
diff --git a/py/_code/_assertionold.py b/py/_code/_assertionold.py
deleted file mode 100644
--- a/py/_code/_assertionold.py
+++ /dev/null
@@ -1,555 +0,0 @@
-import py
-import sys, inspect
-from compiler import parse, ast, pycodegen
-from py._code.assertion import BuiltinAssertionError, _format_explanation
-
-passthroughex = py.builtin._sysex
-
-class Failure:
-    def __init__(self, node):
-        self.exc, self.value, self.tb = sys.exc_info()
-        self.node = node
-
-class View(object):
-    """View base class.
-
-    If C is a subclass of View, then C(x) creates a proxy object around
-    the object x.  The actual class of the proxy is not C in general,
-    but a *subclass* of C determined by the rules below.  To avoid confusion
-    we call view class the class of the proxy (a subclass of C, so of View)
-    and object class the class of x.
-
-    Attributes and methods not found in the proxy are automatically read on x.
-    Other operations like setting attributes are performed on the proxy, as
-    determined by its view class.  The object x is available from the proxy
-    as its __obj__ attribute.
-
-    The view class selection is determined by the __view__ tuples and the
-    optional __viewkey__ method.  By default, the selected view class is the
-    most specific subclass of C whose __view__ mentions the class of x.
-    If no such subclass is found, the search proceeds with the parent
-    object classes.  For example, C(True) will first look for a subclass
-    of C with __view__ = (..., bool, ...) and only if it doesn't find any
-    look for one with __view__ = (..., int, ...), and then ..., object,...
-    If everything fails the class C itself is considered to be the default.
-
-    Alternatively, the view class selection can be driven by another aspect
-    of the object x, instead of the class of x, by overriding __viewkey__.
-    See last example at the end of this module.
-    """
-
-    _viewcache = {}
-    __view__ = ()
-
-    def __new__(rootclass, obj, *args, **kwds):
-        self = object.__new__(rootclass)
-        self.__obj__ = obj
-        self.__rootclass__ = rootclass
-        key = self.__viewkey__()
-        try:
-            self.__class__ = self._viewcache[key]
-        except KeyError:
-            self.__class__ = self._selectsubclass(key)
-        return self
-
-    def __getattr__(self, attr):
-        # attributes not found in the normal hierarchy rooted on View
-        # are looked up in the object's real class
-        return getattr(self.__obj__, attr)
-
-    def __viewkey__(self):
-        return self.__obj__.__class__
-
-    def __matchkey__(self, key, subclasses):
-        if inspect.isclass(key):
-            keys = inspect.getmro(key)
-        else:
-            keys = [key]
-        for key in keys:
-            result = [C for C in subclasses if key in C.__view__]
-            if result:
-                return result
-        return []
-
-    def _selectsubclass(self, key):
-        subclasses = list(enumsubclasses(self.__rootclass__))
-        for C in subclasses:
-            if not isinstance(C.__view__, tuple):
-                C.__view__ = (C.__view__,)
-        choices = self.__matchkey__(key, subclasses)
-        if not choices:
-            return self.__rootclass__
-        elif len(choices) == 1:
-            return choices[0]
-        else:
-            # combine the multiple choices
-            return type('?', tuple(choices), {})
-
-    def __repr__(self):
-        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
-
-
-def enumsubclasses(cls):
-    for subcls in cls.__subclasses__():
-        for subsubclass in enumsubclasses(subcls):
-            yield subsubclass
-    yield cls
-
-
-class Interpretable(View):
-    """A parse tree node with a few extra methods."""
-    explanation = None
-
-    def is_builtin(self, frame):
-        return False
-
-    def eval(self, frame):
-        # fall-back for unknown expression nodes
-        try:
-            expr = ast.Expression(self.__obj__)
-            expr.filename = '<eval>'
-            self.__obj__.filename = '<eval>'
-            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
-            result = frame.eval(co)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-        self.result = result
-        self.explanation = self.explanation or frame.repr(self.result)
-
-    def run(self, frame):
-        # fall-back for unknown statement nodes
-        try:
-            expr = ast.Module(None, ast.Stmt([self.__obj__]))
-            expr.filename = '<run>'
-            co = pycodegen.ModuleCodeGenerator(expr).getCode()
-            frame.exec_(co)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-
-    def nice_explanation(self):
-        return _format_explanation(self.explanation)
-
-
-class Name(Interpretable):
-    __view__ = ast.Name
-
-    def is_local(self, frame):
-        source = '%r in locals() is not globals()' % self.name
-        try:
-            return frame.is_true(frame.eval(source))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def is_global(self, frame):
-        source = '%r in globals()' % self.name
-        try:
-            return frame.is_true(frame.eval(source))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def is_builtin(self, frame):
-        source = '%r not in locals() and %r not in globals()' % (
-            self.name, self.name)
-        try:
-            return frame.is_true(frame.eval(source))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def eval(self, frame):
-        super(Name, self).eval(frame)
-        if not self.is_local(frame):
-            self.explanation = self.name
-
-class Compare(Interpretable):
-    __view__ = ast.Compare
-
-    def eval(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        for operation, expr2 in self.ops:
-            if hasattr(self, 'result'):
-                # shortcutting in chained expressions
-                if not frame.is_true(self.result):
-                    break
-            expr2 = Interpretable(expr2)
-            expr2.eval(frame)
-            self.explanation = "%s %s %s" % (
-                expr.explanation, operation, expr2.explanation)
-            source = "__exprinfo_left %s __exprinfo_right" % operation
-            try:
-                self.result = frame.eval(source,
-                                         __exprinfo_left=expr.result,
-                                         __exprinfo_right=expr2.result)
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-            expr = expr2
-
-class And(Interpretable):
-    __view__ = ast.And
-
-    def eval(self, frame):
-        explanations = []
-        for expr in self.nodes:
-            expr = Interpretable(expr)
-            expr.eval(frame)
-            explanations.append(expr.explanation)
-            self.result = expr.result
-            if not frame.is_true(expr.result):
-                break
-        self.explanation = '(' + ' and '.join(explanations) + ')'
-
-class Or(Interpretable):
-    __view__ = ast.Or
-
-    def eval(self, frame):
-        explanations = []
-        for expr in self.nodes:
-            expr = Interpretable(expr)
-            expr.eval(frame)
-            explanations.append(expr.explanation)
-            self.result = expr.result
-            if frame.is_true(expr.result):
-                break
-        self.explanation = '(' + ' or '.join(explanations) + ')'
-
-
-# == Unary operations ==
-keepalive = []
-for astclass, astpattern in {
-    ast.Not    : 'not __exprinfo_expr',
-    ast.Invert : '(~__exprinfo_expr)',
-    }.items():
-
-    class UnaryArith(Interpretable):
-        __view__ = astclass
-
-        def eval(self, frame, astpattern=astpattern):
-            expr = Interpretable(self.expr)
-            expr.eval(frame)
-            self.explanation = astpattern.replace('__exprinfo_expr',
-                                                  expr.explanation)
-            try:
-                self.result = frame.eval(astpattern,
-                                         __exprinfo_expr=expr.result)
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-
-    keepalive.append(UnaryArith)
-
-# == Binary operations ==
-for astclass, astpattern in {
-    ast.Add    : '(__exprinfo_left + __exprinfo_right)',
-    ast.Sub    : '(__exprinfo_left - __exprinfo_right)',
-    ast.Mul    : '(__exprinfo_left * __exprinfo_right)',
-    ast.Div    : '(__exprinfo_left / __exprinfo_right)',
-    ast.Mod    : '(__exprinfo_left % __exprinfo_right)',
-    ast.Power  : '(__exprinfo_left ** __exprinfo_right)',
-    }.items():
-
-    class BinaryArith(Interpretable):
-        __view__ = astclass
-
-        def eval(self, frame, astpattern=astpattern):
-            left = Interpretable(self.left)
-            left.eval(frame)
-            right = Interpretable(self.right)
-            right.eval(frame)
-            self.explanation = (astpattern
-                                .replace('__exprinfo_left',  left .explanation)
-                                .replace('__exprinfo_right', right.explanation))
-            try:
-                self.result = frame.eval(astpattern,
-                                         __exprinfo_left=left.result,
-                                         __exprinfo_right=right.result)
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-
-    keepalive.append(BinaryArith)
-
-
-class CallFunc(Interpretable):
-    __view__ = ast.CallFunc
-
-    def is_bool(self, frame):
-        source = 'isinstance(__exprinfo_value, bool)'
-        try:
-            return frame.is_true(frame.eval(source,
-                                            __exprinfo_value=self.result))
-        except passthroughex:
-            raise
-        except:
-            return False
-
-    def eval(self, frame):
-        node = Interpretable(self.node)
-        node.eval(frame)
-        explanations = []
-        vars = {'__exprinfo_fn': node.result}
-        source = '__exprinfo_fn('
-        for a in self.args:
-            if isinstance(a, ast.Keyword):
-                keyword = a.name
-                a = a.expr
-            else:
-                keyword = None
-            a = Interpretable(a)
-            a.eval(frame)
-            argname = '__exprinfo_%d' % len(vars)
-            vars[argname] = a.result
-            if keyword is None:
-                source += argname + ','
-                explanations.append(a.explanation)
-            else:
-                source += '%s=%s,' % (keyword, argname)
-                explanations.append('%s=%s' % (keyword, a.explanation))
-        if self.star_args:
-            star_args = Interpretable(self.star_args)
-            star_args.eval(frame)
-            argname = '__exprinfo_star'
-            vars[argname] = star_args.result
-            source += '*' + argname + ','
-            explanations.append('*' + star_args.explanation)
-        if self.dstar_args:
-            dstar_args = Interpretable(self.dstar_args)
-            dstar_args.eval(frame)
-            argname = '__exprinfo_kwds'
-            vars[argname] = dstar_args.result
-            source += '**' + argname + ','
-            explanations.append('**' + dstar_args.explanation)
-        self.explanation = "%s(%s)" % (
-            node.explanation, ', '.join(explanations))
-        if source.endswith(','):
-            source = source[:-1]
-        source += ')'
-        try:
-            self.result = frame.eval(source, **vars)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-        if not node.is_builtin(frame) or not self.is_bool(frame):
-            r = frame.repr(self.result)
-            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
-
-class Getattr(Interpretable):
-    __view__ = ast.Getattr
-
-    def eval(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        source = '__exprinfo_expr.%s' % self.attrname
-        try:
-            self.result = frame.eval(source, __exprinfo_expr=expr.result)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
-        # if the attribute comes from the instance, its value is interesting
-        source = ('hasattr(__exprinfo_expr, "__dict__") and '
-                  '%r in __exprinfo_expr.__dict__' % self.attrname)
-        try:
-            from_instance = frame.is_true(
-                frame.eval(source, __exprinfo_expr=expr.result))
-        except passthroughex:
-            raise
-        except:
-            from_instance = True
-        if from_instance:
-            r = frame.repr(self.result)
-            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
-
-# == Re-interpretation of full statements ==
-
-class Assert(Interpretable):
-    __view__ = ast.Assert
-
-    def run(self, frame):
-        test = Interpretable(self.test)
-        test.eval(frame)
-        # simplify 'assert False where False = ...'
-        if (test.explanation.startswith('False\n{False = ') and
-            test.explanation.endswith('\n}')):
-            test.explanation = test.explanation[15:-2]
-        # print the result as  'assert <explanation>'
-        self.result = test.result
-        self.explanation = 'assert ' + test.explanation
-        if not frame.is_true(test.result):
-            try:
-                raise BuiltinAssertionError
-            except passthroughex:
-                raise
-            except:
-                raise Failure(self)
-
-class Assign(Interpretable):
-    __view__ = ast.Assign
-
-    def run(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        self.result = expr.result
-        self.explanation = '... = ' + expr.explanation
-        # fall-back-run the rest of the assignment
-        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
-        mod = ast.Module(None, ast.Stmt([ass]))
-        mod.filename = '<run>'
-        co = pycodegen.ModuleCodeGenerator(mod).getCode()
-        try:
-            frame.exec_(co, __exprinfo_expr=expr.result)
-        except passthroughex:
-            raise
-        except:
-            raise Failure(self)
-
-class Discard(Interpretable):
-    __view__ = ast.Discard
-
-    def run(self, frame):
-        expr = Interpretable(self.expr)
-        expr.eval(frame)
-        self.result = expr.result
-        self.explanation = expr.explanation
-
-class Stmt(Interpretable):
-    __view__ = ast.Stmt
-
-    def run(self, frame):
-        for stmt in self.nodes:
-            stmt = Interpretable(stmt)
-            stmt.run(frame)
-
-
-def report_failure(e):
-    explanation = e.node.nice_explanation()
-    if explanation:
-        explanation = ", in: " + explanation
-    else:
-        explanation = ""
-    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
-
-def check(s, frame=None):
-    if frame is None:
-        frame = sys._getframe(1)
-        frame = py.code.Frame(frame)
-    expr = parse(s, 'eval')
-    assert isinstance(expr, ast.Expression)
-    node = Interpretable(expr.node)
-    try:
-        node.eval(frame)
-    except passthroughex:
-        raise
-    except Failure:
-        e = sys.exc_info()[1]
-        report_failure(e)
-    else:
-        if not frame.is_true(node.result):
-            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
-
-
-###########################################################
-# API / Entry points
-# #########################################################
-
-def interpret(source, frame, should_fail=False):
-    module = Interpretable(parse(source, 'exec').node)
-    #print "got module", module
-    if isinstance(frame, py.std.types.FrameType):
-        frame = py.code.Frame(frame)
-    try:
-        module.run(frame)
-    except Failure:
-        e = sys.exc_info()[1]
-        return getfailure(e)
-    except passthroughex:
-        raise
-    except:
-        import traceback
-        traceback.print_exc()
-    if should_fail:
-        return ("(assertion failed, but when it was re-run for "
-                "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --nomagic)")
-    else:
-        return None
-
-def getmsg(excinfo):
-    if isinstance(excinfo, tuple):
-        excinfo = py.code.ExceptionInfo(excinfo)
-    #frame, line = gettbline(tb)
-    #frame = py.code.Frame(frame)
-    #return interpret(line, frame)
-
-    tb = excinfo.traceback[-1]
-    source = str(tb.statement).strip()
-    x = interpret(source, tb.frame, should_fail=True)
-    if not isinstance(x, str):
-        raise TypeError("interpret returned non-string %r" % (x,))
-    return x
-
-def getfailure(e):
-    explanation = e.node.nice_explanation()
-    if str(e.value):
-        lines = explanation.split('\n')
-        lines[0] += "  << %s" % (e.value,)
-        explanation = '\n'.join(lines)
-    text = "%s: %s" % (e.exc.__name__, explanation)
-    if text.startswith('AssertionError: assert '):
-        text = text[16:]
-    return text
-
-def run(s, frame=None):
-    if frame is None:
-        frame = sys._getframe(1)
-        frame = py.code.Frame(frame)
-    module = Interpretable(parse(s, 'exec').node)
-    try:
-        module.run(frame)
-    except Failure:
-        e = sys.exc_info()[1]
-        report_failure(e)
-
-
-if __name__ == '__main__':
-    # example:
-    def f():
-        return 5
-    def g():
-        return 3
-    def h(x):
-        return 'never'
-    check("f() * g() == 5")
-    check("not f()")
-    check("not (f() and g() or 0)")
-    check("f() == g()")
-    i = 4
-    check("i == f()")
-    check("len(f()) == 0")
-    check("isinstance(2+3+4, float)")
-
-    run("x = i")
-    check("x == 5")
-
-    run("assert not f(), 'oops'")
-    run("a, b, c = 1, 2")
-    run("a, b, c = f()")
-
-    check("max([f(),g()]) == 4")
-    check("'hello'[g()] == 'h'")
-    run("'guk%d' % h(f())")
diff --git a/py/_code/assertion.py b/py/_code/assertion.py
deleted file mode 100644
--- a/py/_code/assertion.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import sys
-import py
-
-BuiltinAssertionError = py.builtin.builtins.AssertionError
-
-_reprcompare = None # if set, will be called by assert reinterp for comparison ops
-
-def _format_explanation(explanation):
-    """This formats an explanation
-
-    Normally all embedded newlines are escaped, however there are
-    three exceptions: \n{, \n} and \n~.  The first two are intended
-    cover nested explanations, see function and attribute explanations
-    for examples (.visit_Call(), visit_Attribute()).  The last one is
-    for when one explanation needs to span multiple lines, e.g. when
-    displaying diffs.
-    """
-    raw_lines = (explanation or '').split('\n')
-    # escape newlines not followed by {, } and ~
-    lines = [raw_lines[0]]
-    for l in raw_lines[1:]:
-        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
-            lines.append(l)
-        else:
-            lines[-1] += '\\n' + l
-
-    result = lines[:1]
-    stack = [0]
-    stackcnt = [0]
-    for line in lines[1:]:
-        if line.startswith('{'):
-            if stackcnt[-1]:
-                s = 'and   '
-            else:
-                s = 'where '
-            stack.append(len(result))
-            stackcnt[-1] += 1
-            stackcnt.append(0)
-            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
-        elif line.startswith('}'):
-            assert line.startswith('}')
-            stack.pop()
-            stackcnt.pop()
-            result[stack[-1]] += line[1:]
-        else:
-            assert line.startswith('~')
-            result.append('  '*len(stack) + line[1:])
-    assert len(stack) == 1
-    return '\n'.join(result)
-
-
-class AssertionError(BuiltinAssertionError):
-    def __init__(self, *args):
-        BuiltinAssertionError.__init__(self, *args)
-        if args:
-            try:
-                self.msg = str(args[0])
-            except py.builtin._sysex:
-                raise
-            except:
-                self.msg = "<[broken __repr__] %s at %0xd>" %(
-                    args[0].__class__, id(args[0]))
-        else:
-            f = py.code.Frame(sys._getframe(1))
-            try:
-                source = f.code.fullsource
-                if source is not None:
-                    try:
-                        source = source.getstatement(f.lineno, assertion=True)
-                    except IndexError:
-                        source = None
-                    else:
-                        source = str(source.deindent()).strip()
-            except py.error.ENOENT:
-                source = None
-                # this can also occur during reinterpretation, when the
-                # co_filename is set to "<run>".
-            if source:
-                self.msg = reinterpret(source, f, should_fail=True)
-            else:
-                self.msg = "<could not determine information>"
-            if not self.args:
-                self.args = (self.msg,)
-
-if sys.version_info > (3, 0):
-    AssertionError.__module__ = "builtins"
-    reinterpret_old = "old reinterpretation not available for py3"
-else:
-    from py._code._assertionold import interpret as reinterpret_old
-if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
-    from py._code._assertionnew import interpret as reinterpret
-else:
-    reinterpret = reinterpret_old
-
diff --git a/py/_code/code.py b/py/_code/code.py
--- a/py/_code/code.py
+++ b/py/_code/code.py
@@ -145,17 +145,6 @@
         return self.frame.f_locals
     locals = property(getlocals, None, None, "locals of underlaying frame")
 
-    def reinterpret(self):
-        """Reinterpret the failing statement and returns a detailed information
-           about what operations are performed."""
-        if self.exprinfo is None:
-            source = str(self.statement).strip()
-            x = py.code._reinterpret(source, self.frame, should_fail=True)
-            if not isinstance(x, str):
-                raise TypeError("interpret returned non-string %r" % (x,))
-            self.exprinfo = x
-        return self.exprinfo
-
     def getfirstlinesource(self):
         # on Jython this firstlineno can be -1 apparently
         return max(self.frame.code.firstlineno, 0)
@@ -310,7 +299,7 @@
         #     ExceptionInfo-like classes may have different attributes.
         if tup is None:
             tup = sys.exc_info()
-            if exprinfo is None and isinstance(tup[1], py.code._AssertionError):
+            if exprinfo is None and isinstance(tup[1], AssertionError):
                 exprinfo = getattr(tup[1], 'msg', None)
                 if exprinfo is None:
                     exprinfo = str(tup[1])
@@ -690,22 +679,15 @@
 
 oldbuiltins = {}
 
-def patch_builtins(assertion=True, compile=True):
-    """ put compile and AssertionError builtins to Python's builtins. """
-    if assertion:
-        from py._code import assertion
-        l = oldbuiltins.setdefault('AssertionError', [])
-        l.append(py.builtin.builtins.AssertionError)
-        py.builtin.builtins.AssertionError = assertion.AssertionError
+def patch_builtins(compile=True):
+    """ put compile builtins to Python's builtins. """
     if compile:
         l = oldbuiltins.setdefault('compile', [])
         l.append(py.builtin.builtins.compile)
         py.builtin.builtins.compile = py.code.compile
 
-def unpatch_builtins(assertion=True, compile=True):
+def unpatch_builtins(compile=True):
     """ remove compile and AssertionError builtins from Python builtins. """
-    if assertion:
-        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
     if compile:
         py.builtin.builtins.compile = oldbuiltins['compile'].pop()
 
diff --git a/pypy/annotation/bookkeeper.py b/pypy/annotation/bookkeeper.py
--- a/pypy/annotation/bookkeeper.py
+++ b/pypy/annotation/bookkeeper.py
@@ -299,12 +299,13 @@
             listdef.generalize_range_step(flags['range_step'])
         return SomeList(listdef)
 
-    def getdictdef(self, is_r_dict=False):
+    def getdictdef(self, is_r_dict=False, force_non_null=False):
         """Get the DictDef associated with the current position."""
         try:
             dictdef = self.dictdefs[self.position_key]
         except KeyError:
-            dictdef = DictDef(self, is_r_dict=is_r_dict)
+            dictdef = DictDef(self, is_r_dict=is_r_dict,
+                              force_non_null=force_non_null)
             self.dictdefs[self.position_key] = dictdef
         return dictdef
 
diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py
--- a/pypy/annotation/builtin.py
+++ b/pypy/annotation/builtin.py
@@ -311,8 +311,14 @@
 def robjmodel_we_are_translated():
     return immutablevalue(True)
 
-def robjmodel_r_dict(s_eqfn, s_hashfn):
-    dictdef = getbookkeeper().getdictdef(is_r_dict=True)
+def robjmodel_r_dict(s_eqfn, s_hashfn, s_force_non_null=None):
+    if s_force_non_null is None:
+        force_non_null = False
+    else:
+        assert s_force_non_null.is_constant()
+        force_non_null = s_force_non_null.const
+    dictdef = getbookkeeper().getdictdef(is_r_dict=True,
+                                         force_non_null=force_non_null)
     dictdef.dictkey.update_rdict_annotations(s_eqfn, s_hashfn)
     return SomeDict(dictdef)
 
@@ -351,17 +357,6 @@
 def llmemory_cast_int_to_adr(s):
     return SomeAddress()
 
-
-##def rarith_ovfcheck(s_obj):
-##    if isinstance(s_obj, SomeInteger) and s_obj.unsigned:
-##        getbookkeeper().warning("ovfcheck on unsigned")
-##    return s_obj
-
-##def rarith_ovfcheck_lshift(s_obj1, s_obj2):
-##    if isinstance(s_obj1, SomeInteger) and s_obj1.unsigned:
-##        getbookkeeper().warning("ovfcheck_lshift with unsigned")
-##    return SomeInteger()
-
 def unicodedata_decimal(s_uchr):
     raise TypeError, "unicodedate.decimal() calls should not happen at interp-level"    
 
@@ -379,8 +374,6 @@
         original = getattr(__builtin__, name[8:])
         BUILTIN_ANALYZERS[original] = value
 
-##BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.ovfcheck] = rarith_ovfcheck
-##BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.ovfcheck_lshift] = rarith_ovfcheck_lshift
 BUILTIN_ANALYZERS[pypy.rlib.rarithmetic.intmask] = rarith_intmask
 BUILTIN_ANALYZERS[pypy.rlib.objectmodel.instantiate] = robjmodel_instantiate
 BUILTIN_ANALYZERS[pypy.rlib.objectmodel.we_are_translated] = (
diff --git a/pypy/annotation/dictdef.py b/pypy/annotation/dictdef.py
--- a/pypy/annotation/dictdef.py
+++ b/pypy/annotation/dictdef.py
@@ -85,12 +85,14 @@
 
     def __init__(self, bookkeeper, s_key = s_ImpossibleValue,
                                  s_value = s_ImpossibleValue,
-                               is_r_dict = False):
+                               is_r_dict = False,
+                           force_non_null = False):
         self.dictkey = DictKey(bookkeeper, s_key, is_r_dict)
         self.dictkey.itemof[self] = True
         self.dictvalue = DictValue(bookkeeper, s_value)
         self.dictvalue.itemof[self] = True
         self.bookkeeper = bookkeeper
+        self.force_non_null = force_non_null
 
     def read_key(self, position_key=None):
         if position_key is None:
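
The three hunks above thread a new force_non_null flag from r_dict() through the
annotator into DictDef.  Assuming the r_dict constructor in
pypy.rlib.objectmodel gains the matching keyword in this same changeset (only
the annotator side is visible here), RPython code would use it roughly like
this; untranslated, r_dict already behaves as a dict with custom equality and
hashing:

    from pypy.rlib.objectmodel import r_dict

    def eq(key1, key2):
        return key1.lower() == key2.lower()

    def hash_(key):
        return hash(key.lower())

    # force_non_null=True asserts that keys are never None, letting the
    # low-level dict use a more compact representation (assumed semantics).
    d = r_dict(eq, hash_, force_non_null=True)
    d["Spam"] = 1
    assert d["SPAM"] == 1
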
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -129,9 +129,6 @@
                  cmdline='--objspace -o'),
 
     OptionDescription("opcodes", "opcodes to enable in the interpreter", [
-        BoolOption("CALL_LIKELY_BUILTIN", "emit a special bytecode for likely calls to builtin functions",
-                   default=False,
-                   requires=[("translation.stackless", False)]),
         BoolOption("CALL_METHOD", "emit a special bytecode for expr.name()",
                    default=False),
         ]),
@@ -266,13 +263,7 @@
         BoolOption("withcelldict",
                    "use dictionaries that are optimized for being used as module dicts",
                    default=False,
-                   requires=[("objspace.opcodes.CALL_LIKELY_BUILTIN", False),
-                             ("objspace.honor__builtins__", False)]),
-
-        BoolOption("withdictmeasurement",
-                   "create huge files with masses of information "
-                   "about dictionaries",
-                   default=False),
+                   requires=[("objspace.honor__builtins__", False)]),
 
         BoolOption("withmapdict",
                    "make instances really small but slow without the JIT",
@@ -355,8 +346,6 @@
     backend = config.translation.backend
 
     # all the good optimizations for PyPy should be listed here
-    if level in ['2', '3']:
-        config.objspace.opcodes.suggest(CALL_LIKELY_BUILTIN=True)
     if level in ['2', '3', 'jit']:
         config.objspace.opcodes.suggest(CALL_METHOD=True)
         config.objspace.std.suggest(withrangelist=True)
diff --git a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt b/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.opcodes.CALL_LIKELY_BUILTIN.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Introduce a new opcode called ``CALL_LIKELY_BUILTIN``. It is used when something
-is called, that looks like a builtin function (but could in reality be shadowed
-by a name in the module globals). For all module globals dictionaries it is
-then tracked which builtin name is shadowed in this module. If the
-``CALL_LIKELY_BUILTIN`` opcode is executed, it is checked whether the builtin is
-shadowed. If not, the corresponding builtin is called. Otherwise the object that
-is shadowing it is called instead. If no shadowing is happening, this saves two
-dictionary lookups on calls to builtins.
-
-For more information, see the section in `Standard Interpreter Optimizations`_.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#call-likely-builtin
diff --git a/pypy/doc/config/objspace.std.withdictmeasurement.txt b/pypy/doc/config/objspace.std.withdictmeasurement.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withdictmeasurement.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Internal option.
-
-.. internal
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -248,5 +248,7 @@
   never a dictionary as it sometimes is in CPython. Assigning to
   ``__builtins__`` has no effect.
 
+* object identity of immutable keys in dictionaries is not necessarily preserved.
+  Never compare immutable objects with ``is``.
+
 .. include:: _ref.txt
-
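
The new bullet added to cpython_differences.rst above is easy to trip over in
tests; a small illustration (the `is` result is deliberately left unspecified,
which is the whole point):

    d = {}
    key = 1000 * 1000
    d[key] = "x"
    stored = list(d.keys())[0]
    print(stored == key)   # True on any Python implementation
    print(stored is key)   # may be False on PyPy: the dict may keep the
                           # immutable key in an unwrapped form and hand back
                           # an equal but distinct object
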
diff --git a/pypy/doc/garbage_collection.rst b/pypy/doc/garbage_collection.rst
--- a/pypy/doc/garbage_collection.rst
+++ b/pypy/doc/garbage_collection.rst
@@ -212,90 +212,4 @@
   becomes free garbage, to be collected at the next major collection.
 
 
-Minimark GC
------------
-
-This is a simplification and rewrite of the ideas from the Hybrid GC.
-It uses a nursery for the young objects, and mark-and-sweep for the old
-objects.  This is a moving GC, but objects may only move once (from
-the nursery to the old stage).
-
-The main difference with the Hybrid GC is that the mark-and-sweep
-objects (the "old stage") are directly handled by the GC's custom
-allocator, instead of being handled by malloc() calls.  The gain is that
-it is then possible, during a major collection, to walk through all old
-generation objects without needing to store a list of pointers to them.
-So as a first approximation, when compared to the Hybrid GC, the
-Minimark GC saves one word of memory per old object.
-
-There are a number of environment variables that can be tweaked to
-influence the GC.  (Their default value should be ok for most usages.)
-You can read more about them at the start of
-`pypy/rpython/memory/gc/minimark.py`_.
-
-In more details:
-
-- The small newly malloced objects are allocated in the nursery (case 1).
-  All objects living in the nursery are "young".
-
-- The big objects are always handled directly by the system malloc().
-  But the big newly malloced objects are still "young" when they are
-  allocated (case 2), even though they don't live in the nursery.
-
-- When the nursery is full, we do a minor collection, i.e. we find
-  which "young" objects are still alive (from cases 1 and 2).  The
-  "young" flag is then removed.  The surviving case 1 objects are moved
-  to the old stage. The dying case 2 objects are immediately freed.
-
-- The old stage is an area of memory containing old (small) objects.  It
-  is handled by `pypy/rpython/memory/gc/minimarkpage.py`_.  It is organized
-  as "arenas" of 256KB or 512KB, subdivided into "pages" of 4KB or 8KB.
-  Each page can either be free, or contain small objects of all the same
-  size.  Furthermore at any point in time each object location can be
-  either allocated or freed.  The basic design comes from ``obmalloc.c``
-  from CPython (which itself comes from the same source as the Linux
-  system malloc()).
-
-- New objects are added to the old stage at every minor collection.
-  Immediately after a minor collection, when we reach some threshold, we
-  trigger a major collection.  This is the mark-and-sweep step.  It walks
-  over *all* objects (mark), and then frees some fraction of them (sweep).
-  This means that the only time when we want to free objects is while
-  walking over all of them; we never ask to free an object given just its
-  address.  This allows some simplifications and memory savings when
-  compared to ``obmalloc.c``.
-
-- As with all generational collectors, this GC needs a write barrier to
-  record which old objects have a reference to young objects.
-
-- Additionally, we found out that it is useful to handle the case of
-  big arrays specially: when we allocate a big array (with the system
-  malloc()), we reserve a small number of bytes before.  When the array
-  grows old, we use the extra bytes as a set of bits.  Each bit
-  represents 128 entries in the array.  Whenever the write barrier is
-  called to record a reference from the Nth entry of the array to some
-  young object, we set the bit number ``(N/128)`` to 1.  This can
-  considerably speed up minor collections, because we then only have to
-  scan 128 entries of the array instead of all of them.
-
-- As usual, we need special care about weak references, and objects with
-  finalizers.  Weak references are allocated in the nursery, and if they
-  survive they move to the old stage, as usual for all objects; the
-  difference is that the reference they contain must either follow the
-  object, or be set to NULL if the object dies.  And the objects with
-  finalizers, considered rare enough, are immediately allocated old to
-  simplify the design.  In particular their ``__del__`` method can only
-  be called just after a major collection.
-
-- The objects move once only, so we can use a trick to implement id()
-  and hash().  If the object is not in the nursery, it won't move any
-  more, so its id() and hash() are the object's address, cast to an
-  integer.  If the object is in the nursery, and we ask for its id()
-  or its hash(), then we pre-reserve a location in the old stage, and
-  return the address of that location.  If the object survives the
-  next minor collection, we move it there, and so its id() and hash()
-  are preserved.  If the object dies then the pre-reserved location
-  becomes free garbage, to be collected at the next major collection.
-
-
 .. include:: _ref.txt
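
The Minimark description removed above mentions a card-marking style write
barrier for big arrays: one bit covers 128 entries, so a minor collection only
scans the marked windows instead of the whole array.  A rough model of that
idea, with hypothetical names (not PyPy's actual GC code):

    CARD_SIZE = 128

    def write_barrier(cards, index):
        # Called when entry `index` of an old big array is made to point to a
        # young object: set the bit covering that 128-entry window.
        cards[index // CARD_SIZE] = 1

    def scan_marked_cards(array, cards, trace_young):
        # Minor collection: only the marked windows need scanning.
        for card_no, dirty in enumerate(cards):
            if dirty:
                start = card_no * CARD_SIZE
                for item in array[start:start + CARD_SIZE]:
                    trace_young(item)
                cards[card_no] = 0

    array = list(range(1000))
    cards = [0] * ((len(array) + CARD_SIZE - 1) // CARD_SIZE)
    write_barrier(cards, 260)              # entry 260 falls in card 2
    scan_marked_cards(array, cards, lambda obj: None)
    print(cards)                           # all bits cleared again
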
diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst
--- a/pypy/doc/getting-started.rst
+++ b/pypy/doc/getting-started.rst
@@ -51,7 +51,7 @@
 ---------------
 
 PyPy is ready to be executed as soon as you unpack the tarball or the zip
-file, with no need install it in any specific location::
+file, with no need to install it in any specific location::
 
     $ tar xf pypy-1.5-linux.tar.bz2
 
diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
--- a/pypy/doc/index.rst
+++ b/pypy/doc/index.rst
@@ -11,6 +11,10 @@
 Getting into PyPy ... 
 =============================================
 
+* `Getting started`_: how to install and run the PyPy Python interpreter
+
+* `FAQ`_: some frequently asked questions.
+
 * `Release 1.5`_: the latest official release
 
 * `PyPy Blog`_: news and status info about PyPy 
@@ -26,13 +30,6 @@
 Documentation for the PyPy Python Interpreter
 ===============================================
 
-`getting started`_ provides hands-on instructions 
-including a two-liner to run the PyPy Python interpreter 
-on your system, examples on advanced features and 
-entry points for using the `RPython toolchain`_.
-
-`FAQ`_ contains some frequently asked questions.
-
 New features of PyPy's Python Interpreter and 
 Translation Framework: 
 
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -157,32 +157,6 @@
 A more advanced version of sharing dicts, called *map dicts,* is available
 with the :config:`objspace.std.withmapdict` option.
 
-Builtin-Shadowing
-+++++++++++++++++
-
-Usually the calling of builtins in Python requires two dictionary lookups: first
-to see whether the current global dictionary contains an object with the same
-name, then a lookup in the ``__builtin__`` dictionary. This is somehow
-circumvented by storing an often used builtin into a local variable to get
-the fast local lookup (which is a rather strange and ugly hack).
-
-The same problem is solved in a different way by "wary" dictionaries. They are
-another dictionary representation used together with multidicts. This
-representation is used only for module dictionaries. The representation checks on
-every setitem whether the key that is used is the name of a builtin. If this is
-the case, the dictionary is marked as shadowing that particular builtin.
-
-To identify calls to builtins easily, a new bytecode (``CALL_LIKELY_BUILTIN``)
-is introduced. Whenever it is executed, the globals dictionary is checked
-to see whether it masks the builtin (which is possible without a dictionary
-lookup).  Then the ``__builtin__`` dict is checked in the same way,
-to see whether somebody replaced the real builtin with something else. In the
-common case, the program didn't do any of these; the proper builtin can then
-be called without using any dictionary lookup at all.
-
-You can enable this feature with the
-:config:`objspace.opcodes.CALL_LIKELY_BUILTIN` option.
-
 
 List Optimizations
 ------------------
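
For reference, the "rather strange and ugly hack" that the removed section
contrasts with CALL_LIKELY_BUILTIN: binding a builtin to a local (here via a
default argument) so that the lookup is a fast local access instead of a
globals lookup followed by a __builtin__ lookup:

    def total_length(strings, _len=len):
        # _len is a local (default argument), so calling it skips the two
        # dictionary lookups described above.
        n = 0
        for s in strings:
            n += _len(s)
        return n

    print(total_length(["ab", "cde"]))   # -> 5
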
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -124,6 +124,25 @@
 for our needs. It's possible that this has changed, reviving the LLVM backend
 (or writing new from scratch) for static compilation would be a good project.
 
+(On the other hand, just generating C code and using clang might be enough.
+The issue with that is the so-called "asmgcc GC root finder", which has tons
+of issues of its own.  In my opinion (arigo), it would definitely be a
+better project to try to optimize the alternative, the "shadowstack" GC root
+finder, which is nicely portable.  So far it gives a pypy that is around
+7% slower.)
+
+Embedding PyPy
+----------------------------------------
+
+Being able to embed PyPy, say with its own limited C API, would be
+useful.  But here is the most interesting variant, straight from
+EuroPython live discussion :-)  We can have a generic "libpypy.so" that
+can be used as a placeholder dynamic library, and when it gets loaded,
+it runs a .py module that installs (via ctypes) the interface it wants
+exported.  This would give us a one-size-fits-all generic .so file to be
+imported by any application that wants to load .so files :-)
+
+
 .. _`issue tracker`: http://bugs.pypy.org
 .. _`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev
 .. _`jitviewer`: http://bitbucket.org/pypy/jitviewer
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -17,7 +17,7 @@
         self.varargname = varargname
         self.kwargname = kwargname
 
-    @jit.purefunction
+    @jit.elidable
     def find_argname(self, name):
         try:
             return self.argnames.index(name)
@@ -90,15 +90,18 @@
     ###  Construction  ###
 
     def __init__(self, space, args_w, keywords=None, keywords_w=None,
-                 w_stararg=None, w_starstararg=None):
+                 w_stararg=None, w_starstararg=None, keyword_names_w=None):
         self.space = space
         assert isinstance(args_w, list)
         self.arguments_w = args_w
         self.keywords = keywords
         self.keywords_w = keywords_w
+        self.keyword_names_w = keyword_names_w  # matches the tail of .keywords
         if keywords is not None:
             assert keywords_w is not None
             assert len(keywords_w) == len(keywords)
+            assert (keyword_names_w is None or
+                    len(keyword_names_w) <= len(keywords))
             make_sure_not_resized(self.keywords)
             make_sure_not_resized(self.keywords_w)
 
@@ -132,7 +135,8 @@
 
     def replace_arguments(self, args_w):
         "Return a new Arguments with a args_w as positional arguments."
-        return Arguments(self.space, args_w, self.keywords, self.keywords_w)
+        return Arguments(self.space, args_w, self.keywords, self.keywords_w,
+                         keyword_names_w = self.keyword_names_w)
 
     def prepend(self, w_firstarg):
         "Return a new Arguments with a new argument inserted first."
@@ -201,15 +205,16 @@
                         space.w_TypeError,
                         space.wrap("keywords must be strings"))
                 if e.match(space, space.w_UnicodeEncodeError):
-                    raise OperationError(
-                        space.w_TypeError,
-                        space.wrap("keyword cannot be encoded to ascii"))
-                raise
-            if self.keywords and key in self.keywords:
-                raise operationerrfmt(self.space.w_TypeError,
-                                      "got multiple values "
-                                      "for keyword argument "
-                                      "'%s'", key)
+                    # Allow this to pass through
+                    key = None
+                else:
+                    raise
+            else:
+                if self.keywords and key in self.keywords:
+                    raise operationerrfmt(self.space.w_TypeError,
+                                          "got multiple values "
+                                          "for keyword argument "
+                                          "'%s'", key)
             keywords[i] = key
             keywords_w[i] = space.getitem(w_starstararg, w_key)
             i += 1
@@ -219,6 +224,7 @@
         else:
             self.keywords = self.keywords + keywords
             self.keywords_w = self.keywords_w + keywords_w
+        self.keyword_names_w = keys_w
 
     def fixedunpack(self, argcount):
         """The simplest argument parsing: get the 'argcount' arguments,
@@ -339,6 +345,10 @@
             used_keywords = [False] * num_kwds
             for i in range(num_kwds):
                 name = keywords[i]
+                # If name was not encoded as a string, it could be None. In that
+                # case, it's definitely not going to be in the signature.
+                if name is None:
+                    continue
                 j = signature.find_argname(name)
                 if j < 0:
                     continue
@@ -374,17 +384,26 @@
         if has_kwarg:
             w_kwds = self.space.newdict()
             if num_remainingkwds:
+                #
+                limit = len(keywords)
+                if self.keyword_names_w is not None:
+                    limit -= len(self.keyword_names_w)
                 for i in range(len(keywords)):
                     if not used_keywords[i]:
-                        key = keywords[i]
-                        self.space.setitem(w_kwds, self.space.wrap(key), keywords_w[i])
+                        if i < limit:
+                            w_key = self.space.wrap(keywords[i])
+                        else:
+                            w_key = self.keyword_names_w[i - limit]
+                        self.space.setitem(w_kwds, w_key, keywords_w[i])
+                #
             scope_w[co_argcount + has_vararg] = w_kwds
         elif num_remainingkwds:
             if co_argcount == 0:
                 raise ArgErrCount(avail, num_kwds,
                               co_argcount, has_vararg, has_kwarg,
                               defaults_w, missing)
-            raise ArgErrUnknownKwds(num_remainingkwds, keywords, used_keywords)
+            raise ArgErrUnknownKwds(self.space, num_remainingkwds, keywords,
+                                    used_keywords, self.keyword_names_w)
 
         if missing:
             raise ArgErrCount(avail, num_kwds,
@@ -443,9 +462,15 @@
         w_args = space.newtuple(self.arguments_w)
         w_kwds = space.newdict()
         if self.keywords is not None:
+            limit = len(self.keywords)
+            if self.keyword_names_w is not None:
+                limit -= len(self.keyword_names_w)
             for i in range(len(self.keywords)):
-                space.setitem(w_kwds, space.wrap(self.keywords[i]),
-                                      self.keywords_w[i])
+                if i < limit:
+                    w_key = space.wrap(self.keywords[i])
+                else:
+                    w_key = self.keyword_names_w[i - limit]
+                space.setitem(w_kwds, w_key, self.keywords_w[i])
         return w_args, w_kwds
 
 class ArgumentsForTranslation(Arguments):
@@ -666,14 +691,33 @@
 
 class ArgErrUnknownKwds(ArgErr):
 
-    def __init__(self, num_remainingkwds, keywords, used_keywords):
-        self.kwd_name = ''
+    def __init__(self, space, num_remainingkwds, keywords, used_keywords,
+                 keyword_names_w):
+        name = ''
         self.num_kwds = num_remainingkwds
         if num_remainingkwds == 1:
             for i in range(len(keywords)):
                 if not used_keywords[i]:
-                    self.kwd_name = keywords[i]
+                    name = keywords[i]
+                    if name is None:
+                        # We'll assume it's unicode. Encode it.
+                        # Careful, I *think* it should not be possible to
+                        # get an IndexError here but you never know.
+                        try:
+                            if keyword_names_w is None:
+                                raise IndexError
+                            # note: negative-based indexing from the end
+                            w_name = keyword_names_w[i - len(keywords)]
+                        except IndexError:
+                            name = '?'
+                        else:
+                            w_enc = space.wrap(space.sys.defaultencoding)
+                            w_err = space.wrap("replace")
+                            w_name = space.call_method(w_name, "encode", w_enc,
+                                                       w_err)
+                            name = space.str_w(w_name)
                     break
+        self.kwd_name = name
 
     def getmsg(self, fnname):
         if self.num_kwds == 1:
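
Besides the keyword handling, this file also carries the rlib.jit renaming that
recurs in the files below: @jit.purefunction becomes @jit.elidable and
jit.hint(x, promote=True) becomes jit.promote(x).  Untranslated both are
essentially no-ops, so a sketch along the lines of the Signature code above
runs on plain Python as well (assuming the pypy tree is importable):

    from pypy.rlib import jit

    @jit.elidable                  # was @jit.purefunction
    def find_argname(argnames, name):
        try:
            return argnames.index(name)
        except ValueError:
            return -1

    def lookup(argnames, name):
        jit.promote(name)          # was jit.hint(name, promote=True)
        return find_argname(argnames, name)

    print(lookup(["self", "x"], "x"))   # -> 1
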
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -655,9 +655,6 @@
 def _compute_CALL_FUNCTION_VAR_KW(arg):
     return -_num_args(arg) - 2
 
-def _compute_CALL_LIKELY_BUILTIN(arg):
-    return -(arg & 0xFF) + 1
-
 def _compute_CALL_METHOD(arg):
     return -_num_args(arg) - 1
 
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -12,7 +12,6 @@
 from pypy.interpreter.pyparser.error import SyntaxError
 from pypy.tool import stdlib_opcode as ops
 from pypy.interpreter.error import OperationError
-from pypy.module.__builtin__.__init__ import BUILTIN_TO_INDEX
 
 
 def compile_ast(space, module, info):
@@ -134,7 +133,7 @@
 
     def accept_comp_iteration(self, codegen, index):
         self.elt.walkabout(codegen)
-        codegen.emit_op_arg(ops.SET_ADD, index)
+        codegen.emit_op_arg(ops.SET_ADD, index + 1)
 
 
 class __extend__(ast.DictComp):
@@ -148,7 +147,7 @@
     def accept_comp_iteration(self, codegen, index):
         self.value.walkabout(codegen)
         self.key.walkabout(codegen)
-        codegen.emit_op_arg(ops.MAP_ADD, index)
+        codegen.emit_op_arg(ops.MAP_ADD, index + 1)
 
 
 # These are frame blocks.
@@ -942,8 +941,7 @@
 
     def visit_Call(self, call):
         self.update_position(call.lineno)
-        if self._optimize_builtin_call(call) or \
-                self._optimize_method_call(call):
+        if self._optimize_method_call(call):
             return
         call.func.walkabout(self)
         arg = 0
@@ -977,28 +975,6 @@
     def _call_has_simple_args(self, call):
         return self._call_has_no_star_args(call) and not call.keywords
 
-    def _optimize_builtin_call(self, call):
-        if not self.space.config.objspace.opcodes.CALL_LIKELY_BUILTIN or \
-                not self._call_has_simple_args(call) or \
-                not isinstance(call.func, ast.Name):
-            return False
-        func_name = call.func
-        assert isinstance(func_name, ast.Name)
-        name_scope = self.scope.lookup(func_name.id)
-        if name_scope == symtable.SCOPE_GLOBAL_IMPLICIT or \
-                name_scope == symtable.SCOPE_UNKNOWN:
-            builtin_index = BUILTIN_TO_INDEX.get(func_name.id, -1)
-            if builtin_index != -1:
-                if call.args:
-                    args_count = len(call.args)
-                    self.visit_sequence(call.args)
-                else:
-                    args_count = 0
-                arg = builtin_index << 8 | args_count
-                self.emit_op_arg(ops.CALL_LIKELY_BUILTIN, arg)
-                return True
-        return False
-
     def _optimize_method_call(self, call):
         if not self.space.config.objspace.opcodes.CALL_METHOD or \
                 not self._call_has_no_star_args(call) or \
diff --git a/pypy/interpreter/astcompiler/misc.py b/pypy/interpreter/astcompiler/misc.py
--- a/pypy/interpreter/astcompiler/misc.py
+++ b/pypy/interpreter/astcompiler/misc.py
@@ -92,7 +92,10 @@
         return name
     if len(name) + 2 >= MANGLE_LEN:
         return name
-    if name.endswith('__'):
+    # Don't mangle __id__ or names with dots. The only time a name with a dot
+    # can occur is when we are compiling an import statement that has a package
+    # name.
+    if name.endswith('__') or '.' in name:
         return name
     try:
         i = 0
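
A quick app-level reminder of what the mangling rule touches, and why dotted
names coming from imports must be skipped (see the test added in
test_compiler.py below):

    class C(object):
        def m(self):
            self.__x = 1        # mangled to self._C__x, as usual
            # 'from __foo__.bar import x' compiles the dotted name
            # '__foo__.bar'; mangling it to '_C__foo__.bar' would break the
            # "No module named __foo__" ImportError tested below.

    c = C()
    c.m()
    print(c._C__x)              # -> 1
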
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -55,7 +55,7 @@
         co_expr = compile(evalexpr, '<evalexpr>', 'eval')
         space = self.space
         pyco_expr = PyCode._from_code(space, co_expr)
-        w_res = pyco_expr.exec_host_bytecode(space, w_dict, w_dict)
+        w_res = pyco_expr.exec_host_bytecode(w_dict, w_dict)
         res = space.str_w(space.repr(w_res))
         if not isinstance(expected, float):
             assert res == repr(expected)
@@ -308,6 +308,15 @@
                "p.__name__", os.path.__name__)
         yield (self.st, 'from os import *',
                "path.__name__, sep", (os.path.__name__, os.sep))
+        yield (self.st, '''
+            class A(object):
+                def m(self):
+                    from __foo__.bar import x
+            try:
+                A().m()
+            except ImportError, e:
+                msg = str(e)
+            ''', "msg", "No module named __foo__")
 
     def test_if_stmts(self):
         yield self.st, "a = 42\nif a > 10: a += 2", "a", 44
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -237,7 +237,7 @@
 
 class ObjSpace(object):
     """Base class for the interpreter-level implementations of object spaces.
-    http://codespeak.net/pypy/dist/pypy/doc/objspace.html"""
+    http://pypy.readthedocs.org/en/latest/objspace.html"""
 
     full_exceptions = True  # full support for exceptions (normalization & more)
 
@@ -311,9 +311,6 @@
             mod = self.interpclass_w(w_mod)
             if isinstance(mod, Module) and mod.startup_called:
                 mod.shutdown(self)
-        if self.config.objspace.std.withdictmeasurement:
-            from pypy.objspace.std.dictmultiobject import report
-            report()
         if self.config.objspace.logbytecodes:
             self.reportbytecodecounts()
         if self.config.objspace.std.logspaceoptypes:
@@ -989,10 +986,7 @@
             compiler = self.createcompiler()
             expression = compiler.compile(expression, '?', 'eval', 0,
                                          hidden_applevel=hidden_applevel)
-        if isinstance(expression, types.CodeType):
-            # XXX only used by appsupport
-            expression = PyCode._from_code(self, expression)
-        if not isinstance(expression, PyCode):
+        else:
             raise TypeError, 'space.eval(): expected a string, code or PyCode object'
         return expression.exec_code(self, w_globals, w_locals)
 
@@ -1007,9 +1001,6 @@
             compiler = self.createcompiler()
             statement = compiler.compile(statement, filename, 'exec', 0,
                                          hidden_applevel=hidden_applevel)
-        if isinstance(statement, types.CodeType):
-            # XXX only used by appsupport
-            statement = PyCode._from_code(self, statement)
         if not isinstance(statement, PyCode):
             raise TypeError, 'space.exec_(): expected a string, code or PyCode object'
         w_key = self.wrap('__builtins__')
diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py
--- a/pypy/interpreter/eval.py
+++ b/pypy/interpreter/eval.py
@@ -100,12 +100,12 @@
 
     @jit.dont_look_inside
     def fast2locals(self):
-        # Copy values from self.fastlocals_w to self.w_locals
+        # Copy values from the fastlocals to self.w_locals
         if self.w_locals is None:
             self.w_locals = self.space.newdict()
         varnames = self.getcode().getvarnames()
         fastscope_w = self.getfastscope()
-        for i in range(min(len(varnames), len(fastscope_w))):
+        for i in range(min(len(varnames), self.getfastscopelength())):
             name = varnames[i]
             w_value = fastscope_w[i]
             if w_value is not None:
@@ -114,7 +114,7 @@
 
     @jit.dont_look_inside
     def locals2fast(self):
-        # Copy values from self.w_locals to self.fastlocals_w
+        # Copy values from self.w_locals to the fastlocals
         assert self.w_locals is not None
         varnames = self.getcode().getvarnames()
         numlocals = self.getfastscopelength()
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -16,7 +16,7 @@
 
 funccallunrolling = unrolling_iterable(range(4))
 
-@jit.purefunction_promote()
+@jit.elidable_promote()
 def _get_immutable_code(func):
     assert not func.can_change_code
     return func.code
@@ -63,7 +63,7 @@
         if jit.we_are_jitted():
             if not self.can_change_code:
                 return _get_immutable_code(self)
-            return jit.hint(self.code, promote=True)
+            return jit.promote(self.code)
         return self.code
 
     def funccall(self, *args_w): # speed hack
@@ -98,7 +98,7 @@
                                                    self.closure)
                 for i in funccallunrolling:
                     if i < nargs:
-                        new_frame.fastlocals_w[i] = args_w[i]
+                        new_frame.locals_stack_w[i] = args_w[i]
                 return new_frame.run()
         elif nargs >= 1 and fast_natural_arity == Code.PASSTHROUGHARGS1:
             assert isinstance(code, gateway.BuiltinCodePassThroughArguments1)
@@ -158,7 +158,7 @@
                                                    self.closure)
         for i in xrange(nargs):
             w_arg = frame.peekvalue(nargs-1-i)
-            new_frame.fastlocals_w[i] = w_arg
+            new_frame.locals_stack_w[i] = w_arg
 
         return new_frame.run()
 
@@ -169,13 +169,13 @@
                                                    self.closure)
         for i in xrange(nargs):
             w_arg = frame.peekvalue(nargs-1-i)
-            new_frame.fastlocals_w[i] = w_arg
+            new_frame.locals_stack_w[i] = w_arg
 
         ndefs = len(self.defs_w)
         start = ndefs - defs_to_load
         i = nargs
         for j in xrange(start, ndefs):
-            new_frame.fastlocals_w[i] = self.defs_w[j]
+            new_frame.locals_stack_w[i] = self.defs_w[j]
             i += 1
         return new_frame.run()
 
@@ -465,19 +465,23 @@
                 space.abstract_isinstance_w(w_firstarg, self.w_class)):
             pass  # ok
         else:
-            myname = self.getname(space,"")
-            clsdescr = self.w_class.getname(space,"")
+            myname = self.getname(space, "")
+            clsdescr = self.w_class.getname(space, "")
             if clsdescr:
-                clsdescr+=" "
+                clsdescr += " instance"
+            else:
+                clsdescr = "instance"
             if w_firstarg is None:
                 instdescr = "nothing"
             else:
-                instname = space.abstract_getclass(w_firstarg).getname(space,"")
+                instname = space.abstract_getclass(w_firstarg).getname(space,
+                                                                       "")
                 if instname:
-                    instname += " "
-                instdescr = "%sinstance" %instname
-            msg = ("unbound method %s() must be called with %s"
-                   "instance as first argument (got %s instead)")
+                    instdescr = instname + " instance"
+                else:
+                    instdescr = "instance"
+            msg = ("unbound method %s() must be called with %s "
+                   "as first argument (got %s instead)")
             raise operationerrfmt(space.w_TypeError, msg,
                                   myname, clsdescr, instdescr)
         return space.call_args(self.w_function, args)
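
The reworded TypeError in the hunk above can be observed from application
level; the wording below is roughly what both CPython 2 and the fixed PyPy
print (details may differ):

    class A(object):
        def m(self):
            pass

    try:
        A.m(42)        # unbound method called with a non-A first argument
    except TypeError, e:
        print(e)
    # e.g.: unbound method m() must be called with A instance as first
    #       argument (got int instance instead)
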
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -62,7 +62,7 @@
             raise operr
         # XXX it's not clear that last_instr should be promoted at all
         # but as long as it is necessary for call_assembler, let's do it early
-        last_instr = jit.hint(frame.last_instr, promote=True)
+        last_instr = jit.promote(frame.last_instr)
         if last_instr == -1:
             if w_arg and not space.is_w(w_arg, space.w_None):
                 msg = "can't send non-None value to a just-started generator"
diff --git a/pypy/interpreter/nestedscope.py b/pypy/interpreter/nestedscope.py
--- a/pypy/interpreter/nestedscope.py
+++ b/pypy/interpreter/nestedscope.py
@@ -170,7 +170,7 @@
         for i in range(len(args_to_copy)):
             argnum = args_to_copy[i]
             if argnum >= 0:
-                self.cells[i].set(self.fastlocals_w[argnum])
+                self.cells[i].set(self.locals_stack_w[argnum])
 
     def getfreevarname(self, index):
         freevarnames = self.pycode.co_cellvars + self.pycode.co_freevars
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -63,6 +63,7 @@
         the pypy compiler"""
         self.space = space
         eval.Code.__init__(self, name)
+        assert nlocals >= 0
         self.co_argcount = argcount
         self.co_nlocals = nlocals
         self.co_stacksize = stacksize
@@ -95,7 +96,7 @@
             if self.co_flags & CO_VARKEYWORDS:
                 argcount += 1
             # Cell vars could shadow already-set arguments.
-            # astcompiler.pyassem used to be clever about the order of
+            # The compiler used to be clever about the order of
             # the variables in both co_varnames and co_cellvars, but
             # it no longer is for the sake of simplicity.  Moreover
             # code objects loaded from CPython don't necessarily follow
@@ -202,7 +203,7 @@
         # speed hack
         fresh_frame = jit.hint(frame, access_directly=True,
                                       fresh_virtualizable=True)
-        args_matched = args.parse_into_scope(None, fresh_frame.fastlocals_w,
+        args_matched = args.parse_into_scope(None, fresh_frame.locals_stack_w,
                                              func.name,
                                              sig, func.defs_w)
         fresh_frame.init_cells()
@@ -215,7 +216,7 @@
         # speed hack
         fresh_frame = jit.hint(frame, access_directly=True,
                                       fresh_virtualizable=True)
-        args_matched = args.parse_into_scope(w_obj, fresh_frame.fastlocals_w,
+        args_matched = args.parse_into_scope(w_obj, fresh_frame.locals_stack_w,
                                              func.name,
                                              sig, func.defs_w)
         fresh_frame.init_cells()
@@ -256,7 +257,7 @@
                          tuple(self.co_freevars),
                          tuple(self.co_cellvars) )
 
-    def exec_host_bytecode(self, w_dict, w_globals, w_locals):
+    def exec_host_bytecode(self, w_globals, w_locals):
         from pypy.interpreter.pyframe import CPythonFrame
         frame = CPythonFrame(self.space, self, w_globals, None)
         frame.setdictscope(w_locals)
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -9,7 +9,7 @@
 from pypy.interpreter import pytraceback
 from pypy.rlib.objectmodel import we_are_translated, instantiate
 from pypy.rlib.jit import hint
-from pypy.rlib.debug import make_sure_not_resized
+from pypy.rlib.debug import make_sure_not_resized, check_nonneg
 from pypy.rlib.rarithmetic import intmask
 from pypy.rlib import jit
 from pypy.tool import stdlib_opcode
@@ -56,16 +56,18 @@
         assert isinstance(code, pycode.PyCode)
         self.pycode = code
         eval.Frame.__init__(self, space, w_globals)
-        self.valuestack_w = [None] * code.co_stacksize
-        self.valuestackdepth = 0
+        self.locals_stack_w = [None] * (code.co_nlocals + code.co_stacksize)
+        self.nlocals = code.co_nlocals
+        self.valuestackdepth = code.co_nlocals
         self.lastblock = None
+        make_sure_not_resized(self.locals_stack_w)
+        check_nonneg(self.nlocals)
+        #
         if space.config.objspace.honor__builtins__:
             self.builtin = space.builtin.pick_builtin(w_globals)
         # regular functions always have CO_OPTIMIZED and CO_NEWLOCALS.
         # class bodies only have CO_NEWLOCALS.
         self.initialize_frame_scopes(closure, code)
-        self.fastlocals_w = [None] * code.co_nlocals
-        make_sure_not_resized(self.fastlocals_w)
         self.f_lineno = code.co_firstlineno
 
     def mark_as_escaped(self):
@@ -184,14 +186,14 @@
     # stack manipulation helpers
     def pushvalue(self, w_object):
         depth = self.valuestackdepth
-        self.valuestack_w[depth] = w_object
+        self.locals_stack_w[depth] = w_object
         self.valuestackdepth = depth + 1
 
     def popvalue(self):
         depth = self.valuestackdepth - 1
-        assert depth >= 0, "pop from empty value stack"
-        w_object = self.valuestack_w[depth]
-        self.valuestack_w[depth] = None
+        assert depth >= self.nlocals, "pop from empty value stack"
+        w_object = self.locals_stack_w[depth]
+        self.locals_stack_w[depth] = None
         self.valuestackdepth = depth
         return w_object
 
@@ -217,24 +219,24 @@
     def peekvalues(self, n):
         values_w = [None] * n
         base = self.valuestackdepth - n
-        assert base >= 0
+        assert base >= self.nlocals
         while True:
             n -= 1
             if n < 0:
                 break
-            values_w[n] = self.valuestack_w[base+n]
+            values_w[n] = self.locals_stack_w[base+n]
         return values_w
 
     @jit.unroll_safe
     def dropvalues(self, n):
         n = hint(n, promote=True)
         finaldepth = self.valuestackdepth - n
-        assert finaldepth >= 0, "stack underflow in dropvalues()"        
+        assert finaldepth >= self.nlocals, "stack underflow in dropvalues()"
         while True:
             n -= 1
             if n < 0:
                 break
-            self.valuestack_w[finaldepth+n] = None
+            self.locals_stack_w[finaldepth+n] = None
         self.valuestackdepth = finaldepth
 
     @jit.unroll_safe
@@ -261,30 +263,30 @@
         # Contrast this with CPython where it's PEEK(-1).
         index_from_top = hint(index_from_top, promote=True)
         index = self.valuestackdepth + ~index_from_top
-        assert index >= 0, "peek past the bottom of the stack"
-        return self.valuestack_w[index]
+        assert index >= self.nlocals, "peek past the bottom of the stack"
+        return self.locals_stack_w[index]
 
     def settopvalue(self, w_object, index_from_top=0):
         index_from_top = hint(index_from_top, promote=True)
         index = self.valuestackdepth + ~index_from_top
-        assert index >= 0, "settop past the bottom of the stack"
-        self.valuestack_w[index] = w_object
+        assert index >= self.nlocals, "settop past the bottom of the stack"
+        self.locals_stack_w[index] = w_object
 
     @jit.unroll_safe
     def dropvaluesuntil(self, finaldepth):
         depth = self.valuestackdepth - 1
         finaldepth = hint(finaldepth, promote=True)
         while depth >= finaldepth:
-            self.valuestack_w[depth] = None
+            self.locals_stack_w[depth] = None
             depth -= 1
         self.valuestackdepth = finaldepth
 
-    def savevaluestack(self):
-        return self.valuestack_w[:self.valuestackdepth]
+    def save_locals_stack(self):
+        return self.locals_stack_w[:self.valuestackdepth]
 
-    def restorevaluestack(self, items_w):
-        assert None not in items_w
-        self.valuestack_w[:len(items_w)] = items_w
+    def restore_locals_stack(self, items_w):
+        self.locals_stack_w[:len(items_w)] = items_w
+        self.init_cells()
         self.dropvaluesuntil(len(items_w))
 
     def make_arguments(self, nargs):
@@ -314,11 +316,12 @@
         else:
             f_lineno = self.f_lineno
 
-        values_w = self.valuestack_w[0:self.valuestackdepth]
+        values_w = self.locals_stack_w[self.nlocals:self.valuestackdepth]
         w_valuestack = maker.slp_into_tuple_with_nulls(space, values_w)
         
         w_blockstack = nt([block._get_state_(space) for block in self.get_blocklist()])
-        w_fastlocals = maker.slp_into_tuple_with_nulls(space, self.fastlocals_w)
+        w_fastlocals = maker.slp_into_tuple_with_nulls(
+            space, self.locals_stack_w[:self.nlocals])
         if self.last_exception is None:
             w_exc_value = space.w_None
             w_tb = space.w_None
@@ -399,7 +402,8 @@
         new_frame.last_instr = space.int_w(w_last_instr)
         new_frame.frame_finished_execution = space.is_true(w_finished)
         new_frame.f_lineno = space.int_w(w_f_lineno)
-        new_frame.fastlocals_w = maker.slp_from_tuple_with_nulls(space, w_fastlocals)
+        fastlocals_w = maker.slp_from_tuple_with_nulls(space, w_fastlocals)
+        new_frame.locals_stack_w[:len(fastlocals_w)] = fastlocals_w
 
         if space.is_w(w_f_trace, space.w_None):
             new_frame.w_f_trace = None
@@ -423,28 +427,28 @@
     @jit.dont_look_inside
     def getfastscope(self):
         "Get the fast locals as a list."
-        return self.fastlocals_w
+        return self.locals_stack_w
 
     @jit.dont_look_inside
     def setfastscope(self, scope_w):
         """Initialize the fast locals from a list of values,
         where the order is according to self.pycode.signature()."""
         scope_len = len(scope_w)
-        if scope_len > len(self.fastlocals_w):
+        if scope_len > self.nlocals:
             raise ValueError, "new fastscope is longer than the allocated area"
-        # don't assign directly to 'fastlocals_w[:scope_len]' to be
+        # don't assign directly to 'locals_stack_w[:scope_len]' to be
         # virtualizable-friendly
         for i in range(scope_len):
-            self.fastlocals_w[i] = scope_w[i]
+            self.locals_stack_w[i] = scope_w[i]
         self.init_cells()
 
     def init_cells(self):
-        """Initialize cellvars from self.fastlocals_w
+        """Initialize cellvars from self.locals_stack_w.
         This is overridden in nestedscope.py"""
         pass
 
     def getfastscopelength(self):
-        return self.pycode.co_nlocals
+        return self.nlocals
 
     def getclosure(self):
         return None
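
The pyframe.py hunks above fold the old fastlocals_w and valuestack_w lists into a single locals_stack_w list: indices 0..nlocals-1 hold the fast locals, and the value stack grows from index nlocals upward.  That is why valuestackdepth now starts at co_nlocals and the stack assertions compare against self.nlocals instead of 0.  A minimal sketch of the new layout (illustrative only, not part of the patch):

    class FrameSketch(object):
        """Toy model of the merged locals + value stack list."""
        def __init__(self, nlocals, stacksize):
            self.locals_stack_w = [None] * (nlocals + stacksize)
            self.nlocals = nlocals
            self.valuestackdepth = nlocals   # stack begins right after the locals

        def pushvalue(self, w_object):
            self.locals_stack_w[self.valuestackdepth] = w_object
            self.valuestackdepth += 1

        def popvalue(self):
            depth = self.valuestackdepth - 1
            assert depth >= self.nlocals, "pop from empty value stack"
            w_object = self.locals_stack_w[depth]
            self.locals_stack_w[depth] = None
            self.valuestackdepth = depth
            return w_object
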
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -324,7 +324,7 @@
 
     def LOAD_FAST(self, varindex, next_instr):
         # access a local variable directly
-        w_value = self.fastlocals_w[varindex]
+        w_value = self.locals_stack_w[varindex]
         if w_value is None:
             self._load_fast_failed(varindex)
         self.pushvalue(w_value)
@@ -343,7 +343,7 @@
     def STORE_FAST(self, varindex, next_instr):
         w_newvalue = self.popvalue()
         assert w_newvalue is not None
-        self.fastlocals_w[varindex] = w_newvalue
+        self.locals_stack_w[varindex] = w_newvalue
 
     def POP_TOP(self, oparg, next_instr):
         self.popvalue()
@@ -696,12 +696,12 @@
     LOAD_GLOBAL._always_inline_ = True
 
     def DELETE_FAST(self, varindex, next_instr):
-        if self.fastlocals_w[varindex] is None:
+        if self.locals_stack_w[varindex] is None:
             varname = self.getlocalvarname(varindex)
             message = "local variable '%s' referenced before assignment"
             raise operationerrfmt(self.space.w_UnboundLocalError, message,
                                   varname)
-        self.fastlocals_w[varindex] = None
+        self.locals_stack_w[varindex] = None
 
     def BUILD_TUPLE(self, itemcount, next_instr):
         items = self.popvalues(itemcount)
@@ -1048,30 +1048,18 @@
 
     def SET_ADD(self, oparg, next_instr):
         w_value = self.popvalue()
-        w_set = self.peekvalue(oparg)
+        w_set = self.peekvalue(oparg - 1)
         self.space.call_method(w_set, 'add', w_value)
 
     def MAP_ADD(self, oparg, next_instr):
         w_key = self.popvalue()
         w_value = self.popvalue()
-        w_dict = self.peekvalue(oparg)
+        w_dict = self.peekvalue(oparg - 1)
         self.space.setitem(w_dict, w_key, w_value)
 
     def SET_LINENO(self, lineno, next_instr):
         pass
 
-    def CALL_LIKELY_BUILTIN(self, oparg, next_instr):
-        # overridden by faster version in the standard object space.
-        from pypy.module.__builtin__ import OPTIMIZED_BUILTINS
-        varname = OPTIMIZED_BUILTINS[oparg >> 8]
-        w_function = self._load_global(varname)
-        nargs = oparg&0xFF
-        try:
-            w_result = self.space.call_valuestack(w_function, nargs, self)
-        finally:
-            self.dropvalues(nargs)
-        self.pushvalue(w_result)
-
     # overridden by faster version in the standard object space.
     LOOKUP_METHOD = LOAD_ATTR
     CALL_METHOD = CALL_FUNCTION
@@ -1091,12 +1079,10 @@
 
     @jit.unroll_safe
     def BUILD_SET(self, itemcount, next_instr):
-        w_set = self.space.call_function(self.space.w_set)
-        if itemcount:
-            w_add = self.space.getattr(w_set, self.space.wrap("add"))
-            for i in range(itemcount):
-                w_item = self.popvalue()
-                self.space.call_function(w_add, w_item)
+        w_set = self.space.newset()
+        for i in range(itemcount):
+            w_item = self.popvalue()
+            self.space.call_method(w_set, 'add', w_item)
         self.pushvalue(w_set)
 
     def STORE_MAP(self, oparg, next_instr):
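
The SET_ADD/MAP_ADD hunk above changes the peek offset from oparg to oparg - 1.  peekvalue(n) returns the item n slots below the top of the value stack (0 is the top), and both opcodes pop the value being added before looking up the container, so with zero-based indexing the container is found at oparg - 1 after the pop.  A tiny runnable illustration on a plain list (the names are made up):

    # top of stack is the end of the list
    stack = ['w_set', 'w_a', 'w_b']
    def peekvalue(index_from_top):
        return stack[len(stack) - 1 - index_from_top]

    oparg = 2
    w_value = stack.pop()                    # SET_ADD pops the value first...
    assert peekvalue(oparg - 1) == 'w_set'   # ...so the set is at oparg - 1
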
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -1,3 +1,4 @@
+# -*- coding: utf-8 -*-
 import py
 from pypy.interpreter.argument import (Arguments, ArgumentsForTranslation,
     ArgErr, ArgErrUnknownKwds, ArgErrMultipleValues, ArgErrCount, rawshape,
@@ -126,6 +127,7 @@
     w_AttributeError = AttributeError
     w_UnicodeEncodeError = UnicodeEncodeError
     w_dict = dict
+    w_str = str
 
 class TestArgumentsNormal(object):
 
@@ -485,26 +487,6 @@
         args._match_signature(None, l, Signature(['abc']))
         assert len(l) == 1
         assert l[0] == space.wrap(5)
-        #
-        def str_w(w):
-            try:
-                return str(w)
-            except UnicodeEncodeError:
-                raise OperationError(space.w_UnicodeEncodeError,
-                                     space.wrap("oups"))
-        space.str_w = str_w
-        w_starstar = space.wrap({u'\u1234': 5})
-        err = py.test.raises(OperationError, Arguments,
-                             space, [], w_starstararg=w_starstar)
-        # Check that we get a TypeError.  On CPython it is because of
-        # "no argument called '?'".  On PyPy we get a TypeError too, but
-        # earlier: "keyword cannot be encoded to ascii".  The
-        # difference, besides the error message, is only apparent if the
-        # receiver also takes a **arg.  Then CPython passes the
-        # non-ascii unicode unmodified, whereas PyPy complains.  We will
-        # not care until someone has a use case for that.
-        assert not err.value.match(space, space.w_UnicodeEncodeError)
-        assert     err.value.match(space, space.w_TypeError)
 
 class TestErrorHandling(object):
     def test_missing_args(self):
@@ -559,13 +541,26 @@
             assert 0, "did not raise"
 
     def test_unknown_keywords(self):
-        err = ArgErrUnknownKwds(1, ['a', 'b'], [True, False])
+        space = DummySpace()
+        err = ArgErrUnknownKwds(space, 1, ['a', 'b'], [True, False], None)
         s = err.getmsg('foo')
         assert s == "foo() got an unexpected keyword argument 'b'"
-        err = ArgErrUnknownKwds(2, ['a', 'b', 'c'], [True, False, False])
+        err = ArgErrUnknownKwds(space, 2, ['a', 'b', 'c'],
+                                [True, False, False], None)
         s = err.getmsg('foo')
         assert s == "foo() got 2 unexpected keyword arguments"
 
+    def test_unknown_unicode_keyword(self):
+        class DummySpaceUnicode(DummySpace):
+            class sys:
+                defaultencoding = 'utf-8'
+        space = DummySpaceUnicode()
+        err = ArgErrUnknownKwds(space, 1, ['a', None, 'b', 'c'],
+                                [True, False, True, True],
+                                [unichr(0x1234), u'b', u'c'])
+        s = err.getmsg('foo')
+        assert s == "foo() got an unexpected keyword argument '\xe1\x88\xb4'"
+
     def test_multiple_values(self):
         err = ArgErrMultipleValues('bla')
         s = err.getmsg('foo')
@@ -592,6 +587,14 @@
         exc = raises(TypeError, (lambda a, b, **kw: 0), a=1)
         assert exc.value.message == "<lambda>() takes exactly 2 non-keyword arguments (0 given)"
 
+    def test_unicode_keywords(self):
+        def f(**kwargs):
+            assert kwargs[u"美"] == 42
+        f(**{u"美" : 42})
+        def f(x): pass
+        e = raises(TypeError, "f(**{u'ü' : 19})")
+        assert "?" in str(e.value)
+
 def make_arguments_for_translation(space, args_w, keywords_w={},
                                    w_stararg=None, w_starstararg=None):
     return ArgumentsForTranslation(space, args_w, keywords_w.keys(),
diff --git a/pypy/interpreter/test/test_eval.py b/pypy/interpreter/test/test_eval.py
--- a/pypy/interpreter/test/test_eval.py
+++ b/pypy/interpreter/test/test_eval.py
@@ -15,16 +15,16 @@
                 self.code = code
                 Frame.__init__(self, space)
                 self.numlocals = numlocals
-                self.fastlocals_w = [None] * self.numlocals
+                self._fastlocals_w = [None] * self.numlocals
 
             def getcode(self):
                 return self.code
 
             def setfastscope(self, scope_w):
-                self.fastlocals_w = scope_w
+                self._fastlocals_w = scope_w
 
             def getfastscope(self):
-                return self.fastlocals_w
+                return self._fastlocals_w
 
             def getfastscopelength(self):
                 return self.numlocals
@@ -38,11 +38,11 @@
         self.f.fast2locals()
         assert space.eq_w(self.f.w_locals, self.space.wrap({}))
         
-        self.f.fastlocals_w[0] = w(5)
+        self.f._fastlocals_w[0] = w(5)
         self.f.fast2locals()
         assert space.eq_w(self.f.w_locals, self.space.wrap({'x': 5}))
 
-        self.f.fastlocals_w[2] = w(7)
+        self.f._fastlocals_w[2] = w(7)
         self.f.fast2locals()
         assert space.eq_w(self.f.w_locals, self.space.wrap({'x': 5, 'args': 7}))
 
@@ -57,13 +57,13 @@
         w = self.space.wrap
         self.f.w_locals = self.space.wrap({})
         self.f.locals2fast()
-        self.sameList(self.f.fastlocals_w, [None]*5)
+        self.sameList(self.f._fastlocals_w, [None]*5)
 
         self.f.w_locals = self.space.wrap({'x': 5})
         self.f.locals2fast()
-        self.sameList(self.f.fastlocals_w, [w(5)] + [None]*4)
+        self.sameList(self.f._fastlocals_w, [w(5)] + [None]*4)
 
         self.f.w_locals = self.space.wrap({'x':5, 'args':7})
         self.f.locals2fast()
-        self.sameList(self.f.fastlocals_w, [w(5), None, w(7),
-                                            None, None])
+        self.sameList(self.f._fastlocals_w, [w(5), None, w(7),
+                                             None, None])
diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py
--- a/pypy/interpreter/test/test_executioncontext.py
+++ b/pypy/interpreter/test/test_executioncontext.py
@@ -106,7 +106,7 @@
             if isinstance(seen[0], Method):
                 found = 'method %s of %s' % (
                     seen[0].w_function.name,
-                    seen[0].w_class.getname(space, '?'))
+                    seen[0].w_class.getname(space))
             else:
                 assert isinstance(seen[0], Function)
                 found = 'builtin %s' % seen[0].name
@@ -232,31 +232,6 @@
         assert [i[0] for i in events] == ['c_call', 'c_return', 'return', 'c_call']
         assert events[0][1] == events[1][1]
 
-    def test_tracing_range_builtinshortcut(self):
-        opts = {"objspace.opcodes.CALL_LIKELY_BUILTIN": True}
-        space = gettestobjspace(**opts)
-        source = """def f(profile):
-        import sys
-        sys.setprofile(profile)
-        range(10)
-        sys.setprofile(None)
-        """
-        w_events = space.appexec([space.wrap(source)], """(source):
-        import sys
-        l = []
-        def profile(frame, event, arg):
-            l.append((event, arg))
-        d = {}
-        exec source in d
-        f = d['f']
-        f(profile)
-        import dis
-        print dis.dis(f)
-        return l
-        """)
-        events = space.unwrap(w_events)
-        assert [i[0] for i in events] == ['c_call', 'c_return', 'c_call']
-
     def test_profile_and_exception(self):
         space = self.space
         w_res = space.appexec([], """():
@@ -280,9 +255,6 @@
         """)
 
 
-class TestExecutionContextWithCallLikelyBuiltin(TestExecutionContext):
-    keywords = {'objspace.opcodes.CALL_LIKELY_BUILTIN': True}
-
 class TestExecutionContextWithCallMethod(TestExecutionContext):
     keywords = {'objspace.opcodes.CALL_METHOD': True}
 
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -16,7 +16,7 @@
 
         def g():
             f()
-        
+
         try:
             g()
         except:
@@ -203,3 +203,27 @@
         lst = seen[:]
         assert lst == [5, 10, 2]
         raises(OSError, os.lseek, fd, 7, 0)
+
+    def test_method_attrs(self):
+        import sys
+        class A(object):
+            def m(self):
+                "aaa"
+            m.x = 3
+        class B(A):
+            pass
+
+        bm = B().m
+        assert bm.__func__ is bm.im_func
+        assert bm.__self__ is bm.im_self
+        assert bm.im_class is B
+        assert bm.__doc__ == "aaa"
+        assert bm.x == 3
+        raises(AttributeError, setattr, bm, 'x', 15)
+        l = []
+        assert l.append.__self__ is l
+        assert l.__add__.__self__ is l
+        # note: 'l.__add__.__objclass__' is not defined in pypy
+        # because it's a regular method, and .__objclass__
+        # differs from .im_class in case the method is
+        # defined in some parent class of l's actual class
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -9,7 +9,7 @@
 from pypy.interpreter.error import OperationError, operationerrfmt
 from pypy.tool.sourcetools import compile2, func_with_new_name
 from pypy.rlib.objectmodel import instantiate, compute_identity_hash, specialize
-from pypy.rlib.jit import hint
+from pypy.rlib.jit import promote
 
 class TypeDef:
     def __init__(self, __name, __base=None, **rawdict):
@@ -206,7 +206,7 @@
             user_overridden_class = True
 
             def getclass(self, space):
-                return hint(self.w__class__, promote=True)
+                return promote(self.w__class__)
 
             def setclass(self, space, w_subtype):
                 # only used by descr_set___class__
@@ -761,12 +761,15 @@
     )
 Function.typedef.acceptable_as_base_class = False
 
-Method.typedef = TypeDef("method",
+Method.typedef = TypeDef(
+    "method",
     __new__ = interp2app(Method.descr_method__new__.im_func),
     __call__ = interp2app(Method.descr_method_call),
     __get__ = interp2app(Method.descr_method_get),
     im_func  = interp_attrproperty_w('w_function', cls=Method),
+    __func__ = interp_attrproperty_w('w_function', cls=Method),
     im_self  = interp_attrproperty_w('w_instance', cls=Method),
+    __self__ = interp_attrproperty_w('w_instance', cls=Method),
     im_class = interp_attrproperty_w('w_class', cls=Method),
     __getattribute__ = interp2app(Method.descr_method_getattribute),
     __eq__ = interp2app(Method.descr_method_eq),
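
Several hunks in this merge (function.py and generator.py above, test_ztranslation.py below, and typedef.py here) switch from jit.hint(x, promote=True) to the newer jit.promote(x) spelling; the replacement is purely mechanical.  A minimal before/after sketch (illustrative, not from the patch):

    from pypy.rlib.jit import hint, promote

    def getclass_old(self, space):
        return hint(self.w__class__, promote=True)   # old spelling

    def getclass_new(self, space):
        return promote(self.w__class__)              # new spelling, same effect
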
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
--- a/pypy/jit/backend/llgraph/llimpl.py
+++ b/pypy/jit/backend/llgraph/llimpl.py
@@ -136,6 +136,7 @@
     'call'            : (('ref', 'varargs'), 'intorptr'),
     'call_assembler'  : (('varargs',), 'intorptr'),
     'cond_call_gc_wb' : (('ptr', 'ptr'), None),
+    'cond_call_gc_wb_array': (('ptr', 'int', 'ptr'), None),
     'oosend'          : (('varargs',), 'intorptr'),
     'oosend_pure'     : (('varargs',), 'intorptr'),
     'guard_true'      : (('bool',), None),
@@ -857,6 +858,9 @@
     def op_cond_call_gc_wb(self, descr, a, b):
         py.test.skip("cond_call_gc_wb not supported")
 
+    def op_cond_call_gc_wb_array(self, descr, a, b, c):
+        py.test.skip("cond_call_gc_wb_array not supported")
+
     def op_oosend(self, descr, obj, *args):
         raise NotImplementedError("oosend for lltype backend??")
 
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py
--- a/pypy/jit/backend/llsupport/descr.py
+++ b/pypy/jit/backend/llsupport/descr.py
@@ -267,6 +267,9 @@
 
     def __repr__(self):
         res = '%s(%s)' % (self.__class__.__name__, self.arg_classes)
+        extraeffect = getattr(self.extrainfo, 'extraeffect', None)
+        if extraeffect is not None:
+            res += ' EF=%r' % extraeffect
         oopspecindex = getattr(self.extrainfo, 'oopspecindex', 0)
         if oopspecindex:
             from pypy.jit.codewriter.effectinfo import EffectInfo
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
--- a/pypy/jit/backend/llsupport/gc.py
+++ b/pypy/jit/backend/llsupport/gc.py
@@ -618,6 +618,7 @@
         return cpu.cast_adr_to_int(funcaddr)
 
     def get_write_barrier_from_array_fn(self, cpu):
+        # returns a function with arguments [array, index, newvalue]
         llop1 = self.llop1
         funcptr = llop1.get_write_barrier_from_array_failing_case(
             self.WB_ARRAY_FUNCPTR)
@@ -920,7 +921,7 @@
             length = known_lengths.get(v_base, LARGE)
             if length >= LARGE:
                 # unknown or too big: produce a write_barrier_from_array
-                args = [v_base, v_value, v_index]
+                args = [v_base, v_index, v_value]
                 newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args,
                                            None,
                                            descr=self.write_barrier_descr))
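
The hunk above also fixes the argument order passed to COND_CALL_GC_WB_ARRAY to [array, index, newvalue], matching the signature described in get_write_barrier_from_array_fn(); the test_gc.py and runner_test.py changes below assert the same order.  A sketch of the shape of the slow-path callable the backend ends up invoking (names illustrative, not taken from the patch):

    def write_barrier_from_array_failing_case(array, index, newvalue):
        # invoked only when the inline check emitted by the backend fails;
        # passing the index lets the GC remember just the modified part of
        # a large array rather than the whole object
        pass
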
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py
--- a/pypy/jit/backend/llsupport/test/test_gc.py
+++ b/pypy/jit/backend/llsupport/test/test_gc.py
@@ -9,7 +9,7 @@
 from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist
 from pypy.jit.tool.oparser import parse
 from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE
-from pypy.jit.metainterp.test.test_optimizeopt import equaloplists
+from pypy.jit.metainterp.optimizeopt.util import equaloplists
 
 def test_boehm():
     gc_ll_descr = GcLLDescr_boehm(None, None, None)
@@ -716,8 +716,8 @@
             else:
                 assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY
                 assert operations[0].getarg(0) == v_base
-                assert operations[0].getarg(1) == v_value
-                assert operations[0].getarg(2) == v_index
+                assert operations[0].getarg(1) == v_index
+                assert operations[0].getarg(2) == v_value
             assert operations[0].result is None
             #
             assert operations[1].getopnum() == rop.SETARRAYITEM_RAW
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -1694,12 +1694,13 @@
                 assert record == []
 
     def test_cond_call_gc_wb_array(self):
-        def func_void(a, b):
-            record.append((a, b))
+        def func_void(a, b, c):
+            record.append((a, b, c))
         record = []
         #
         S = lltype.GcStruct('S', ('tid', lltype.Signed))
-        FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void)
+        FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed, lltype.Ptr(S)],
+                             lltype.Void)
         func_ptr = llhelper(lltype.Ptr(FUNC), func_void)
         funcbox = self.get_funcbox(self.cpu, func_ptr)
         class WriteBarrierDescr(AbstractDescr):
@@ -1719,11 +1720,11 @@
             s.tid = value
             sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s)
             del record[:]
-            self.execute_operation(rop.COND_CALL_GC_WB,
-                                   [BoxPtr(sgcref), ConstInt(123)],
-                                   'void', descr=WriteBarrierDescr())
+            self.execute_operation(rop.COND_CALL_GC_WB_ARRAY,
+                       [BoxPtr(sgcref), ConstInt(123), BoxPtr(sgcref)],
+                       'void', descr=WriteBarrierDescr())
             if cond:
-                assert record == [(s, 123)]
+                assert record == [(s, 123, s)]
             else:
                 assert record == []
 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -703,22 +703,28 @@
         # we need to put two words into the shadowstack: the MARKER
         # and the address of the frame (ebp, actually)
         rst = gcrootmap.get_root_stack_top_addr()
-        assert rx86.fits_in_32bits(rst)
-        if IS_X86_64:
-            # cannot use rdx here, it's used to pass arguments!
-            tmp = X86_64_SCRATCH_REG
+        if rx86.fits_in_32bits(rst):
+            self.mc.MOV_rj(eax.value, rst)            # MOV eax, [rootstacktop]
         else:
-            tmp = edx
-        self.mc.MOV_rj(eax.value, rst)                # MOV eax, [rootstacktop]
-        self.mc.LEA_rm(tmp.value, (eax.value, 2*WORD))  # LEA edx, [eax+2*WORD]
+            self.mc.MOV_ri(r13.value, rst)            # MOV r13, rootstacktop
+            self.mc.MOV_rm(eax.value, (r13.value, 0)) # MOV eax, [r13]
+        #
+        self.mc.LEA_rm(ebx.value, (eax.value, 2*WORD))  # LEA ebx, [eax+2*WORD]
         self.mc.MOV_mi((eax.value, 0), gcrootmap.MARKER)    # MOV [eax], MARKER
         self.mc.MOV_mr((eax.value, WORD), ebp.value)      # MOV [eax+WORD], ebp
-        self.mc.MOV_jr(rst, tmp.value)                # MOV [rootstacktop], edx
+        #
+        if rx86.fits_in_32bits(rst):
+            self.mc.MOV_jr(rst, ebx.value)            # MOV [rootstacktop], ebx
+        else:
+            self.mc.MOV_mr((r13.value, 0), ebx.value) # MOV [r13], ebx
 
     def _call_footer_shadowstack(self, gcrootmap):
         rst = gcrootmap.get_root_stack_top_addr()
-        assert rx86.fits_in_32bits(rst)
-        self.mc.SUB_ji8(rst, 2*WORD)       # SUB [rootstacktop], 2*WORD
+        if rx86.fits_in_32bits(rst):
+            self.mc.SUB_ji8(rst, 2*WORD)       # SUB [rootstacktop], 2*WORD
+        else:
+            self.mc.MOV_ri(ebx.value, rst)           # MOV ebx, rootstacktop
+            self.mc.SUB_mi8((ebx.value, 0), 2*WORD)  # SUB [ebx], 2*WORD
 
     def _assemble_bootstrap_direct_call(self, arglocs, jmppos, stackdepth):
         if IS_X86_64:
@@ -889,7 +895,7 @@
 
     def regalloc_push(self, loc):
         if isinstance(loc, RegLoc) and loc.is_xmm:
-            self.mc.SUB_ri(esp.value, 2*WORD)
+            self.mc.SUB_ri(esp.value, 8)   # = size of doubles
             self.mc.MOVSD_sx(0, loc.value)
         elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8:
             # XXX evil trick
@@ -901,7 +907,7 @@
     def regalloc_pop(self, loc):
         if isinstance(loc, RegLoc) and loc.is_xmm:
             self.mc.MOVSD_xs(loc.value, 0)
-            self.mc.ADD_ri(esp.value, 2*WORD)
+            self.mc.ADD_ri(esp.value, 8)   # = size of doubles
         elif WORD == 4 and isinstance(loc, StackLoc) and loc.width == 8:
             # XXX evil trick
             self.mc.POP_b(get_ebp_ofs(loc.position + 1))
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -884,18 +884,12 @@
     def consider_cond_call_gc_wb(self, op):
         assert op.result is None
         args = op.getarglist()
-        loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args)
-        # ^^^ we force loc_newvalue in a reg (unless it's a Const),
-        # because it will be needed anyway by the following setfield_gc.
-        # It avoids loading it twice from the memory.
-        loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args)
-        #
-        if len(args) == 2:
-            arglocs = [loc_base, loc_newvalue]    # cond_call_gc_wb
-        else:
-            # cond_call_gc_wb_array
-            loc_arrayindex = self.rm.make_sure_var_in_reg(op.getarg(2), args)
-            arglocs = [loc_base, loc_newvalue, loc_arrayindex]
+        N = len(args)
+        # we force all arguments in a reg (unless they are Consts),
+        # because they will be needed anyway by the following setfield_gc
+        # or setarrayitem_gc.  This avoids loading them twice from memory.
+        arglocs = [self.rm.make_sure_var_in_reg(op.getarg(i), args)
+                   for i in range(N)]
         # add eax, ecx and edx as extra "arguments" to ensure they are
         # saved and restored.  Fish in self.rm to know which of these
         # registers really need to be saved (a bit of a hack).  Moreover,
diff --git a/pypy/jit/backend/x86/regloc.py b/pypy/jit/backend/x86/regloc.py
--- a/pypy/jit/backend/x86/regloc.py
+++ b/pypy/jit/backend/x86/regloc.py
@@ -318,7 +318,9 @@
             # must be careful not to combine it with location types that
             # might need to use the scratch register themselves.
             if loc2 is X86_64_SCRATCH_REG:
-                assert code1 != 'j'
+                if code1 == 'j':
+                    assert (name.startswith("MOV") and
+                            rx86.fits_in_32bits(loc1.value_j()))
             if loc1 is X86_64_SCRATCH_REG and not name.startswith("MOV"):
                 assert code2 not in ('j', 'i')
 
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
--- a/pypy/jit/backend/x86/rx86.py
+++ b/pypy/jit/backend/x86/rx86.py
@@ -283,7 +283,7 @@
 # with immediate(argnum)).
 
 def encode_abs(mc, _1, _2, orbyte):
-    # expands to either '\x05' on 32-bit, or '\x04\x25' or 64-bit
+    # expands to either '\x05' on 32-bit, or '\x04\x25' on 64-bit
     if mc.WORD == 8:
         mc.writechar(chr(0x04 | orbyte))
         mc.writechar(chr(0x25))
@@ -370,6 +370,8 @@
     INSN_rj = insn(rex_w, chr(base+3), register(1,8), abs_, immediate(2))
     INSN_ji8 = insn(rex_w, '\x83', orbyte(base), abs_, immediate(1),
                     immediate(2,'b'))
+    INSN_mi8 = insn(rex_w, '\x83', orbyte(base), mem_reg_plus_const(1),
+                    immediate(2,'b'))
     INSN_bi8 = insn(rex_w, '\x83', orbyte(base), stack_bp(1), immediate(2,'b'))
     INSN_bi32= insn(rex_w, '\x81', orbyte(base), stack_bp(1), immediate(2))
 
@@ -388,7 +390,7 @@
     INSN_bi._always_inline_ = True      # try to constant-fold single_byte()
 
     return (INSN_ri, INSN_rr, INSN_rb, INSN_bi, INSN_br, INSN_rm, INSN_rj,
-            INSN_ji8)
+            INSN_ji8, INSN_mi8)
 
 def select_8_or_32_bit_immed(insn_8, insn_32):
     def INSN(*args):
@@ -467,13 +469,13 @@
 
     # ------------------------------ Arithmetic ------------------------------
 
-    ADD_ri, ADD_rr, ADD_rb, _, _, ADD_rm, ADD_rj, _ = common_modes(0)
-    OR_ri,  OR_rr,  OR_rb,  _, _, OR_rm,  OR_rj,  _ = common_modes(1)
-    AND_ri, AND_rr, AND_rb, _, _, AND_rm, AND_rj, _ = common_modes(4)
-    SUB_ri, SUB_rr, SUB_rb, _, _, SUB_rm, SUB_rj, SUB_ji8 = common_modes(5)
-    SBB_ri, SBB_rr, SBB_rb, _, _, SBB_rm, SBB_rj, _ = common_modes(3)
-    XOR_ri, XOR_rr, XOR_rb, _, _, XOR_rm, XOR_rj, _ = common_modes(6)
-    CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br, CMP_rm, CMP_rj, _ = common_modes(7)
+    ADD_ri,ADD_rr,ADD_rb,_,_,ADD_rm,ADD_rj,_,_ = common_modes(0)
+    OR_ri, OR_rr, OR_rb, _,_,OR_rm, OR_rj, _,_ = common_modes(1)
+    AND_ri,AND_rr,AND_rb,_,_,AND_rm,AND_rj,_,_ = common_modes(4)
+    SUB_ri,SUB_rr,SUB_rb,_,_,SUB_rm,SUB_rj,SUB_ji8,SUB_mi8 = common_modes(5)
+    SBB_ri,SBB_rr,SBB_rb,_,_,SBB_rm,SBB_rj,_,_ = common_modes(3)
+    XOR_ri,XOR_rr,XOR_rb,_,_,XOR_rm,XOR_rj,_,_ = common_modes(6)
+    CMP_ri,CMP_rr,CMP_rb,CMP_bi,CMP_br,CMP_rm,CMP_rj,_,_ = common_modes(7)
 
     CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b'))
     CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2))
diff --git a/pypy/jit/backend/x86/test/test_assembler.py b/pypy/jit/backend/x86/test/test_assembler.py
--- a/pypy/jit/backend/x86/test/test_assembler.py
+++ b/pypy/jit/backend/x86/test/test_assembler.py
@@ -1,13 +1,15 @@
 from pypy.jit.backend.x86.regloc import *
 from pypy.jit.backend.x86.assembler import Assembler386
 from pypy.jit.backend.x86.regalloc import X86FrameManager, get_ebp_ofs
-from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, INT, REF, FLOAT
+from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, ConstFloat
+from pypy.jit.metainterp.history import INT, REF, FLOAT
 from pypy.rlib.rarithmetic import intmask
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.jit.backend.x86.arch import WORD, IS_X86_32, IS_X86_64
 from pypy.jit.backend.detect_cpu import getcpuclass 
 from pypy.jit.backend.x86.regalloc import X86RegisterManager, X86_64_RegisterManager, X86XMMRegisterManager, X86_64_XMMRegisterManager
 from pypy.jit.codewriter import longlong
+import ctypes
 
 ACTUAL_CPU = getcpuclass()
 
@@ -238,3 +240,103 @@
         assert assembler.fail_boxes_int.getitem(i) == expected_ints[i]
         assert assembler.fail_boxes_ptr.getitem(i) == expected_ptrs[i]
         assert assembler.fail_boxes_float.getitem(i) == expected_floats[i]
+
+# ____________________________________________________________
+
+class TestRegallocPushPop(object):
+
+    def do_test(self, callback):
+        from pypy.jit.backend.x86.regalloc import X86FrameManager
+        from pypy.jit.backend.x86.regalloc import X86XMMRegisterManager
+        class FakeToken:
+            class compiled_loop_token:
+                asmmemmgr_blocks = None
+        cpu = ACTUAL_CPU(None, None)
+        cpu.setup()
+        looptoken = FakeToken()
+        asm = cpu.assembler
+        asm.setup_once()
+        asm.setup(looptoken)
+        self.fm = X86FrameManager()
+        self.xrm = X86XMMRegisterManager(None, frame_manager=self.fm,
+                                         assembler=asm)
+        callback(asm)
+        asm.mc.RET()
+        rawstart = asm.materialize_loop(looptoken)
+        #
+        F = ctypes.CFUNCTYPE(ctypes.c_long)
+        fn = ctypes.cast(rawstart, F)
+        res = fn()
+        return res
+
+    def test_simple(self):
+        def callback(asm):
+            asm.mov(imm(42), edx)
+            asm.regalloc_push(edx)
+            asm.regalloc_pop(eax)
+        res = self.do_test(callback)
+        assert res == 42
+
+    def test_push_stack(self):
+        def callback(asm):
+            loc = self.fm.frame_pos(5, INT)
+            asm.mc.SUB_ri(esp.value, 64)
+            asm.mov(imm(42), loc)
+            asm.regalloc_push(loc)
+            asm.regalloc_pop(eax)
+            asm.mc.ADD_ri(esp.value, 64)
+        res = self.do_test(callback)
+        assert res == 42
+
+    def test_pop_stack(self):
+        def callback(asm):
+            loc = self.fm.frame_pos(5, INT)
+            asm.mc.SUB_ri(esp.value, 64)
+            asm.mov(imm(42), edx)
+            asm.regalloc_push(edx)
+            asm.regalloc_pop(loc)
+            asm.mov(loc, eax)
+            asm.mc.ADD_ri(esp.value, 64)
+        res = self.do_test(callback)
+        assert res == 42
+
+    def test_simple_xmm(self):
+        def callback(asm):
+            c = ConstFloat(longlong.getfloatstorage(-42.5))
+            loc = self.xrm.convert_to_imm(c)
+            asm.mov(loc, xmm5)
+            asm.regalloc_push(xmm5)
+            asm.regalloc_pop(xmm0)
+            asm.mc.CVTTSD2SI(eax, xmm0)
+        res = self.do_test(callback)
+        assert res == -42
+
+    def test_push_stack_xmm(self):
+        def callback(asm):
+            c = ConstFloat(longlong.getfloatstorage(-42.5))
+            loc = self.xrm.convert_to_imm(c)
+            loc2 = self.fm.frame_pos(4, FLOAT)
+            asm.mc.SUB_ri(esp.value, 64)
+            asm.mov(loc, xmm5)
+            asm.mov(xmm5, loc2)
+            asm.regalloc_push(loc2)
+            asm.regalloc_pop(xmm0)
+            asm.mc.ADD_ri(esp.value, 64)
+            asm.mc.CVTTSD2SI(eax, xmm0)
+        res = self.do_test(callback)
+        assert res == -42
+
+    def test_pop_stack_xmm(self):
+        def callback(asm):
+            c = ConstFloat(longlong.getfloatstorage(-42.5))
+            loc = self.xrm.convert_to_imm(c)
+            loc2 = self.fm.frame_pos(4, FLOAT)
+            asm.mc.SUB_ri(esp.value, 64)
+            asm.mov(loc, xmm5)
+            asm.regalloc_push(xmm5)
+            asm.regalloc_pop(loc2)
+            asm.mov(loc2, xmm0)
+            asm.mc.ADD_ri(esp.value, 64)
+            asm.mc.CVTTSD2SI(eax, xmm0)
+        res = self.do_test(callback)
+        assert res == -42
diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py
--- a/pypy/jit/backend/x86/test/test_runner.py
+++ b/pypy/jit/backend/x86/test/test_runner.py
@@ -6,6 +6,7 @@
                                          ConstPtr, Box, BoxFloat, BasicFailDescr)
 from pypy.jit.backend.detect_cpu import getcpuclass
 from pypy.jit.backend.x86.arch import WORD
+from pypy.jit.backend.x86.rx86 import fits_in_32bits
 from pypy.jit.backend.llsupport import symbolic
 from pypy.jit.metainterp.resoperation import rop
 from pypy.jit.metainterp.executor import execute
@@ -241,6 +242,23 @@
         c = self.execute_operation(rop.GETFIELD_GC, [res], 'int', ofsc3)
         assert c.value == 3
 
+    def test_bug_setfield_64bit(self):
+        if WORD == 4:
+            py.test.skip("only for 64 bits")
+        TP = lltype.GcStruct('S', ('i', lltype.Signed))
+        ofsi = self.cpu.fielddescrof(TP, 'i')
+        for i in range(500):
+            p = lltype.malloc(TP)
+            addr = rffi.cast(lltype.Signed, p)
+            if fits_in_32bits(addr):
+                break    # fitting in 32 bits, good
+        else:
+            py.test.skip("cannot get a 32-bit pointer")
+        res = ConstPtr(rffi.cast(llmemory.GCREF, addr))
+        self.execute_operation(rop.SETFIELD_RAW, [res, ConstInt(3**33)],
+                               'void', ofsi)
+        assert p.i == 3**33
+
     def test_nullity_with_guard(self):
         allops = [rop.INT_IS_TRUE]
         guards = [rop.GUARD_TRUE, rop.GUARD_FALSE]
diff --git a/pypy/jit/backend/x86/test/test_rx86.py b/pypy/jit/backend/x86/test/test_rx86.py
--- a/pypy/jit/backend/x86/test/test_rx86.py
+++ b/pypy/jit/backend/x86/test/test_rx86.py
@@ -185,6 +185,13 @@
     cb = CodeBuilder32
     assert_encodes_as(cb, 'PUSH_i32', (9,), '\x68\x09\x00\x00\x00')
 
+def test_sub_ji8():
+    cb = CodeBuilder32
+    assert_encodes_as(cb, 'SUB_ji8', (11223344, 55),
+                      '\x83\x2D\x30\x41\xAB\x00\x37')
+    assert_encodes_as(cb, 'SUB_mi8', ((edx, 16), 55),
+                      '\x83\x6A\x10\x37')
+
 class CodeBuilder64(CodeBuilderMixin, X86_64_CodeBuilder):
     pass
 
diff --git a/pypy/jit/backend/x86/test/test_zrpy_gc.py b/pypy/jit/backend/x86/test/test_zrpy_gc.py
--- a/pypy/jit/backend/x86/test/test_zrpy_gc.py
+++ b/pypy/jit/backend/x86/test/test_zrpy_gc.py
@@ -10,7 +10,7 @@
 from pypy.rlib import rgc
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rlib.jit import JitDriver, dont_look_inside
-from pypy.rlib.jit import purefunction, unroll_safe
+from pypy.rlib.jit import elidable, unroll_safe
 from pypy.jit.backend.llsupport.gc import GcLLDescr_framework
 from pypy.tool.udir import udir
 from pypy.config.translationoption import DEFL_GC
@@ -561,7 +561,7 @@
         self.run('compile_framework_external_exception_handling')
 
     def define_compile_framework_bug1(self):
-        @purefunction
+        @elidable
         def nonmoving():
             x = X(1)
             for i in range(7):
diff --git a/pypy/jit/backend/x86/test/test_ztranslation.py b/pypy/jit/backend/x86/test/test_ztranslation.py
--- a/pypy/jit/backend/x86/test/test_ztranslation.py
+++ b/pypy/jit/backend/x86/test/test_ztranslation.py
@@ -2,7 +2,7 @@
 from pypy.tool.udir import udir
 from pypy.rlib.jit import JitDriver, unroll_parameters
 from pypy.rlib.jit import PARAMETERS, dont_look_inside
-from pypy.rlib.jit import hint
+from pypy.rlib.jit import promote
 from pypy.jit.metainterp.jitprof import Profiler
 from pypy.jit.backend.detect_cpu import getcpuclass
 from pypy.jit.backend.test.support import CCompiledMixin
@@ -78,8 +78,7 @@
             x = float(j)
             while i > 0:
                 jitdriver2.jit_merge_point(i=i, res=res, func=func, x=x)
-                jitdriver2.can_enter_jit(i=i, res=res, func=func, x=x)
-                func = hint(func, promote=True)
+                promote(func)
                 argchain = ArgChain()
                 argchain.arg(x)
                 res = func.call(argchain, rffi.DOUBLE)
diff --git a/pypy/jit/codewriter/call.py b/pypy/jit/codewriter/call.py
--- a/pypy/jit/codewriter/call.py
+++ b/pypy/jit/codewriter/call.py
@@ -208,12 +208,12 @@
         assert NON_VOID_ARGS == [T for T in ARGS if T is not lltype.Void]
         assert RESULT == FUNC.RESULT
         # ok
-        # get the 'pure' and 'loopinvariant' flags from the function object
-        pure = False
+        # get the 'elidable' and 'loopinvariant' flags from the function object
+        elidable = False
         loopinvariant = False
         if op.opname == "direct_call":
             func = getattr(get_funcobj(op.args[0].value), '_callable', None)
-            pure = getattr(func, "_pure_function_", False)
+            elidable = getattr(func, "_elidable_function_", False)
             loopinvariant = getattr(func, "_jit_loop_invariant_", False)
             if loopinvariant:
                 assert not NON_VOID_ARGS, ("arguments not supported for "
@@ -225,9 +225,9 @@
                 extraeffect = EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
             elif loopinvariant:
                 extraeffect = EffectInfo.EF_LOOPINVARIANT
-            elif pure:
+            elif elidable:
                 # XXX check what to do about exceptions (also MemoryError?)
-                extraeffect = EffectInfo.EF_PURE
+                extraeffect = EffectInfo.EF_ELIDABLE
             elif self._canraise(op):
                 extraeffect = EffectInfo.EF_CAN_RAISE
             else:
@@ -239,7 +239,7 @@
         #
         if oopspecindex != EffectInfo.OS_NONE:
             assert effectinfo is not None
-        if pure or loopinvariant:
+        if elidable or loopinvariant:
             assert effectinfo is not None
             assert extraeffect != EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
             # XXX this should also say assert not can_invalidate, but
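
As the call.py hunk above shows, the codewriter now looks for an _elidable_function_ attribute on the callable.  Presumably the @elidable decorator in pypy.rlib.jit does little more than set that flag; a hedged sketch (the decorator body and the helper below are assumptions for illustration, not copied from rlib.jit):

    def elidable(func):
        # assumed behaviour: tag the function so that
        # getattr(func, "_elidable_function_", False) in call.py is True
        func._elidable_function_ = True
        return func

    @elidable
    def ll_lookup(table, key):   # hypothetical RPython helper
        return table[key]
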
diff --git a/pypy/jit/codewriter/effectinfo.py b/pypy/jit/codewriter/effectinfo.py
--- a/pypy/jit/codewriter/effectinfo.py
+++ b/pypy/jit/codewriter/effectinfo.py
@@ -9,7 +9,7 @@
     _cache = {}
 
     # the 'extraeffect' field is one of the following values:
-    EF_PURE                            = 0 #pure function (and cannot raise)
+    EF_ELIDABLE                        = 0 #elidable function (and cannot raise)
     EF_LOOPINVARIANT                   = 1 #special: call it only once per loop
     EF_CANNOT_RAISE                    = 2 #a function which cannot raise
     EF_CAN_RAISE                       = 3 #normal function (can raise)
@@ -75,12 +75,13 @@
     #
     OS_MATH_SQRT                = 100
 
-    def __new__(cls, readonly_descrs_fields,
+    def __new__(cls, readonly_descrs_fields, readonly_descrs_arrays,
                 write_descrs_fields, write_descrs_arrays,
                 extraeffect=EF_CAN_RAISE,
                 oopspecindex=OS_NONE,
                 can_invalidate=False):
         key = (frozenset(readonly_descrs_fields),
+               frozenset(readonly_descrs_arrays),
                frozenset(write_descrs_fields),
                frozenset(write_descrs_arrays),
                extraeffect,
@@ -89,8 +90,9 @@
             return cls._cache[key]
         result = object.__new__(cls)
         result.readonly_descrs_fields = readonly_descrs_fields
+        result.readonly_descrs_arrays = readonly_descrs_arrays
         if extraeffect == EffectInfo.EF_LOOPINVARIANT or \
-           extraeffect == EffectInfo.EF_PURE:            
+           extraeffect == EffectInfo.EF_ELIDABLE:
             result.write_descrs_fields = []
             result.write_descrs_arrays = []
         else:
@@ -119,7 +121,7 @@
     if effects is top_set:
         return None
     readonly_descrs_fields = []
-    # readonly_descrs_arrays = [] --- not enabled for now
+    readonly_descrs_arrays = []
     write_descrs_fields = []
     write_descrs_arrays = []
 
@@ -145,10 +147,13 @@
         elif tup[0] == "array":
             add_array(write_descrs_arrays, tup)
         elif tup[0] == "readarray":
-            pass
+            tupw = ("array",) + tup[1:]
+            if tupw not in effects:
+                add_array(readonly_descrs_arrays, tup)
         else:
             assert 0
     return EffectInfo(readonly_descrs_fields,
+                      readonly_descrs_arrays,
                       write_descrs_fields,
                       write_descrs_arrays,
                       extraeffect,
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py
--- a/pypy/jit/codewriter/jtransform.py
+++ b/pypy/jit/codewriter/jtransform.py
@@ -847,7 +847,7 @@
                 op1 = self.prepare_builtin_call(op, "llong_%s", args)
                 op2 = self._handle_oopspec_call(op1, args,
                                                 EffectInfo.OS_LLONG_%s,
-                                                EffectInfo.EF_PURE)
+                                                EffectInfo.EF_ELIDABLE)
                 if %r == "TO_INT":
                     assert op2.result.concretetype == lltype.Signed
                 return op2
@@ -1328,13 +1328,13 @@
                     otherindex += EffectInfo._OS_offset_uni
                 self._register_extra_helper(otherindex, othername,
                                             argtypes, resulttype,
-                                            EffectInfo.EF_PURE)
+                                            EffectInfo.EF_ELIDABLE)
         #
         return self._handle_oopspec_call(op, args, dict[oopspec_name],
-                                         EffectInfo.EF_PURE)
+                                         EffectInfo.EF_ELIDABLE)
 
     def _handle_str2unicode_call(self, op, oopspec_name, args):
-        # ll_str2unicode is not EF_PURE, because it can raise
+        # ll_str2unicode is not EF_ELIDABLE, because it can raise
         # UnicodeDecodeError...
         return self._handle_oopspec_call(op, args, EffectInfo.OS_STR2UNICODE)
 
@@ -1380,7 +1380,7 @@
     
     def _handle_math_sqrt_call(self, op, oopspec_name, args):
         return self._handle_oopspec_call(op, args, EffectInfo.OS_MATH_SQRT,
-                                         EffectInfo.EF_PURE)
+                                         EffectInfo.EF_ELIDABLE)
 
     def rewrite_op_jit_force_quasi_immutable(self, op):
         v_inst, c_fieldname = op.args
diff --git a/pypy/jit/codewriter/policy.py b/pypy/jit/codewriter/policy.py
--- a/pypy/jit/codewriter/policy.py
+++ b/pypy/jit/codewriter/policy.py
@@ -35,8 +35,8 @@
     def _reject_function(self, func):
         if hasattr(func, '_jit_look_inside_'):
             return not func._jit_look_inside_
-        # explicitly pure functions are always opaque
-        if getattr(func, '_pure_function_', False):
+        # explicitly elidable functions are always opaque
+        if getattr(func, '_elidable_function_', False):
             return True
         # pypy.rpython.module.* are opaque helpers
         mod = func.__module__ or '?'
@@ -44,10 +44,6 @@
             return True
         if mod.startswith('pypy.translator.'): # XXX wtf?
             return True
-        # string builder interface
-        if mod == 'pypy.rpython.lltypesystem.rbuilder':
-            return True
-        
         return False
 
     def look_inside_graph(self, graph):
diff --git a/pypy/jit/codewriter/test/test_effectinfo.py b/pypy/jit/codewriter/test/test_effectinfo.py
--- a/pypy/jit/codewriter/test/test_effectinfo.py
+++ b/pypy/jit/codewriter/test/test_effectinfo.py
@@ -34,6 +34,15 @@
     assert not effectinfo.readonly_descrs_fields
     assert not effectinfo.write_descrs_arrays
 
+def test_include_read_array():
+    A = lltype.GcArray(lltype.Signed)
+    effects = frozenset([("readarray", lltype.Ptr(A))])
+    effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU())
+    assert not effectinfo.readonly_descrs_fields
+    assert list(effectinfo.readonly_descrs_arrays) == [('arraydescr', A)]
+    assert not effectinfo.write_descrs_fields
+    assert not effectinfo.write_descrs_arrays
+
 def test_include_write_array():
     A = lltype.GcArray(lltype.Signed)
     effects = frozenset([("array", lltype.Ptr(A))])
@@ -51,6 +60,16 @@
     assert list(effectinfo.write_descrs_fields) == [('fielddescr', S, "a")]
     assert not effectinfo.write_descrs_arrays
 
+def test_dont_include_read_and_write_array():
+    A = lltype.GcArray(lltype.Signed)
+    effects = frozenset([("readarray", lltype.Ptr(A)),
+                         ("array", lltype.Ptr(A))])
+    effectinfo = effectinfo_from_writeanalyze(effects, FakeCPU())
+    assert not effectinfo.readonly_descrs_fields
+    assert not effectinfo.readonly_descrs_arrays
+    assert not effectinfo.write_descrs_fields
+    assert list(effectinfo.write_descrs_arrays) == [('arraydescr', A)]
+
 
 def test_filter_out_typeptr():
     effects = frozenset([("struct", lltype.Ptr(OBJECT), "typeptr")])
diff --git a/pypy/jit/codewriter/test/test_jtransform.py b/pypy/jit/codewriter/test/test_jtransform.py
--- a/pypy/jit/codewriter/test/test_jtransform.py
+++ b/pypy/jit/codewriter/test/test_jtransform.py
@@ -122,7 +122,7 @@
             if oopspecindex == EI.OS_STR2UNICODE:
                 assert extraeffect == None    # not pure, can raise!
             else:
-                assert extraeffect == EI.EF_PURE
+                assert extraeffect == EI.EF_ELIDABLE
         return 'calldescr-%d' % oopspecindex
     def calldescr_canraise(self, calldescr):
         return False
diff --git a/pypy/jit/codewriter/test/test_policy.py b/pypy/jit/codewriter/test/test_policy.py
--- a/pypy/jit/codewriter/test/test_policy.py
+++ b/pypy/jit/codewriter/test/test_policy.py
@@ -45,8 +45,8 @@
     policy.set_supports_floats(False)
     assert not policy.look_inside_graph(graph)
 
-def test_purefunction():
-    @jit.purefunction
+def test_elidable():
+    @jit.elidable
     def g(x):
         return x + 2
     graph = support.getgraph(g, [5])
diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py
--- a/pypy/jit/metainterp/blackhole.py
+++ b/pypy/jit/metainterp/blackhole.py
@@ -3,7 +3,7 @@
 from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.debug import debug_start, debug_stop
-from pypy.rlib.debug import make_sure_not_resized, fatalerror
+from pypy.rlib.debug import make_sure_not_resized
 from pypy.rpython.lltypesystem import lltype, llmemory, rclass
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rpython.llinterp import LLException
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
--- a/pypy/jit/metainterp/compile.py
+++ b/pypy/jit/metainterp/compile.py
@@ -3,7 +3,7 @@
 from pypy.rpython.ootypesystem import ootype
 from pypy.objspace.flow.model import Constant, Variable
 from pypy.rlib.objectmodel import we_are_translated
-from pypy.rlib.debug import debug_start, debug_stop
+from pypy.rlib.debug import debug_start, debug_stop, debug_print
 from pypy.rlib import rstack
 from pypy.conftest import option
 from pypy.tool.sourcetools import func_with_new_name
@@ -14,8 +14,8 @@
 from pypy.jit.metainterp.history import BoxPtr, BoxObj, BoxFloat, Const
 from pypy.jit.metainterp import history
 from pypy.jit.metainterp.typesystem import llhelper, oohelper
-from pypy.jit.metainterp.optimizeutil import InvalidLoop
-from pypy.jit.metainterp.resume import NUMBERING
+from pypy.jit.metainterp.optimize import InvalidLoop
+from pypy.jit.metainterp.resume import NUMBERING, PENDINGFIELDSP
 from pypy.jit.codewriter import heaptracker, longlong
 
 def giveup():
@@ -119,6 +119,7 @@
         old_loop_token = optimize_loop(metainterp_sd, old_loop_tokens, loop,
                                        jitdriver_sd.warmstate.enable_opts)
     except InvalidLoop:
+        debug_print("compile_new_loop: got an InvalidLoop")
         return None
     if old_loop_token is not None:
         metainterp.staticdata.log("reusing old loop")
@@ -302,7 +303,7 @@
     rd_numb = lltype.nullptr(NUMBERING)
     rd_consts = None
     rd_virtuals = None
-    rd_pendingfields = None
+    rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO)
 
     CNT_INT   = -0x20000000
     CNT_REF   = -0x40000000
@@ -633,6 +634,7 @@
                                             new_loop, state.enable_opts,
                                             inline_short_preamble, retraced)
     except InvalidLoop:
+        debug_print("compile_new_bridge: got an InvalidLoop")
         # XXX I am fairly convinced that optimize_bridge cannot actually raise
         # InvalidLoop
         return None
diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py
--- a/pypy/jit/metainterp/history.py
+++ b/pypy/jit/metainterp/history.py
@@ -4,7 +4,7 @@
 from pypy.rpython.ootypesystem import ootype
 from pypy.rlib.objectmodel import we_are_translated, r_dict, Symbolic
 from pypy.rlib.objectmodel import compute_unique_id
-from pypy.rlib.rarithmetic import intmask, r_int64
+from pypy.rlib.rarithmetic import r_int64
 from pypy.conftest import option
 
 from pypy.jit.metainterp.resoperation import ResOperation, rop
@@ -765,6 +765,7 @@
     """
     short_preamble = None
     failed_states = None
+    retraced_count = 0
     terminating = False # see TerminatingLoopToken in compile.py
     outermost_jitdriver_sd = None
     # and more data specified by the backend when the loop is compiled
@@ -791,6 +792,7 @@
 
     def dump(self):
         self.compiled_loop_token.cpu.dump_loop_token(self)
+
 class TreeLoop(object):
     inputargs = None
     operations = None
diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py
--- a/pypy/jit/metainterp/logger.py
+++ b/pypy/jit/metainterp/logger.py
@@ -103,6 +103,7 @@
         if op.getopnum() == rop.DEBUG_MERGE_POINT:
             jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
             s = jd_sd.warmstate.get_location_str(op.getarglist()[2:])
+            s = s.replace(',', '.') # we use comma for argument splitting
             return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s)
         if ops_offset is None:
             offset = -1
diff --git a/pypy/jit/metainterp/optimize.py b/pypy/jit/metainterp/optimize.py
--- a/pypy/jit/metainterp/optimize.py
+++ b/pypy/jit/metainterp/optimize.py
@@ -1,9 +1,20 @@
 from pypy.rlib.debug import debug_start, debug_stop
+from pypy.jit.metainterp.jitexc import JitException
+
+class InvalidLoop(JitException):
+    """Raised when the optimize*.py detect that the loop that
+    we are trying to build cannot possibly make sense as a
+    long-running loop (e.g. it cannot run 2 complete iterations)."""
+
+class RetraceLoop(JitException):
+    """ Raised when inlining a short preamble resulted in an
+        InvalidLoop. This means the optimized loop is too specialized
+        to be useful here, so we trace it again and produce a second
+        copy specialized in some different way.
+    """
 
 # ____________________________________________________________
 
-from pypy.jit.metainterp.optimizeopt import optimize_loop_1, optimize_bridge_1
-
 def optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts):
     debug_start("jit-optimize")
     try:
@@ -13,7 +24,7 @@
         debug_stop("jit-optimize")
 
 def _optimize_loop(metainterp_sd, old_loop_tokens, loop, enable_opts):
-    cpu = metainterp_sd.cpu
+    from pypy.jit.metainterp.optimizeopt import optimize_loop_1
     loop.logops = metainterp_sd.logger_noopt.log_loop(loop.inputargs,
                                                       loop.operations)
     # XXX do we really still need a list?
@@ -36,7 +47,7 @@
 
 def _optimize_bridge(metainterp_sd, old_loop_tokens, bridge, enable_opts,
                      inline_short_preamble, retraced=False):
-    cpu = metainterp_sd.cpu
+    from pypy.jit.metainterp.optimizeopt import optimize_bridge_1
     bridge.logops = metainterp_sd.logger_noopt.log_loop(bridge.inputargs,
                                                         bridge.operations)
     if old_loop_tokens:
diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py
--- a/pypy/jit/metainterp/optimizeopt/__init__.py
+++ b/pypy/jit/metainterp/optimizeopt/__init__.py
@@ -3,7 +3,7 @@
 from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds
 from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize
 from pypy.jit.metainterp.optimizeopt.heap import OptHeap
-from pypy.jit.metainterp.optimizeopt.string import OptString
+from pypy.jit.metainterp.optimizeopt.vstring import OptString
 from pypy.jit.metainterp.optimizeopt.unroll import optimize_unroll, OptInlineShortPreamble
 from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall
 from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify
@@ -21,15 +21,14 @@
 unroll_all_opts = unrolling_iterable(ALL_OPTS)
 
 ALL_OPTS_DICT = dict.fromkeys([name for name, _ in ALL_OPTS])
-
+ALL_OPTS_LIST = [name for name, _ in ALL_OPTS]
 ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS])
-PARAMETERS['enable_opts'] = ALL_OPTS_NAMES
 
 def build_opt_chain(metainterp_sd, enable_opts,
                     inline_short_preamble=True, retraced=False):
     config = metainterp_sd.config
     optimizations = []
-    unroll = 'unroll' in enable_opts
+    unroll = 'unroll' in enable_opts    # 'enable_opts' is normally a dict
     for name, opt in unroll_all_opts:
         if name in enable_opts:
             if opt is not None:
diff --git a/pypy/jit/metainterp/optimizeopt/fficall.py b/pypy/jit/metainterp/optimizeopt/fficall.py
--- a/pypy/jit/metainterp/optimizeopt/fficall.py
+++ b/pypy/jit/metainterp/optimizeopt/fficall.py
@@ -4,7 +4,7 @@
 from pypy.rlib.debug import debug_start, debug_stop, debug_print, have_debug_prints
 from pypy.jit.codewriter.effectinfo import EffectInfo
 from pypy.jit.metainterp.resoperation import rop, ResOperation
-from pypy.jit.metainterp.optimizeutil import _findall
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
 from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
 from pypy.jit.backend.llsupport.ffisupport import UnsupportedKind
 
@@ -203,13 +203,7 @@
     def propagate_forward(self, op):
         if self.logops is not None:
             debug_print(self.logops.repr_of_resop(op))
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            self.emit_operation(op)
+        dispatch_opt(self, op)
 
     def _get_oopspec(self, op):
         effectinfo = op.getdescr().get_extra_info()
@@ -220,4 +214,5 @@
     def _get_funcval(self, op):
         return self.getvalue(op.getarg(1))
 
-optimize_ops = _findall(OptFfiCall, 'optimize_')
+dispatch_opt = make_dispatcher_method(OptFfiCall, 'optimize_',
+        default=OptFfiCall.emit_operation)
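
The hunks above replace the old _findall-based linear scan over (opnum, func) pairs with a single dispatch method built once per Optimization subclass. The real helper is make_dispatcher_method() in pypy/jit/metainterp/optimizeopt/util.py, which this changeset does not include; the sketch below only illustrates the pattern, using made-up FakeOp and FakeOptimization classes as stand-ins.

    # Minimal sketch, not the real implementation from optimizeopt/util.py.
    def make_dispatcher_method(Class, name_prefix, default=None):
        # Collect Class.<name_prefix><OPNAME> handlers once, keyed by OPNAME.
        handlers = {}
        for attr in dir(Class):
            if attr.startswith(name_prefix):
                handlers[attr[len(name_prefix):]] = getattr(Class, attr)
        def dispatch(self, op):
            func = handlers.get(op.getopname())
            if func is not None:
                func(self, op)
            elif default is not None:
                default(self, op)
        return dispatch

    class FakeOp(object):
        def __init__(self, name):
            self.name = name
        def getopname(self):
            return self.name

    class FakeOptimization(object):
        def optimize_INT_ADD(self, op):
            print 'specialized handler for', op.getopname()
        def emit_operation(self, op):
            print 'default handler for', op.getopname()

    dispatch_opt = make_dispatcher_method(FakeOptimization, 'optimize_',
                                          default=FakeOptimization.emit_operation)
    FakeOptimization.propagate_forward = dispatch_opt
    FakeOptimization().propagate_forward(FakeOp('INT_ADD'))    # specialized
    FakeOptimization().propagate_forward(FakeOp('FLOAT_MUL'))  # default
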
diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
--- a/pypy/jit/metainterp/optimizeopt/heap.py
+++ b/pypy/jit/metainterp/optimizeopt/heap.py
@@ -1,5 +1,5 @@
 import os
-from pypy.jit.metainterp.optimizeutil import _findall
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
 from pypy.jit.metainterp.resoperation import rop, ResOperation
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.jit.metainterp.jitexc import JitException
@@ -8,8 +8,8 @@
 
 class CachedField(object):
     def __init__(self):
-        # Cache information for a field descr.  It can be in one
-        # of two states:
+        # Cache information for a field descr, or for an (array descr, index)
+        # pair.  It can be in one of two states:
         #
         #   1. 'cached_fields' is a dict mapping OptValues of structs
         #      to OptValues of fields.  All fields on-heap are
@@ -27,19 +27,19 @@
         self._lazy_setfield_registered = False
 
     def do_setfield(self, optheap, op):
-        # Update the state with the SETFIELD_GC operation 'op'.
+        # Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'.
         structvalue = optheap.getvalue(op.getarg(0))
-        fieldvalue  = optheap.getvalue(op.getarg(1))
+        fieldvalue  = optheap.getvalue(op.getarglist()[-1])
         if self.possible_aliasing(optheap, structvalue):
             self.force_lazy_setfield(optheap)
             assert not self.possible_aliasing(optheap, structvalue)
         cached_fieldvalue = self._cached_fields.get(structvalue, None)
         if cached_fieldvalue is not fieldvalue:
             # common case: store the 'op' as lazy_setfield, and register
-            # myself in the optheap's _lazy_setfields list
+            # myself in the optheap's _lazy_setfields_and_arrayitems list
             self._lazy_setfield = op
             if not self._lazy_setfield_registered:
-                optheap._lazy_setfields.append(self)
+                optheap._lazy_setfields_and_arrayitems.append(self)
                 self._lazy_setfield_registered = True
         else:
             # this is the case where the pending setfield ends up
@@ -65,7 +65,7 @@
         if self._lazy_setfield is not None:
             op = self._lazy_setfield
             assert optheap.getvalue(op.getarg(0)) is structvalue
-            return optheap.getvalue(op.getarg(1))
+            return optheap.getvalue(op.getarglist()[-1])
         else:
             return self._cached_fields.get(structvalue, None)
 
@@ -87,7 +87,7 @@
             # back in the cache: the value of this particular structure's
             # field.
             structvalue = optheap.getvalue(op.getarg(0))
-            fieldvalue  = optheap.getvalue(op.getarg(1))
+            fieldvalue  = optheap.getvalue(op.getarglist()[-1])
             self.remember_field_value(structvalue, fieldvalue)
 
     def get_reconstructed(self, optimizer, valuemap):
@@ -100,25 +100,20 @@
         return cf
 
 
-class CachedArrayItems(object):
-    def __init__(self):
-        self.fixed_index_items = {}
-        self.var_index_item = None
-        self.var_index_indexvalue = None
-
 class BogusPureField(JitException):
     pass
 
 
 class OptHeap(Optimization):
     """Cache repeated heap accesses"""
-    
+
     def __init__(self):
         # cached fields:  {descr: CachedField}
         self.cached_fields = {}
-        self._lazy_setfields = []
-        # cached array items:  {descr: CachedArrayItems}
+        # cached array items:  {array descr: {index: CachedField}}
         self.cached_arrayitems = {}
+        #
+        self._lazy_setfields_and_arrayitems = []
         self._remove_guard_not_invalidated = False
         self._seen_guard_not_invalidated = False
 
@@ -126,34 +121,23 @@
         new = OptHeap()
 
         if True:
-            self.force_all_lazy_setfields()
+            self.force_all_lazy_setfields_and_arrayitems()
         else:
             assert 0   # was: new.lazy_setfields = self.lazy_setfields
-        
+
         for descr, d in self.cached_fields.items():
             new.cached_fields[descr] = d.get_reconstructed(optimizer, valuemap)
 
-        new.cached_arrayitems = {}
-        for descr, d in self.cached_arrayitems.items():
-            newd = {}
-            new.cached_arrayitems[descr] = newd
-            for value, cache in d.items():
-                newcache = CachedArrayItems()
-                newd[value.get_reconstructed(optimizer, valuemap)] = newcache
-                if cache.var_index_item:
-                    newcache.var_index_item = \
-                          cache.var_index_item.get_reconstructed(optimizer, valuemap)
-                if cache.var_index_indexvalue:
-                    newcache.var_index_indexvalue = \
-                          cache.var_index_indexvalue.get_reconstructed(optimizer, valuemap)
-                for index, fieldvalue in cache.fixed_index_items.items():
-                    newcache.fixed_index_items[index] = \
-                           fieldvalue.get_reconstructed(optimizer, valuemap)
+        for descr, submap in self.cached_arrayitems.items():
+            newdict = {}
+            for index, d in submap.items():
+                newdict[index] = d.get_reconstructed(optimizer, valuemap)
+            new.cached_arrayitems[descr] = newdict
 
         return new
 
     def clean_caches(self):
-        del self._lazy_setfields[:]
+        del self._lazy_setfields_and_arrayitems[:]
         self.cached_fields.clear()
         self.cached_arrayitems.clear()
 
@@ -164,50 +148,16 @@
             cf = self.cached_fields[descr] = CachedField()
         return cf
 
-    def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False):
-        d = self.cached_arrayitems.get(descr, None)
-        if d is None:
-            d = self.cached_arrayitems[descr] = {}
-        cache = d.get(value, None)
-        if cache is None:
-            cache = d[value] = CachedArrayItems()
-        indexbox = self.get_constant_box(indexvalue.box)
-        if indexbox is not None:
-            index = indexbox.getint()
-            if write:
-                for value, othercache in d.iteritems():
-                    # fixed index, clean the variable index cache, in case the
-                    # index is the same
-                    othercache.var_index_indexvalue = None
-                    othercache.var_index_item = None
-                    try:
-                        del othercache.fixed_index_items[index]
-                    except KeyError:
-                        pass
-            cache.fixed_index_items[index] = fieldvalue
-        else:
-            if write:
-                for value, othercache in d.iteritems():
-                    # variable index, clear all caches for this descr
-                    othercache.var_index_indexvalue = None
-                    othercache.var_index_item = None
-                    othercache.fixed_index_items.clear()
-            cache.var_index_indexvalue = indexvalue
-            cache.var_index_item = fieldvalue
-
-    def read_cached_arrayitem(self, descr, value, indexvalue):
-        d = self.cached_arrayitems.get(descr, None)
-        if d is None:
-            return None
-        cache = d.get(value, None)
-        if cache is None:
-            return None
-        indexbox = self.get_constant_box(indexvalue.box)
-        if indexbox is not None:
-            return cache.fixed_index_items.get(indexbox.getint(), None)
-        elif cache.var_index_indexvalue is indexvalue:
-            return cache.var_index_item
-        return None
+    def arrayitem_cache(self, descr, index):
+        try:
+            submap = self.cached_arrayitems[descr]
+        except KeyError:
+            submap = self.cached_arrayitems[descr] = {}
+        try:
+            cf = submap[index]
+        except KeyError:
+            cf = submap[index] = CachedField()
+        return cf
 
     def emit_operation(self, op):
         self.emitting_operation(op)
@@ -219,7 +169,8 @@
         if op.is_ovf():
             return
         if op.is_guard():
-            self.optimizer.pendingfields = self.force_lazy_setfields_for_guard()
+            self.optimizer.pendingfields = (
+                self.force_lazy_setfields_and_arrayitems_for_guard())
             return
         opnum = op.getopnum()
         if (opnum == rop.SETFIELD_GC or        # handled specially
@@ -248,6 +199,8 @@
                 # XXX stored on effectinfo are large
                 for fielddescr in effectinfo.readonly_descrs_fields:
                     self.force_lazy_setfield(fielddescr)
+                for arraydescr in effectinfo.readonly_descrs_arrays:
+                    self.force_lazy_setarrayitem(arraydescr)
                 for fielddescr in effectinfo.write_descrs_fields:
                     self.force_lazy_setfield(fielddescr)
                     try:
@@ -256,8 +209,11 @@
                     except KeyError:
                         pass
                 for arraydescr in effectinfo.write_descrs_arrays:
+                    self.force_lazy_setarrayitem(arraydescr)
                     try:
-                        del self.cached_arrayitems[arraydescr]
+                        submap = self.cached_arrayitems[arraydescr]
+                        for cf in submap.itervalues():
+                            cf._cached_fields.clear()
                     except KeyError:
                         pass
                 if effectinfo.check_forces_virtual_or_virtualizable():
@@ -266,7 +222,7 @@
                     # ^^^ we only need to force this field; the other fields
                     # of virtualref_info and virtualizable_info are not gcptrs.
                 return
-        self.force_all_lazy_setfields()
+        self.force_all_lazy_setfields_and_arrayitems()
         self.clean_caches()
 
 
@@ -277,6 +233,10 @@
             for cf in self.cached_fields.itervalues():
                 if value in cf._cached_fields:
                     cf._cached_fields[newvalue] = cf._cached_fields[value]
+            for submap in self.cached_arrayitems.itervalues():
+                for cf in submap.itervalues():
+                    if value in cf._cached_fields:
+                        cf._cached_fields[newvalue] = cf._cached_fields[value]
 
     def force_lazy_setfield(self, descr):
         try:
@@ -285,6 +245,14 @@
             return
         cf.force_lazy_setfield(self)
 
+    def force_lazy_setarrayitem(self, arraydescr):
+        try:
+            submap = self.cached_arrayitems[arraydescr]
+        except KeyError:
+            return
+        for cf in submap.values():
+            cf.force_lazy_setfield(self)
+
     def fixup_guard_situation(self):
         # hackish: reverse the order of the last two operations if it makes
         # sense to avoid a situation like "int_eq/setfield_gc/guard_true",
@@ -309,30 +277,49 @@
         newoperations[-2] = lastop
         newoperations[-1] = prevop
 
-    def force_all_lazy_setfields(self):
-        for cf in self._lazy_setfields:
-            if not we_are_translated():
-                assert cf in self.cached_fields.values()
+    def _assert_valid_cf(self, cf):
+        # check that 'cf' is in cached_fields or cached_arrayitems
+        if not we_are_translated():
+            if cf not in self.cached_fields.values():
+                for submap in self.cached_arrayitems.values():
+                    if cf in submap.values():
+                        break
+                else:
+                    assert 0, "'cf' not in cached_fields/cached_arrayitems"
+
+    def force_all_lazy_setfields_and_arrayitems(self):
+        for cf in self._lazy_setfields_and_arrayitems:
+            self._assert_valid_cf(cf)
             cf.force_lazy_setfield(self)
 
-    def force_lazy_setfields_for_guard(self):
+    def force_lazy_setfields_and_arrayitems_for_guard(self):
         pendingfields = []
-        for cf in self._lazy_setfields:
-            if not we_are_translated():
-                assert cf in self.cached_fields.values()
+        for cf in self._lazy_setfields_and_arrayitems:
+            self._assert_valid_cf(cf)
             op = cf._lazy_setfield
             if op is None:
                 continue
             # the only really interesting case that we need to handle in the
             # guards' resume data is that of a virtual object that is stored
-            # into a field of a non-virtual object.
+            # into a field of a non-virtual object.  Here, 'op' is either
+            # SETFIELD_GC or SETARRAYITEM_GC.
             value = self.getvalue(op.getarg(0))
             assert not value.is_virtual()      # it must be a non-virtual
-            fieldvalue = self.getvalue(op.getarg(1))
+            fieldvalue = self.getvalue(op.getarglist()[-1])
             if fieldvalue.is_virtual():
                 # this is the case that we leave to resume.py
+                opnum = op.getopnum()
+                if opnum == rop.SETFIELD_GC:
+                    itemindex = -1
+                elif opnum == rop.SETARRAYITEM_GC:
+                    indexvalue = self.getvalue(op.getarg(1))
+                    assert indexvalue.is_constant()
+                    itemindex = indexvalue.box.getint()
+                    assert itemindex >= 0
+                else:
+                    assert 0
                 pendingfields.append((op.getdescr(), value.box,
-                                      fieldvalue.get_key_box()))
+                                      fieldvalue.get_key_box(), itemindex))
             else:
                 cf.force_lazy_setfield(self)
                 self.fixup_guard_situation()
@@ -364,24 +351,45 @@
         cf.do_setfield(self, op)
 
     def optimize_GETARRAYITEM_GC(self, op):
-        value = self.getvalue(op.getarg(0))
+        arrayvalue = self.getvalue(op.getarg(0))
         indexvalue = self.getvalue(op.getarg(1))
-        fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue)
-        if fieldvalue is not None:
-            self.make_equal_to(op.result, fieldvalue)
-            return
-        ###self.optimizer.optimize_default(op)
+        cf = None
+        if indexvalue.is_constant():
+            # use the cache on (arraydescr, index), which is a constant
+            cf = self.arrayitem_cache(op.getdescr(), indexvalue.box.getint())
+            fieldvalue = cf.getfield_from_cache(self, arrayvalue)
+            if fieldvalue is not None:
+                self.make_equal_to(op.result, fieldvalue)
+                return
+        else:
+            # variable index, so make sure the lazy setarrayitems are done
+            self.force_lazy_setarrayitem(op.getdescr())
+        # default case: produce the operation
+        arrayvalue.ensure_nonnull()
         self.emit_operation(op)
-        fieldvalue = self.getvalue(op.result)
-        self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue)
+        # then remember the result of reading the array item
+        if cf is not None:
+            fieldvalue = self.getvalue(op.result)
+            cf.remember_field_value(arrayvalue, fieldvalue)
 
     def optimize_SETARRAYITEM_GC(self, op):
-        self.emit_operation(op)
-        value = self.getvalue(op.getarg(0))
-        fieldvalue = self.getvalue(op.getarg(2))
+        if self.has_pure_result(rop.GETARRAYITEM_GC_PURE, [op.getarg(0),
+                                                           op.getarg(1)],
+                                op.getdescr()):
+            os.write(2, '[bogus immutable array declaration: %s]\n' %
+                     (op.getdescr().repr_of_descr()))
+            raise BogusPureField
+        #
         indexvalue = self.getvalue(op.getarg(1))
-        self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue,
-                                   write=True)
+        if indexvalue.is_constant():
+            # use the cache on (arraydescr, index), which is a constant
+            cf = self.arrayitem_cache(op.getdescr(), indexvalue.box.getint())
+            cf.do_setfield(self, op)
+        else:
+            # variable index, so make sure the lazy setarrayitems are done
+            self.force_lazy_setarrayitem(op.getdescr())
+            # and then emit the operation
+            self.emit_operation(op)
 
     def optimize_QUASIIMMUT_FIELD(self, op):
         # Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr)
@@ -423,13 +431,7 @@
         self._seen_guard_not_invalidated = True
         self.emit_operation(op)
 
-    def propagate_forward(self, op):
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            self.emit_operation(op)
 
-optimize_ops = _findall(OptHeap, 'optimize_')
+dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_',
+        default=OptHeap.emit_operation)
+OptHeap.propagate_forward = dispatch_opt
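
The rewritten OptHeap above drops the special-purpose CachedArrayItems class and instead caches array reads and writes with the same CachedField machinery used for struct fields, keyed by (array descr, constant index). Below is only a minimal stand-alone sketch of that two-level cache layout, with simplified placeholder classes; the real CachedField in heap.py also carries the lazy-setfield state that force_lazy_setarrayitem() flushes.

    # Simplified stand-ins for illustration only.
    class CachedField(object):
        def __init__(self):
            self._cached_fields = {}      # {array value: cached item value}

    class ArrayItemCaches(object):
        def __init__(self):
            # {array descr: {constant index: CachedField}}
            self.cached_arrayitems = {}

        def arrayitem_cache(self, descr, index):
            # same shape as OptHeap.arrayitem_cache() in the diff above
            submap = self.cached_arrayitems.setdefault(descr, {})
            try:
                cf = submap[index]
            except KeyError:
                cf = submap[index] = CachedField()
            return cf

    caches = ArrayItemCaches()
    cf = caches.arrayitem_cache('arraydescr', 0)
    cf._cached_fields['p3'] = 'i5'        # remember that p3[0] is i5
    assert caches.arrayitem_cache('arraydescr', 0) is cf
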
diff --git a/pypy/jit/metainterp/optimizeopt/intbounds.py b/pypy/jit/metainterp/optimizeopt/intbounds.py
--- a/pypy/jit/metainterp/optimizeopt/intbounds.py
+++ b/pypy/jit/metainterp/optimizeopt/intbounds.py
@@ -1,7 +1,7 @@
 from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0
-from pypy.jit.metainterp.optimizeutil import _findall
-from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \
-    IntLowerBound, IntUpperBound
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
+from pypy.jit.metainterp.optimizeopt.intutils import (IntBound, IntUnbounded,
+    IntLowerBound, IntUpperBound)
 from pypy.jit.metainterp.history import Const, ConstInt
 from pypy.jit.metainterp.resoperation import rop, ResOperation
 
@@ -23,7 +23,7 @@
 
     def reconstruct_for_next_iteration(self, optimizer, valuemap):
         assert self.posponedop is None
-        return self 
+        return self
 
     def propagate_forward(self, op):
         if op.is_ovf():
@@ -34,14 +34,11 @@
             op = self.posponedop
             self.posponedop = None
 
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            assert not op.is_ovf()
-            self.emit_operation(op)
+        dispatch_opt(self, op)
+
+    def opt_default(self, op):
+        assert not op.is_ovf()
+        self.emit_operation(op)
 
 
     def propagate_bounds_backward(self, box):
@@ -57,11 +54,7 @@
             op = self.optimizer.producer[box]
         except KeyError:
             return
-        opnum = op.getopnum()
-        for value, func in propagate_bounds_ops:
-            if opnum == value:
-                func(self, op)
-                break
+        dispatch_bounds_ops(self, op)
 
     def optimize_GUARD_TRUE(self, op):
         self.emit_operation(op)
@@ -194,7 +187,7 @@
                 # Synthesize the reverse ops for optimize_default to reuse
                 self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0))
                 self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1))
-                
+
 
     def optimize_INT_MUL_OVF(self, op):
         v1 = self.getvalue(op.getarg(0))
@@ -292,6 +285,11 @@
         v1.intbound.make_ge(IntLowerBound(0))
         v1.intbound.make_lt(IntUpperBound(256))
 
+    def optimize_UNICODEGETITEM(self, op):
+        self.emit_operation(op)
+        v1 = self.getvalue(op.result)
+        v1.intbound.make_ge(IntLowerBound(0))
+
     def make_int_lt(self, box1, box2):
         v1 = self.getvalue(box1)
         v2 = self.getvalue(box2)
@@ -368,6 +366,27 @@
                 if v2.intbound.intersect(v1.intbound):
                     self.propagate_bounds_backward(op.getarg(1))
 
+    def propagate_bounds_INT_IS_TRUE(self, op):
+        r = self.getvalue(op.result)
+        if r.is_constant():
+            if r.box.same_constant(CONST_1):
+                v1 = self.getvalue(op.getarg(0))
+                if v1.intbound.known_ge(IntBound(0, 0)):
+                    v1.intbound.make_gt(IntBound(0, 0))
+                    self.propagate_bounds_backward(op.getarg(0))
+
+    def propagate_bounds_INT_IS_ZERO(self, op):
+        r = self.getvalue(op.result)
+        if r.is_constant():
+            if r.box.same_constant(CONST_1):
+                v1 = self.getvalue(op.getarg(0))
+                # We cannot use self.make_constant_int here because the args
+                # are not in the values dictionary yet and it would hit an
+                # assert; the two bound updates below express the same thing.
+                v1.intbound.make_ge(IntBound(0, 0))
+                v1.intbound.make_lt(IntBound(1, 1))
+                self.propagate_bounds_backward(op.getarg(0))
+
     def propagate_bounds_INT_ADD(self, op):
         v1 = self.getvalue(op.getarg(0))
         v2 = self.getvalue(op.getarg(1))
@@ -413,5 +432,7 @@
     propagate_bounds_INT_SUB_OVF  = propagate_bounds_INT_SUB
     propagate_bounds_INT_MUL_OVF  = propagate_bounds_INT_MUL
 
-optimize_ops = _findall(OptIntBounds, 'optimize_')
-propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_')
+
+dispatch_opt = make_dispatcher_method(OptIntBounds, 'optimize_',
+        default=OptIntBounds.opt_default)
+dispatch_bounds_ops = make_dispatcher_method(OptIntBounds, 'propagate_bounds_')
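
The new propagate_bounds_INT_IS_ZERO hook above narrows the operand's range once a guard proves int_is_zero(x) to be 1: it forces x >= 0 and x < 1, which pins x to exactly 0 without calling make_constant_int. The sketch below shows that narrowing with a toy interval class whose make_ge/make_lt rules are only assumed to mirror the real IntBound in intutils.py.

    # Toy interval type; the tightening rules are assumptions meant to
    # mirror pypy.jit.metainterp.optimizeopt.intutils.IntBound.
    class IntBound(object):
        def __init__(self, lower, upper):
            self.lower, self.upper = lower, upper
        def make_ge(self, other):
            # raise the lower bound to at least other.lower
            if other.lower > self.lower:
                self.lower = other.lower
        def make_lt(self, other):
            # lower the upper bound to strictly below other.upper
            if other.upper - 1 < self.upper:
                self.upper = other.upper - 1

    b = IntBound(-100, 100)        # nothing known about x yet
    b.make_ge(IntBound(0, 0))      # from int_is_zero(x) == 1: x >= 0
    b.make_lt(IntBound(1, 1))      # and x < 1
    assert (b.lower, b.upper) == (0, 0)   # hence x == 0
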
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -4,9 +4,9 @@
 from pypy.jit.metainterp.resoperation import rop, ResOperation
 from pypy.jit.metainterp import jitprof
 from pypy.jit.metainterp.executor import execute_nonspec
-from pypy.jit.metainterp.optimizeutil import _findall, sort_descrs
-from pypy.jit.metainterp.optimizeutil import descrlist_dict
-from pypy.jit.metainterp.optimizeutil import InvalidLoop, args_dict
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method, sort_descrs
+from pypy.jit.metainterp.optimizeopt.util import descrlist_dict, args_dict
+from pypy.jit.metainterp.optimize import InvalidLoop
 from pypy.jit.metainterp import resume, compile
 from pypy.jit.metainterp.typesystem import llhelper, oohelper
 from pypy.rpython.lltypesystem import lltype
@@ -141,6 +141,9 @@
         # meaning it has been forced.
         return self.box is None
 
+    def is_forced_virtual(self):
+        return False
+
     def getfield(self, ofs, default):
         raise NotImplementedError
 
@@ -431,14 +434,7 @@
 
     def propagate_forward(self, op):
         self.producer[op.result] = op
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            self.optimize_default(op)
-        #print '\n'.join([str(o) for o in self.newoperations]) + '\n---\n'
+        dispatch_opt(self, op)
 
     def test_emittable(self, op):
         return True
@@ -566,7 +562,8 @@
     def optimize_DEBUG_MERGE_POINT(self, op):
         self.emit_operation(op)
 
-optimize_ops = _findall(Optimizer, 'optimize_')
+dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_',
+        default=Optimizer.optimize_default)
 
 
 
diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py
--- a/pypy/jit/metainterp/optimizeopt/rewrite.py
+++ b/pypy/jit/metainterp/optimizeopt/rewrite.py
@@ -1,7 +1,7 @@
 from pypy.jit.metainterp.optimizeopt.optimizer import *
 from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex
 from pypy.jit.metainterp.history import ConstInt
-from pypy.jit.metainterp.optimizeutil import _findall
+from pypy.jit.metainterp.optimizeopt.util import _findall, make_dispatcher_method
 from pypy.jit.metainterp.resoperation import rop, ResOperation
 from pypy.jit.codewriter.effectinfo import EffectInfo
 from pypy.jit.metainterp.optimizeopt.intutils import IntBound
@@ -21,18 +21,13 @@
         if self.find_rewritable_bool(op, args):
             return
 
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            self.emit_operation(op)
+        dispatch_opt(self, op)
 
     def test_emittable(self, op):
         opnum = op.getopnum()
-        for value, func in optimize_guards:
+        for value, cls, func in optimize_guards:
             if opnum == value:
+                assert isinstance(op, cls)
                 try:
                     func(self, op, dryrun=True)
                     return self.is_emittable(op)
@@ -184,6 +179,32 @@
         else:
             self.emit_operation(op)
 
+    def optimize_FLOAT_MUL(self, op):
+        arg1 = op.getarg(0)
+        arg2 = op.getarg(1)
+
+        # Constant fold f0 * 1.0 and turn f0 * -1.0 into a FLOAT_NEG, these
+        # work in all cases, including NaN and inf
+        for lhs, rhs in [(arg1, arg2), (arg2, arg1)]:
+            v1 = self.getvalue(lhs)
+            v2 = self.getvalue(rhs)
+
+            if v1.is_constant():
+                if v1.box.getfloat() == 1.0:
+                    self.make_equal_to(op.result, v2)
+                    return
+                elif v1.box.getfloat() == -1.0:
+                    self.emit_operation(ResOperation(
+                        rop.FLOAT_NEG, [rhs], op.result
+                    ))
+                    return
+        self.emit_operation(op)
+
+    def optimize_FLOAT_NEG(self, op):
+        v1 = op.getarg(0)
+        self.emit_operation(op)
+        self.pure(rop.FLOAT_NEG, [op.result], v1)
+
     def optimize_CALL_PURE(self, op):
         arg_consts = []
         for i in range(op.numargs()):
@@ -193,7 +214,7 @@
                 break
             arg_consts.append(const)
         else:
-            # all constant arguments: check if we already know the reslut
+            # all constant arguments: check if we already know the result
             try:
                 result = self.optimizer.call_pure_results[arg_consts]
             except KeyError:
@@ -451,5 +472,6 @@
         self.emit_operation(op)
 
 
-optimize_ops = _findall(OptRewrite, 'optimize_')
+dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
+        default=OptRewrite.emit_operation)
 optimize_guards = _findall(OptRewrite, 'optimize_', 'GUARD')
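
The new optimize_FLOAT_MUL above constant-folds f * 1.0 to f and rewrites f * -1.0 into FLOAT_NEG(f); as the comment in the diff notes, both identities hold for every IEEE 754 double, including infinities and NaN. A quick stand-alone check (comparing reprs so that NaN, which is never equal to itself, still passes):

    values = [0.0, -0.0, 1.5, -2.25, float('inf'), float('-inf'), float('nan')]
    for f in values:
        assert repr(f * 1.0) == repr(f)      # multiplying by 1.0 is the identity
        assert repr(f * -1.0) == repr(-f)    # multiplying by -1.0 is negation
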
diff --git a/pypy/jit/metainterp/optimizeopt/simplify.py b/pypy/jit/metainterp/optimizeopt/simplify.py
--- a/pypy/jit/metainterp/optimizeopt/simplify.py
+++ b/pypy/jit/metainterp/optimizeopt/simplify.py
@@ -1,7 +1,7 @@
 
 from pypy.jit.metainterp.resoperation import ResOperation, rop
 from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
-from pypy.jit.metainterp.optimizeutil import _findall
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
 
 class OptSimplify(Optimization):
     def optimize_CALL_PURE(self, op):
@@ -25,13 +25,7 @@
         #     but it's a bit hard to implement robustly if heap.py is also run
         pass
 
-    def propagate_forward(self, op):
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            self.emit_operation(op)
 
-optimize_ops = _findall(OptSimplify, 'optimize_')
+dispatch_opt = make_dispatcher_method(OptSimplify, 'optimize_',
+        default=OptSimplify.emit_operation)
+OptSimplify.propagate_forward = dispatch_opt
diff --git a/pypy/jit/metainterp/optimizeopt/test/__init__.py b/pypy/jit/metainterp/optimizeopt/test/__init__.py
new file mode 100644
diff --git a/pypy/jit/metainterp/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
rename from pypy/jit/metainterp/test/test_optimizebasic.py
rename to pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -1,40 +1,15 @@
 import py
 from pypy.rlib.objectmodel import instantiate
-from pypy.jit.metainterp.test.test_optimizeutil import (LLtypeMixin,
-                                                        #OOtypeMixin,
-                                                        BaseTest)
+from pypy.jit.metainterp.optimizeopt.test.test_util import (
+    LLtypeMixin, BaseTest, FakeMetaInterpStaticData)
 from pypy.jit.metainterp.test.test_compile import FakeLogger
 import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt
 import pypy.jit.metainterp.optimizeopt.virtualize as virtualize
-from pypy.jit.metainterp.optimizeutil import InvalidLoop
+from pypy.jit.metainterp.optimize import InvalidLoop
 from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt
-from pypy.jit.metainterp.jitprof import EmptyProfiler
 from pypy.jit.metainterp import executor, compile, resume, history
 from pypy.jit.metainterp.resoperation import rop, opname, ResOperation
-from pypy.jit.tool.oparser import pure_parse
-from pypy.jit.metainterp.optimizeutil import args_dict
-
-##class FakeFrame(object):
-##    parent_resumedata_snapshot = None
-##    parent_resumedata_frame_info_list = None
-
-##    def __init__(self, code="", pc=0):
-##        self.jitcode = code
-##        self.pc = pc
-
-class Fake(object):
-    failargs_limit = 1000
-    storedebug = None
-
-class FakeMetaInterpStaticData(object):
-
-    def __init__(self, cpu):
-        self.cpu = cpu
-        self.profiler = EmptyProfiler()
-        self.options = Fake()
-        self.globaldata = Fake()
-        self.logger_ops = FakeLogger()
-        self.logger_noopt = FakeLogger()
+
 
 def test_store_final_boxes_in_guard():
     from pypy.jit.metainterp.compile import ResumeGuardDescr
@@ -104,7 +79,7 @@
     assert vinfo3 is vinfo4
 
 def test_descrlist_dict():
-    from pypy.jit.metainterp import optimizeutil
+    from pypy.jit.metainterp.optimizeopt import util as optimizeutil
     h1 = optimizeutil.descrlist_hash([])
     h2 = optimizeutil.descrlist_hash([LLtypeMixin.valuedescr])
     h3 = optimizeutil.descrlist_hash(
@@ -133,159 +108,55 @@
 
 # ____________________________________________________________
 
-def equaloplists(oplist1, oplist2, strict_fail_args=True, remap={},
-                 text_right=None):
-    # try to use the full width of the terminal to display the list
-    # unfortunately, does not work with the default capture method of py.test
-    # (which is fd), you you need to use either -s or --capture=sys, else you
-    # get the standard 80 columns width
-    totwidth = py.io.get_terminal_width()
-    width = totwidth / 2 - 1
-    print ' Comparing lists '.center(totwidth, '-')
-    text_right = text_right or 'expected'
-    print '%s| %s' % ('optimized'.center(width), text_right.center(width))
-    for op1, op2 in zip(oplist1, oplist2):
-        txt1 = str(op1)
-        txt2 = str(op2)
-        while txt1 or txt2:
-            print '%s| %s' % (txt1[:width].ljust(width), txt2[:width])
-            txt1 = txt1[width:]
-            txt2 = txt2[width:]
-        assert op1.getopnum() == op2.getopnum()
-        assert op1.numargs() == op2.numargs()
-        for i in range(op1.numargs()):
-            x = op1.getarg(i)
-            y = op2.getarg(i)
-            assert x == remap.get(y, y)
-        if op2.result in remap:
-            assert op1.result == remap[op2.result]
-        else:
-            remap[op2.result] = op1.result
-        if op1.getopnum() != rop.JUMP:      # xxx obscure
-            assert op1.getdescr() == op2.getdescr()
-        if op1.getfailargs() or op2.getfailargs():
-            assert len(op1.getfailargs()) == len(op2.getfailargs())
-            if strict_fail_args:
-                for x, y in zip(op1.getfailargs(), op2.getfailargs()):
-                    assert x == remap.get(y, y)
-            else:
-                fail_args1 = set(op1.getfailargs())
-                fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()])
-                assert fail_args1 == fail_args2
-    assert len(oplist1) == len(oplist2)
-    print '-'*totwidth
-    return True
-
-def test_equaloplists():
-    ops = """
-    [i0]
-    i1 = int_add(i0, 1)
-    i2 = int_add(i1, 1)
-    guard_true(i1) [i2]
-    jump(i1)
-    """
-    namespace = {}
-    loop1 = pure_parse(ops, namespace=namespace)
-    loop2 = pure_parse(ops, namespace=namespace)
-    loop3 = pure_parse(ops.replace("i2 = int_add", "i2 = int_sub"),
-                       namespace=namespace)
-    assert equaloplists(loop1.operations, loop2.operations)
-    py.test.raises(AssertionError,
-                   "equaloplists(loop1.operations, loop3.operations)")
-
-def test_equaloplists_fail_args():
-    ops = """
-    [i0]
-    i1 = int_add(i0, 1)
-    i2 = int_add(i1, 1)
-    guard_true(i1) [i2, i1]
-    jump(i1)
-    """
-    namespace = {}
-    loop1 = pure_parse(ops, namespace=namespace)
-    loop2 = pure_parse(ops.replace("[i2, i1]", "[i1, i2]"),
-                       namespace=namespace)
-    py.test.raises(AssertionError,
-                   "equaloplists(loop1.operations, loop2.operations)")
-    assert equaloplists(loop1.operations, loop2.operations,
-                        strict_fail_args=False)
-    loop3 = pure_parse(ops.replace("[i2, i1]", "[i2, i0]"),
-                       namespace=namespace)
-    py.test.raises(AssertionError,
-                   "equaloplists(loop1.operations, loop3.operations)")
-
-# ____________________________________________________________
-
-class Storage(compile.ResumeGuardDescr):
-    "for tests."
-    def __init__(self, metainterp_sd=None, original_greenkey=None):
-        self.metainterp_sd = metainterp_sd
-        self.original_greenkey = original_greenkey
-    def store_final_boxes(self, op, boxes):
-        op.setfailargs(boxes)
-    def __eq__(self, other):
-        return type(self) is type(other)      # xxx obscure
-
-def _sortboxes(boxes):
-    _kind2count = {history.INT: 1, history.REF: 2, history.FLOAT: 3}
-    return sorted(boxes, key=lambda box: _kind2count[box.type])
 
 class BaseTestBasic(BaseTest):
 
-    def invent_fail_descr(self, model, fail_args):
-        if fail_args is None:
-            return None
-        descr = Storage()
-        descr.rd_frame_info_list = resume.FrameInfo(None, "code", 11)
-        descr.rd_snapshot = resume.Snapshot(None, _sortboxes(fail_args))
-        return descr
-
-    def assert_equal(self, optimized, expected):
-        assert len(optimized.inputargs) == len(expected.inputargs)
-        remap = {}
-        for box1, box2 in zip(optimized.inputargs, expected.inputargs):
-            assert box1.__class__ == box2.__class__
-            remap[box2] = box1
-        assert equaloplists(optimized.operations,
-                            expected.operations, False, remap)
+    enable_opts = "intbounds:rewrite:virtualize:string:heap"
 
     def optimize_loop(self, ops, optops, call_pure_results=None):
+
         loop = self.parse(ops)
-        #
-        self.loop = loop
-        loop.call_pure_results = args_dict()
-        if call_pure_results is not None:
-            for k, v in call_pure_results.items():
-                loop.call_pure_results[list(k)] = v
-        metainterp_sd = FakeMetaInterpStaticData(self.cpu)
-        if hasattr(self, 'vrefinfo'):
-            metainterp_sd.virtualref_info = self.vrefinfo
-        if hasattr(self, 'callinfocollection'):
-            metainterp_sd.callinfocollection = self.callinfocollection
-        #
-        # XXX list the exact optimizations that are needed for each test
-        from pypy.jit.metainterp.optimizeopt import (OptIntBounds,
-                                                     OptRewrite,
-                                                     OptVirtualize,
-                                                     OptString,
-                                                     OptHeap,
-                                                     Optimizer)
-        from pypy.jit.metainterp.optimizeopt.fficall import OptFfiCall
-
-        optimizations = [OptIntBounds(),
-                         OptRewrite(),
-                         OptVirtualize(),
-                         OptString(),
-                         OptHeap(),
-                         OptFfiCall(),
-                         ]
-        optimizer = Optimizer(metainterp_sd, loop, optimizations)
-        optimizer.propagate_all_forward()
-        #
         expected = self.parse(optops)
+        self._do_optimize_loop(loop, call_pure_results)
         print '\n'.join([str(o) for o in loop.operations])
         self.assert_equal(loop, expected)
 
+    def setup_method(self, meth=None):
+        class FailDescr(compile.ResumeGuardDescr):
+            oparse = None
+            def _oparser_uses_descr_of_guard(self, oparse, fail_args):
+                # typically called 3 times: once when parsing 'ops',
+                # once when parsing 'preamble', once when parsing 'expected'.
+                self.oparse = oparse
+                self.rd_frame_info_list, self.rd_snapshot = snapshot(fail_args)
+            def _clone_if_mutable(self):
+                assert self is fdescr
+                return fdescr2
+            def __repr__(self):
+                if self is fdescr:
+                    return 'fdescr'
+                if self is fdescr2:
+                    return 'fdescr2'
+                return compile.ResumeGuardDescr.__repr__(self)
+        #
+        def snapshot(fail_args, got=[]):
+            if not got:    # only the first time, i.e. when parsing 'ops'
+                rd_frame_info_list = resume.FrameInfo(None, "code", 11)
+                rd_snapshot = resume.Snapshot(None, fail_args)
+                got.append(rd_frame_info_list)
+                got.append(rd_snapshot)
+            return got
+        #
+        fdescr = instantiate(FailDescr)
+        self.namespace['fdescr'] = fdescr
+        fdescr2 = instantiate(FailDescr)
+        self.namespace['fdescr2'] = fdescr2
+
+    def teardown_method(self, meth):
+        self.namespace.pop('fdescr', None)
+        self.namespace.pop('fdescr2', None)
+
+
 
 class BaseTestOptimizeBasic(BaseTestBasic):
 
@@ -568,6 +439,23 @@
         """
         self.optimize_loop(ops, expected)
 
+    def test_int_is_zero_int_is_true(self):
+        ops = """
+        [i0]
+        i1 = int_is_zero(i0)
+        guard_true(i1) []
+        i2 = int_is_true(i0)
+        guard_false(i2) []
+        jump(i0)
+        """
+        expected = """
+        [i0]
+        i1 = int_is_zero(i0)
+        guard_true(i1) []
+        jump(0)
+        """
+        self.optimize_loop(ops, expected)
+
     def test_ooisnull_oononnull_2(self):
         ops = """
         [p0]
@@ -1234,8 +1122,8 @@
         """
         expected = """
         [i1, p0]
+        p1 = new_array(i1, descr=arraydescr)
         setarrayitem_gc(p0, 0, i1, descr=arraydescr)
-        p1 = new_array(i1, descr=arraydescr)
         jump(i1, p1)
         """
         self.optimize_loop(ops, expected)
@@ -1600,9 +1488,9 @@
         i3 = getarrayitem_gc_pure(p3, 1, descr=arraydescr)
         i4 = getarrayitem_gc(p3, i3, descr=arraydescr)
         i5 = int_add(i3, i4)
-        setarrayitem_gc(p3, 0, i5, descr=arraydescr)
         #
         setfield_gc(p1, i2, descr=valuedescr)
+        setarrayitem_gc(p3, 0, i5, descr=arraydescr)
         setfield_gc(p1, i4, descr=nextdescr)
         jump(p1, i1, i2, p3)
         """
@@ -1776,6 +1664,7 @@
         self.optimize_loop(ops, expected)
 
     def test_duplicate_getarrayitem_after_setarrayitem_2(self):
+        py.test.skip("setarrayitem with variable index")
         ops = """
         [p1, p2, p3, i1]
         setarrayitem_gc(p1, 0, p2, descr=arraydescr2)
@@ -2038,7 +1927,6 @@
         self.optimize_loop(ops, expected)
 
     def test_merge_guard_nonnull_guard_class(self):
-        self.make_fail_descr()
         ops = """
         [p1, i0, i1, i2, p2]
         guard_nonnull(p1, descr=fdescr) [i0]
@@ -2056,7 +1944,6 @@
         self.check_expanded_fail_descr("i0", rop.GUARD_NONNULL_CLASS)
 
     def test_merge_guard_nonnull_guard_value(self):
-        self.make_fail_descr()
         ops = """
         [p1, i0, i1, i2, p2]
         guard_nonnull(p1, descr=fdescr) [i0]
@@ -2074,7 +1961,6 @@
         self.check_expanded_fail_descr("i0", rop.GUARD_VALUE)
 
     def test_merge_guard_nonnull_guard_class_guard_value(self):
-        self.make_fail_descr()
         ops = """
         [p1, i0, i1, i2, p2]
         guard_nonnull(p1, descr=fdescr) [i0]
@@ -2290,25 +2176,83 @@
         """
         self.optimize_loop(ops, expected)
 
+    def test_fold_constant_partial_ops_float(self):
+        ops = """
+        [f0]
+        f1 = float_mul(f0, 1.0)
+        f2 = escape(f1)
+        jump(f2)
+        """
+        expected = """
+        [f0]
+        f2 = escape(f0)
+        jump(f2)
+        """
+        self.optimize_loop(ops, expected)
+
+        ops = """
+        [f0]
+        f1 = float_mul(1.0, f0)
+        f2 = escape(f1)
+        jump(f2)
+        """
+        expected = """
+        [f0]
+        f2 = escape(f0)
+        jump(f2)
+        """
+        self.optimize_loop(ops, expected)
+
+
+        ops = """
+        [f0]
+        f1 = float_mul(f0, -1.0)
+        f2 = escape(f1)
+        jump(f2)
+        """
+        expected = """
+        [f0]
+        f1 = float_neg(f0)
+        f2 = escape(f1)
+        jump(f2)
+        """
+        self.optimize_loop(ops, expected)
+
+        ops = """
+        [f0]
+        f1 = float_mul(-1.0, f0)
+        f2 = escape(f1)
+        jump(f2)
+        """
+        expected = """
+        [f0]
+        f1 = float_neg(f0)
+        f2 = escape(f1)
+        jump(f2)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_fold_repeated_float_neg(self):
+        ops = """
+        [f0]
+        f1 = float_neg(f0)
+        f2 = float_neg(f1)
+        f3 = float_neg(f2)
+        f4 = float_neg(f3)
+        escape(f4)
+        jump(f4)
+        """
+        expected = """
+        [f0]
+        # The backend removes this dead op.
+        f1 = float_neg(f0)
+        escape(f0)
+        jump(f0)
+        """
+        self.optimize_loop(ops, expected)
+
     # ----------
 
-    def make_fail_descr(self):
-        class FailDescr(compile.ResumeGuardDescr):
-            oparse = None
-            def _oparser_uses_descr_of_guard(self, oparse, fail_args):
-                # typically called twice, before and after optimization
-                if self.oparse is None:
-                    fdescr.rd_frame_info_list = resume.FrameInfo(None,
-                                                                 "code", 11)
-                    fdescr.rd_snapshot = resume.Snapshot(None, fail_args)
-                self.oparse = oparse
-        #
-        fdescr = instantiate(FailDescr)
-        self.namespace['fdescr'] = fdescr
-
-    def teardown_method(self, meth):
-        self.namespace.pop('fdescr', None)
-
     def _verify_fail_args(self, boxes, oparse, text):
         import re
         r = re.compile(r"\bwhere\s+(\w+)\s+is a\s+(\w+)")
@@ -2417,7 +2361,6 @@
         self._verify_fail_args(boxes, fdescr.oparse, expectedtext)
 
     def test_expand_fail_1(self):
-        self.make_fail_descr()
         ops = """
         [i1, i3]
         # first rename i3 into i4
@@ -2438,7 +2381,6 @@
         self.check_expanded_fail_descr('15, i3', rop.GUARD_TRUE)
 
     def test_expand_fail_2(self):
-        self.make_fail_descr()
         ops = """
         [i1, i2]
         p1 = new_with_vtable(ConstClass(node_vtable))
@@ -2458,7 +2400,6 @@
             ''', rop.GUARD_TRUE)
 
     def test_expand_fail_3(self):
-        self.make_fail_descr()
         ops = """
         [i1, i2, i3, p3]
         p1 = new_with_vtable(ConstClass(node_vtable))
@@ -2484,7 +2425,7 @@
     def test_expand_fail_4(self):
         for arg in ['p1', 'i2,p1', 'p1,p2', 'p2,p1',
                     'i2,p1,p2', 'i2,p2,p1']:
-            self.make_fail_descr()
+            self.setup_method() # humpf
             ops = """
             [i1, i2, i3]
             p1 = new_with_vtable(ConstClass(node_vtable))
@@ -2509,7 +2450,6 @@
                                            rop.GUARD_TRUE)
 
     def test_expand_fail_5(self):
-        self.make_fail_descr()
         ops = """
         [i1, i2, i3, i4]
         p1 = new_with_vtable(ConstClass(node_vtable))
@@ -2533,7 +2473,6 @@
             ''', rop.GUARD_TRUE)
 
     def test_expand_fail_6(self):
-        self.make_fail_descr()
         ops = """
         [p0, i0, i1]
         guard_true(i0, descr=fdescr) [p0]
@@ -2554,7 +2493,6 @@
             ''', rop.GUARD_TRUE)
 
     def test_expand_fail_varray(self):
-        self.make_fail_descr()
         ops = """
         [i1]
         p1 = new_array(3, descr=arraydescr)
@@ -2575,7 +2513,6 @@
             ''', rop.GUARD_TRUE)
 
     def test_expand_fail_vstruct(self):
-        self.make_fail_descr()
         ops = """
         [i1, p1]
         p2 = new(descr=ssize)
@@ -2597,7 +2534,6 @@
             ''', rop.GUARD_TRUE)
 
     def test_expand_fail_v_all_1(self):
-        self.make_fail_descr()
         ops = """
         [i1, p1a, i2]
         p6s = getarrayitem_gc(p1a, 0, descr=arraydescr2)
@@ -2639,7 +2575,6 @@
             ''', rop.GUARD_TRUE)
 
     def test_expand_fail_lazy_setfield_1(self):
-        self.make_fail_descr()
         ops = """
         [p1, i2, i3]
         p2 = new_with_vtable(ConstClass(node_vtable))
@@ -2665,7 +2600,6 @@
             ''', rop.GUARD_TRUE)
 
     def test_expand_fail_lazy_setfield_2(self):
-        self.make_fail_descr()
         ops = """
         [i2, i3]
         p2 = new_with_vtable(ConstClass(node_vtable))
@@ -2689,9 +2623,6 @@
             where p2 is a node_vtable, valuedescr=i2
             ''', rop.GUARD_TRUE)
 
-
-class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
-
     def test_residual_call_does_not_invalidate_caches(self):
         ops = """
         [p1, p2]
@@ -2983,7 +2914,6 @@
         self.optimize_loop(ops, expected)
 
     def test_vref_virtual_2(self):
-        self.make_fail_descr()
         ops = """
         [p0, i1]
         #
@@ -3029,7 +2959,6 @@
             ''', rop.GUARD_NOT_FORCED)
 
     def test_vref_virtual_and_lazy_setfield(self):
-        self.make_fail_descr()
         ops = """
         [p0, i1]
         #
@@ -3068,7 +2997,6 @@
             ''', rop.GUARD_NO_EXCEPTION)
 
     def test_vref_virtual_after_finish(self):
-        self.make_fail_descr()
         ops = """
         [i1]
         p1 = new_with_vtable(ConstClass(node_vtable))
@@ -3095,7 +3023,6 @@
         self.optimize_loop(ops, expected)
 
     def test_vref_nonvirtual_and_lazy_setfield(self):
-        self.make_fail_descr()
         ops = """
         [i1, p1]
         p2 = virtual_ref(p1, 23)
@@ -4213,7 +4140,6 @@
 
     # ----------
     def optimize_strunicode_loop_extradescrs(self, ops, optops):
-        from pypy.jit.metainterp.optimizeopt import string
         class FakeCallInfoCollection:
             def callinfo_for_oopspec(self, oopspecindex):
                 calldescrtype = type(LLtypeMixin.strequaldescr)
@@ -4569,6 +4495,47 @@
         # not obvious, because of the exception UnicodeDecodeError that
         # can be raised by ll_str2unicode()
 
+    def test_strgetitem_repeated(self):
+        ops = """
+        [p0, i0]
+        i1 = strgetitem(p0, i0)
+        i2 = strgetitem(p0, i0)
+        i3 = int_eq(i1, i2)
+        guard_true(i3) []
+        escape(i2)
+        jump(p0, i0)
+        """
+        expected = """
+        [p0, i0]
+        i1 = strgetitem(p0, i0)
+        escape(i1)
+        jump(p0, i0)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_int_is_true_bounds(self):
+        ops = """
+        [p0]
+        i0 = strlen(p0)
+        i1 = int_is_true(i0)
+        guard_true(i1) []
+        i2 = int_ge(0, i0)
+        guard_false(i2) []
+        jump(p0)
+        """
+        expected = """
+        [p0]
+        i0 = strlen(p0)
+        i1 = int_is_true(i0)
+        guard_true(i1) []
+        jump(p0)
+        """
+        self.optimize_loop(ops, expected)
+
+
+class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
+    pass
+
 
 ##class TestOOtype(BaseTestOptimizeBasic, OOtypeMixin):
 
diff --git a/pypy/jit/metainterp/test/test_optimizefficall.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
rename from pypy/jit/metainterp/test/test_optimizefficall.py
rename to pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
--- a/pypy/jit/metainterp/test/test_optimizefficall.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizefficall.py
@@ -2,8 +2,8 @@
 from pypy.rlib.libffi import Func, types
 from pypy.jit.metainterp.history import AbstractDescr
 from pypy.jit.codewriter.effectinfo import EffectInfo
-from pypy.jit.metainterp.test.test_optimizebasic import BaseTestBasic
-from pypy.jit.metainterp.test.test_optimizebasic import LLtypeMixin
+from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import BaseTestBasic
+from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import LLtypeMixin
 
 class MyCallDescr(AbstractDescr):
     """
@@ -33,6 +33,8 @@
 
 class TestFfiCall(BaseTestBasic, LLtypeMixin):
 
+    enable_opts = "intbounds:rewrite:virtualize:string:heap:ffi"
+
     class namespace:
         cpu = LLtypeMixin.cpu
         FUNC = LLtypeMixin.FUNC
@@ -49,7 +51,7 @@
                              restype=types.sint)
         #
         def calldescr(cpu, FUNC, oopspecindex, extraeffect=None):
-            einfo = EffectInfo([], [], [], oopspecindex=oopspecindex,
+            einfo = EffectInfo([], [], [], [], oopspecindex=oopspecindex,
                                extraeffect=extraeffect)
             return cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT, einfo)
         #
diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
rename from pypy/jit/metainterp/test/test_optimizeopt.py
rename to pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/pypy/jit/metainterp/test/test_optimizeopt.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -1,36 +1,21 @@
 import py
 from pypy.rlib.objectmodel import instantiate
-from pypy.jit.metainterp.test.test_optimizeutil import (LLtypeMixin,
-                                                        #OOtypeMixin,
-                                                        BaseTest)
+from pypy.jit.metainterp.optimizeopt.test.test_util import (
+    LLtypeMixin, BaseTest, Storage, _sortboxes)
 import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt
 import pypy.jit.metainterp.optimizeopt.virtualize as virtualize
 from pypy.jit.metainterp.optimizeopt import optimize_loop_1, ALL_OPTS_DICT, build_opt_chain
-from pypy.jit.metainterp.optimizeutil import InvalidLoop
+from pypy.jit.metainterp.optimize import InvalidLoop
 from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt
 from pypy.jit.metainterp.history import TreeLoop, LoopToken
 from pypy.jit.metainterp.jitprof import EmptyProfiler
 from pypy.jit.metainterp import executor, compile, resume, history
 from pypy.jit.metainterp.resoperation import rop, opname, ResOperation
 from pypy.jit.tool.oparser import pure_parse
-from pypy.jit.metainterp.test.test_optimizebasic import equaloplists
-from pypy.jit.metainterp.optimizeutil import args_dict
+from pypy.jit.metainterp.optimizeopt.util import args_dict
+from pypy.jit.metainterp.optimizeopt.test.test_optimizebasic import FakeMetaInterpStaticData
 from pypy.config.pypyoption import get_pypy_config
 
-class Fake(object):
-    failargs_limit = 1000
-    storedebug = None
-
-class FakeMetaInterpStaticData(object):
-
-    def __init__(self, cpu):
-        self.cpu = cpu
-        self.profiler = EmptyProfiler()
-        self.options = Fake()
-        self.globaldata = Fake()
-        self.config = get_pypy_config(translating=True)
-        self.config.translation.jit_ffi = True
-
 
 def test_build_opt_chain():
     def check(chain, expected_names):
@@ -66,173 +51,38 @@
     check(chain, ["OptSimplify"])
 
 
-def test_store_final_boxes_in_guard():
-    from pypy.jit.metainterp.compile import ResumeGuardDescr
-    from pypy.jit.metainterp.resume import tag, TAGBOX
-    b0 = BoxInt()
-    b1 = BoxInt()
-    opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu),
-                                None)
-    fdescr = ResumeGuardDescr()
-    op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr)
-    # setup rd data
-    fi0 = resume.FrameInfo(None, "code0", 11)
-    fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33)
-    snapshot0 = resume.Snapshot(None, [b0])
-    fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1])
-    #
-    opt.store_final_boxes_in_guard(op)
-    if op.getfailargs() == [b0, b1]:
-        assert list(fdescr.rd_numb.nums)      == [tag(1, TAGBOX)]
-        assert list(fdescr.rd_numb.prev.nums) == [tag(0, TAGBOX)]
-    else:
-        assert op.getfailargs() == [b1, b0]
-        assert list(fdescr.rd_numb.nums)      == [tag(0, TAGBOX)]
-        assert list(fdescr.rd_numb.prev.nums) == [tag(1, TAGBOX)]
-    assert fdescr.rd_virtuals is None
-    assert fdescr.rd_consts == []
-
-def test_sharing_field_lists_of_virtual():
-    class FakeOptimizer(object):
-        class cpu(object):
-            pass
-    opt = FakeOptimizer()
-    virt1 = virtualize.AbstractVirtualStructValue(opt, None)
-    lst1 = virt1._get_field_descr_list()
-    assert lst1 == []
-    lst2 = virt1._get_field_descr_list()
-    assert lst1 is lst2
-    virt1.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None))
-    lst3 = virt1._get_field_descr_list()
-    assert lst3 == [LLtypeMixin.valuedescr]
-    lst4 = virt1._get_field_descr_list()
-    assert lst3 is lst4
-
-    virt2 = virtualize.AbstractVirtualStructValue(opt, None)
-    lst5 = virt2._get_field_descr_list()
-    assert lst5 is lst1
-    virt2.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None))
-    lst6 = virt1._get_field_descr_list()
-    assert lst6 is lst3
-
-def test_reuse_vinfo():
-    class FakeVInfo(object):
-        def set_content(self, fieldnums):
-            self.fieldnums = fieldnums
-        def equals(self, fieldnums):
-            return self.fieldnums == fieldnums
-    class FakeVirtualValue(virtualize.AbstractVirtualValue):
-        def _make_virtual(self, *args):
-            return FakeVInfo()
-    v1 = FakeVirtualValue(None, None, None)
-    vinfo1 = v1.make_virtual_info(None, [1, 2, 4])
-    vinfo2 = v1.make_virtual_info(None, [1, 2, 4])
-    assert vinfo1 is vinfo2
-    vinfo3 = v1.make_virtual_info(None, [1, 2, 6])
-    assert vinfo3 is not vinfo2
-    vinfo4 = v1.make_virtual_info(None, [1, 2, 6])
-    assert vinfo3 is vinfo4
-
-def test_descrlist_dict():
-    from pypy.jit.metainterp import optimizeutil
-    h1 = optimizeutil.descrlist_hash([])
-    h2 = optimizeutil.descrlist_hash([LLtypeMixin.valuedescr])
-    h3 = optimizeutil.descrlist_hash(
-            [LLtypeMixin.valuedescr, LLtypeMixin.nextdescr])
-    assert h1 != h2
-    assert h2 != h3
-    assert optimizeutil.descrlist_eq([], [])
-    assert not optimizeutil.descrlist_eq([], [LLtypeMixin.valuedescr])
-    assert optimizeutil.descrlist_eq([LLtypeMixin.valuedescr],
-                                     [LLtypeMixin.valuedescr])
-    assert not optimizeutil.descrlist_eq([LLtypeMixin.valuedescr],
-                                         [LLtypeMixin.nextdescr])
-    assert optimizeutil.descrlist_eq([LLtypeMixin.valuedescr, LLtypeMixin.nextdescr],
-                                     [LLtypeMixin.valuedescr, LLtypeMixin.nextdescr])
-    assert not optimizeutil.descrlist_eq([LLtypeMixin.nextdescr, LLtypeMixin.valuedescr],
-                                         [LLtypeMixin.valuedescr, LLtypeMixin.nextdescr])
-
-    # descrlist_eq should compare by identity of the descrs, not by the result
-    # of sort_key
-    class FakeDescr(object):
-        def sort_key(self):
-            return 1
-
-    assert not optimizeutil.descrlist_eq([FakeDescr()], [FakeDescr()])
-
 # ____________________________________________________________
-class Storage(compile.ResumeGuardDescr):
-    "for tests."
-    def __init__(self, metainterp_sd=None, original_greenkey=None):
-        self.metainterp_sd = metainterp_sd
-        self.original_greenkey = original_greenkey
-    def store_final_boxes(self, op, boxes):
-        op.setfailargs(boxes)
-    def __eq__(self, other):
-        return type(self) is type(other)      # xxx obscure
+
+
+class FakeDescr(compile.ResumeGuardDescr):
+    class rd_snapshot:
+        class prev:
+            prev = None
+            boxes = []
+        boxes = []
     def clone_if_mutable(self):
-        res = Storage(self.metainterp_sd, self.original_greenkey)
-        self.copy_all_attributes_into(res)
-        return res
-
-def _sortboxes(boxes):
-    _kind2count = {history.INT: 1, history.REF: 2, history.FLOAT: 3}
-    return sorted(boxes, key=lambda box: _kind2count[box.type])
-
-class BaseTestOptimizeOpt(BaseTest):
-
-    def invent_fail_descr(self, model, fail_args):
-        if fail_args is None:
-            return None
-        descr = Storage()
-        descr.rd_frame_info_list = resume.FrameInfo(None, "code", 11)
-        descr.rd_snapshot = resume.Snapshot(None, _sortboxes(fail_args))
-        return descr
-
-    def assert_equal(self, optimized, expected, text_right=None):
-        assert len(optimized.inputargs) == len(expected.inputargs)
-        remap = {}
-        for box1, box2 in zip(optimized.inputargs, expected.inputargs):
-            assert box1.__class__ == box2.__class__
-            remap[box2] = box1
-        assert equaloplists(optimized.operations,
-                            expected.operations, False, remap, text_right)
-
-    def optimize_loop(self, ops, optops, expected_preamble=None,
+        return self
+
+
+class BaseTestWithUnroll(BaseTest):
+
+    enable_opts = "intbounds:rewrite:virtualize:string:heap:unroll"
+
+    def optimize_loop(self, ops, expected, expected_preamble=None,
                       call_pure_results=None):
         loop = self.parse(ops)
-        if optops != "crash!":
-            expected = self.parse(optops)
-        else:
-            expected = "crash!"
+        if expected != "crash!":
+            expected = self.parse(expected)
         if expected_preamble:
             expected_preamble = self.parse(expected_preamble)
-        #
-        self.loop = loop
-        loop.call_pure_results = args_dict()
-        if call_pure_results is not None:
-            for k, v in call_pure_results.items():
-                loop.call_pure_results[list(k)] = v
+
         loop.preamble = TreeLoop('preamble')
         loop.preamble.inputargs = loop.inputargs
         loop.preamble.token = LoopToken()
-        metainterp_sd = FakeMetaInterpStaticData(self.cpu)
-        if hasattr(self, 'vrefinfo'):
-            metainterp_sd.virtualref_info = self.vrefinfo
-        if hasattr(self, 'callinfocollection'):
-            metainterp_sd.callinfocollection = self.callinfocollection
-        class FakeDescr(compile.ResumeGuardDescr):
-            class rd_snapshot:
-                class prev:
-                    prev = None
-                    boxes = []
-                boxes = []
-            def clone_if_mutable(self):
-                return self
         loop.preamble.start_resumedescr = FakeDescr()
-        optimize_loop_1(metainterp_sd, loop, ALL_OPTS_DICT)
         #
-
+        self._do_optimize_loop(loop, call_pure_results)
+        #
         print
         print loop.preamble.inputargs
         print '\n'.join([str(o) for o in loop.preamble.operations])
@@ -240,16 +90,14 @@
         print loop.inputargs
         print '\n'.join([str(o) for o in loop.operations])
         print
-
         assert expected != "crash!", "should have raised an exception"
         self.assert_equal(loop, expected)
         if expected_preamble:
             self.assert_equal(loop.preamble, expected_preamble,
                               text_right='expected preamble')
-
         return loop
 
-class OptimizeOptTest(BaseTestOptimizeOpt):
+class OptimizeOptTest(BaseTestWithUnroll):
 
     def setup_method(self, meth=None):
         class FailDescr(compile.ResumeGuardDescr):
@@ -1533,8 +1381,8 @@
         """
         expected = """
         [i1, p0]
+        p1 = new_array(i1, descr=arraydescr)
         setarrayitem_gc(p0, 0, i1, descr=arraydescr)
-        p1 = new_array(i1, descr=arraydescr)
         jump(i1, p1)
         """
         self.optimize_loop(ops, expected)
@@ -1958,9 +1806,9 @@
         i3 = getarrayitem_gc_pure(p3, 1, descr=arraydescr)
         i4 = getarrayitem_gc(p3, i3, descr=arraydescr)
         i5 = int_add(i3, i4)
-        setarrayitem_gc(p3, 0, i5, descr=arraydescr)
         #
         setfield_gc(p1, i2, descr=valuedescr)
+        setarrayitem_gc(p3, 0, i5, descr=arraydescr)
         setfield_gc(p1, i4, descr=nextdescr)
         escape()
         jump(p1, i1, i2, p3, i3)
@@ -1970,9 +1818,9 @@
         #
         i4 = getarrayitem_gc(p3, i3, descr=arraydescr)
         i5 = int_add(i3, i4)
-        setarrayitem_gc(p3, 0, i5, descr=arraydescr)
         #
         setfield_gc(p1, i2, descr=valuedescr)
+        setarrayitem_gc(p3, 0, i5, descr=arraydescr)
         setfield_gc(p1, i4, descr=nextdescr)
         escape()
         jump(p1, i1, i2, p3, i3)
@@ -2207,6 +2055,7 @@
         self.optimize_loop(ops, expected)
 
     def test_duplicate_getarrayitem_after_setarrayitem_2(self):
+        py.test.skip("setarrayitem with variable index")
         ops = """
         [p1, p2, p3, i1]
         setarrayitem_gc(p1, 0, p2, descr=arraydescr2)
@@ -2893,8 +2742,6 @@
 
     # ----------
 
-class TestLLtype(OptimizeOptTest, LLtypeMixin):
-
     def test_residual_call_does_not_invalidate_caches(self):
         ops = """
         [p1, p2]
@@ -5463,7 +5310,7 @@
         """
         self.optimize_strunicode_loop(ops, expected)
 
-    def test_strgetitem_small(self):
+    def test_strgetitem_bounds(self):
         ops = """
         [p0, i0]
         i1 = strgetitem(p0, i0)
@@ -5475,7 +5322,20 @@
         """
         expected = """
         [p0, i0]
-        i1 = strgetitem(p0, i0)
+        jump(p0, i0)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_unicodegetitem_bounds(self):
+        ops = """
+        [p0, i0]
+        i1 = unicodegetitem(p0, i0)
+        i2 = int_lt(i1, 0)
+        guard_false(i2) []
+        jump(p0, i0)
+        """
+        expected = """
+        [p0, i0]
         jump(p0, i0)
         """
         self.optimize_loop(ops, expected)
@@ -5513,7 +5373,6 @@
 
     # ----------
     def optimize_strunicode_loop_extradescrs(self, ops, optops, preamble=None):
-        from pypy.jit.metainterp.optimizeopt import string
         class FakeCallInfoCollection:
             def callinfo_for_oopspec(self, oopspecindex):
                 calldescrtype = type(LLtypeMixin.strequaldescr)
@@ -5989,3 +5848,54 @@
         jump(i3, i4)
         """
         self.optimize_loop(ops, expected)
+
+    def test_forced_virtual_pure_getfield(self):
+        ops = """
+        [p0]
+        p1 = getfield_gc_pure(p0, descr=valuedescr)
+        jump(p1)
+        """
+        self.optimize_loop(ops, ops)
+
+        ops = """
+        [p0]
+        p1 = new_with_vtable(ConstClass(node_vtable))
+        setfield_gc(p1, p0, descr=valuedescr)
+        escape(p1)
+        p2 = getfield_gc_pure(p1, descr=valuedescr)
+        escape(p2)
+        jump(p0)
+        """
+        expected = """
+        [p0]
+        p1 = new_with_vtable(ConstClass(node_vtable))
+        setfield_gc(p1, p0, descr=valuedescr)
+        escape(p1)
+        escape(p0)
+        jump(p0)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_setarrayitem_lazy(self):
+        ops = """
+        [i0, i1]
+        p0 = escape()
+        i2 = escape()
+        p1 = new_with_vtable(ConstClass(node_vtable))
+        setarrayitem_gc(p0, 2, p1, descr=arraydescr)
+        guard_true(i2) []
+        setarrayitem_gc(p0, 2, p0, descr=arraydescr)
+        jump(i0, i1)
+        """
+        expected = """
+        [i0, i1]
+        p0 = escape()
+        i2 = escape()
+        guard_true(i2) [p0]
+        setarrayitem_gc(p0, 2, p0, descr=arraydescr)
+        jump(i0, i1)
+        """
+        self.optimize_loop(ops, expected)
+
+class TestLLtype(OptimizeOptTest, LLtypeMixin):
+    pass
diff --git a/pypy/jit/metainterp/test/test_optimizeutil.py b/pypy/jit/metainterp/optimizeopt/test/test_util.py
rename from pypy/jit/metainterp/test/test_optimizeutil.py
rename to pypy/jit/metainterp/optimizeopt/test/test_util.py
--- a/pypy/jit/metainterp/test/test_optimizeutil.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_util.py
@@ -9,11 +9,15 @@
 from pypy.jit.metainterp.history import (BoxInt, BoxPtr, ConstInt, ConstPtr,
                                          Const, TreeLoop, BoxObj,
                                          ConstObj, AbstractDescr)
-from pypy.jit.metainterp.optimizeutil import sort_descrs, InvalidLoop
+from pypy.jit.metainterp.optimizeopt.util import sort_descrs, equaloplists
+from pypy.jit.metainterp.optimize import InvalidLoop
 from pypy.jit.codewriter.effectinfo import EffectInfo
 from pypy.jit.codewriter.heaptracker import register_known_gctype, adr2int
-from pypy.jit.tool.oparser import parse
+from pypy.jit.tool.oparser import parse, pure_parse
 from pypy.jit.metainterp.quasiimmut import QuasiImmutDescr
+from pypy.jit.metainterp import compile, resume, history
+from pypy.jit.metainterp.jitprof import EmptyProfiler
+from pypy.config.pypyoption import get_pypy_config
 
 def test_sort_descrs():
     class PseudoDescr(AbstractDescr):
@@ -28,6 +32,44 @@
         sort_descrs(lst2)
         assert lst2 == lst
 
+def test_equaloplists():
+    ops = """
+    [i0]
+    i1 = int_add(i0, 1)
+    i2 = int_add(i1, 1)
+    guard_true(i1) [i2]
+    jump(i1)
+    """
+    namespace = {}
+    loop1 = pure_parse(ops, namespace=namespace)
+    loop2 = pure_parse(ops, namespace=namespace)
+    loop3 = pure_parse(ops.replace("i2 = int_add", "i2 = int_sub"),
+                       namespace=namespace)
+    assert equaloplists(loop1.operations, loop2.operations)
+    py.test.raises(AssertionError,
+                   "equaloplists(loop1.operations, loop3.operations)")
+
+def test_equaloplists_fail_args():
+    ops = """
+    [i0]
+    i1 = int_add(i0, 1)
+    i2 = int_add(i1, 1)
+    guard_true(i1) [i2, i1]
+    jump(i1)
+    """
+    namespace = {}
+    loop1 = pure_parse(ops, namespace=namespace)
+    loop2 = pure_parse(ops.replace("[i2, i1]", "[i1, i2]"),
+                       namespace=namespace)
+    py.test.raises(AssertionError,
+                   "equaloplists(loop1.operations, loop2.operations)")
+    assert equaloplists(loop1.operations, loop2.operations,
+                        strict_fail_args=False)
+    loop3 = pure_parse(ops.replace("[i2, i1]", "[i2, i0]"),
+                       namespace=namespace)
+    py.test.raises(AssertionError,
+                   "equaloplists(loop1.operations, loop3.operations)")
+
 # ____________________________________________________________
 
 class LLtypeMixin(object):
@@ -124,19 +166,19 @@
     FUNC = lltype.FuncType([lltype.Signed], lltype.Signed)
     plaincalldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT)
     nonwritedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                                    EffectInfo([], [], []))
+                                    EffectInfo([], [], [], []))
     writeadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                                  EffectInfo([], [adescr], []))
+                                  EffectInfo([], [], [adescr], []))
     writearraydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                                      EffectInfo([], [adescr], [arraydescr]))
+                                  EffectInfo([], [], [adescr], [arraydescr]))
     readadescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                                 EffectInfo([adescr], [], []))
+                                 EffectInfo([adescr], [], [], []))
     mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                 EffectInfo([nextdescr], [], [],
+                 EffectInfo([nextdescr], [], [], [],
                             EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE,
                             can_invalidate=True))
     arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                 EffectInfo([], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY))
+             EffectInfo([], [], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY))
 
     for _name, _os in [
         ('strconcatdescr',               'OS_STR_CONCAT'),
@@ -153,15 +195,15 @@
         _oopspecindex = getattr(EffectInfo, _os)
         locals()[_name] = \
             cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                EffectInfo([], [], [], oopspecindex=_oopspecindex))
+                EffectInfo([], [], [], [], oopspecindex=_oopspecindex))
         #
         _oopspecindex = getattr(EffectInfo, _os.replace('STR', 'UNI'))
         locals()[_name.replace('str', 'unicode')] = \
             cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                EffectInfo([], [], [], oopspecindex=_oopspecindex))
+                EffectInfo([], [], [], [], oopspecindex=_oopspecindex))
 
     s2u_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
-                EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR2UNICODE))
+            EffectInfo([], [], [], [], oopspecindex=EffectInfo.OS_STR2UNICODE))
     #
 
     class LoopToken(AbstractDescr):
@@ -256,8 +298,45 @@
 ##                       u_vtable_adr: cpu.typedescrof(U)}
 ##    namespace = locals()
 
+# ____________________________________________________________
+
+
+
+class Fake(object):
+    failargs_limit = 1000
+    storedebug = None
+
+
+class FakeMetaInterpStaticData(object):
+
+    def __init__(self, cpu):
+        self.cpu = cpu
+        self.profiler = EmptyProfiler()
+        self.options = Fake()
+        self.globaldata = Fake()
+        self.config = get_pypy_config(translating=True)
+        self.config.translation.jit_ffi = True
+
+
+class Storage(compile.ResumeGuardDescr):
+    "for tests."
+    def __init__(self, metainterp_sd=None, original_greenkey=None):
+        self.metainterp_sd = metainterp_sd
+        self.original_greenkey = original_greenkey
+    def store_final_boxes(self, op, boxes):
+        op.setfailargs(boxes)
+    def __eq__(self, other):
+        return type(self) is type(other)      # xxx obscure
+    def clone_if_mutable(self):
+        res = Storage(self.metainterp_sd, self.original_greenkey)
+        self.copy_all_attributes_into(res)
+        return res
+
+def _sortboxes(boxes):
+    _kind2count = {history.INT: 1, history.REF: 2, history.FLOAT: 3}
+    return sorted(boxes, key=lambda box: _kind2count[box.type])
+
 class BaseTest(object):
-    invent_fail_descr = None
 
     def parse(self, s, boxkinds=None):
         return parse(s, self.cpu, self.namespace,
@@ -265,5 +344,40 @@
                      boxkinds=boxkinds,
                      invent_fail_descr=self.invent_fail_descr)
 
+    def invent_fail_descr(self, model, fail_args):
+        if fail_args is None:
+            return None
+        descr = Storage()
+        descr.rd_frame_info_list = resume.FrameInfo(None, "code", 11)
+        descr.rd_snapshot = resume.Snapshot(None, _sortboxes(fail_args))
+        return descr
+
+    def assert_equal(self, optimized, expected, text_right=None):
+        from pypy.jit.metainterp.optimizeopt.util import equaloplists
+        assert len(optimized.inputargs) == len(expected.inputargs)
+        remap = {}
+        for box1, box2 in zip(optimized.inputargs, expected.inputargs):
+            assert box1.__class__ == box2.__class__
+            remap[box2] = box1
+        assert equaloplists(optimized.operations,
+                            expected.operations, False, remap, text_right)
+
+    def _do_optimize_loop(self, loop, call_pure_results):
+        from pypy.jit.metainterp.optimizeopt import optimize_loop_1
+        from pypy.jit.metainterp.optimizeopt.util import args_dict
+
+        self.loop = loop
+        loop.call_pure_results = args_dict()
+        if call_pure_results is not None:
+            for k, v in call_pure_results.items():
+                loop.call_pure_results[list(k)] = v
+        metainterp_sd = FakeMetaInterpStaticData(self.cpu)
+        if hasattr(self, 'vrefinfo'):
+            metainterp_sd.virtualref_info = self.vrefinfo
+        if hasattr(self, 'callinfocollection'):
+            metainterp_sd.callinfocollection = self.callinfocollection
+        #
+        optimize_loop_1(metainterp_sd, loop, self.enable_opts)
+
 # ____________________________________________________________
 
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -5,7 +5,7 @@
 from pypy.jit.metainterp.resume import Snapshot
 from pypy.jit.metainterp.history import TreeLoop, LoopToken
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
-from pypy.jit.metainterp.optimizeutil import InvalidLoop, RetraceLoop
+from pypy.jit.metainterp.optimize import InvalidLoop, RetraceLoop
 from pypy.jit.metainterp.jitexc import JitException
 from pypy.jit.metainterp.history import make_hashable_int
 from pypy.jit.codewriter.effectinfo import EffectInfo
@@ -546,7 +546,7 @@
             effectinfo = descr.get_extra_info()
             if effectinfo is not None:
                 if effectinfo.extraeffect == EffectInfo.EF_LOOPINVARIANT or \
-                   effectinfo.extraeffect == EffectInfo.EF_PURE:
+                   effectinfo.extraeffect == EffectInfo.EF_ELIDABLE:
                     return True
         return False
     
@@ -676,24 +676,28 @@
                             jumpop = self.optimizer.newoperations.pop()
                             assert jumpop.getopnum() == rop.JUMP
                             for guard in extra_guards:
-                                descr = sh.start_resumedescr.clone_if_mutable()
-                                self.inliner.inline_descr_inplace(descr)
-                                guard.setdescr(descr)
+                                d = sh.start_resumedescr.clone_if_mutable()
+                                self.inliner.inline_descr_inplace(d)
+                                guard.setdescr(d)
                                 self.emit_operation(guard)
                             self.optimizer.newoperations.append(jumpop)
                         return
-                retraced_count = len(short)
-                if descr.failed_states:
-                    retraced_count += len(descr.failed_states)
+                retraced_count = descr.retraced_count
+                descr.retraced_count += 1
                 limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit
                 if not self.retraced and retraced_count<limit:
                     if not descr.failed_states:
+                        debug_print("Retracing (%d of %d)" % (retraced_count,
+                                                              limit))
                         raise RetraceLoop
                     for failed in descr.failed_states:
                         if failed.generalization_of(virtual_state):
                             # Retracing once more will most likely fail again
                             break
                     else:
+                        debug_print("Retracing (%d of %d)" % (retraced_count,
+                                                              limit))
+                                                              
                         raise RetraceLoop
                 else:
                     if not descr.failed_states:
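
The hunk above replaces the old failed_states-based retrace counting with a per-descr retraced_count. A rough stand-alone sketch of that policy, using illustrative names only (ToyDescr and should_retrace are not part of the patch):

    class ToyDescr(object):
        retraced_count = 0      # bumped on every retrace attempt for this descr

    def should_retrace(descr, limit, already_retraced=False):
        # mirror of the counting logic: read the current count, bump it,
        # and only allow another retrace while the count stays below the limit
        retraced_count = descr.retraced_count
        descr.retraced_count += 1
        return (not already_retraced) and retraced_count < limit

    d = ToyDescr()
    assert [should_retrace(d, limit=3) for _ in range(5)] == \
           [True, True, True, False, False]
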
diff --git a/pypy/jit/metainterp/optimizeutil.py b/pypy/jit/metainterp/optimizeopt/util.py
rename from pypy/jit/metainterp/optimizeutil.py
rename to pypy/jit/metainterp/optimizeopt/util.py
--- a/pypy/jit/metainterp/optimizeutil.py
+++ b/pypy/jit/metainterp/optimizeopt/util.py
@@ -1,21 +1,10 @@
+import py
 from pypy.rlib.objectmodel import r_dict, compute_identity_hash
 from pypy.rlib.rarithmetic import intmask
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.jit.metainterp import resoperation, history
-from pypy.jit.metainterp.jitexc import JitException
 from pypy.rlib.debug import make_sure_not_resized
-
-class InvalidLoop(JitException):
-    """Raised when the optimize*.py detect that the loop that
-    we are trying to build cannot possibly make sense as a
-    long-running loop (e.g. it cannot run 2 complete iterations)."""
-
-class RetraceLoop(JitException):
-    """ Raised when inlining a short preamble resulted in an
-        InvalidLoop. This means the optimized loop is too specialized
-        to be useful here, so we trace it again and produced a second
-        copy specialized in some different way.
-    """
+from pypy.jit.metainterp.resoperation import rop
 
 # ____________________________________________________________
 # Misc. utilities
@@ -31,9 +20,25 @@
         if op_prefix and not name.startswith(op_prefix):
             continue
         if hasattr(Class, name_prefix + name):
-            result.append((value, getattr(Class, name_prefix + name)))
+            opclass = resoperation.opclasses[getattr(rop, name)]
+            print value, name, opclass
+            result.append((value, opclass, getattr(Class, name_prefix + name)))
     return unrolling_iterable(result)
 
+def make_dispatcher_method(Class, name_prefix, op_prefix=None, default=None):
+    ops = _findall(Class, name_prefix, op_prefix)
+    def dispatch(self, op, *args):
+        opnum = op.getopnum()
+        for value, cls, func in ops:
+            if opnum == value:
+                assert isinstance(op, cls)
+                return func(self, op, *args)
+        if default:
+            return default(self, op, *args)
+    dispatch.func_name = "dispatch_" + name_prefix
+    return dispatch
+
+
 def partition(array, left, right):
     last_item = array[right]
     pivot = last_item.sort_key()
@@ -113,3 +118,49 @@
 
 def args_dict_box():
     return r_dict(args_eq, args_hash)
+
+
+# ____________________________________________________________
+
+def equaloplists(oplist1, oplist2, strict_fail_args=True, remap={},
+                 text_right=None):
+    # try to use the full width of the terminal to display the list
+    # unfortunately, does not work with the default capture method of py.test
+    # (which is fd), so you need to use either -s or --capture=sys, else you
+    # get the standard 80 columns width
+    totwidth = py.io.get_terminal_width()
+    width = totwidth / 2 - 1
+    print ' Comparing lists '.center(totwidth, '-')
+    text_right = text_right or 'expected'
+    print '%s| %s' % ('optimized'.center(width), text_right.center(width))
+    for op1, op2 in zip(oplist1, oplist2):
+        txt1 = str(op1)
+        txt2 = str(op2)
+        while txt1 or txt2:
+            print '%s| %s' % (txt1[:width].ljust(width), txt2[:width])
+            txt1 = txt1[width:]
+            txt2 = txt2[width:]
+        assert op1.getopnum() == op2.getopnum()
+        assert op1.numargs() == op2.numargs()
+        for i in range(op1.numargs()):
+            x = op1.getarg(i)
+            y = op2.getarg(i)
+            assert x == remap.get(y, y)
+        if op2.result in remap:
+            assert op1.result == remap[op2.result]
+        else:
+            remap[op2.result] = op1.result
+        if op1.getopnum() != rop.JUMP:      # xxx obscure
+            assert op1.getdescr() == op2.getdescr()
+        if op1.getfailargs() or op2.getfailargs():
+            assert len(op1.getfailargs()) == len(op2.getfailargs())
+            if strict_fail_args:
+                for x, y in zip(op1.getfailargs(), op2.getfailargs()):
+                    assert x == remap.get(y, y)
+            else:
+                fail_args1 = set(op1.getfailargs())
+                fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()])
+                assert fail_args1 == fail_args2
+    assert len(oplist1) == len(oplist2)
+    print '-'*totwidth
+    return True
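
make_dispatcher_method above builds one dispatch function per handler prefix instead of looping over the old _findall table on every call. A minimal self-contained sketch of the same pattern (ToyOptimizer, OPNUMS and _FakeOp are illustrative names, not from the patch; the real helper also checks the operation class):

    OPNUMS = {'INT_ADD': 1, 'INT_SUB': 2, 'ESCAPE': 3}

    class _FakeOp(object):
        def __init__(self, opnum):
            self._opnum = opnum
        def getopnum(self):
            return self._opnum

    def make_dispatcher_method_sketch(Class, name_prefix, default=None):
        # collect the handlers that follow the optimize_<OPNAME> convention
        table = {}
        for name, opnum in OPNUMS.items():
            handler = getattr(Class, name_prefix + name, None)
            if handler is not None:
                table[opnum] = handler
        def dispatch(self, op, *args):
            func = table.get(op.getopnum())
            if func is not None:
                return func(self, op, *args)
            if default is not None:
                return default(self, op, *args)
        return dispatch

    class ToyOptimizer(object):
        def optimize_INT_ADD(self, op):
            return 'folded'
        def emit_operation(self, op):
            return 'emitted'

    ToyOptimizer.propagate_forward = make_dispatcher_method_sketch(
        ToyOptimizer, 'optimize_', default=ToyOptimizer.emit_operation)

    opt = ToyOptimizer()
    assert opt.propagate_forward(_FakeOp(1)) == 'folded'    # has a handler
    assert opt.propagate_forward(_FakeOp(2)) == 'emitted'   # falls back to default
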
diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py
--- a/pypy/jit/metainterp/optimizeopt/virtualize.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualize.py
@@ -1,7 +1,7 @@
 from pypy.jit.metainterp.history import Const, ConstInt, BoxInt
 from pypy.jit.metainterp.resoperation import rop, ResOperation
-from pypy.jit.metainterp.optimizeutil import _findall, sort_descrs
-from pypy.jit.metainterp.optimizeutil import descrlist_dict
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
+from pypy.jit.metainterp.optimizeopt.util import descrlist_dict, sort_descrs
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.jit.metainterp.optimizeopt import optimizer
 from pypy.jit.metainterp.executor import execute
@@ -20,6 +20,9 @@
         self.source_op = source_op  # the NEW_WITH_VTABLE/NEW_ARRAY operation
                                     # that builds this box
 
+    def is_forced_virtual(self):
+        return self.box is not None
+
     def get_key_box(self):
         if self.box is None:
             return self.keybox
@@ -120,7 +123,6 @@
                 op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
                                   descr=ofs)
                 newoperations.append(op)
-            self._fields = None
 
     def _get_field_descr_list(self):
         _cached_sorted_fields = self._cached_sorted_fields
@@ -351,7 +353,7 @@
         if not self.optimizer.cpu.ts.CONST_NULL.same_constant(objbox):
             seo(ResOperation(rop.SETFIELD_GC, op.getarglist(), None,
                              descr = vrefinfo.descr_forced))
-        
+
         # - set 'virtual_token' to TOKEN_NONE
         args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)]
         seo(ResOperation(rop.SETFIELD_GC, args, None,
@@ -365,6 +367,14 @@
 
     def optimize_GETFIELD_GC(self, op):
         value = self.getvalue(op.getarg(0))
+        # If this is an immutable field (as indicated by op.is_always_pure())
+        # then it's safe to reuse the virtual's field, even if it has been
+        # forced, because it should never be written to again.
+        if value.is_forced_virtual() and op.is_always_pure():
+            fieldvalue = value.getfield(op.getdescr(), None)
+            if fieldvalue is not None:
+                self.make_equal_to(op.result, fieldvalue)
+                return
         if value.is_virtual():
             assert isinstance(value, AbstractVirtualValue)
             fieldvalue = value.getfield(op.getdescr(), None)
@@ -382,6 +392,7 @@
 
     def optimize_SETFIELD_GC(self, op):
         value = self.getvalue(op.getarg(0))
+
         if value.is_virtual():
             fieldvalue = self.getvalue(op.getarg(1))
             value.setfield(op.getdescr(), fieldvalue)
@@ -445,13 +456,8 @@
         ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
         self.emit_operation(op)
 
-    def propagate_forward(self, op):
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            self.emit_operation(op)
 
-optimize_ops = _findall(OptVirtualize, 'optimize_')
+dispatch_opt = make_dispatcher_method(OptVirtualize, 'optimize_',
+        default=OptVirtualize.emit_operation)
+
+OptVirtualize.propagate_forward = dispatch_opt
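
The new is_forced_virtual() check in optimize_GETFIELD_GC lets a pure getfield still be answered from the cached field value after the virtual has been forced, because an always-pure field is never written again. A toy illustration of that invariant (ToyVirtualValue is a made-up name, not the class from the patch):

    class ToyVirtualValue(object):
        def __init__(self):
            self.box = None          # stays None while the object is still virtual
            self._fields = {}
        def is_forced_virtual(self):
            return self.box is not None
        def setfield(self, descr, value):
            self._fields[descr] = value
        def getfield(self, descr, default=None):
            return self._fields.get(descr, default)

    v = ToyVirtualValue()
    v.setfield('valuedescr', 'p0')   # recorded while still virtual
    v.box = 'p1'                     # the virtual gets forced (escapes)
    # a pure read can still be folded to the known field value:
    assert v.is_forced_virtual() and v.getfield('valuedescr') == 'p0'
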
diff --git a/pypy/jit/metainterp/optimizeopt/string.py b/pypy/jit/metainterp/optimizeopt/vstring.py
rename from pypy/jit/metainterp/optimizeopt/string.py
rename to pypy/jit/metainterp/optimizeopt/vstring.py
--- a/pypy/jit/metainterp/optimizeopt/string.py
+++ b/pypy/jit/metainterp/optimizeopt/vstring.py
@@ -8,7 +8,7 @@
 from pypy.jit.metainterp.optimizeopt import optimizer, virtualize
 from pypy.jit.metainterp.optimizeopt.optimizer import CONST_0, CONST_1
 from pypy.jit.metainterp.optimizeopt.optimizer import llhelper
-from pypy.jit.metainterp.optimizeutil import _findall
+from pypy.jit.metainterp.optimizeopt.util import make_dispatcher_method
 from pypy.jit.codewriter.effectinfo import EffectInfo
 from pypy.jit.codewriter import heaptracker
 from pypy.rlib.unroll import unrolling_iterable
@@ -348,7 +348,7 @@
     optimizer.emit_operation(ResOperation(rop.INT_SUB, [box1, box2], resbox))
     return resbox
 
-def _strgetitem(optimizer, strbox, indexbox, mode):
+def _strgetitem(optimization, strbox, indexbox, mode):
     if isinstance(strbox, ConstPtr) and isinstance(indexbox, ConstInt):
         if mode is mode_string:
             s = strbox.getref(lltype.Ptr(rstr.STR))
@@ -357,7 +357,7 @@
             s = strbox.getref(lltype.Ptr(rstr.UNICODE))
             return ConstInt(ord(s.chars[indexbox.getint()]))
     resbox = BoxInt()
-    optimizer.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox],
+    optimization.emit_operation(ResOperation(mode.STRGETITEM, [strbox, indexbox],
                                       resbox))
     return resbox
 
@@ -440,8 +440,7 @@
             if vindex.is_constant():
                 return value.getitem(vindex.box.getint())
         #
-        resbox = _strgetitem(self.optimizer,
-                             value.force_box(),vindex.force_box(), mode)
+        resbox = _strgetitem(self, value.force_box(), vindex.force_box(), mode)
         return self.getvalue(resbox)
 
     def optimize_STRLEN(self, op):
@@ -650,16 +649,11 @@
             self.emit_operation(op)
             return
 
-        opnum = op.getopnum()
-        for value, func in optimize_ops:
-            if opnum == value:
-                func(self, op)
-                break
-        else:
-            self.emit_operation(op)
+        dispatch_opt(self, op)
 
 
-optimize_ops = _findall(OptString, 'optimize_')
+dispatch_opt = make_dispatcher_method(OptString, 'optimize_',
+        default=OptString.emit_operation)
 
 def _findall_call_oopspec():
     prefix = 'opt_call_stroruni_'
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -1,5 +1,5 @@
-import py, os, sys
-from pypy.rpython.lltypesystem import lltype, llmemory, rclass
+import py, sys
+from pypy.rpython.lltypesystem import lltype, rclass
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
@@ -15,13 +15,13 @@
 from pypy.jit.metainterp.jitprof import EmptyProfiler
 from pypy.jit.metainterp.jitprof import GUARDS, RECORDED_OPS, ABORT_ESCAPE
 from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG, ABORT_BRIDGE, \
-                                        ABORT_BAD_LOOP, ABORT_FORCE_QUASIIMMUT
+                                        ABORT_FORCE_QUASIIMMUT
 from pypy.jit.metainterp.jitexc import JitException, get_llexception
-from pypy.rlib.rarithmetic import intmask
 from pypy.rlib.objectmodel import specialize
-from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr, MissingLiveness
-from pypy.jit.codewriter import heaptracker, longlong
-from pypy.jit.metainterp.optimizeutil import RetraceLoop, args_dict_box, args_dict
+from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr
+from pypy.jit.codewriter import heaptracker
+from pypy.jit.metainterp.optimizeopt.util import args_dict_box
+from pypy.jit.metainterp.optimize import RetraceLoop
 
 # ____________________________________________________________
 
@@ -867,7 +867,7 @@
         any_operation = len(self.metainterp.history.operations) > 0
         jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex]
         self.verify_green_args(jitdriver_sd, greenboxes)
-        self.debug_merge_point(jdindex, self.metainterp.in_recursion,
+        self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.in_recursion,
                                greenboxes)
 
         if self.metainterp.seen_loop_header_for_jdindex < 0:
@@ -914,8 +914,10 @@
                                     assembler_call=True)
             raise ChangeFrame
 
-    def debug_merge_point(self, jd_index, in_recursion, greenkey):
+    def debug_merge_point(self, jitdriver_sd, jd_index, in_recursion, greenkey):
         # debugging: produce a DEBUG_MERGE_POINT operation
+        loc = jitdriver_sd.warmstate.get_location_str(greenkey)
+        debug_print(loc)
         args = [ConstInt(jd_index), ConstInt(in_recursion)] + greenkey
         self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None)
 
@@ -1231,7 +1233,7 @@
             effect = effectinfo.extraeffect
             if effect == effectinfo.EF_CANNOT_RAISE:
                 return self.execute_varargs(rop.CALL, allboxes, descr, False)
-            elif effect == effectinfo.EF_PURE:
+            elif effect == effectinfo.EF_ELIDABLE:
                 return self.metainterp.record_result_of_call_pure(
                     self.execute_varargs(rop.CALL, allboxes, descr, False))
             elif effect == effectinfo.EF_LOOPINVARIANT:
@@ -1927,7 +1929,6 @@
 
         self.history.inputargs = original_inputargs
         self.history.operations.pop()     # remove the JUMP
-        # FIXME: Why is self.history.inputargs not restored?
 
     def compile_bridge(self, live_arg_boxes):
         num_green_args = self.jitdriver_sd.num_green_args
@@ -1963,6 +1964,8 @@
                                               start_resumedescr, False)
         self.history.operations.pop()     # remove the JUMP
         if loop_token is None:
+            self.history.inputargs = original_inputargs
+            self.history.operations = original_operations
             return
 
         if loop_token.short_preamble:
@@ -2117,7 +2120,6 @@
     def vrefs_after_residual_call(self):
         vrefinfo = self.staticdata.virtualref_info
         for i in range(0, len(self.virtualref_boxes), 2):
-            virtualbox = self.virtualref_boxes[i]
             vrefbox = self.virtualref_boxes[i+1]
             vref = vrefbox.getref_base()
             if vrefinfo.tracing_after_residual_call(vref):
diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py
--- a/pypy/jit/metainterp/resoperation.py
+++ b/pypy/jit/metainterp/resoperation.py
@@ -478,7 +478,7 @@
     'UNICODESETITEM/3',
     #'RUNTIMENEW/1',     # ootype operation
     'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier)
-    'COND_CALL_GC_WB_ARRAY/3d', # [objptr, newvalue, arrayindex] (write barr.)
+    'COND_CALL_GC_WB_ARRAY/3d', # [objptr, arrayindex, newvalue] (write barr.)
     'DEBUG_MERGE_POINT/*',      # debugging only
     'JIT_DEBUG/*',              # debugging only
     'VIRTUAL_REF_FINISH/2',   # removed before it's passed to the backend
diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py
--- a/pypy/jit/metainterp/resume.py
+++ b/pypy/jit/metainterp/resume.py
@@ -2,15 +2,17 @@
 from pypy.jit.metainterp.history import Box, Const, ConstInt, getkind
 from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat
 from pypy.jit.metainterp.history import INT, REF, FLOAT, HOLE
+from pypy.jit.metainterp.history import AbstractDescr
 from pypy.jit.metainterp.resoperation import rop
 from pypy.jit.metainterp import jitprof
 from pypy.jit.codewriter.effectinfo import EffectInfo
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr
+from pypy.rpython import annlowlevel
 from pypy.rlib import rarithmetic, rstack
 from pypy.rlib.objectmodel import we_are_translated, specialize
 from pypy.rlib.debug import have_debug_prints, ll_assert
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
-from pypy.jit.metainterp.optimizeutil import InvalidLoop
+from pypy.jit.metainterp.optimize import InvalidLoop
 
 # Logic to encode the chain of frames and the state of the boxes at a
 # guard operation, and to decode it again.  This is a bit advanced,
@@ -82,6 +84,13 @@
                             ('nums', lltype.Array(rffi.SHORT)))
 NUMBERINGP.TO.become(NUMBERING)
 
+PENDINGFIELDSTRUCT = lltype.Struct('PendingField',
+                                   ('lldescr', annlowlevel.base_ptr_lltype()),
+                                   ('num', rffi.SHORT),
+                                   ('fieldnum', rffi.SHORT),
+                                   ('itemindex', rffi.INT))
+PENDINGFIELDSP = lltype.Ptr(lltype.GcArray(PENDINGFIELDSTRUCT))
+
 TAGMASK = 3
 
 def tag(value, tagbits):
@@ -329,7 +338,7 @@
                 value = values[box]
                 value.get_args_for_fail(self)
 
-        for _, box, fieldbox in pending_setfields:
+        for _, box, fieldbox, _ in pending_setfields:
             self.register_box(box)
             self.register_box(fieldbox)
             value = values[fieldbox]
@@ -405,13 +414,25 @@
         return False
 
     def _add_pending_fields(self, pending_setfields):
-        rd_pendingfields = None
+        rd_pendingfields = lltype.nullptr(PENDINGFIELDSP.TO)
         if pending_setfields:
-            rd_pendingfields = []
-            for descr, box, fieldbox in pending_setfields:
+            n = len(pending_setfields)
+            rd_pendingfields = lltype.malloc(PENDINGFIELDSP.TO, n)
+            for i in range(n):
+                descr, box, fieldbox, itemindex = pending_setfields[i]
+                lldescr = annlowlevel.cast_instance_to_base_ptr(descr)
                 num = self._gettagged(box)
                 fieldnum = self._gettagged(fieldbox)
-                rd_pendingfields.append((descr, num, fieldnum))
+                # the index is limited to 2147483647 (64-bit machines only)
+                if itemindex > 2147483647:
+                    from pypy.jit.metainterp import compile
+                    compile.giveup()
+                itemindex = rffi.cast(rffi.INT, itemindex)
+                #
+                rd_pendingfields[i].lldescr  = lldescr
+                rd_pendingfields[i].num      = num
+                rd_pendingfields[i].fieldnum = fieldnum
+                rd_pendingfields[i].itemindex= itemindex
         self.storage.rd_pendingfields = rd_pendingfields
 
     def _gettagged(self, box):
@@ -727,10 +748,28 @@
             self.virtuals_cache = [self.virtual_default] * len(virtuals)
 
     def _prepare_pendingfields(self, pendingfields):
-        if pendingfields is not None:
-            for descr, num, fieldnum in pendingfields:
+        if pendingfields:
+            for i in range(len(pendingfields)):
+                lldescr  = pendingfields[i].lldescr
+                num      = pendingfields[i].num
+                fieldnum = pendingfields[i].fieldnum
+                itemindex= pendingfields[i].itemindex
+                descr = annlowlevel.cast_base_ptr_to_instance(AbstractDescr,
+                                                              lldescr)
                 struct = self.decode_ref(num)
-                self.setfield(descr, struct, fieldnum)
+                itemindex = rffi.cast(lltype.Signed, itemindex)
+                if itemindex < 0:
+                    self.setfield(descr, struct, fieldnum)
+                else:
+                    self.setarrayitem(descr, struct, itemindex, fieldnum)
+
+    def setarrayitem(self, arraydescr, array, index, fieldnum):
+        if arraydescr.is_array_of_pointers():
+            self.setarrayitem_ref(arraydescr, array, index, fieldnum)
+        elif arraydescr.is_array_of_floats():
+            self.setarrayitem_float(arraydescr, array, index, fieldnum)
+        else:
+            self.setarrayitem_int(arraydescr, array, index, fieldnum)
 
     def _prepare_next_section(self, info):
         # Use info.enumerate_vars(), normally dispatching to
@@ -903,15 +942,15 @@
                                            structbox, fieldbox)
 
     def setarrayitem_int(self, arraydescr, arraybox, index, fieldnum):
-        self.setarrayitem(arraydescr, arraybox, index, fieldnum, INT)
+        self._setarrayitem(arraydescr, arraybox, index, fieldnum, INT)
 
     def setarrayitem_ref(self, arraydescr, arraybox, index, fieldnum):
-        self.setarrayitem(arraydescr, arraybox, index, fieldnum, REF)
+        self._setarrayitem(arraydescr, arraybox, index, fieldnum, REF)
 
     def setarrayitem_float(self, arraydescr, arraybox, index, fieldnum):
-        self.setarrayitem(arraydescr, arraybox, index, fieldnum, FLOAT)
+        self._setarrayitem(arraydescr, arraybox, index, fieldnum, FLOAT)
 
-    def setarrayitem(self, arraydescr, arraybox, index, fieldnum, kind):
+    def _setarrayitem(self, arraydescr, arraybox, index, fieldnum, kind):
         itembox = self.decode_box(fieldnum, kind)
         self.metainterp.execute_and_record(rop.SETARRAYITEM_GC,
                                            arraydescr, arraybox,
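
In the resume.py changes above, rd_pendingfields becomes a GC array of structs whose itemindex field encodes the kind of write: a negative itemindex means a plain setfield, a non-negative one means a setarrayitem at that index. A plain-Python sketch of that decoding convention (PendingFieldsDecoder and the tuples below are illustrative only, not the RPython structures from the patch):

    class PendingFieldsDecoder(object):
        def __init__(self):
            self.log = []
        def setfield(self, descr, struct, fieldnum):
            self.log.append(('setfield', descr, struct, fieldnum))
        def setarrayitem(self, descr, array, index, fieldnum):
            self.log.append(('setarrayitem', descr, array, index, fieldnum))
        def replay(self, pendingfields):
            for descr, struct, fieldnum, itemindex in pendingfields:
                if itemindex < 0:
                    self.setfield(descr, struct, fieldnum)
                else:
                    self.setarrayitem(descr, struct, itemindex, fieldnum)

    dec = PendingFieldsDecoder()
    dec.replay([('valuedescr', 'p1', 7, -1),     # plain field write
                ('arraydescr', 'p2', 9,  3)])    # array item write at index 3
    assert [entry[0] for entry in dec.log] == ['setfield', 'setarrayitem']
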
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -1,7 +1,7 @@
 import py
 import sys
 from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside
-from pypy.rlib.jit import loop_invariant
+from pypy.rlib.jit import loop_invariant, elidable, promote
 from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed
 from pypy.rlib.jit import unroll_safe, current_trace_length
 from pypy.jit.metainterp import pyjitpl, history
@@ -304,12 +304,12 @@
         assert res == 42
         self.check_operations_history(int_add=1, int_mul=0, call=1, guard_no_exception=0)
 
-    def test_residual_call_pure(self):
+    def test_residual_call_elidable(self):
         def externfn(x, y):
             return x * y
-        externfn._pure_function_ = True
+        externfn._elidable_function_ = True
         def f(n):
-            n = hint(n, promote=True)
+            promote(n)
             return externfn(n, n+1)
         res = self.interp_operations(f, [6])
         assert res == 42
@@ -317,10 +317,10 @@
         self.check_operations_history(int_add=0, int_mul=0,
                                       call=0, call_pure=0)
 
-    def test_residual_call_pure_1(self):
+    def test_residual_call_elidable_1(self):
+        @elidable
         def externfn(x, y):
             return x * y
-        externfn._pure_function_ = True
         def f(n):
             return externfn(n, n+1)
         res = self.interp_operations(f, [6])
@@ -329,11 +329,11 @@
         self.check_operations_history(int_add=1, int_mul=0,
                                       call=0, call_pure=1)
 
-    def test_residual_call_pure_2(self):
+    def test_residual_call_elidable_2(self):
         myjitdriver = JitDriver(greens = [], reds = ['n'])
+        @elidable
         def externfn(x):
             return x - 1
-        externfn._pure_function_ = True
         def f(n):
             while n > 0:
                 myjitdriver.can_enter_jit(n=n)
@@ -346,11 +346,11 @@
         # by optimizeopt.py
         self.check_loops(int_sub=0, call=1, call_pure=0)
 
-    def test_constfold_call_pure(self):
+    def test_constfold_call_elidable(self):
         myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
+        @elidable
         def externfn(x):
             return x - 3
-        externfn._pure_function_ = True
         def f(n, m):
             while n > 0:
                 myjitdriver.can_enter_jit(n=n, m=m)
@@ -362,11 +362,11 @@
         # the CALL_PURE is constant-folded away by optimizeopt.py
         self.check_loops(int_sub=1, call=0, call_pure=0)
 
-    def test_constfold_call_pure_2(self):
+    def test_constfold_call_elidable_2(self):
         myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
+        @elidable
         def externfn(x):
             return x - 3
-        externfn._pure_function_ = True
         class V:
             def __init__(self, value):
                 self.value = value
@@ -382,19 +382,19 @@
         # the CALL_PURE is constant-folded away by optimizeopt.py
         self.check_loops(int_sub=1, call=0, call_pure=0)
 
-    def test_pure_function_returning_object(self):
+    def test_elidable_function_returning_object(self):
         myjitdriver = JitDriver(greens = ['m'], reds = ['n'])
         class V:
             def __init__(self, x):
                 self.x = x
         v1 = V(1)
         v2 = V(2)
+        @elidable
         def externfn(x):
             if x:
                 return v1
             else:
                 return v2
-        externfn._pure_function_ = True
         def f(n, m):
             while n > 0:
                 myjitdriver.can_enter_jit(n=n, m=m)
@@ -500,7 +500,7 @@
                 y -= x
             return y
         #
-        res = self.meta_interp(f, [3, 6], repeat=7)
+        res = self.meta_interp(f, [3, 6], repeat=7, function_threshold=0)
         assert res == 6 - 4 - 5
         self.check_history(call=0)   # because the trace starts in the middle
         #
@@ -1252,7 +1252,7 @@
                 myjitdriver.jit_merge_point(x=x, l=l)
                 a = l[x]
                 x = a.g(x)
-                hint(a, promote=True)
+                promote(a)
             return x
         res = self.meta_interp(f, [299], listops=True)
         assert res == f(299)
@@ -1312,7 +1312,7 @@
                     x -= 5
                 else:
                     x -= 7
-                hint(a, promote=True)
+                promote(a)
             return x
         res = self.meta_interp(f, [299], listops=True)
         assert res == f(299)
@@ -1343,7 +1343,7 @@
                     x -= 5
                 else:
                     x -= 7
-                hint(a, promote=True)
+                promote(a)
             return x
         res = self.meta_interp(f, [299], listops=True)
         assert res == f(299)
@@ -1377,7 +1377,7 @@
                     x = a.g(x)
                 else:
                     x -= 7
-                hint(a, promote=True)
+                promote(a)
             return x
         res = self.meta_interp(f, [399], listops=True)
         assert res == f(399)
@@ -1496,7 +1496,7 @@
                     glob.a = B()
                     const = 2
                 else:
-                    const = hint(const, promote=True)
+                    promote(const)
                     x -= const
                     res += a.x
                     a = None
@@ -1531,7 +1531,7 @@
                 myjitdriver.can_enter_jit(x=x)
                 myjitdriver.jit_merge_point(x=x)
                 a = A()
-                hint(a, promote=True)
+                promote(a)
                 x -= 1
         self.meta_interp(f, [50])
         self.check_loop_count(1)
@@ -1595,9 +1595,9 @@
         self.check_loops(jit_debug=2)
 
     def test_assert_green(self):
-        def f(x, promote):
-            if promote:
-                x = hint(x, promote=True)
+        def f(x, promote_flag):
+            if promote_flag:
+                promote(x)
             assert_green(x)
             return x
         res = self.interp_operations(f, [8, 1])
@@ -1676,7 +1676,9 @@
             return a1.val + b1.val
         res = self.meta_interp(g, [6, 14])
         assert res == g(6, 14)
-        self.check_loop_count(9)
+        self.check_loop_count(8)
+        self.check_loops(getarrayitem_gc=7, everywhere=True)
+        py.test.skip("for the following, we need setarrayitem(varindex)")
         self.check_loops(getarrayitem_gc=6, everywhere=True)
 
     def test_multiple_specialied_versions_bridge(self):
@@ -1815,7 +1817,7 @@
             while y > 0:
                 myjitdriver.can_enter_jit(y=y, x=x, res=res, const=const)
                 myjitdriver.jit_merge_point(y=y, x=x, res=res, const=const)
-                const = hint(const, promote=True)
+                const = promote(const)
                 res = res.binop(A(const))
                 if y<7:
                     res = x
@@ -2000,7 +2002,7 @@
             n = sa = 0
             while n < 10:
                 myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa)
-                if 0 < a < hint(sys.maxint/2, promote=True): pass
+                if 0 < a < promote(sys.maxint/2): pass
                 if 0 < b < 100: pass
                 sa += (((((a << b) << b) << b) >> b) >> b) >> b                
                 n += 1
@@ -2045,7 +2047,7 @@
             n = sa = 0
             while n < 10:
                 myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa)
-                if -hint(sys.maxint/2, promote=True) < a < 0: pass
+                if -promote(sys.maxint/2) < a < 0: pass
                 if 0 < b < 100: pass
                 sa += (((((a << b) << b) << b) >> b) >> b) >> b                
                 n += 1
@@ -2080,7 +2082,7 @@
             n = sa = 0
             while n < 10:
                 myjitdriver.jit_merge_point(a=a, b=b, n=n, sa=sa)
-                if 0 < a < hint(sys.maxint/2, promote=True): pass
+                if 0 < a < promote(sys.maxint/2): pass
                 if 0 < b < 100: pass
                 sa += (a << b) >> b
                 n += 1
@@ -2137,7 +2139,7 @@
                 if op == 'j':
                     j += 1
                 elif op == 'c':
-                    c = hint(c, promote=True)
+                    promote(c)
                     c = 1 - c
                 elif op == '2':
                     if j < 3:
@@ -2206,7 +2208,8 @@
                 self.local_names[0] = 1
 
             def retrieve(self):
-                variables = hint(self.variables, promote=True)
+                variables = self.variables
+                promote(variables)
                 result = self.local_names[0]
                 if result == 0:
                     return -1
@@ -2230,6 +2233,148 @@
         self.check_loops(getfield_gc_pure=0)
         self.check_loops(getfield_gc_pure=2, everywhere=True)
         
+    def test_frame_finished_during_retrace(self):
+        class Base(object):
+            pass
+        class A(Base):
+            def __init__(self, a):
+                self.val = a
+                self.num = 1
+            def inc(self):
+                return A(self.val + 1)
+        class B(Base):
+            def __init__(self, a):
+                self.val = a
+                self.num = 1000
+            def inc(self):
+                return B(self.val + 1)
+        myjitdriver = JitDriver(greens = [], reds = ['sa', 'a'])
+        def f():
+            myjitdriver.set_param('threshold', 3)
+            myjitdriver.set_param('trace_eagerness', 2)
+            a = A(0)
+            sa = 0
+            while a.val < 8:
+                myjitdriver.jit_merge_point(a=a, sa=sa)
+                a = a.inc()
+                if a.val > 4:
+                    a = B(a.val)
+                sa += a.num
+            return sa
+        res = self.meta_interp(f, [])
+        assert res == f()
+        
+    def test_frame_finished_during_continued_retrace(self):
+        class Base(object):
+            pass
+        class A(Base):
+            def __init__(self, a):
+                self.val = a
+                self.num = 100
+            def inc(self):
+                return A(self.val + 1)
+        class B(Base):
+            def __init__(self, a):
+                self.val = a
+                self.num = 10000
+            def inc(self):
+                return B(self.val + 1)
+        myjitdriver = JitDriver(greens = [], reds = ['sa', 'b', 'a'])
+        def f(b):
+            myjitdriver.set_param('threshold', 6)
+            myjitdriver.set_param('trace_eagerness', 4)
+            a = A(0)
+            sa = 0
+            while a.val < 15:
+                myjitdriver.jit_merge_point(a=a, b=b, sa=sa)
+                a = a.inc()
+                if a.val > 8:
+                    a = B(a.val)
+                if b == 1:
+                    b = 2
+                else:
+                    b = 1
+                sa += a.num + b
+            return sa
+        res = self.meta_interp(f, [1])
+        assert res == f(1)
+
+    def test_remove_array_operations(self):
+        myjitdriver = JitDriver(greens = [], reds = ['a'])
+        class W_Int:
+            def __init__(self, intvalue):
+                self.intvalue = intvalue
+        def f(x):
+            a = [W_Int(x)]
+            while a[0].intvalue > 0:
+                myjitdriver.jit_merge_point(a=a)
+                a[0] = W_Int(a[0].intvalue - 3)
+            return a[0].intvalue
+        res = self.meta_interp(f, [100])
+        assert res == -2
+        #self.check_loops(getarrayitem_gc=0, setarrayitem_gc=0) -- xxx?
+
+    def test_retrace_ending_up_retracing_another_loop(self):
+
+        myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'i', 'sa'])
+        bytecode = "0+sI0+SI"
+        def f(n):
+            myjitdriver.set_param('threshold', 3)
+            myjitdriver.set_param('trace_eagerness', 1)
+            myjitdriver.set_param('retrace_limit', 5)
+            myjitdriver.set_param('function_threshold', -1)
+            pc = sa = i = 0
+            while pc < len(bytecode):
+                myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i)
+                n = hint(n, promote=True)
+                op = bytecode[pc]
+                if op == '0':
+                    i = 0
+                elif op == '+':
+                    i += 1
+                elif op == 's':
+                    sa += i
+                elif op == 'S':
+                    sa += 2
+                elif op == 'I':
+                    if i < n:
+                        pc -= 2
+                        myjitdriver.can_enter_jit(pc=pc, n=n, sa=sa, i=i)
+                        continue
+                pc += 1
+            return sa
+
+        def g(n1, n2):
+            for i in range(10):
+                f(n1)
+            for i in range(10):                
+                f(n2)
+
+        nn = [10, 3]
+        assert self.meta_interp(g, nn) == g(*nn)
+        
+        # The attempts to retrace the first loop end up retracing the
+        # second and thus fail 5 times, saturating the retrace_count. Instead,
+        # a bridge back to the preamble of the first loop is produced. A guard
+        # in this bridge is later traced, resulting in a retrace of the second
+        # loop. Thus we end up with:
+        #   1 preamble and 1 specialized version of the first loop
+        #   1 preamble and 2 specialized versions of the second loop
+        self.check_tree_loop_count(2 + 3)
+
+        # FIXME: Add a global retrace counter and test that we are not trying more than 5 times.
+        
+        def g(n):
+            for i in range(n):
+                for j in range(10):
+                    f(n-i)
+
+        res = self.meta_interp(g, [10])
+        assert res == g(10)
+        # 1 preamble and 6 specialized versions of each loop
+        self.check_tree_loop_count(2*(1 + 6))
+
+
 class TestOOtype(BasicTests, OOJitMixin):
 
     def test_oohash(self):
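
A recurring change in the hunks above replaces hint(x, promote=True) with the new promote(x) helper. A minimal sketch of the two spellings, assuming pypy.rlib.jit from this tree; run untranslated, both helpers simply return their argument:

    # sketch only, assuming pypy.rlib.jit is importable from this tree
    from pypy.rlib.jit import hint, promote

    def old_style(x):
        x = hint(x, promote=True)    # old spelling of promotion
        return x

    def new_style(x):
        x = promote(x)               # new spelling: promote() wraps the same hint
        return x

    assert old_style(7) == new_style(7) == 7
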
diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py
--- a/pypy/jit/metainterp/test/test_compile.py
+++ b/pypy/jit/metainterp/test/test_compile.py
@@ -1,3 +1,4 @@
+from pypy.config.pypyoption import get_pypy_config
 from pypy.jit.metainterp.history import LoopToken, ConstInt, History, Stats
 from pypy.jit.metainterp.history import BoxInt, INT
 from pypy.jit.metainterp.compile import insert_loop_token, compile_new_loop
@@ -5,7 +6,7 @@
 from pypy.jit.metainterp.compile import ResumeGuardCountersInt
 from pypy.jit.metainterp.compile import compile_tmp_callback
 from pypy.jit.metainterp import jitprof, typesystem, compile
-from pypy.jit.metainterp.test.test_optimizeutil import LLtypeMixin
+from pypy.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin
 from pypy.jit.tool.oparser import parse
 from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT
 
@@ -57,6 +58,7 @@
     
     logger_noopt = FakeLogger()
     logger_ops = FakeLogger()
+    config = get_pypy_config(translating=True)
 
     stats = Stats()
     profiler = jitprof.EmptyProfiler()
diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py
--- a/pypy/jit/metainterp/test/test_dict.py
+++ b/pypy/jit/metainterp/test/test_dict.py
@@ -130,6 +130,38 @@
         assert res == 50
         self.check_loops(int_mod=1)
 
+    def test_repeated_lookup(self):
+        myjitdriver = JitDriver(greens = [], reds = ['n', 'd'])
+        class Wrapper(object):
+            _immutable_fields_ = ["value"]
+            def __init__(self, value):
+                self.value = value
+        def eq_func(a, b):
+            return a.value == b.value
+        def hash_func(x):
+            return objectmodel.compute_hash(x.value)
+
+        def f(n):
+            d = None
+            while n > 0:
+                myjitdriver.jit_merge_point(n=n, d=d)
+                d = objectmodel.r_dict(eq_func, hash_func)
+                y = Wrapper(str(n))
+                d[y] = n - 1
+                n = d[y]
+            return d[Wrapper(str(n + 1))]
+
+        res = self.meta_interp(f, [100], listops=True)
+        assert res == f(50)
+        # XXX: ideally there would be 7 calls here, but repeated CALL_PURE
+        # operations with the same arguments are not folded, because we have
+        # conflicting definitions of pure. Once strhash can be appropriately
+        # folded, this should be decreased to seven.
+        self.check_loops({"call": 8, "guard_false": 1, "guard_no_exception": 5,
+                          "guard_true": 1, "int_and": 1, "int_gt": 1,
+                          "int_is_true": 1, "int_sub": 1, "jump": 1,
+                          "new_with_vtable": 1, "setfield_gc": 1})
+
 
 class TestOOtype(DictTests, OOJitMixin):
     pass
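
The new test_repeated_lookup above drives an r_dict, a dictionary with user-supplied equality and hash functions. A small standalone sketch of that rlib API (eq_key and hash_key are illustrative names, not taken from the test):

    # sketch only; r_dict takes explicit eq and hash functions
    from pypy.rlib.objectmodel import r_dict, compute_hash

    def eq_key(a, b):
        return a == b

    def hash_key(a):
        return compute_hash(a)

    d = r_dict(eq_key, hash_key)
    d["spam"] = 1
    assert d["spam"] == 1
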
diff --git a/pypy/jit/metainterp/test/test_fficall.py b/pypy/jit/metainterp/test/test_fficall.py
--- a/pypy/jit/metainterp/test/test_fficall.py
+++ b/pypy/jit/metainterp/test/test_fficall.py
@@ -1,7 +1,7 @@
 
 import py
 from pypy.rlib.rarithmetic import r_singlefloat, r_longlong, r_ulonglong
-from pypy.rlib.jit import JitDriver, hint, dont_look_inside
+from pypy.rlib.jit import JitDriver, promote, dont_look_inside
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.rlib.libffi import ArgChain, longlong2float, float2longlong
 from pypy.rlib.libffi import IS_32_BIT
@@ -49,8 +49,7 @@
             res = init_result
             while n < 10:
                 driver.jit_merge_point(n=n, res=res, func=func)
-                driver.can_enter_jit(n=n, res=res, func=func)
-                func = hint(func, promote=True)
+                promote(func)
                 argchain = ArgChain()
                 # this loop is unrolled
                 for method_name, argval in method_and_args:
diff --git a/pypy/jit/metainterp/test/test_jitdriver.py b/pypy/jit/metainterp/test/test_jitdriver.py
--- a/pypy/jit/metainterp/test/test_jitdriver.py
+++ b/pypy/jit/metainterp/test/test_jitdriver.py
@@ -113,6 +113,7 @@
             return n
         #
         def loop2(g, r):
+            myjitdriver1.set_param('function_threshold', 0)
             while r > 0:
                 myjitdriver2.can_enter_jit(g=g, r=r)
                 myjitdriver2.jit_merge_point(g=g, r=r)
diff --git a/pypy/jit/metainterp/test/test_jitprof.py b/pypy/jit/metainterp/test/test_jitprof.py
--- a/pypy/jit/metainterp/test/test_jitprof.py
+++ b/pypy/jit/metainterp/test/test_jitprof.py
@@ -1,6 +1,6 @@
 
 from pypy.jit.metainterp.warmspot import ll_meta_interp
-from pypy.rlib.jit import JitDriver, dont_look_inside, purefunction
+from pypy.rlib.jit import JitDriver, dont_look_inside, elidable
 from pypy.jit.metainterp.test.support import LLJitMixin
 from pypy.jit.metainterp import pyjitpl
 from pypy.jit.metainterp.jitprof import *
@@ -89,7 +89,7 @@
         assert profiler.calls == 1
 
     def test_blackhole_pure(self):
-        @purefunction
+        @elidable
         def g(n):
             return n+1
         
diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py
--- a/pypy/jit/metainterp/test/test_list.py
+++ b/pypy/jit/metainterp/test/test_list.py
@@ -49,7 +49,7 @@
                 x = l[n]
                 l = [3] * 100
                 l[3] = x
-                l[3] = x + 1
+                l[4] = x + 1
                 n -= 1
             return l[0]
 
diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py
--- a/pypy/jit/metainterp/test/test_logger.py
+++ b/pypy/jit/metainterp/test/test_logger.py
@@ -4,7 +4,7 @@
 from pypy.jit.metainterp import logger
 from pypy.jit.metainterp.typesystem import llhelper
 from StringIO import StringIO
-from pypy.jit.metainterp.test.test_optimizeopt import equaloplists
+from pypy.jit.metainterp.optimizeopt.util import equaloplists
 from pypy.jit.metainterp.history import AbstractDescr, LoopToken, BasicFailDescr
 from pypy.jit.backend.model import AbstractCPU
 
diff --git a/pypy/jit/metainterp/test/test_pyjitpl.py b/pypy/jit/metainterp/test/test_pyjitpl.py
--- a/pypy/jit/metainterp/test/test_pyjitpl.py
+++ b/pypy/jit/metainterp/test/test_pyjitpl.py
@@ -6,7 +6,7 @@
 from pypy.jit.metainterp.history import BoxInt, ConstInt
 from pypy.jit.metainterp.history import History
 from pypy.jit.metainterp.resoperation import ResOperation, rop
-from pypy.jit.metainterp.test.test_optimizeopt import equaloplists
+from pypy.jit.metainterp.optimizeopt.util import equaloplists
 from pypy.jit.codewriter.jitcode import JitCode
 
 
diff --git a/pypy/jit/metainterp/test/test_recursive.py b/pypy/jit/metainterp/test/test_recursive.py
--- a/pypy/jit/metainterp/test/test_recursive.py
+++ b/pypy/jit/metainterp/test/test_recursive.py
@@ -1,6 +1,6 @@
 import py
 from pypy.rlib.jit import JitDriver, we_are_jitted, hint
-from pypy.rlib.jit import unroll_safe, dont_look_inside
+from pypy.rlib.jit import unroll_safe, dont_look_inside, promote
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.debug import fatalerror
 from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
@@ -483,6 +483,7 @@
  
         def main(inline):
             myjitdriver.set_param("threshold", 10)
+            myjitdriver.set_param('function_threshold', 60)
             if inline:
                 myjitdriver.set_param('inlining', True)
             else:
@@ -925,7 +926,7 @@
                 myjitdriver.can_enter_jit(codeno=codeno, frame=frame, n=n, x=x)
                 myjitdriver.jit_merge_point(codeno=codeno, frame=frame, n=n,
                                             x=x)
-                frame.s = hint(frame.s, promote=True)
+                frame.s = promote(frame.s)
                 n -= 1
                 s = frame.s
                 assert s >= 0
@@ -1193,6 +1194,51 @@
                 i -= 1
         self.meta_interp(portal, [0, 10], inline=True)
 
+    def test_trace_from_start_always(self):
+        from pypy.rlib.nonconst import NonConstant
+        
+        driver = JitDriver(greens = ['c'], reds = ['i', 'v'])
+
+        def portal(c, i, v):
+            while i > 0:
+                driver.jit_merge_point(c=c, i=i, v=v)
+                portal(c, i - 1, v)
+                if v:
+                    driver.can_enter_jit(c=c, i=i, v=v)
+                break
+
+        def main(c, i, set_param, v):
+            if set_param:
+                driver.set_param('function_threshold', 0)
+            portal(c, i, v)
+
+        self.meta_interp(main, [10, 10, False, False], inline=True)
+        self.check_tree_loop_count(1)
+        self.check_loop_count(0)
+        self.meta_interp(main, [3, 10, True, False], inline=True)
+        self.check_tree_loop_count(0)
+        self.check_loop_count(0)
+
+    def test_trace_from_start_does_not_prevent_inlining(self):
+        driver = JitDriver(greens = ['c', 'bc'], reds = ['i'])
+        
+        def portal(bc, c, i):
+            while True:
+                driver.jit_merge_point(c=c, bc=bc, i=i)
+                if bc == 0:
+                    portal(1, 8, 0)
+                    c += 1
+                else:
+                    return
+                if c == 10: # bc == 0                    
+                    c = 0
+                    if i >= 100:
+                        return
+                    driver.can_enter_jit(c=c, bc=bc, i=i)
+                i += 1
+
+        self.meta_interp(portal, [0, 0, 0], inline=True)
+        self.check_loops(call=0, call_may_force=0)
 
 class TestLLtype(RecursiveTests, LLJitMixin):
     pass
diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py
--- a/pypy/jit/metainterp/test/test_resume.py
+++ b/pypy/jit/metainterp/test/test_resume.py
@@ -6,7 +6,7 @@
 from pypy.jit.metainterp.resume import *
 from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt
 from pypy.jit.metainterp.history import ConstPtr, ConstFloat
-from pypy.jit.metainterp.test.test_optimizeutil import LLtypeMixin
+from pypy.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin
 from pypy.jit.metainterp import executor
 from pypy.jit.codewriter import heaptracker, longlong
 
@@ -1238,7 +1238,7 @@
     liveboxes = []
     modifier._number_virtuals(liveboxes, values, 0)
     assert liveboxes == [b2s, b4s] or liveboxes == [b4s, b2s]
-    modifier._add_pending_fields([(LLtypeMixin.nextdescr, b2s, b4s)])
+    modifier._add_pending_fields([(LLtypeMixin.nextdescr, b2s, b4s, -1)])
     storage.rd_consts = memo.consts[:]
     storage.rd_numb = None
     # resume
@@ -1259,6 +1259,106 @@
     assert len(expected) == len(trace)
     assert demo55.next == demo66
 
+def test_virtual_adder_pending_fields_and_arrayitems():
+    class Storage(object):
+        pass
+    storage = Storage()
+    modifier = ResumeDataVirtualAdder(storage, None)
+    modifier._add_pending_fields([])
+    assert not storage.rd_pendingfields
+    #
+    class FieldDescr(object):
+        pass
+    field_a = FieldDescr()
+    storage = Storage()
+    modifier = ResumeDataVirtualAdder(storage, None)
+    modifier.liveboxes_from_env = {42: rffi.cast(rffi.SHORT, 1042),
+                                   61: rffi.cast(rffi.SHORT, 1061)}
+    modifier._add_pending_fields([(field_a, 42, 61, -1)])
+    pf = storage.rd_pendingfields
+    assert len(pf) == 1
+    assert (annlowlevel.cast_base_ptr_to_instance(FieldDescr, pf[0].lldescr)
+            is field_a)
+    assert rffi.cast(lltype.Signed, pf[0].num) == 1042
+    assert rffi.cast(lltype.Signed, pf[0].fieldnum) == 1061
+    assert rffi.cast(lltype.Signed, pf[0].itemindex) == -1
+    #
+    array_a = FieldDescr()
+    storage = Storage()
+    modifier = ResumeDataVirtualAdder(storage, None)
+    modifier.liveboxes_from_env = {42: rffi.cast(rffi.SHORT, 1042),
+                                   61: rffi.cast(rffi.SHORT, 1061),
+                                   62: rffi.cast(rffi.SHORT, 1062),
+                                   63: rffi.cast(rffi.SHORT, 1063)}
+    modifier._add_pending_fields([(array_a, 42, 61, 0),
+                                  (array_a, 42, 62, 2147483647)])
+    pf = storage.rd_pendingfields
+    assert len(pf) == 2
+    assert (annlowlevel.cast_base_ptr_to_instance(FieldDescr, pf[0].lldescr)
+            is array_a)
+    assert rffi.cast(lltype.Signed, pf[0].num) == 1042
+    assert rffi.cast(lltype.Signed, pf[0].fieldnum) == 1061
+    assert rffi.cast(lltype.Signed, pf[0].itemindex) == 0
+    assert (annlowlevel.cast_base_ptr_to_instance(FieldDescr, pf[1].lldescr)
+            is array_a)
+    assert rffi.cast(lltype.Signed, pf[1].num) == 1042
+    assert rffi.cast(lltype.Signed, pf[1].fieldnum) == 1062
+    assert rffi.cast(lltype.Signed, pf[1].itemindex) == 2147483647
+    #
+    from pypy.jit.metainterp.pyjitpl import SwitchToBlackhole
+    py.test.raises(SwitchToBlackhole, modifier._add_pending_fields,
+                   [(array_a, 42, 63, 2147483648)])
+
+def test_resume_reader_fields_and_arrayitems():
+    class ResumeReader(AbstractResumeDataReader):
+        def __init__(self, got=None, got_array=None):
+            self.got = got
+            self.got_array = got_array
+        def setfield(self, descr, struct, fieldnum):
+            assert lltype.typeOf(struct) is lltype.Signed
+            assert lltype.typeOf(fieldnum) is rffi.SHORT
+            fieldnum = rffi.cast(lltype.Signed, fieldnum)
+            self.got.append((descr, struct, fieldnum))
+        def setarrayitem(self, arraydescr, array, index, fieldnum):
+            assert lltype.typeOf(array) is lltype.Signed
+            assert lltype.typeOf(index) is lltype.Signed
+            assert lltype.typeOf(fieldnum) is rffi.SHORT
+            fieldnum = rffi.cast(lltype.Signed, fieldnum)
+            self.got_array.append((arraydescr, array, index, fieldnum))
+        def decode_ref(self, num):
+            return rffi.cast(lltype.Signed, num) * 100
+    got = []
+    pf = lltype.nullptr(PENDINGFIELDSP.TO)
+    ResumeReader(got)._prepare_pendingfields(pf)
+    assert got == []
+    #
+    class FieldDescr(AbstractDescr):
+        pass
+    field_a = FieldDescr()
+    field_b = FieldDescr()
+    pf = lltype.malloc(PENDINGFIELDSP.TO, 2)
+    pf[0].lldescr = annlowlevel.cast_instance_to_base_ptr(field_a)
+    pf[0].num = rffi.cast(rffi.SHORT, 1042)
+    pf[0].fieldnum = rffi.cast(rffi.SHORT, 1061)
+    pf[0].itemindex = rffi.cast(rffi.INT, -1)
+    pf[1].lldescr = annlowlevel.cast_instance_to_base_ptr(field_b)
+    pf[1].num = rffi.cast(rffi.SHORT, 2042)
+    pf[1].fieldnum = rffi.cast(rffi.SHORT, 2061)
+    pf[1].itemindex = rffi.cast(rffi.INT, -1)
+    got = []
+    ResumeReader(got)._prepare_pendingfields(pf)
+    assert got == [(field_a, 104200, 1061), (field_b, 204200, 2061)]
+    #
+    array_a = FieldDescr()
+    pf = lltype.malloc(PENDINGFIELDSP.TO, 1)
+    pf[0].lldescr = annlowlevel.cast_instance_to_base_ptr(array_a)
+    pf[0].num = rffi.cast(rffi.SHORT, 1042)
+    pf[0].fieldnum = rffi.cast(rffi.SHORT, 1063)
+    pf[0].itemindex = rffi.cast(rffi.INT, 123)
+    got_array = []
+    ResumeReader(got_array=got_array)._prepare_pendingfields(pf)
+    assert got_array == [(array_a, 104200, 123, 1063)]
+
 
 def test_invalidation_needed():
     class options:
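
The pending-fields tuples exercised above gain a fourth element, itemindex: -1 encodes an ordinary setfield, while an index >= 0 encodes a setarrayitem on that slot. A tiny illustration with placeholder descr objects (FakeDescr is made up for the sketch, not part of the diff):

    # sketch only; mirrors the (descr, num, fieldnum, itemindex) layout above
    class FakeDescr(object):
        pass

    field_a = FakeDescr()
    array_a = FakeDescr()
    pending = [
        (field_a, 42, 61, -1),    # itemindex == -1: plain setfield
        (array_a, 42, 62, 0),     # itemindex >= 0: setarrayitem at index 0
    ]
    for descr, num, fieldnum, itemindex in pending:
        assert itemindex >= -1
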
diff --git a/pypy/jit/metainterp/test/test_send.py b/pypy/jit/metainterp/test/test_send.py
--- a/pypy/jit/metainterp/test/test_send.py
+++ b/pypy/jit/metainterp/test/test_send.py
@@ -1,5 +1,5 @@
 import py
-from pypy.rlib.jit import JitDriver, hint, purefunction
+from pypy.rlib.jit import JitDriver, promote, elidable
 from pypy.jit.codewriter.policy import StopAtXPolicy
 from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
 
@@ -604,7 +604,7 @@
     def test_constfold_pure_oosend(self):
         myjitdriver = JitDriver(greens=[], reds = ['i', 'obj'])
         class A:
-            @purefunction
+            @elidable
             def foo(self):
                 return 42
         def fn(n, i):
@@ -613,7 +613,7 @@
             while i > 0:
                 myjitdriver.can_enter_jit(i=i, obj=obj)
                 myjitdriver.jit_merge_point(i=i, obj=obj)
-                obj = hint(obj, promote=True)
+                promote(obj)
                 res = obj.foo()
                 i-=1
             return res
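
Several files in this merge rename @purefunction to @elidable, as in the test_send and test_jitprof hunks above. A sketch of what the marker promises (bit_length is an illustrative helper, not from this diff):

    # sketch only; an elidable function's result depends only on its arguments,
    # so the JIT may constant-fold repeated calls with the same arguments
    from pypy.rlib.jit import elidable

    @elidable
    def bit_length(n):
        count = 0
        while n:
            n >>= 1
            count += 1
        return count

    assert bit_length(8) == 4
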
diff --git a/pypy/jit/metainterp/test/test_virtual.py b/pypy/jit/metainterp/test/test_virtual.py
--- a/pypy/jit/metainterp/test/test_virtual.py
+++ b/pypy/jit/metainterp/test/test_virtual.py
@@ -1,5 +1,5 @@
 import py
-from pypy.rlib.jit import JitDriver, hint
+from pypy.rlib.jit import JitDriver, promote
 from pypy.rlib.objectmodel import compute_unique_id
 from pypy.jit.codewriter.policy import StopAtXPolicy
 from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
@@ -300,7 +300,7 @@
             while n > 0:
                 myjitdriver.can_enter_jit(n=n, i=i, stufflist=stufflist)
                 myjitdriver.jit_merge_point(n=n, i=i, stufflist=stufflist)
-                i = hint(i, promote=True)
+                promote(i)
                 v = Stuff(i)
                 n -= stufflist.lst[v.x].x
             return n
diff --git a/pypy/jit/metainterp/test/test_virtualizable.py b/pypy/jit/metainterp/test/test_virtualizable.py
--- a/pypy/jit/metainterp/test/test_virtualizable.py
+++ b/pypy/jit/metainterp/test/test_virtualizable.py
@@ -5,13 +5,13 @@
 from pypy.rpython.rclass import IR_IMMUTABLE, IR_IMMUTABLE_ARRAY
 from pypy.jit.codewriter.policy import StopAtXPolicy
 from pypy.jit.codewriter import heaptracker
-from pypy.rlib.jit import JitDriver, hint, dont_look_inside
+from pypy.rlib.jit import JitDriver, hint, dont_look_inside, promote
 from pypy.rlib.rarithmetic import intmask
 from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
 from pypy.rpython.rclass import FieldListAccessor
 from pypy.jit.metainterp.warmspot import get_stats, get_translator
 from pypy.jit.metainterp import history
-from pypy.jit.metainterp.test.test_optimizeutil import LLtypeMixin
+from pypy.jit.metainterp.optimizeopt.test.test_util import LLtypeMixin
 
 def promote_virtualizable(*args):
     pass
@@ -480,7 +480,7 @@
             while n > 0:
                 myjitdriver.can_enter_jit(frame=frame, n=n, x=x)
                 myjitdriver.jit_merge_point(frame=frame, n=n, x=x)
-                frame.s = hint(frame.s, promote=True)
+                frame.s = promote(frame.s)
                 n -= 1
                 s = frame.s
                 assert s >= 0
diff --git a/pypy/jit/metainterp/virtualref.py b/pypy/jit/metainterp/virtualref.py
--- a/pypy/jit/metainterp/virtualref.py
+++ b/pypy/jit/metainterp/virtualref.py
@@ -1,5 +1,5 @@
 from pypy.rpython.rmodel import inputconst, log
-from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass
+from pypy.rpython.lltypesystem import lltype, llmemory, rclass
 from pypy.jit.metainterp import history
 from pypy.jit.codewriter import heaptracker
 from pypy.rlib.jit import InvalidVirtualRef
diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py
--- a/pypy/jit/metainterp/warmspot.py
+++ b/pypy/jit/metainterp/warmspot.py
@@ -1,6 +1,5 @@
 import sys, py
-from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr
-from pypy.rpython.ootypesystem import ootype
+from pypy.rpython.lltypesystem import lltype, llmemory
 from pypy.rpython.annlowlevel import llhelper, MixLevelHelperAnnotator,\
      cast_base_ptr_to_instance, hlstr
 from pypy.annotation import model as annmodel
@@ -10,16 +9,12 @@
 from pypy.objspace.flow.model import checkgraph, Link, copygraph
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.unroll import unrolling_iterable
-from pypy.rlib.rarithmetic import r_uint, intmask
-from pypy.rlib.debug import debug_print, fatalerror
-from pypy.rlib.debug import debug_start, debug_stop
-from pypy.rpython.lltypesystem.lloperation import llop
-from pypy.translator.simplify import get_funcobj, get_functype
+from pypy.rlib.debug import fatalerror
+from pypy.translator.simplify import get_functype
 from pypy.translator.unsimplify import call_final_function
 
 from pypy.jit.metainterp import history, pyjitpl, gc, memmgr
-from pypy.jit.metainterp.pyjitpl import MetaInterpStaticData, MetaInterp
-from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper
+from pypy.jit.metainterp.pyjitpl import MetaInterpStaticData
 from pypy.jit.metainterp.jitprof import Profiler, EmptyProfiler
 from pypy.jit.metainterp.jitexc import JitException
 from pypy.jit.metainterp.jitdriver import JitDriverStaticData
@@ -66,6 +61,7 @@
 def jittify_and_run(interp, graph, args, repeat=1,
                     backendopt=False, trace_limit=sys.maxint,
                     inline=False, loop_longevity=0, retrace_limit=5,
+                    function_threshold=4,
                     enable_opts=ALL_OPTS_NAMES, **kwds):
     from pypy.config.config import ConfigError
     translator = interp.typer.annotator.translator
@@ -77,9 +73,14 @@
         translator.config.translation.list_comprehension_operations = True
     except ConfigError:
         pass
+    try:
+        translator.config.translation.jit_ffi = True
+    except ConfigError:
+        pass
     warmrunnerdesc = WarmRunnerDesc(translator, backendopt=backendopt, **kwds)
     for jd in warmrunnerdesc.jitdrivers_sd:
         jd.warmstate.set_param_threshold(3)          # for tests
+        jd.warmstate.set_param_function_threshold(function_threshold)
         jd.warmstate.set_param_trace_eagerness(2)    # for tests
         jd.warmstate.set_param_trace_limit(trace_limit)
         jd.warmstate.set_param_inlining(inline)
@@ -291,9 +292,6 @@
         self.stats = stats
         if translate_support_code:
             self.annhelper = MixLevelHelperAnnotator(self.translator.rtyper)
-            annhelper = self.annhelper
-        else:
-            annhelper = None
         cpu = CPUClass(self.translator.rtyper, self.stats, self.opt,
                        translate_support_code, gcdescr=self.gcdescr)
         self.cpu = cpu
@@ -422,7 +420,7 @@
         if self.translator.rtyper.type_system.name == 'lltypesystem':
             def maybe_enter_jit(*args):
                 try:
-                    maybe_compile_and_run(*args)
+                    maybe_compile_and_run(state.increment_threshold, *args)
                 except JitException:
                     raise     # go through
                 except Exception, e:
@@ -430,15 +428,12 @@
             maybe_enter_jit._always_inline_ = True
         else:
             def maybe_enter_jit(*args):
-                maybe_compile_and_run(*args)
+                maybe_compile_and_run(state.increment_threshold, *args)
             maybe_enter_jit._always_inline_ = True
         jd._maybe_enter_jit_fn = maybe_enter_jit
 
-        can_inline = state.can_inline_greenargs
-        num_green_args = jd.num_green_args
         def maybe_enter_from_start(*args):
-            if not can_inline(*args[:num_green_args]):
-                maybe_compile_and_run(*args)
+            maybe_compile_and_run(state.increment_function_threshold, *args)
         maybe_enter_from_start._always_inline_ = True
         jd._maybe_enter_from_start_fn = maybe_enter_from_start
 
@@ -549,7 +544,6 @@
             self.rewrite_can_enter_jit(jd, sublist)
 
     def rewrite_can_enter_jit(self, jd, can_enter_jits):
-        FUNC = jd._JIT_ENTER_FUNCTYPE
         FUNCPTR = jd._PTR_JIT_ENTER_FUNCTYPE
         jit_enter_fnptr = self.helper_func(FUNCPTR, jd._maybe_enter_jit_fn)
 
diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py
--- a/pypy/jit/metainterp/warmstate.py
+++ b/pypy/jit/metainterp/warmstate.py
@@ -1,7 +1,7 @@
 import sys, weakref
 from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rffi
 from pypy.rpython.ootypesystem import ootype
-from pypy.rpython.annlowlevel import hlstr, llstr, cast_base_ptr_to_instance
+from pypy.rpython.annlowlevel import hlstr, cast_base_ptr_to_instance
 from pypy.rpython.annlowlevel import cast_object_to_ptr
 from pypy.rlib.objectmodel import specialize, we_are_translated, r_dict
 from pypy.rlib.rarithmetic import intmask
@@ -208,15 +208,20 @@
             meth = getattr(self, 'set_param_' + name)
             meth(default_value)
 
-    def set_param_threshold(self, threshold):
+    def _compute_threshold(self, threshold):
         if threshold <= 0:
-            self.increment_threshold = 0   # never reach the THRESHOLD_LIMIT
-            return
+            return 0 # never reach the THRESHOLD_LIMIT
         if threshold < 2:
             threshold = 2
-        self.increment_threshold = (self.THRESHOLD_LIMIT // threshold) + 1
+        return (self.THRESHOLD_LIMIT // threshold) + 1
         # the number is at least 1, and at most about half THRESHOLD_LIMIT
 
+    def set_param_threshold(self, threshold):
+        self.increment_threshold = self._compute_threshold(threshold)
+
+    def set_param_function_threshold(self, threshold):
+        self.increment_function_threshold = self._compute_threshold(threshold)
+
     def set_param_trace_eagerness(self, value):
         self.trace_eagerness = value
 
@@ -232,7 +237,7 @@
         d = {}
         if NonConstant(False):
             value = 'blah' # not a constant ''
-        if value is None:
+        if value is None or value == 'all':
             value = ALL_OPTS_NAMES
         for name in value.split(":"):
             if name:
@@ -291,7 +296,7 @@
         self.make_jitdriver_callbacks()
         confirm_enter_jit = self.confirm_enter_jit
 
-        def maybe_compile_and_run(*args):
+        def maybe_compile_and_run(threshold, *args):
             """Entry point to the JIT.  Called at the point with the
             can_enter_jit() hint.
             """
@@ -307,7 +312,7 @@
 
             if cell.counter >= 0:
                 # update the profiling counter
-                n = cell.counter + self.increment_threshold
+                n = cell.counter + threshold
                 if n <= self.THRESHOLD_LIMIT:       # bound not reached
                     cell.counter = n
                     return
@@ -497,7 +502,6 @@
         if hasattr(self, 'set_future_values'):
             return self.set_future_values
 
-        warmrunnerdesc = self.warmrunnerdesc
         jitdriver_sd   = self.jitdriver_sd
         cpu = self.cpu
         vinfo = jitdriver_sd.virtualizable_info
@@ -513,7 +517,6 @@
         #
         if vinfo is not None:
             i0 = len(jitdriver_sd._red_args_types)
-            num_green_args = jitdriver_sd.num_green_args
             index_of_virtualizable = jitdriver_sd.index_of_virtualizable
             vable_static_fields = unrolling_iterable(
                 zip(vinfo.static_extra_types, vinfo.static_fields))
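
The _compute_threshold refactoring above turns both threshold parameters into per-call counter increments: every visit adds the increment to the cell counter, and compilation starts once the counter passes THRESHOLD_LIMIT, so a larger threshold means a smaller increment and more passes before compiling. A standalone sketch of the arithmetic (the THRESHOLD_LIMIT value here is assumed for illustration, not taken from the diff):

    # sketch only; illustrates the increment arithmetic, not the real class
    THRESHOLD_LIMIT = (2 ** 31 - 1) // 2    # assumed value for the sketch

    def compute_increment(threshold):
        if threshold <= 0:
            return 0                        # never reach THRESHOLD_LIMIT
        if threshold < 2:
            threshold = 2
        return (THRESHOLD_LIMIT // threshold) + 1

    # with threshold=3, roughly three hot passes push the counter past the limit
    assert compute_increment(3) * 3 > THRESHOLD_LIMIT
    assert compute_increment(0) == 0        # threshold <= 0 never compiles
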
diff --git a/pypy/jit/tl/spli/interpreter.py b/pypy/jit/tl/spli/interpreter.py
--- a/pypy/jit/tl/spli/interpreter.py
+++ b/pypy/jit/tl/spli/interpreter.py
@@ -2,7 +2,7 @@
 from pypy.tool import stdlib_opcode
 from pypy.jit.tl.spli import objects, pycode
 from pypy.rlib.unroll import unrolling_iterable
-from pypy.rlib.jit import JitDriver, hint, dont_look_inside
+from pypy.rlib.jit import JitDriver, promote, dont_look_inside
 from pypy.rlib.objectmodel import we_are_translated
 
 opcode_method_names = stdlib_opcode.host_bytecode_spec.method_names
@@ -78,7 +78,7 @@
         while True:
             jitdriver.jit_merge_point(code=code, instr_index=instr_index,
                                       frame=self)
-            self.stack_depth = hint(self.stack_depth, promote=True)
+            self.stack_depth = promote(self.stack_depth)
             op = ord(code[instr_index])
             instr_index += 1
             if op >= HAVE_ARGUMENT:
diff --git a/pypy/jit/tl/tiny2.py b/pypy/jit/tl/tiny2.py
--- a/pypy/jit/tl/tiny2.py
+++ b/pypy/jit/tl/tiny2.py
@@ -27,7 +27,7 @@
     { #1 #1 1 SUB ->#1 #1 }    => when called with 5, gives '5 4 3 2 1'
 
 """
-from pypy.rlib.jit import hint
+from pypy.rlib.jit import hint, promote
 
 #
 # See pypy/doc/jit.txt for a higher-level overview of the JIT techniques
@@ -75,9 +75,9 @@
     # ones.  The JIT compiler cannot look into indirect calls, but it
     # can analyze and inline the code in directly-called functions.
     y = stack.pop()
-    hint(y.__class__, promote=True)
+    promote(y.__class__)
     x = stack.pop()
-    hint(x.__class__, promote=True)
+    promote(x.__class__)
     try:
         z = IntBox(func_int(x.as_int(), y.as_int()))
     except ValueError:
@@ -108,7 +108,7 @@
     # doesn't have to worry about the 'args' list being unpredictably
     # modified.
     oldargs = args
-    argcount = hint(len(oldargs), promote=True)
+    argcount = promote(len(oldargs))
     args = []
     n = 0
     while n < argcount:
@@ -160,8 +160,7 @@
                 # read out of the 'loops' list will be a compile-time constant
                 # because it was pushed as a compile-time constant by the '{'
                 # case above into 'loops', which is a virtual list, so the
-                # promotion below is just a way to make the colors match.
-                pos = hint(pos, promote=True)
+                promote(pos)
         else:
             stack.append(StrBox(opcode))
     return stack
diff --git a/pypy/jit/tl/tiny2_hotpath.py b/pypy/jit/tl/tiny2_hotpath.py
--- a/pypy/jit/tl/tiny2_hotpath.py
+++ b/pypy/jit/tl/tiny2_hotpath.py
@@ -27,7 +27,7 @@
     { #1 #1 1 SUB ->#1 #1 }    => when called with 5, gives '5 4 3 2 1'
 
 """
-from pypy.rlib.jit import hint, JitDriver
+from pypy.rlib.jit import hint, promote, JitDriver
 
 #
 # See pypy/doc/jit.txt for a higher-level overview of the JIT techniques
@@ -77,9 +77,9 @@
     # ones.  The JIT compiler cannot look into indirect calls, but it
     # can analyze and inline the code in directly-called functions.
     stack, y = stack.pop()
-    hint(y.__class__, promote=True)
+    promote(y.__class__)
     stack, x = stack.pop()
-    hint(x.__class__, promote=True)
+    promote(x.__class__)
     try:
         z = IntBox(func_int(x.as_int(), y.as_int()))
     except ValueError:
@@ -120,7 +120,7 @@
         # modified.
         oldloops = invariants
         oldargs = reds.args
-        argcount = hint(len(oldargs), promote=True)
+        argcount = promote(len(oldargs))
         args = []
         n = 0
         while n < argcount:
@@ -189,7 +189,7 @@
                 # because it was pushed as a compile-time constant by the '{'
                 # case above into 'loops', which is a virtual list, so the
                 # promotion below is just a way to make the colors match.
-                pos = hint(pos, promote=True)
+                pos = promote(pos)
                 tinyjitdriver.can_enter_jit(args=args, loops=loops, stack=stack,
                                             bytecode=bytecode, pos=pos)
         else:
diff --git a/pypy/jit/tl/tiny3_hotpath.py b/pypy/jit/tl/tiny3_hotpath.py
--- a/pypy/jit/tl/tiny3_hotpath.py
+++ b/pypy/jit/tl/tiny3_hotpath.py
@@ -28,7 +28,7 @@
     { #1 #1 1 SUB ->#1 #1 }    => when called with 5, gives '5 4 3 2 1'
 
 """
-from pypy.rlib.jit import hint, JitDriver
+from pypy.rlib.jit import promote, hint, JitDriver
 from pypy.rlib.objectmodel import specialize
 
 #
@@ -83,9 +83,9 @@
     # ones.  The JIT compiler cannot look into indirect calls, but it
     # can analyze and inline the code in directly-called functions.
     stack, y = stack.pop()
-    hint(y.__class__, promote=True)
+    promote(y.__class__)
     stack, x = stack.pop()
-    hint(x.__class__, promote=True)
+    promote(x.__class__)
     if isinstance(x, IntBox) and isinstance(y, IntBox):
         z = IntBox(func_int(x.as_int(), y.as_int()))
     else:
@@ -125,7 +125,7 @@
         # modified.
         oldloops = invariants
         oldargs = reds.args
-        argcount = hint(len(oldargs), promote=True)
+        argcount = promote(len(oldargs))
         args = []
         n = 0
         while n < argcount:
@@ -194,7 +194,7 @@
                 # because it was pushed as a compile-time constant by the '{'
                 # case above into 'loops', which is a virtual list, so the
                 # promotion below is just a way to make the colors match.
-                pos = hint(pos, promote=True)
+                pos = promote(pos)
                 tinyjitdriver.can_enter_jit(args=args, loops=loops, stack=stack,
                                             bytecode=bytecode, pos=pos)
         else:
diff --git a/pypy/jit/tl/tl.py b/pypy/jit/tl/tl.py
--- a/pypy/jit/tl/tl.py
+++ b/pypy/jit/tl/tl.py
@@ -2,7 +2,7 @@
 
 import py
 from pypy.jit.tl.tlopcode import *
-from pypy.rlib.jit import JitDriver, hint, dont_look_inside
+from pypy.rlib.jit import JitDriver, hint, dont_look_inside, promote
 
 def char2int(c):
     t = ord(c)
@@ -81,7 +81,7 @@
             myjitdriver.jit_merge_point(pc=pc, code=code,
                                         stack=stack, inputarg=inputarg)
             opcode = ord(code[pc])
-            stack.stackpos = hint(stack.stackpos, promote=True)
+            stack.stackpos = promote(stack.stackpos)
             pc += 1
 
             if opcode == NOP:
diff --git a/pypy/jit/tl/tlc.py b/pypy/jit/tl/tlc.py
--- a/pypy/jit/tl/tlc.py
+++ b/pypy/jit/tl/tlc.py
@@ -5,7 +5,7 @@
 from pypy.rlib.objectmodel import specialize, we_are_translated
 from pypy.jit.tl.tlopcode import *
 from pypy.jit.tl import tlopcode
-from pypy.rlib.jit import JitDriver
+from pypy.rlib.jit import JitDriver, elidable
 
 class Obj(object):
 
@@ -71,6 +71,7 @@
 
     classes = [] # [(descr, cls), ...]
 
+    @elidable
     def get(key):
         for descr, cls in Class.classes:
             if key.attributes == descr.attributes and\
@@ -79,7 +80,6 @@
         result = Class(key)
         Class.classes.append((key, result))
         return result
-    get._pure_function_ = True
     get = staticmethod(get)
 
     def __init__(self, descr):
diff --git a/pypy/jit/tool/oparser.py b/pypy/jit/tool/oparser.py
--- a/pypy/jit/tool/oparser.py
+++ b/pypy/jit/tool/oparser.py
@@ -181,11 +181,8 @@
         args = []
         descr = None
         if argspec.strip():
-            if opname == 'debug_merge_point':
-                allargs = argspec.split(',', 1)
-            else:
-                allargs = [arg for arg in argspec.split(",")
-                           if arg != '']
+            allargs = [arg for arg in argspec.split(",")
+                       if arg != '']
 
             poss_descr = allargs[-1].strip()
             if poss_descr.startswith('descr='):
diff --git a/pypy/jit/tool/pypytrace-mode.el b/pypy/jit/tool/pypytrace-mode.el
--- a/pypy/jit/tool/pypytrace-mode.el
+++ b/pypy/jit/tool/pypytrace-mode.el
@@ -32,7 +32,7 @@
     ("<.*FieldDescr \\([^ ]*\\)" (1 'font-lock-variable-name-face))
     ;; comment out debug_merge_point, but then highlight specific part of it
     ("^debug_merge_point.*" . font-lock-comment-face)
-    ("^\\(debug_merge_point\\).*code object\\(.*\\), file \\('.*'\\), \\(line .*\\)> \\(.*\\)"
+    ("^\\(debug_merge_point\\).*code object\\(.*\\). file \\('.*'\\). \\(line .*\\)> \\(.*\\)"
      (1 'compilation-warning t)
      (2 'escape-glyph t)
      (3 'font-lock-string-face t)
diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py
--- a/pypy/jit/tool/test/test_oparser.py
+++ b/pypy/jit/tool/test/test_oparser.py
@@ -147,13 +147,13 @@
         []
         debug_merge_point(0, "info")
         debug_merge_point(0, 'info')
-        debug_merge_point(1, '<some ('other,')> info')
+        debug_merge_point(1, '<some ('other.')> info')
         debug_merge_point(0, '(stuff) #1')
         '''
         loop = self.parse(x)
         assert loop.operations[0].getarg(1)._get_str() == 'info'
         assert loop.operations[1].getarg(1)._get_str() == 'info'
-        assert loop.operations[2].getarg(1)._get_str() == "<some ('other,')> info"
+        assert loop.operations[2].getarg(1)._get_str() == "<some ('other.')> info"
         assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1"
 
 
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py
--- a/pypy/module/__builtin__/__init__.py
+++ b/pypy/module/__builtin__/__init__.py
@@ -5,20 +5,6 @@
 
 # put builtins here that should be optimized somehow
 
-OPTIMIZED_BUILTINS = ["len", "range", "xrange", "min", "max", "enumerate",
-        "isinstance", "type", "zip", "file", "format", "open", "abs", "chr",
-        "unichr", "ord", "pow", "repr", "hash", "oct", "hex", "round", "cmp",
-        "getattr", "setattr", "delattr", "callable", "int", "str", "float"]
-
-assert len(OPTIMIZED_BUILTINS) <= 256
-
-BUILTIN_TO_INDEX = {}
-
-for i, name in enumerate(OPTIMIZED_BUILTINS):
-    BUILTIN_TO_INDEX[name] = i
-
-assert len(OPTIMIZED_BUILTINS) == len(BUILTIN_TO_INDEX)
-
 class Module(MixedModule):
     """Built-in functions, exceptions, and other objects."""
     expose__file__attribute = False
@@ -141,9 +127,6 @@
     def setup_after_space_initialization(self):
         """NOT_RPYTHON"""
         space = self.space
-        self.builtins_by_index = [None] * len(OPTIMIZED_BUILTINS)
-        for i, name in enumerate(OPTIMIZED_BUILTINS):
-            self.builtins_by_index[i] = space.getattr(self, space.wrap(name))
         # install the more general version of isinstance() & co. in the space
         from pypy.module.__builtin__ import abstractinst as ab
         space.abstract_isinstance_w = ab.abstract_isinstance_w.__get__(space)
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -294,7 +294,7 @@
             break
         new_frame = space.createframe(code, w_func.w_func_globals,
                                       w_func.closure)
-        new_frame.fastlocals_w[0] = w_item
+        new_frame.locals_stack_w[0] = w_item
         w_res = new_frame.run()
         result_w.append(w_res)
     return result_w
diff --git a/pypy/module/__builtin__/interp_classobj.py b/pypy/module/__builtin__/interp_classobj.py
--- a/pypy/module/__builtin__/interp_classobj.py
+++ b/pypy/module/__builtin__/interp_classobj.py
@@ -12,7 +12,7 @@
 
 
 def raise_type_err(space, argument, expected, w_obj):
-    type_name = space.type(w_obj).getname(space, '?')
+    type_name = space.type(w_obj).getname(space)
     raise operationerrfmt(space.w_TypeError,
                           "argument %s must be %s, not %s",
                           argument, expected, type_name)
diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py
--- a/pypy/module/__builtin__/test/test_builtin.py
+++ b/pypy/module/__builtin__/test/test_builtin.py
@@ -1,5 +1,6 @@
 import autopath
 import sys
+from pypy import conftest
 
 class AppTestBuiltinApp:
     def setup_class(cls):
@@ -15,6 +16,15 @@
             cls.w_sane_lookup = cls.space.wrap(True)
         except KeyError:
             cls.w_sane_lookup = cls.space.wrap(False)
+        # Starting with CPython 2.6, when the stack is nearly exhausted we
+        # can get a random error instead of just a RuntimeError.  For
+        # example, if an object x has a __getattr__, we can get an
+        # AttributeError if attempting to call x.__getattr__ runs out
+        # of stack.  That's annoying, so we just work around it.
+        if conftest.option.runappdirect:
+            cls.w_safe_runtimerror = cls.space.wrap(True)
+        else:
+            cls.w_safe_runtimerror = cls.space.wrap(sys.version_info < (2, 6))
 
     def test_bytes_alias(self):
         assert bytes is str
@@ -399,6 +409,8 @@
     def test_cmp_cyclic(self):
         if not self.sane_lookup:
             skip("underlying Python implementation has insane dict lookup")
+        if not self.safe_runtimerror:
+            skip("underlying Python may raise random exceptions on stack ovf")
         a = []; a.append(a)
         b = []; b.append(b)
         from UserList import UserList
@@ -619,62 +631,6 @@
         raises(TypeError, pr, end=3)
         raises(TypeError, pr, sep=42)
 
-class AppTestBuiltinOptimized(object):
-    def setup_class(cls):
-        from pypy.conftest import gettestobjspace
-        cls.space = gettestobjspace(**{"objspace.opcodes.CALL_LIKELY_BUILTIN": True})
-
-    # hum, we need to invoke the compiler explicitely
-    def test_xrange_len(self):
-        s = """def test():
-        x = xrange(33)
-        assert len(x) == 33
-        x = xrange(33.2)
-        assert len(x) == 33
-        x = xrange(33,0,-1)
-        assert len(x) == 33
-        x = xrange(33,0)
-        assert len(x) == 0
-        x = xrange(33,0.2)
-        assert len(x) == 0
-        x = xrange(0,33)
-        assert len(x) == 33
-        x = xrange(0,33,-1)
-        assert len(x) == 0
-        x = xrange(0,33,2)
-        assert len(x) == 17
-        x = xrange(0,32,2)
-        assert len(x) == 16
-        """
-        ns = {}
-        exec s in ns
-        ns["test"]()
-
-    def test_delete_from_builtins(self):
-        s = """ """
-        # XXX write this test!
-
-    def test_shadow_case_bound_method(self):
-        s = """def test(l):
-        n = len(l)
-        old_len = len
-        class A(object):
-            x = 5
-            def length(self, o):
-                return self.x*old_len(o)
-        import __builtin__
-        __builtin__.len = A().length
-        try:
-            m = len(l)
-        finally:
-            __builtin__.len = old_len
-        return n+m
-        """
-        ns = {}
-        exec s in ns
-        res = ns["test"]([2,3,4])
-        assert res == 18
-
     def test_round(self):
         assert round(11.234) == 11.0
         assert round(11.234, -1) == 10.0
diff --git a/pypy/module/__builtin__/test/test_classobj.py b/pypy/module/__builtin__/test/test_classobj.py
--- a/pypy/module/__builtin__/test/test_classobj.py
+++ b/pypy/module/__builtin__/test/test_classobj.py
@@ -987,9 +987,9 @@
         if option.runappdirect:
             py.test.skip("can only be run on py.py")
         def is_strdict(space, w_class):
-            from pypy.objspace.std.dictmultiobject import StrDictImplementation
+            from pypy.objspace.std.dictmultiobject import StringDictStrategy
             w_d = w_class.getdict(space)
-            return space.wrap(isinstance(w_d, StrDictImplementation) and w_d.r_dict_content is None)
+            return space.wrap(isinstance(w_d.strategy, StringDictStrategy))
 
         cls.w_is_strdict = cls.space.wrap(gateway.interp2app(is_strdict))
 
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -3,6 +3,14 @@
 from pypy.interpreter.mixedmodule import MixedModule
 from pypy.module.imp.importing import get_pyc_magic
 
+
+class BuildersModule(MixedModule):
+    appleveldefs = {}
+
+    interpleveldefs = {
+        "UnicodeBuilder": "interp_builders.W_UnicodeBuilder",
+    }
+
 class Module(MixedModule):
     appleveldefs = {
     }
@@ -19,6 +27,10 @@
         'lookup_special'            : 'interp_magic.lookup_special',
     }
 
+    submodules = {
+        "builders": BuildersModule,
+    }
+
     def setup_after_space_initialization(self):
         """NOT_RPYTHON"""
         if not self.space.config.translating:
diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/interp_builders.py
@@ -0,0 +1,50 @@
+from pypy.interpreter.baseobjspace import Wrappable
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.gateway import interp2app, unwrap_spec
+from pypy.interpreter.typedef import TypeDef
+from pypy.rlib.rstring import UnicodeBuilder
+
+
+class W_UnicodeBuilder(Wrappable):
+    def __init__(self, space, size):
+        if size == -1:
+            self.builder = UnicodeBuilder()
+        else:
+            self.builder = UnicodeBuilder(size)
+        self.done = False
+
+    def _check_done(self, space):
+        if self.done:
+            raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder"))
+
+    @unwrap_spec(size=int)
+    def descr__new__(space, w_subtype, size=-1):
+        return W_UnicodeBuilder(space, size)
+
+    @unwrap_spec(s=unicode)
+    def descr_append(self, space, s):
+        self._check_done(space)
+        self.builder.append(s)
+
+    @unwrap_spec(s=unicode, start=int, end=int)
+    def descr_append_slice(self, space, s, start, end):
+        self._check_done(space)
+        if not 0 <= start <= end <= len(s):
+            raise OperationError(space.w_ValueError, space.wrap("bad start/stop"))
+        self.builder.append_slice(s, start, end)
+
+    def descr_build(self, space):
+        self._check_done(space)
+        w_s = space.wrap(self.builder.build())
+        self.done = True
+        return w_s
+
+
+W_UnicodeBuilder.typedef = TypeDef("UnicodeBuilder",
+    __new__ = interp2app(W_UnicodeBuilder.descr__new__.im_func),
+
+    append = interp2app(W_UnicodeBuilder.descr_append),
+    append_slice = interp2app(W_UnicodeBuilder.descr_append_slice),
+    build = interp2app(W_UnicodeBuilder.descr_build),
+)
+W_UnicodeBuilder.typedef.acceptable_as_base_class = False
\ No newline at end of file
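
W_UnicodeBuilder above is a thin app-level wrapper around the rlib builder. A quick sketch of the underlying interp-level API it delegates to:

    # sketch only; the rlib builder that W_UnicodeBuilder wraps
    from pypy.rlib.rstring import UnicodeBuilder

    b = UnicodeBuilder(6)                # optional size hint, as in W_UnicodeBuilder(size)
    b.append(u"abc")
    b.append_slice(u"0123456789", 1, 4)
    assert b.build() == u"abc123"
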
diff --git a/pypy/module/__pypy__/interp_debug.py b/pypy/module/__pypy__/interp_debug.py
--- a/pypy/module/__pypy__/interp_debug.py
+++ b/pypy/module/__pypy__/interp_debug.py
@@ -1,15 +1,19 @@
 from pypy.interpreter.gateway import interp2app, NoneNotWrapped, unwrap_spec
 from pypy.interpreter.error import OperationError
-from pypy.rlib import debug
+from pypy.rlib import debug, jit
 
+
+ at jit.dont_look_inside
 @unwrap_spec(category=str)
 def debug_start(space, category):
     debug.debug_start(category)
 
+ at jit.dont_look_inside
 def debug_print(space, args_w):
     parts = [space.str_w(space.str(w_item)) for w_item in args_w]
     debug.debug_print(' '.join(parts))
 
+ at jit.dont_look_inside
 @unwrap_spec(category=str)
 def debug_stop(space, category):
     debug.debug_stop(category)
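
The debug entry points above gain @jit.dont_look_inside so the tracer treats them as opaque residual calls instead of tracing into the debug machinery. A minimal sketch of the decorator on an illustrative helper (log_event is not from this diff):

    # sketch only; dont_look_inside keeps the JIT from tracing into the helper
    from pypy.rlib import jit

    @jit.dont_look_inside
    def log_event(name):
        print 'event:', name             # the side effect stays outside the trace
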
diff --git a/pypy/module/__pypy__/test/test_builders.py b/pypy/module/__pypy__/test/test_builders.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/test/test_builders.py
@@ -0,0 +1,34 @@
+from pypy.conftest import gettestobjspace
+
+
+class AppTestBuilders(object):
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=['__pypy__'])
+
+    def test_simple(self):
+        from __pypy__.builders import UnicodeBuilder
+        b = UnicodeBuilder()
+        b.append(u"abc")
+        b.append(u"123")
+        b.append(u"1")
+        s = b.build()
+        assert s == u"abc1231"
+        raises(ValueError, b.build)
+        raises(ValueError, b.append, u"123")
+
+    def test_preallocate(self):
+        from __pypy__.builders import UnicodeBuilder
+        b = UnicodeBuilder(10)
+        b.append(u"abc")
+        b.append(u"123")
+        s = b.build()
+        assert s == u"abc123"
+
+    def test_append_slice(self):
+        from __pypy__.builders import UnicodeBuilder
+        b = UnicodeBuilder()
+        b.append_slice(u"abcdefgh", 2, 5)
+        raises(ValueError, b.append_slice, u"1", 2, 1)
+        s = b.build()
+        assert s == "cde"
+        raises(ValueError, b.append_slice, u"abc", 1, 2)
\ No newline at end of file
diff --git a/pypy/module/_codecs/interp_codecs.py b/pypy/module/_codecs/interp_codecs.py
--- a/pypy/module/_codecs/interp_codecs.py
+++ b/pypy/module/_codecs/interp_codecs.py
@@ -186,7 +186,7 @@
         text = u'\ufffd' * size
         return space.newtuple([space.wrap(text), w_end])
     else:
-        typename = space.type(w_exc).getname(space, '?')
+        typename = space.type(w_exc).getname(space)
         raise operationerrfmt(space.w_TypeError,
             "don't know how to handle %s in error callback", typename)
 
@@ -207,7 +207,7 @@
             pos += 1
         return space.newtuple([space.wrap(builder.build()), w_end])
     else:
-        typename = space.type(w_exc).getname(space, '?')
+        typename = space.type(w_exc).getname(space)
         raise operationerrfmt(space.w_TypeError,
             "don't know how to handle %s in error callback", typename)
 
@@ -240,7 +240,7 @@
             pos += 1
         return space.newtuple([space.wrap(builder.build()), w_end])
     else:
-        typename = space.type(w_exc).getname(space, '?')
+        typename = space.type(w_exc).getname(space)
         raise operationerrfmt(space.w_TypeError,
             "don't know how to handle %s in error callback", typename)
 
diff --git a/pypy/module/_ffi/interp_ffi.py b/pypy/module/_ffi/interp_ffi.py
--- a/pypy/module/_ffi/interp_ffi.py
+++ b/pypy/module/_ffi/interp_ffi.py
@@ -235,7 +235,7 @@
         argchain.arg_longlong(floatval)
 
     def call(self, space, args_w):
-        self = jit.hint(self, promote=True)
+        self = jit.promote(self)
         argchain = self.build_argchain(space, args_w)
         w_restype = self.w_restype
         if w_restype.is_longlong():
diff --git a/pypy/module/_io/interp_bufferedio.py b/pypy/module/_io/interp_bufferedio.py
--- a/pypy/module/_io/interp_bufferedio.py
+++ b/pypy/module/_io/interp_bufferedio.py
@@ -175,7 +175,7 @@
         return space.call_method(self.w_raw, "isatty")
 
     def repr_w(self, space):
-        typename = space.type(self).getname(space, '?')
+        typename = space.type(self).getname(space)
         module = space.str_w(space.type(self).get_module())
         try:
             w_name = space.getattr(self, space.wrap("name"))
diff --git a/pypy/module/_io/interp_io.py b/pypy/module/_io/interp_io.py
--- a/pypy/module/_io/interp_io.py
+++ b/pypy/module/_io/interp_io.py
@@ -119,7 +119,7 @@
     if buffering < 0:
         buffering = DEFAULT_BUFFER_SIZE
 
-        if "st_blksize" in STAT_FIELD_TYPES:
+        if space.config.translation.type_system == 'lltype' and 'st_blksize' in STAT_FIELD_TYPES:
             fileno = space.int_w(space.call_method(w_raw, "fileno"))
             try:
                 st = os.fstat(fileno)
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -155,7 +155,7 @@
                     raise operationerrfmt(
                         space.w_IOError,
                         "peek() should have returned a bytes object, "
-                        "not '%s'", space.type(w_readahead).getname(space, '?'))
+                        "not '%s'", space.type(w_readahead).getname(space))
                 length = space.len_w(w_readahead)
                 if length > 0:
                     n = 0
@@ -181,7 +181,7 @@
                 raise operationerrfmt(
                     space.w_IOError,
                     "peek() should have returned a bytes object, "
-                    "not '%s'", space.type(w_read).getname(space, '?'))
+                    "not '%s'", space.type(w_read).getname(space))
             read = space.str_w(w_read)
             if not read:
                 break
diff --git a/pypy/module/_io/interp_stringio.py b/pypy/module/_io/interp_stringio.py
--- a/pypy/module/_io/interp_stringio.py
+++ b/pypy/module/_io/interp_stringio.py
@@ -129,7 +129,7 @@
         if not space.isinstance_w(w_obj, space.w_unicode):
             raise operationerrfmt(space.w_TypeError,
                                   "string argument expected, got '%s'",
-                                  space.type(w_obj).getname(space, '?'))
+                                  space.type(w_obj).getname(space))
         self._check_closed(space)
 
         orig_size = space.len_w(w_obj)
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -149,7 +149,7 @@
                             factor * float(self.ll_it), w_sublist)
         return space.wrap(w_se)
 
-    @jit.purefunction
+    @jit.elidable
     def _get_or_make_subentry(self, entry, make=True):
         try:
             return self.calls[entry]
@@ -167,7 +167,7 @@
         self.previous = profobj.current_context
         entry.recursionLevel += 1
         if profobj.subcalls and self.previous:
-            caller = jit.hint(self.previous.entry, promote=True)
+            caller = jit.promote(self.previous.entry)
             subentry = caller._get_or_make_subentry(entry)
             subentry.recursionLevel += 1
         self.ll_t0 = profobj.ll_timer()
@@ -179,7 +179,7 @@
             self.previous.ll_subt += tt
         entry._stop(tt, it)
         if profobj.subcalls and self.previous:
-            caller = jit.hint(self.previous.entry, promote=True)
+            caller = jit.promote(self.previous.entry)
             subentry = caller._get_or_make_subentry(entry, False)
             if subentry is not None:
                 subentry._stop(tt, it)
@@ -212,7 +212,7 @@
                 module += '.'
         return '{%s%s}' % (module, w_arg.name)
     else:
-        class_name = space.type(w_arg).getname(space, '?')
+        class_name = space.type(w_arg).getname(space)
         return "{'%s' object}" % (class_name,)
 
 def lsprof_call(space, w_self, frame, event, w_arg):
@@ -282,7 +282,7 @@
         c_setup_profiling()
         space.getexecutioncontext().setllprofile(lsprof_call, space.wrap(self))
 
-    @jit.purefunction
+    @jit.elidable
     def _get_or_make_entry(self, f_code, make=True):
         try:
             return self.data[f_code]
@@ -293,7 +293,7 @@
                 return entry
             return None
 
-    @jit.purefunction
+    @jit.elidable
     def _get_or_make_builtin_entry(self, key, make=True):
         try:
             return self.builtin_data[key]
@@ -306,7 +306,7 @@
 
     def _enter_call(self, f_code):
         # we have a superb gc, no point in freelist :)
-        self = jit.hint(self, promote=True)
+        self = jit.promote(self)
         entry = self._get_or_make_entry(f_code)
         self.current_context = ProfilerContext(self, entry)
 
@@ -314,14 +314,14 @@
         context = self.current_context
         if context is None:
             return
-        self = jit.hint(self, promote=True)
+        self = jit.promote(self)
         entry = self._get_or_make_entry(f_code, False)
         if entry is not None:
             context._stop(self, entry)
         self.current_context = context.previous
 
     def _enter_builtin_call(self, key):
-        self = jit.hint(self, promote=True)
+        self = jit.promote(self)
         entry = self._get_or_make_builtin_entry(key)
         self.current_context = ProfilerContext(self, entry)
 
@@ -329,7 +329,7 @@
         context = self.current_context
         if context is None:
             return
-        self = jit.hint(self, promote=True)
+        self = jit.promote(self)
         entry = self._get_or_make_builtin_entry(key, False)
         if entry is not None:
             context._stop(self, entry)
diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py
--- a/pypy/module/_lsprof/test/test_cprofile.py
+++ b/pypy/module/_lsprof/test/test_cprofile.py
@@ -181,8 +181,7 @@
 
 
 class AppTestWithDifferentBytecodes(AppTestCProfile):
-    keywords = {'objspace.opcodes.CALL_LIKELY_BUILTIN': True,
-                'objspace.opcodes.CALL_METHOD': True}
+    keywords = {'objspace.opcodes.CALL_METHOD': True}
 
 
 expected_output = {}
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -360,7 +360,7 @@
         conn_type = ["read-only", "write-only", "read-write"][self.flags]
 
         return space.wrap("<%s %s, handle %zd>" % (
-            conn_type, space.type(self).getname(space, '?'), self.do_fileno()))
+            conn_type, space.type(self).getname(space), self.do_fileno()))
 
     def is_valid(self):
         return self.handle != self.INVALID_HANDLE_VALUE
diff --git a/pypy/module/_rawffi/interp_rawffi.py b/pypy/module/_rawffi/interp_rawffi.py
--- a/pypy/module/_rawffi/interp_rawffi.py
+++ b/pypy/module/_rawffi/interp_rawffi.py
@@ -176,7 +176,7 @@
             except KeyError:
                 raise operationerrfmt(space.w_AttributeError,
                     "No symbol %s found in library %s", name, self.name)
-        
+
         elif (_MS_WINDOWS and
               space.is_true(space.isinstance(w_name, space.w_int))):
             ordinal = space.int_w(w_name)
@@ -261,7 +261,7 @@
     def descr_size_alignment(self, space, n=1):
         return space.newtuple([space.wrap(self.size * n),
                                space.wrap(self.alignment)])
-    
+
 
 class W_DataInstance(Wrappable):
     def __init__(self, space, size, address=r_uint(0)):
@@ -427,7 +427,7 @@
                     if not (argletter in TYPEMAP_PTR_LETTERS and
                             letter in TYPEMAP_PTR_LETTERS):
                         msg = "Argument %d should be typecode %s, got %s"
-                        raise operationerrfmt(space.w_TypeError, msg, 
+                        raise operationerrfmt(space.w_TypeError, msg,
                                               i+1, argletter, letter)
             args_ll.append(arg.ll_buffer)
             # XXX we could avoid the intermediate list args_ll
@@ -480,17 +480,25 @@
 alignment = _create_new_accessor('alignment', 'c_alignment')
 
 @unwrap_spec(address=r_uint, maxlength=int)
-def charp2string(space, address, maxlength=sys.maxint):
+def charp2string(space, address, maxlength=-1):
     if address == 0:
         return space.w_None
-    s = rffi.charp2strn(rffi.cast(rffi.CCHARP, address), maxlength)
+    charp_addr = rffi.cast(rffi.CCHARP, address)
+    if maxlength == -1:
+        s = rffi.charp2str(charp_addr)
+    else:
+        s = rffi.charp2strn(charp_addr, maxlength)
     return space.wrap(s)
 
 @unwrap_spec(address=r_uint, maxlength=int)
-def wcharp2unicode(space, address, maxlength=sys.maxint):
+def wcharp2unicode(space, address, maxlength=-1):
     if address == 0:
         return space.w_None
-    s = rffi.wcharp2unicoden(rffi.cast(rffi.CWCHARP, address), maxlength)
+    wcharp_addr = rffi.cast(rffi.CWCHARP, address)
+    if maxlength == -1:
+        s = rffi.wcharp2unicode(wcharp_addr)
+    else:
+        s = rffi.wcharp2unicoden(wcharp_addr, maxlength)
     return space.wrap(s)
 
 @unwrap_spec(address=r_uint, maxlength=int)
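
charp2string() and wcharp2unicode() now use -1 rather than sys.maxint as the
"no limit" default and dispatch to the unbounded rffi readers in that case.
A pure-Python sketch of the same dispatch, with string-based stand-ins for
the rffi helpers so the snippet runs anywhere:

    def charp2str(buf):
        # like rffi.charp2str: read up to the first NUL byte
        return buf.split('\x00', 1)[0]

    def charp2strn(buf, maxlength):
        # like rffi.charp2strn: read at most maxlength bytes, stop at a NUL
        return buf[:maxlength].split('\x00', 1)[0]

    def charp2string(buf, maxlength=-1):
        if buf is None:                  # NULL pointer case
            return None
        if maxlength == -1:              # new sentinel: no length limit
            return charp2str(buf)
        return charp2strn(buf, maxlength)

    assert charp2string('abc\x00def') == 'abc'
    assert charp2string('abcdef', maxlength=2) == 'ab'
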
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -1,3 +1,4 @@
+from __future__ import with_statement
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.baseobjspace import W_Root, ObjSpace, Wrappable
@@ -899,7 +900,7 @@
 
 def _ssl_thread_id_function():
     from pypy.module.thread import ll_thread
-    return rffi.cast(rffi.INT, ll_thread.get_ident())
+    return rffi.cast(rffi.LONG, ll_thread.get_ident())
 
 def setup_ssl_threads():
     from pypy.module.thread import ll_thread
diff --git a/pypy/module/_stackless/interp_coroutine.py b/pypy/module/_stackless/interp_coroutine.py
--- a/pypy/module/_stackless/interp_coroutine.py
+++ b/pypy/module/_stackless/interp_coroutine.py
@@ -40,7 +40,7 @@
             raise operationerrfmt(
                 space.w_TypeError, 
                 "'%s' object is not callable",
-                space.type(w_obj).getname(space, '?'))
+                space.type(w_obj).getname(space))
         self.w_func = w_obj
         self.args = args
 
diff --git a/pypy/module/_stackless/test/test_greenlet.py b/pypy/module/_stackless/test/test_greenlet.py
--- a/pypy/module/_stackless/test/test_greenlet.py
+++ b/pypy/module/_stackless/test/test_greenlet.py
@@ -72,6 +72,23 @@
         g1 = greenlet(f)
         raises(ValueError, g2.switch)
 
+
+    def test_exc_info_save_restore(self):
+        from _stackless import greenlet
+        import sys
+        def f():
+            try:
+                raise ValueError('fun')
+            except:
+                exc_info = sys.exc_info()
+                greenlet(h).switch()
+                assert exc_info == sys.exc_info()
+
+        def h():
+            assert sys.exc_info() == (None, None, None)
+
+        greenlet(f).switch()
+
     def test_exception(self):
         from _stackless import greenlet
         import sys
diff --git a/pypy/module/_weakref/interp__weakref.py b/pypy/module/_weakref/interp__weakref.py
--- a/pypy/module/_weakref/interp__weakref.py
+++ b/pypy/module/_weakref/interp__weakref.py
@@ -129,7 +129,7 @@
         if w_obj is None:
             state = '; dead'
         else:
-            typename = space.type(w_obj).getname(space, '?')
+            typename = space.type(w_obj).getname(space)
             objname = w_obj.getname(space, '')
             if objname:
                 state = "; to '%s' (%s)" % (typename, objname)
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -1,18 +1,21 @@
 from __future__ import with_statement
 
+from pypy.interpreter.baseobjspace import Wrappable
+from pypy.interpreter.buffer import RWBuffer
 from pypy.interpreter.error import OperationError
+from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.interpreter.typedef import TypeDef, GetSetProperty, make_weakref_descr
-from pypy.rpython.lltypesystem import lltype, rffi
-from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.rlib.unroll import unrolling_iterable
-from pypy.rlib.rarithmetic import ovfcheck
-from pypy.interpreter.baseobjspace import Wrappable
+from pypy.module._file.interp_file import W_File
+from pypy.objspace.std.model import W_Object
+from pypy.objspace.std.multimethod import FailedToImplement
 from pypy.objspace.std.stdtypedef import SMM, StdTypeDef
 from pypy.objspace.std.register_all import register_all
-from pypy.objspace.std.model import W_Object
-from pypy.module._file.interp_file import W_File
-from pypy.interpreter.buffer import RWBuffer
-from pypy.objspace.std.multimethod import FailedToImplement
+from pypy.rlib.rarithmetic import ovfcheck
+from pypy.rlib.unroll import unrolling_iterable
+from pypy.rpython.lltypesystem import lltype, rffi
+
+
+memcpy = rffi.llexternal("memcpy", [rffi.VOIDP, rffi.VOIDP, rffi.SIZE_T], lltype.Void)
 
 @unwrap_spec(typecode=str)
 def w_array(space, w_cls, typecode, __args__):
@@ -37,7 +40,7 @@
             if len(__args__.arguments_w) > 0:
                 w_initializer = __args__.arguments_w[0]
                 if space.type(w_initializer) is space.w_str:
-                    a.fromstring(w_initializer)
+                    a.fromstring(space.str_w(w_initializer))
                 elif space.type(w_initializer) is space.w_unicode:
                     a.fromsequence(w_initializer)
                 elif space.type(w_initializer) is space.w_list:
@@ -73,6 +76,7 @@
 
 array_buffer_info = SMM('buffer_info', 1)
 array_reduce = SMM('__reduce__', 1)
+array_copy = SMM('__copy__', 1)
 array_byteswap = SMM('byteswap', 1)
 
 
@@ -96,7 +100,7 @@
     itemsize = GetSetProperty(descr_itemsize),
     typecode = GetSetProperty(descr_typecode),
     __weakref__ = make_weakref_descr(W_ArrayBase),
-    )
+)
 W_ArrayBase.typedef.registermethods(globals())
 
 
@@ -159,8 +163,6 @@
         self.data[index] = char
 
 
-
-
 def make_array(mytype):
     class W_Array(W_ArrayBase):
         itemsize = mytype.bytes
@@ -268,12 +270,10 @@
                 raise
             self.setlen(oldlen + i)
 
-        def fromstring(self, w_s):
-            space = self.space
-            s = space.str_w(w_s)
+        def fromstring(self, s):
             if len(s) % self.itemsize != 0:
                 msg = 'string length not a multiple of item size'
-                raise OperationError(space.w_ValueError, space.wrap(msg))
+                raise OperationError(self.space.w_ValueError, self.space.wrap(msg))
             oldlen = self.len
             new = len(s) / mytype.bytes
             self.setlen(oldlen + new)
@@ -311,6 +311,14 @@
         def charbuf(self):
             return  rffi.cast(rffi.CCHARP, self.buffer)
 
+        def w_getitem(self, space, idx):
+            item = self.buffer[idx]
+            if mytype.typecode in 'bBhHil':
+                item = rffi.cast(lltype.Signed, item)
+            elif mytype.typecode == 'f':
+                item = float(item)
+            return space.wrap(item)
+
     # Basic get/set/append/extend methods
 
     def len__Array(space, self):
@@ -319,12 +327,7 @@
     def getitem__Array_ANY(space, self, w_idx):
         idx, stop, step = space.decode_index(w_idx, self.len)
         assert step == 0
-        item = self.buffer[idx]
-        if mytype.typecode in 'bBhHil':
-            item = rffi.cast(lltype.Signed, item)
-        elif mytype.typecode == 'f':
-            item = float(item)
-        return self.space.wrap(item)
+        return self.w_getitem(space, idx)
 
     def getitem__Array_Slice(space, self, w_slice):
         start, stop, step, size = space.decode_index4(w_slice, self.len)
@@ -387,7 +390,7 @@
     def array_count__Array_ANY(space, self, w_val):
         cnt = 0
         for i in range(self.len):
-            w_item = getitem__Array_ANY(space, self, space.wrap(i))
+            w_item = self.w_getitem(space, i)
             if space.is_true(space.eq(w_item, w_val)):
                 cnt += 1
         return space.wrap(cnt)
@@ -395,7 +398,7 @@
     def array_index__Array_ANY(space, self, w_val):
         cnt = 0
         for i in range(self.len):
-            w_item = getitem__Array_ANY(space, self, space.wrap(i))
+            w_item = self.w_getitem(space, i)
             if space.is_true(space.eq(w_item, w_val)):
                 return space.wrap(i)
         msg = 'array.index(x): x not in list'
@@ -413,7 +416,7 @@
         if i < 0 or i >= self.len:
             msg = 'pop index out of range'
             raise OperationError(space.w_IndexError, space.wrap(msg))
-        w_val = getitem__Array_ANY(space, self, space.wrap(i))
+        w_val = self.w_getitem(space, i)
         while i < self.len - 1:
             self.buffer[i] = self.buffer[i + 1]
             i += 1
@@ -515,14 +518,14 @@
     def array_tolist__Array(space, self):
         w_l = space.newlist([])
         for i in range(self.len):
-            w_l.append(getitem__Array_ANY(space, self, space.wrap(i)))
+            w_l.append(self.w_getitem(space, i))
         return w_l
 
     def array_fromlist__Array_List(space, self, w_lst):
         self.fromlist(w_lst)
 
     def array_fromstring__Array_ANY(space, self, w_s):
-        self.fromstring(w_s)
+        self.fromstring(space.str_w(w_s))
 
     def array_tostring__Array(space, self):
         cbuf = self.charbuf()
@@ -615,6 +618,16 @@
             dct = space.w_None
         return space.newtuple([space.type(self), space.newtuple(args), dct])
 
+    def array_copy__Array(space, self):
+        w_a = mytype.w_class(self.space)
+        w_a.setlen(self.len)
+        memcpy(
+            rffi.cast(rffi.VOIDP, w_a.buffer),
+            rffi.cast(rffi.VOIDP, self.buffer),
+            self.len * mytype.bytes
+        )
+        return w_a
+
     def array_byteswap__Array(space, self):
         if mytype.bytes not in [1, 2, 4, 8]:
             msg = "byteswap not supported for this array"
diff --git a/pypy/module/cpyext/__init__.py b/pypy/module/cpyext/__init__.py
--- a/pypy/module/cpyext/__init__.py
+++ b/pypy/module/cpyext/__init__.py
@@ -39,6 +39,7 @@
 import pypy.module.cpyext.object
 import pypy.module.cpyext.stringobject
 import pypy.module.cpyext.tupleobject
+import pypy.module.cpyext.setobject
 import pypy.module.cpyext.dictobject
 import pypy.module.cpyext.intobject
 import pypy.module.cpyext.longobject
@@ -64,6 +65,7 @@
 import pypy.module.cpyext.memoryobject
 import pypy.module.cpyext.codecs
 import pypy.module.cpyext.pyfile
+import pypy.module.cpyext.pystrtod
 
 # now that all rffi_platform.Struct types are registered, configure them
 api.configure_types()
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -562,7 +562,8 @@
             elif callable.api_func.restype is not lltype.Void:
                 retval = rffi.cast(callable.api_func.restype, result)
         except Exception, e:
-            print 'Fatal error in cpyext, calling', callable.__name__
+            print 'Fatal error in cpyext, CPython compatibility layer, calling', callable.__name__
+            print 'Either report a bug or consider not using this particular extension'
             if not we_are_translated():
                 import traceback
                 traceback.print_exc()
diff --git a/pypy/module/cpyext/methodobject.py b/pypy/module/cpyext/methodobject.py
--- a/pypy/module/cpyext/methodobject.py
+++ b/pypy/module/cpyext/methodobject.py
@@ -122,7 +122,7 @@
         return self.space.unwrap(self.descr_method_repr())
 
     def descr_method_repr(self):
-        return self.getrepr(self.space, "built-in method '%s' of '%s' object" % (self.name, self.w_objclass.getname(self.space, '?')))
+        return self.getrepr(self.space, "built-in method '%s' of '%s' object" % (self.name, self.w_objclass.getname(self.space)))
 
 PyCFunction_Check, PyCFunction_CheckExact = build_type_checkers("CFunction", W_PyCFunctionObject)
 
@@ -151,7 +151,7 @@
 
     def descr_method_repr(self):
         return self.space.wrap("<slot wrapper '%s' of '%s' objects>" % (self.method_name,
-            self.w_objclass.getname(self.space, '?')))
+            self.w_objclass.getname(self.space)))
 
 def cwrapper_descr_call(space, w_self, __args__):
     self = space.interp_w(W_PyCWrapperObject, w_self)
diff --git a/pypy/module/cpyext/pystrtod.py b/pypy/module/cpyext/pystrtod.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/pystrtod.py
@@ -0,0 +1,68 @@
+import errno
+from pypy.interpreter.error import OperationError
+from pypy.module.cpyext.api import cpython_api
+from pypy.module.cpyext.pyobject import PyObject
+from pypy.rlib import rdtoa
+from pypy.rlib import rfloat
+from pypy.rlib import rposix
+from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.lltypesystem import rffi
+
+
+ at cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0)
+def PyOS_string_to_double(space, s, endptr, w_overflow_exception):
+    """Convert a string s to a double, raising a Python
+    exception on failure.  The set of accepted strings corresponds to
+    the set of strings accepted by Python's float() constructor,
+    except that s must not have leading or trailing whitespace.
+    The conversion is independent of the current locale.
+
+    If endptr is NULL, convert the whole string.  Raise
+    ValueError and return -1.0 if the string is not a valid
+    representation of a floating-point number.
+
+    If endptr is not NULL, convert as much of the string as
+    possible and set *endptr to point to the first unconverted
+    character.  If no initial segment of the string is the valid
+    representation of a floating-point number, set *endptr to point
+    to the beginning of the string, raise ValueError, and return
+    -1.0.
+
+    If s represents a value that is too large to store in a float
+    (for example, "1e500" is such a string on many platforms) then
+    if overflow_exception is NULL return Py_HUGE_VAL (with
+    an appropriate sign) and don't set any exception.  Otherwise,
+    overflow_exception must point to a Python exception object;
+    raise that exception and return -1.0.  In both cases, set
+    *endptr to point to the first character after the converted value.
+
+    If any other error occurs during the conversion (for example an
+    out-of-memory error), set the appropriate Python exception and
+    return -1.0.
+    """
+    user_endptr = True
+    try:
+        if not endptr:
+            endptr = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
+            user_endptr = False
+        result = rdtoa.dg_strtod(s, endptr)
+        endpos = (rffi.cast(rffi.LONG, endptr[0]) -
+                  rffi.cast(rffi.LONG, s))
+        if endpos == 0 or (not user_endptr and not endptr[0][0] == '\0'):
+            raise OperationError(
+                space.w_ValueError,
+                space.wrap('invalid input at position %s' % endpos))
+        if rposix.get_errno() == errno.ERANGE:
+            rposix.set_errno(0)
+            if w_overflow_exception is None:
+                if result > 0:
+                    return rfloat.INFINITY
+                else:
+                    return -rfloat.INFINITY
+            else:
+                raise OperationError(w_overflow_exception,
+                                     space.wrap('value too large'))
+        return result
+    finally:
+        if not user_endptr:
+            lltype.free(endptr, flavor='raw')
diff --git a/pypy/module/cpyext/setobject.py b/pypy/module/cpyext/setobject.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/setobject.py
@@ -0,0 +1,46 @@
+from pypy.interpreter.error import OperationError
+from pypy.rpython.lltypesystem import rffi, lltype
+from pypy.module.cpyext.api import (cpython_api, Py_ssize_t, CANNOT_FAIL,
+                                    build_type_checkers)
+from pypy.module.cpyext.pyobject import (PyObject, PyObjectP, Py_DecRef,
+    borrow_from, make_ref, from_ref)
+from pypy.module.cpyext.pyerrors import PyErr_BadInternalCall
+from pypy.objspace.std.setobject import W_SetObject, newset
+from pypy.objspace.std.smalltupleobject import W_SmallTupleObject
+
+
+PySet_Check, PySet_CheckExact = build_type_checkers("Set")
+
+
+ at cpython_api([PyObject], PyObject)
+def PySet_New(space, w_iterable):
+    if w_iterable is None:
+        return space.call_function(space.w_set)
+    else:
+        return space.call_function(space.w_set, w_iterable)
+
+ at cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
+def PySet_Add(space, w_s, w_obj):
+    if not PySet_Check(space, w_s):
+        PyErr_BadInternalCall(space)
+    space.call_method(w_s, 'add', w_obj)
+    return 0
+
+ at cpython_api([PyObject, PyObject], rffi.INT_real, error=-1)
+def PySet_Discard(space, w_s, w_obj):
+    if not PySet_Check(space, w_s):
+        PyErr_BadInternalCall(space)
+    space.call_method(w_s, 'discard', w_obj)
+    return 0
+
+
+ at cpython_api([PyObject], Py_ssize_t, error=CANNOT_FAIL)
+def PySet_GET_SIZE(space, w_s):
+    return space.int_w(space.len(w_s))
+
+ at cpython_api([PyObject], Py_ssize_t, error=-1)
+def PySet_Size(space, ref):
+    if not PySet_Check(space, ref):
+        raise OperationError(space.w_TypeError,
+                             space.wrap("expected set object"))
+    return PySet_GET_SIZE(space, ref)
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -480,39 +480,6 @@
     """Create a new Python complex number object from a C Py_complex value."""
     raise NotImplementedError
 
- at cpython_api([rffi.CCHARP, rffi.CCHARPP, PyObject], rffi.DOUBLE, error=-1.0)
-def PyOS_string_to_double(space, s, endptr, overflow_exception):
-    """Convert a string s to a double, raising a Python
-    exception on failure.  The set of accepted strings corresponds to
-    the set of strings accepted by Python's float() constructor,
-    except that s must not have leading or trailing whitespace.
-    The conversion is independent of the current locale.
-
-    If endptr is NULL, convert the whole string.  Raise
-    ValueError and return -1.0 if the string is not a valid
-    representation of a floating-point number.
-
-    If endptr is not NULL, convert as much of the string as
-    possible and set *endptr to point to the first unconverted
-    character.  If no initial segment of the string is the valid
-    representation of a floating-point number, set *endptr to point
-    to the beginning of the string, raise ValueError, and return
-    -1.0.
-
-    If s represents a value that is too large to store in a float
-    (for example, "1e500" is such a string on many platforms) then
-    if overflow_exception is NULL return Py_HUGE_VAL (with
-    an appropriate sign) and don't set any exception.  Otherwise,
-    overflow_exception must point to a Python exception object;
-    raise that exception and return -1.0.  In both cases, set
-    *endptr to point to the first character after the converted value.
-
-    If any other error occurs during the conversion (for example an
-    out-of-memory error), set the appropriate Python exception and
-    return -1.0.
-    """
-    raise NotImplementedError
-
 @cpython_api([rffi.CCHARP, rffi.CCHARPP], rffi.DOUBLE, error=CANNOT_FAIL)
 def PyOS_ascii_strtod(space, nptr, endptr):
     """Convert a string to a double. This function behaves like the Standard C
diff --git a/pypy/module/cpyext/test/test_pystrtod.py b/pypy/module/cpyext/test/test_pystrtod.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/test/test_pystrtod.py
@@ -0,0 +1,93 @@
+import math
+
+from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.rpython.lltypesystem import rffi
+from pypy.rpython.lltypesystem import lltype
+
+
+class TestPyOS_string_to_double(BaseApiTest):
+
+    def test_simple_float(self, api):
+        s = rffi.str2charp('0.4')
+        null = lltype.nullptr(rffi.CCHARPP.TO)
+        r = api.PyOS_string_to_double(s, null, None)
+        assert r == 0.4
+        rffi.free_charp(s)
+
+    def test_empty_string(self, api):
+        s = rffi.str2charp('')
+        null = lltype.nullptr(rffi.CCHARPP.TO)
+        r = api.PyOS_string_to_double(s, null, None)
+        assert r == -1.0
+        raises(ValueError)
+        api.PyErr_Clear()
+        rffi.free_charp(s)
+
+    def test_bad_string(self, api):
+        s = rffi.str2charp(' 0.4')
+        null = lltype.nullptr(rffi.CCHARPP.TO)
+        r = api.PyOS_string_to_double(s, null, None)
+        assert r == -1.0
+        raises(ValueError)
+        api.PyErr_Clear()
+        rffi.free_charp(s)
+
+    def test_overflow_pos(self, api):
+        s = rffi.str2charp('1e500')
+        null = lltype.nullptr(rffi.CCHARPP.TO)
+        r = api.PyOS_string_to_double(s, null, None)
+        assert math.isinf(r)
+        assert r > 0
+        rffi.free_charp(s)
+
+    def test_overflow_neg(self, api):
+        s = rffi.str2charp('-1e500')
+        null = lltype.nullptr(rffi.CCHARPP.TO)
+        r = api.PyOS_string_to_double(s, null, None)
+        assert math.isinf(r)
+        assert r < 0
+        rffi.free_charp(s)
+
+    def test_overflow_exc(self, space, api):
+        s = rffi.str2charp('1e500')
+        null = lltype.nullptr(rffi.CCHARPP.TO)
+        r = api.PyOS_string_to_double(s, null, space.w_ValueError)
+        assert r == -1.0
+        raises(ValueError)
+        api.PyErr_Clear()
+        rffi.free_charp(s)
+
+    def test_endptr_number(self, api):
+        s = rffi.str2charp('0.4')
+        endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
+        r = api.PyOS_string_to_double(s, endp, None)
+        assert r == 0.4
+        endp_addr = rffi.cast(rffi.LONG, endp[0])
+        s_addr = rffi.cast(rffi.LONG, s)
+        assert endp_addr == s_addr + 3
+        rffi.free_charp(s)
+        lltype.free(endp, flavor='raw')
+
+    def test_endptr_tail(self, api):
+        s = rffi.str2charp('0.4 foo')
+        endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
+        r = api.PyOS_string_to_double(s, endp, None)
+        assert r == 0.4
+        endp_addr = rffi.cast(rffi.LONG, endp[0])
+        s_addr = rffi.cast(rffi.LONG, s)
+        assert endp_addr == s_addr + 3
+        rffi.free_charp(s)
+        lltype.free(endp, flavor='raw')
+
+    def test_endptr_no_conversion(self, api):
+        s = rffi.str2charp('foo')
+        endp = lltype.malloc(rffi.CCHARPP.TO, 1, flavor='raw')
+        r = api.PyOS_string_to_double(s, endp, None)
+        assert r == -1.0
+        raises(ValueError)
+        endp_addr = rffi.cast(rffi.LONG, endp[0])
+        s_addr = rffi.cast(rffi.LONG, s)
+        assert endp_addr == s_addr
+        api.PyErr_Clear()
+        rffi.free_charp(s)
+        lltype.free(endp, flavor='raw')
diff --git a/pypy/module/cpyext/test/test_setobject.py b/pypy/module/cpyext/test/test_setobject.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/test/test_setobject.py
@@ -0,0 +1,29 @@
+import py
+
+from pypy.module.cpyext.pyobject import PyObject, PyObjectP, make_ref, from_ref
+from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.rpython.lltypesystem import rffi, lltype
+from pypy.conftest import gettestobjspace
+
+
+class TestTupleObject(BaseApiTest):
+    def test_setobj(self, space, api):
+        assert not api.PySet_Check(space.w_None)
+        assert api.PySet_Add(space.w_None, space.w_None) == -1
+        api.PyErr_Clear()
+        w_set = space.call_function(space.w_set)
+        space.call_method(w_set, 'update', space.wrap([1,2,3,4]))
+        assert api.PySet_Size(w_set) == 4
+        assert api.PySet_GET_SIZE(w_set) == 4
+        raises(TypeError, api.PySet_Size(space.newlist([])))
+        api.PyErr_Clear()
+
+    def test_set_add_discard(self, space, api):
+        w_set = api.PySet_New(None)
+        assert api.PySet_Size(w_set) == 0
+        w_set = api.PySet_New(space.wrap([1,2,3,4]))
+        assert api.PySet_Size(w_set) == 4
+        api.PySet_Add(w_set, space.wrap(6))
+        assert api.PySet_Size(w_set) == 5
+        api.PySet_Discard(w_set, space.wrap(6))
+        assert api.PySet_Size(w_set) == 4
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -367,3 +367,14 @@
                     data, len(u), lltype.nullptr(rffi.CCHARP.TO))
         rffi.free_wcharp(data)
 
+    def test_format(self, space, api):
+        w_format = space.wrap(u'hi %s')
+        w_args = space.wrap((u'test',))
+        w_formated = api.PyUnicode_Format(w_format, w_args)
+        assert space.unwrap(w_formated) == space.unwrap(space.mod(w_format, w_args))
+
+    def test_join(self, space, api):
+        w_sep = space.wrap(u'<sep>')
+        w_seq = space.wrap([u'a', u'b'])
+        w_joined = api.PyUnicode_Join(w_sep, w_seq)
+        assert space.unwrap(w_joined) == u'a<sep>b'
diff --git a/pypy/module/cpyext/test/test_weakref.py b/pypy/module/cpyext/test/test_weakref.py
--- a/pypy/module/cpyext/test/test_weakref.py
+++ b/pypy/module/cpyext/test/test_weakref.py
@@ -7,6 +7,7 @@
         w_ref = api.PyWeakref_NewRef(w_obj, space.w_None)
         assert w_ref is not None
         assert space.is_w(api.PyWeakref_GetObject(w_ref), w_obj)
+        assert space.is_w(api.PyWeakref_GET_OBJECT(w_ref), w_obj)
         assert space.is_w(api.PyWeakref_LockObject(w_ref), w_obj)
 
         w_obj = space.newtuple([])
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -450,7 +450,7 @@
             PyObject_Del.api_func.get_wrapper(space))
     pto.c_tp_alloc = llhelper(PyType_GenericAlloc.api_func.functype,
             PyType_GenericAlloc.api_func.get_wrapper(space))
-    pto.c_tp_name = rffi.str2charp(w_type.getname(space, "?"))
+    pto.c_tp_name = rffi.str2charp(w_type.getname(space))
     pto.c_tp_basicsize = -1 # hopefully this makes malloc bail out
     pto.c_tp_itemsize = 0
     # uninitialized fields:
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -523,3 +523,11 @@
     copies sizeof(Py_UNICODE) * length bytes from source to target"""
     for i in range(0, length):
         target[i] = source[i]
+
+ at cpython_api([PyObject, PyObject], PyObject)
+def PyUnicode_Format(space, w_format, w_args):
+    return space.mod(w_format, w_args)
+
+ at cpython_api([PyObject, PyObject], PyObject)
+def PyUnicode_Join(space, w_sep, w_seq):
+    return space.call_method(w_sep, 'join', w_seq)
diff --git a/pypy/module/cpyext/weakrefobject.py b/pypy/module/cpyext/weakrefobject.py
--- a/pypy/module/cpyext/weakrefobject.py
+++ b/pypy/module/cpyext/weakrefobject.py
@@ -21,6 +21,10 @@
     """Return the referenced object from a weak reference.  If the referent is
     no longer live, returns None. This function returns a borrowed reference.
     """
+    return PyWeakref_GET_OBJECT(space, w_ref)
+
+ at cpython_api([PyObject], PyObject)
+def PyWeakref_GET_OBJECT(space, w_ref):
     return borrow_from(w_ref, space.call_function(w_ref))
 
 @cpython_api([PyObject], PyObject)
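
PyWeakref_GET_OBJECT, which is a macro in CPython, is added here as a regular
cpyext function, and PyWeakref_GetObject now just delegates to it.  Both
expose the ordinary weak-reference semantics, illustrated at app level:

    import gc, weakref

    class Thing(object):
        pass

    obj = Thing()
    ref = weakref.ref(obj)
    assert ref() is obj       # referent alive: the object itself comes back
    del obj
    gc.collect()              # PyPy needs an explicit collect here
    assert ref() is None      # referent gone: None comes back
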
diff --git a/pypy/module/exceptions/interp_exceptions.py b/pypy/module/exceptions/interp_exceptions.py
--- a/pypy/module/exceptions/interp_exceptions.py
+++ b/pypy/module/exceptions/interp_exceptions.py
@@ -136,7 +136,7 @@
             args_repr = space.str_w(space.repr(space.newtuple(self.args_w)))
         else:
             args_repr = "()"
-        clsname = self.getclass(space).getname(space, '?')
+        clsname = self.getclass(space).getname(space)
         return space.wrap(clsname + args_repr)
 
     def descr_getargs(self, space):
@@ -546,7 +546,7 @@
             w_tuple = space.newtuple(values_w + [self.w_lastlineno])
             args_w = [self.args_w[0], w_tuple]
             args_repr = space.str_w(space.repr(space.newtuple(args_w)))
-            clsname = self.getclass(space).getname(space, '?')
+            clsname = self.getclass(space).getname(space)
             return space.wrap(clsname + args_repr)
         else:
             return W_StandardError.descr_repr(self, space)
diff --git a/pypy/module/imp/importing.py b/pypy/module/imp/importing.py
--- a/pypy/module/imp/importing.py
+++ b/pypy/module/imp/importing.py
@@ -120,7 +120,7 @@
 def check_sys_modules_w(space, modulename):
     return space.finditem_str(space.sys.get('modules'), modulename)
 
- at jit.purefunction
+ at jit.elidable
 def _get_dot_position(str, n):
     # return the index in str of the '.' such that there are n '.'-separated
     # strings after it
@@ -133,8 +133,8 @@
 def _get_relative_name(space, modulename, level, w_globals):
     w = space.wrap
     ctxt_w_package = space.finditem_str(w_globals, '__package__')
-    ctxt_w_package = jit.hint(ctxt_w_package, promote=True)
-    level = jit.hint(level, promote=True)
+    ctxt_w_package = jit.promote(ctxt_w_package)
+    level = jit.promote(level)
 
     ctxt_package = None
     if ctxt_w_package is not None and ctxt_w_package is not space.w_None:
@@ -184,7 +184,7 @@
         ctxt_w_name = space.finditem_str(w_globals, '__name__')
         ctxt_w_path = space.finditem_str(w_globals, '__path__')
 
-        ctxt_w_name = jit.hint(ctxt_w_name, promote=True)
+        ctxt_w_name = jit.promote(ctxt_w_name)
         ctxt_name = None
         if ctxt_w_name is not None:
             try:
@@ -622,7 +622,13 @@
         try:
             if find_info:
                 w_mod = load_module(space, w_modulename, find_info)
-                w_mod = space.getitem(space.sys.get("modules"), w_modulename)
+                try:
+                    w_mod = space.getitem(space.sys.get("modules"),
+                                          w_modulename)
+                except OperationError, oe:
+                    if not oe.match(space, space.w_KeyError):
+                        raise
+                    raise OperationError(space.w_ImportError, w_modulename)
                 if w_parent is not None:
                     space.setattr(w_parent, space.wrap(partname), w_mod)
                 return w_mod
@@ -793,14 +799,13 @@
 
 """
 
-# XXX picking a magic number is a mess.  So far it works because we
-# have only two extra opcodes, which bump the magic number by +1 and
-# +2 respectively, and CPython leaves a gap of 10 when it increases
+# picking a magic number is a mess.  So far it works because we
+# have only one extra opcode, which bumps the magic number by +2, and CPython
+# leaves a gap of 10 when it increases
 # its own magic number.  To avoid assigning exactly the same numbers
 # as CPython we always add a +2.  We'll have to think again when we
-# get at the fourth new opcode :-(
+# get three more new opcodes
 #
-#  * CALL_LIKELY_BUILTIN    +1
 #  * CALL_METHOD            +2
 #
 # In other words:
@@ -823,8 +828,6 @@
             return struct.unpack('<i', magic)[0]
 
     result = default_magic
-    if space.config.objspace.opcodes.CALL_LIKELY_BUILTIN:
-        result += 1
     if space.config.objspace.opcodes.CALL_METHOD:
         result += 2
     return result
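
With CALL_LIKELY_BUILTIN gone, the computed .pyc magic differs from the base
value only by the CALL_METHOD +2.  A toy sketch of the scheme the comment
above describes; the base value below is a made-up placeholder, not PyPy's
real default_magic:

    # Placeholder base value laid out roughly CPython-style: a small number
    # in the low 16 bits followed by the "\r\n" marker bytes.
    DEFAULT_MAGIC = 1024 | (ord('\r') << 16) | (ord('\n') << 24)

    def get_pyc_magic(call_method_enabled):
        result = DEFAULT_MAGIC
        if call_method_enabled:
            result += 2        # the only extra opcode left after this merge
        return result

    assert get_pyc_magic(True) - get_pyc_magic(False) == 2
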
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -37,6 +37,7 @@
                     ambig = "imamodule = 1",
                     test_reload = "def test():\n    raise ValueError\n",
                     infinite_reload = "import infinite_reload; reload(infinite_reload)",
+                    del_sys_module = "import sys\ndel sys.modules['del_sys_module']\n",
                     )
     root.ensure("notapackage", dir=1)    # empty, no __init__.py
     setuppkg("pkg",
@@ -562,6 +563,14 @@
         except ImportError:
             pass
 
+    def test_del_from_sys_modules(self):
+        try:
+            import del_sys_module
+        except ImportError:
+            pass    # ok
+        else:
+            assert False, 'should not work'
+
 class TestAbi:
     def test_abi_tag(self):
         space1 = gettestobjspace(soabi='TEST')
diff --git a/pypy/module/math/__init__.py b/pypy/module/math/__init__.py
--- a/pypy/module/math/__init__.py
+++ b/pypy/module/math/__init__.py
@@ -4,6 +4,7 @@
 
 class Module(MixedModule):
     appleveldefs = {
+       'factorial' : 'app_math.factorial'
     }
 
     interpleveldefs = {
@@ -40,7 +41,6 @@
        'isnan'          : 'interp_math.isnan',
        'trunc'          : 'interp_math.trunc',
        'fsum'           : 'interp_math.fsum',
-       'factorial'      : 'interp_math.factorial',
        'asinh'          : 'interp_math.asinh',
        'acosh'          : 'interp_math.acosh',
        'atanh'          : 'interp_math.atanh',
diff --git a/pypy/module/math/app_math.py b/pypy/module/math/app_math.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/math/app_math.py
@@ -0,0 +1,13 @@
+def factorial(x):
+    """Find x!."""
+    if isinstance(x, float):
+        fl = int(x)
+        if fl != x:
+            raise ValueError("float arguments must be integral")
+        x = fl
+    if x < 0:
+        raise ValueError("x must be >= 0")
+    res = 1
+    for i in range(1, x + 1):
+        res *= i
+    return res
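
The app-level replacement keeps the behaviour of the interp-level factorial
removed below: integral floats are accepted, non-integral floats raise
ValueError.  Usage (CPython 2.7's math.factorial behaves the same way):

    from math import factorial

    assert factorial(5) == 120
    assert factorial(5.0) == 120        # integral floats are accepted
    try:
        factorial(5.5)                  # non-integral float
    except ValueError:
        pass
    else:
        raise AssertionError("expected ValueError")
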
diff --git a/pypy/module/math/interp_math.py b/pypy/module/math/interp_math.py
--- a/pypy/module/math/interp_math.py
+++ b/pypy/module/math/interp_math.py
@@ -373,22 +373,6 @@
                 hi = v
     return space.wrap(hi)
 
-def factorial(space, w_x):
-    """Find x!."""
-    if space.isinstance_w(w_x, space.w_float):
-        fl = space.float_w(w_x)
-        if math.floor(fl) != fl:
-            raise OperationError(space.w_ValueError,
-                                 space.wrap("float arguments must be integral"))
-        w_x = space.long(w_x)
-    x = space.int_w(w_x)
-    if x < 0:
-        raise OperationError(space.w_ValueError, space.wrap("x must be >= 0"))
-    w_res = space.wrap(1)
-    for i in range(1, x + 1):
-        w_res = space.mul(w_res, space.wrap(i))
-    return w_res
-
 def log1p(space, w_x):
     """Find log(x + 1)."""
     return math1(space, rfloat.log1p, w_x)
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -9,11 +9,14 @@
         'array': 'interp_numarray.SingleDimArray',
         'zeros': 'interp_numarray.zeros',
         'empty': 'interp_numarray.zeros',
+        'ones': 'interp_numarray.ones',
 
         # ufuncs
+        'abs': 'interp_ufuncs.absolute',
         'absolute': 'interp_ufuncs.absolute',
         'copysign': 'interp_ufuncs.copysign',
         'exp': 'interp_ufuncs.exp',
+        'floor': 'interp_ufuncs.floor',
         'maximum': 'interp_ufuncs.maximum',
         'minimum': 'interp_ufuncs.minimum',
         'negative': 'interp_ufuncs.negative',
@@ -21,4 +24,7 @@
         'sign': 'interp_ufuncs.sign',
     }
 
-    appleveldefs = {}
+    appleveldefs = {
+        'average': 'app_numpy.average',
+        'mean': 'app_numpy.mean',
+    }
diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/micronumpy/app_numpy.py
@@ -0,0 +1,11 @@
+import numpy
+
+def average(a):
+    # This implements a weighted average, for now we don't implement the
+    # weighting, just the average part!
+    return mean(a)
+
+def mean(a):
+    if not hasattr(a, "mean"):
+        a = numpy.array(a)
+    return a.mean()
\ No newline at end of file
diff --git a/pypy/module/micronumpy/compile.py b/pypy/module/micronumpy/compile.py
--- a/pypy/module/micronumpy/compile.py
+++ b/pypy/module/micronumpy/compile.py
@@ -40,6 +40,11 @@
         elif b == '/':
             right = stack.pop()
             stack.append(stack.pop().descr_div(space, right))
+        elif b == '%':
+            right = stack.pop()
+            stack.append(stack.pop().descr_mod(space, right))
+        elif b == '|':
+            stack.append(stack.pop().descr_abs(space))
         else:
             print "Unknown opcode: %s" % b
             raise BogusBytecode()
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -1,10 +1,11 @@
 from pypy.interpreter.baseobjspace import ObjSpace, W_Root, Wrappable
 from pypy.interpreter.error import operationerrfmt
 from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.typedef import TypeDef
+from pypy.interpreter.typedef import TypeDef, GetSetProperty
 from pypy.rlib import jit
 from pypy.rpython.lltypesystem import lltype
 from pypy.tool.sourcetools import func_with_new_name
+import math
 
 
 def dummy1(v):
@@ -30,6 +31,12 @@
         self.transitions[target] = new = Signature()
         return new
 
+def pos(v):
+    return v
+def neg(v):
+    return -v
+def absolute(v):
+    return abs(v)
 def add(v1, v2):
     return v1 + v2
 def sub(v1, v2):
@@ -38,16 +45,40 @@
     return v1 * v2
 def div(v1, v2):
     return v1 / v2
+def pow(v1, v2):
+    return math.pow(v1, v2)
+def mod(v1, v2):
+    return math.fmod(v1, v2)
 
 class BaseArray(Wrappable):
     def __init__(self):
         self.invalidates = []
 
     def invalidated(self):
+        if self.invalidates:
+            self._invalidated()
+
+    def _invalidated(self):
         for arr in self.invalidates:
             arr.force_if_needed()
         del self.invalidates[:]
 
+    def _unop_impl(function):
+        signature = Signature()
+        def impl(self, space):
+            new_sig = self.signature.transition(signature)
+            res = Call1(
+                function,
+                self,
+                new_sig)
+            self.invalidates.append(res)
+            return space.wrap(res)
+        return func_with_new_name(impl, "uniop_%s_impl" % function.__name__)
+
+    descr_pos = _unop_impl(pos)
+    descr_neg = _unop_impl(neg)
+    descr_abs = _unop_impl(absolute)
+
     def _binop_impl(function):
         signature = Signature()
         def impl(self, space, w_other):
@@ -76,10 +107,15 @@
     descr_sub = _binop_impl(sub)
     descr_mul = _binop_impl(mul)
     descr_div = _binop_impl(div)
+    descr_pow = _binop_impl(pow)
+    descr_mod = _binop_impl(mod)
 
     def get_concrete(self):
         raise NotImplementedError
 
+    def descr_get_shape(self, space):
+        return space.newtuple([self.descr_len(space)])
+
     def descr_len(self, space):
         return self.get_concrete().descr_len(space)
 
@@ -99,6 +135,15 @@
         self.invalidated()
         return self.get_concrete().descr_setitem(space, item, value)
 
+    def descr_mean(self, space):
+        s = 0
+        concrete = self.get_concrete()
+        size = concrete.find_size()
+        for i in xrange(size):
+            s += concrete.getitem(i)
+        return space.wrap(s / size)
+
+
 class FloatWrapper(BaseArray):
     """
     Intermediate class representing a float literal.
@@ -185,6 +230,7 @@
     Intermediate class for performing binary operations.
     """
     _immutable_fields_ = ["function", "left", "right"]
+
     def __init__(self, function, left, right, signature):
         VirtualArray.__init__(self, signature)
         self.function = function
@@ -208,9 +254,11 @@
 
 class ViewArray(BaseArray):
     """
-    Class for representing views of arrays, they will reflect changes of parrent arrays. Example: slices
+    Class for representing views of arrays; they reflect changes made to the
+    parent arrays. Example: slices
     """
     _immutable_fields_ = ["parent"]
+
     def __init__(self, parent, signature):
         BaseArray.__init__(self)
         self.signature = signature
@@ -218,7 +266,10 @@
         self.invalidates = parent.invalidates
 
     def get_concrete(self):
-        return self # in fact, ViewArray never gets "concrete" as it never stores data. This implementation is needed for BaseArray getitem/setitem to work, can be refactored.
+        # in fact, ViewArray never gets "concrete" as it never stores data.
+        # This implementation is needed for BaseArray getitem/setitem to work,
+        # can be refactored.
+        return self
 
     def eval(self, i):
         return self.parent.eval(self.calc_index(i))
@@ -308,20 +359,36 @@
         i += 1
     return space.wrap(arr)
 
- at unwrap_spec(ObjSpace, int)
+ at unwrap_spec(size=int)
 def zeros(space, size):
     return space.wrap(SingleDimArray(size))
 
+ at unwrap_spec(size=int)
+def ones(space, size):
+    arr = SingleDimArray(size)
+    for i in xrange(size):
+        arr.storage[i] = 1.0
+    return space.wrap(arr)
 
 BaseArray.typedef = TypeDef(
     'numarray',
     __new__ = interp2app(descr_new_numarray),
+
+    shape = GetSetProperty(BaseArray.descr_get_shape),
+
     __len__ = interp2app(BaseArray.descr_len),
     __getitem__ = interp2app(BaseArray.descr_getitem),
     __setitem__ = interp2app(BaseArray.descr_setitem),
 
+    __pos__ = interp2app(BaseArray.descr_pos),
+    __neg__ = interp2app(BaseArray.descr_neg),
+    __abs__ = interp2app(BaseArray.descr_abs),
     __add__ = interp2app(BaseArray.descr_add),
     __sub__ = interp2app(BaseArray.descr_sub),
     __mul__ = interp2app(BaseArray.descr_mul),
     __div__ = interp2app(BaseArray.descr_div),
+    __pow__ = interp2app(BaseArray.descr_pow),
+    __mod__ = interp2app(BaseArray.descr_mod),
+
+    mean = interp2app(BaseArray.descr_mean),
 )
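
descr_pos, descr_neg and descr_abs follow the same lazy pattern as the
existing binary operations: they return a Call1 node and register it in
invalidates instead of computing anything up front.  A stripped-down
standalone sketch of that pattern (toy classes, not the real interp-level
ones):

    def neg(v):
        return -v

    class ToyArray(object):
        def __init__(self, values):
            self.values = list(values)
        def eval(self, i):
            return self.values[i]

    class Call1(object):
        # deferred unary operation: nothing is computed until eval()
        def __init__(self, function, values):
            self.function = function
            self.values = values
        def eval(self, i):
            return self.function(self.values.eval(i))

    a = ToyArray([1.0, -2.0, 3.0])
    b = Call1(neg, a)             # what __neg__ would hand back
    assert b.eval(1) == 2.0       # forced element by element
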
diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
--- a/pypy/module/micronumpy/interp_ufuncs.py
+++ b/pypy/module/micronumpy/interp_ufuncs.py
@@ -8,22 +8,24 @@
 
 def ufunc(func):
     signature = Signature()
-    @unwrap_spec(array=BaseArray)
-    def impl(space, array):
-        w_res = Call1(func, array, array.signature.transition(signature))
-        array.invalidates.append(w_res)
-        return w_res
+    def impl(space, w_obj):
+        if isinstance(w_obj, BaseArray):
+            w_res = Call1(func, w_obj, w_obj.signature.transition(signature))
+            w_obj.invalidates.append(w_res)
+            return w_res
+        return space.wrap(func(space.float_w(w_obj)))
     return func_with_new_name(impl, "%s_dispatcher" % func.__name__)
 
 def ufunc2(func):
     signature = Signature()
-    @unwrap_spec(larray=BaseArray, rarray=BaseArray)
-    def impl(space, larray, rarray):
-        new_sig = larray.signature.transition(signature).transition(rarray.signature)
-        w_res = Call2(func, larray, rarray, new_sig)
-        larray.invalidates.append(w_res)
-        rarray.invalidates.append(w_res)
-        return w_res
+    def impl(space, w_lhs, w_rhs):
+        if isinstance(w_lhs, BaseArray) and isinstance(w_rhs, BaseArray):
+            new_sig = w_lhs.signature.transition(signature).transition(w_rhs.signature)
+            w_res = Call2(func, w_lhs, w_rhs, new_sig)
+            w_lhs.invalidates.append(w_res)
+            w_rhs.invalidates.append(w_res)
+            return w_res
+        return space.wrap(func(space.float_w(w_lhs), space.float_w(w_rhs)))
     return func_with_new_name(impl, "%s_dispatcher" % func.__name__)
 
 @ufunc
@@ -60,6 +62,10 @@
     return 1.0 / value
 
 @ufunc
+def floor(value):
+    return math.floor(value)
+
+ at ufunc
 def sign(value):
     if value == 0.0:
         return 0.0
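
The ufunc dispatchers no longer insist on BaseArray arguments: scalars are
unwrapped with float_w and computed immediately, while arrays still get the
lazy Call1/Call2 treatment.  A toy version of the single-argument dispatch
(eager on the array branch purely to keep the sketch short):

    import math

    class ToyArray(object):
        def __init__(self, values):
            self.values = list(values)

    def ufunc(func):
        def dispatcher(obj):
            if isinstance(obj, ToyArray):
                # the real code builds a lazy Call1 node here
                return ToyArray([func(v) for v in obj.values])
            return func(float(obj))     # scalar fast path
        return dispatcher

    floor = ufunc(math.floor)
    assert floor(1.7) == 1.0
    assert floor(ToyArray([1.7, -0.3])).values == [1.0, -1.0]
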
diff --git a/pypy/module/micronumpy/test/test_module.py b/pypy/module/micronumpy/test/test_module.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/micronumpy/test/test_module.py
@@ -0,0 +1,13 @@
+from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
+
+
+class AppTestNumPyModule(BaseNumpyAppTest):
+    def test_mean(self):
+        from numpy import array, mean
+        assert mean(array(range(5))) == 2.0
+        assert mean(range(5)) == 2.0
+
+    def test_average(self):
+        from numpy import array, average
+        assert average(range(10)) == 4.5
+        assert average(array(range(10))) == 4.5
\ No newline at end of file
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -28,6 +28,15 @@
         a[1] = 1.0
         assert a[1] == 1.0
 
+    def test_ones(self):
+        from numpy import ones
+        a = ones(3)
+        assert len(a) == 3
+        assert a[0] == 1
+        raises(IndexError, "a[3]")
+        a[2] = 4
+        assert a[2] == 4
+
     def test_iterator_init(self):
         from numpy import array
         a = array(range(5))
@@ -56,6 +65,15 @@
         assert len(a) == 5
         assert len(a + a) == 5
 
+    def test_shape(self):
+        from numpy import array
+        a = array(range(5))
+        assert a.shape == (5,)
+        b = a + a
+        assert b.shape == (5,)
+        c = a[:3]
+        assert c.shape == (3,)
+
     def test_add(self):
         from numpy import array
         a = array(range(5))
@@ -136,6 +154,72 @@
         for i in range(5):
             assert b[i] == i / 5.0
 
+    def test_pow(self):
+        from numpy import array
+        a = array(range(5))
+        b = a ** a
+        for i in range(5):
+            print b[i], i**i
+            assert b[i] == i**i
+
+    def test_pow_other(self):
+        from numpy import array
+        a = array(range(5))
+        b = array([2, 2, 2, 2, 2])
+        c = a ** b
+        for i in range(5):
+            assert c[i] == i ** 2
+
+    def test_pow_constant(self):
+        from numpy import array
+        a = array(range(5))
+        b = a ** 2
+        for i in range(5):
+            assert b[i] == i ** 2
+
+    def test_mod(self):
+        from numpy import array
+        a = array(range(1,6))
+        b = a % a
+        for i in range(5):
+            assert b[i] == 0
+
+    def test_mod_other(self):
+        from numpy import array
+        a = array(range(5))
+        b = array([2, 2, 2, 2, 2])
+        c = a % b
+        for i in range(5):
+            assert c[i] == i % 2
+
+    def test_mod_constant(self):
+        from numpy import array
+        a = array(range(5))
+        b = a % 2
+        for i in range(5):
+            assert b[i] == i % 2
+
+    def test_pos(self):
+        from numpy import array
+        a = array([1.,-2.,3.,-4.,-5.])
+        b = +a
+        for i in range(5):
+            assert b[i] == a[i]
+
+    def test_neg(self):
+        from numpy import array
+        a = array([1.,-2.,3.,-4.,-5.])
+        b = -a
+        for i in range(5):
+            assert b[i] == -a[i]
+
+    def test_abs(self):
+        from numpy import array
+        a = array([1.,-2.,3.,-4.,-5.])
+        b = abs(a)
+        for i in range(5):
+            assert b[i] == abs(a[i])
+
     def test_auto_force(self):
         from numpy import array
         a = array(range(5))
@@ -165,7 +249,7 @@
         assert len(s) == 4
         for i in range(4):
             assert s[i] == a[2*i+1]
-        
+
     def test_slice_update(self):
         from numpy import array
         a = array(range(5))
@@ -177,17 +261,22 @@
 
 
     def test_slice_invaidate(self):
-        # check that slice shares invalidation list with 
+        # check that slice shares invalidation list with the parent array
         from numpy import array
         a = array(range(5))
         s = a[0:2]
         b = array([10,11])
         c = s + b
-        a[0]=100
+        a[0] = 100
         assert c[0] == 10
         assert c[1] == 12
         d = s + b
-        a[1]=101
+        a[1] = 101
         assert d[0] == 110
         assert d[1] == 12
 
+    def test_mean(self):
+        from numpy import array, mean
+        a = array(range(5))
+        assert a.mean() == 2.0
+        assert a[:4].mean() == 1.5
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -3,6 +3,13 @@
 
 
 class AppTestUfuncs(BaseNumpyAppTest):
+    def test_single_item(self):
+        from numpy import negative, sign, minimum
+
+        assert negative(5.0) == -5.0
+        assert sign(-0.0) == 0.0
+        assert minimum(2.0, 3.0) == 2.0
+
     def test_negative(self):
         from numpy import array, negative
 
@@ -60,6 +67,15 @@
         for i in range(4):
             assert b[i] == reference[i]
 
+    def test_floor(self):
+        from numpy import array, floor
+
+        reference = [-2.0, -1.0, 0.0, 1.0, 1.0]
+        a = array([-1.4, -1.0, 0.0, 1.0, 1.4])
+        b = floor(a)
+        for i in range(5):
+            assert b[i] == reference[i]
+
     def test_copysign(self):
         from numpy import array, copysign
 
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -1,7 +1,7 @@
 from pypy.jit.metainterp.test.support import LLJitMixin
 from pypy.rpython.test.test_llinterp import interpret
 from pypy.module.micronumpy.interp_numarray import (SingleDimArray, Signature,
-    FloatWrapper, Call1, Call2, SingleDimSlice, add, mul)
+    FloatWrapper, Call2, SingleDimSlice, add, mul, neg, Call1)
 from pypy.module.micronumpy.interp_ufuncs import negative
 from pypy.module.micronumpy.compile import numpy_compile
 
@@ -13,8 +13,6 @@
         cls.space = FakeSpace()
 
     def test_add(self):
-        space = self.space
-
         def f(i):
             ar = SingleDimArray(i)
             v = Call2(add, ar, ar, Signature())
@@ -27,8 +25,6 @@
         assert result == f(5)
 
     def test_floatadd(self):
-        space = self.space
-
         def f(i):
             ar = SingleDimArray(i)
             v = Call2(add, ar, FloatWrapper(4.5), Signature())
@@ -40,11 +36,24 @@
                           "int_lt": 1, "guard_true": 1, "jump": 1})
         assert result == f(5)
 
-    def test_already_forecd(self):
+    def test_neg(self):
         space = self.space
 
         def f(i):
             ar = SingleDimArray(i)
+            v = Call1(neg, ar, Signature())
+            return v.get_concrete().storage[3]
+
+        result = self.meta_interp(f, [5], listops=True, backendopt=True)
+        self.check_loops({"getarrayitem_raw": 1, "float_neg": 1,
+                          "setarrayitem_raw": 1, "int_add": 1,
+                          "int_lt": 1, "guard_true": 1, "jump": 1})
+
+        assert result == f(5)
+
+    def test_already_forced(self):
+        def f(i):
+            ar = SingleDimArray(i)
             v1 = Call2(add, ar, FloatWrapper(4.5), Signature())
             v2 = Call2(mul, v1, FloatWrapper(4.5), Signature())
             v1.force_if_needed()
@@ -95,8 +104,6 @@
         self.check_loop_count(3)
 
     def test_slice(self):
-        space = self.space
-
         def f(i):
             step = 3
             ar = SingleDimArray(step*i)
@@ -111,8 +118,6 @@
         assert result == f(5)
 
     def test_slice2(self):
-        space = self.space
-
         def f(i):
             step1 = 2
             step2 = 3
diff --git a/pypy/module/operator/app_operator.py b/pypy/module/operator/app_operator.py
--- a/pypy/module/operator/app_operator.py
+++ b/pypy/module/operator/app_operator.py
@@ -4,6 +4,7 @@
 This module exports a set of operators as functions. E.g. operator.add(x,y) is
 equivalent to x+y.
 '''
+from __pypy__ import builtinify
 
 def countOf(a,b): 
     'countOf(a, b) -- Return the number of times b occurs in a.'
@@ -66,50 +67,56 @@
     a[b:c] = d 
 __setslice__ = setslice
 
-class attrgetter(object):
 
-    def __init__(self, attr, *attrs):
-        self.attrs = (attr,) + attrs
+def attrgetter(attr, *attrs):
+    if attrs:
+        getters = [single_attr_getter(a) for a in (attr,) + attrs]
+        def getter(obj):
+            return tuple([getter(obj) for getter in getters])
+    else:
+        getter = single_attr_getter(attr)
+    return builtinify(getter)
 
-    def _resolve_attr(self, obj, attr):
-        last = 0
-        while True:
-            try:
-                dot = attr.find(".", last)
-            except AttributeError:
-                raise TypeError
-            if dot > 0:
-                obj = getattr(obj, attr[last:dot])
-                last = dot + 1
-            else:
-                return getattr(obj, attr[last:])
+def single_attr_getter(attr):
+    if not isinstance(attr, str):
+        if not isinstance(attr, unicode):
+            def _raise_typeerror(obj):
+                raise TypeError("argument must be a string, not %r" %
+                                (type(attr).__name__,))
+            return _raise_typeerror
+        attr = attr.encode('ascii')
+    #
+    def make_getter(name, prevfn=None):
+        if prevfn is None:
+            def getter(obj):
+                return getattr(obj, name)
+        else:
+            def getter(obj):
+                return getattr(prevfn(obj), name)
+        return getter
+    #
+    last = 0
+    getter = None
+    while True:
+        dot = attr.find(".", last)
+        if dot < 0: break
+        getter = make_getter(attr[last:dot], getter)
+        last = dot + 1
+    return make_getter(attr[last:], getter)
 
-    def __call__(self, obj):
-        if len(self.attrs) == 1:
-            return self._resolve_attr(obj, self.attrs[0])
-        return tuple(self._resolve_attr(obj, attr) for attr in self.attrs)
 
-class itemgetter(object):
+def itemgetter(item, *items):
+    if items:
+        list_of_indices = [item] + list(items)
+        def getter(obj):
+            return tuple([obj[i] for i in list_of_indices])
+    else:
+        def getter(obj):
+            return obj[item]
+    return builtinify(getter)
 
-    def __init__(self, item, *args):
-        self.items = args
-        self.item = item
 
-    def __call__(self, obj):
-        result = obj[self.item]
-
-        if self.items:
-            list = [result] + [obj[item] for item in self.items]
-            return tuple(list)
-
-        return result
-
-class methodcaller(object):
-
-    def __init__(self, method_name, *args, **kwargs):
-        self.method_name = method_name
-        self.args = args
-        self.kwargs = kwargs
-
-    def __call__(self, obj):
-        return getattr(obj, self.method_name)(*self.args, **self.kwargs)
+def methodcaller(method_name, *args, **kwargs):
+    def call(obj):
+        return getattr(obj, method_name)(*args, **kwargs)
+    return builtinify(call)
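
For reference, a minimal app-level sketch of the behaviour the rewritten
attrgetter/itemgetter/methodcaller helpers are meant to preserve (dotted
attribute paths, several attributes or items, forwarding of call arguments).
The Point/Label classes are invented for illustration only; the same
assertions should also hold against CPython's stdlib operator module:

    import operator

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    class Label(object):
        def __init__(self, name, pos):
            self.name, self.pos = name, pos
        def describe(self, prefix=''):
            return prefix + self.name

    lbl = Label('origin', Point(0, 1))

    # single and dotted attribute paths
    assert operator.attrgetter('name')(lbl) == 'origin'
    assert operator.attrgetter('pos.y')(lbl) == 1
    # several attributes -> tuple of results
    assert operator.attrgetter('name', 'pos.x')(lbl) == ('origin', 0)
    # itemgetter with one index and with several
    assert operator.itemgetter(1)('abc') == 'b'
    assert operator.itemgetter(2, 0)('abc') == ('c', 'a')
    # methodcaller forwards positional and keyword arguments
    assert operator.methodcaller('describe', prefix='at ')(lbl) == 'at origin'
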
diff --git a/pypy/module/oracle/interp_variable.py b/pypy/module/oracle/interp_variable.py
--- a/pypy/module/oracle/interp_variable.py
+++ b/pypy/module/oracle/interp_variable.py
@@ -1484,7 +1484,7 @@
     raise OperationError(
         moduledict.w_NotSupportedError,
         space.wrap("Variable_TypeByValue(): unhandled data type %s" %
-                   (space.type(w_value).getname(space, '?'),)))
+                   (space.type(w_value).getname(space),)))
 
 def newByInputTypeHandler(space, cursor, w_inputTypeHandler, w_value, numElements):
     w_var = space.call(w_inputTypeHandler,
diff --git a/pypy/module/pyexpat/__init__.py b/pypy/module/pyexpat/__init__.py
--- a/pypy/module/pyexpat/__init__.py
+++ b/pypy/module/pyexpat/__init__.py
@@ -2,6 +2,22 @@
 
 from pypy.interpreter.mixedmodule import MixedModule
 
+class ErrorsModule(MixedModule):
+    "Definition of pyexpat.errors module."
+
+    appleveldefs = {
+        }
+
+    interpleveldefs = {
+        }
+
+    def setup_after_space_initialization(self):
+        from pypy.module.pyexpat import interp_pyexpat
+        for name in interp_pyexpat.xml_error_list:
+            self.space.setattr(self, self.space.wrap(name),
+                    interp_pyexpat.ErrorString(self.space,
+                    getattr(interp_pyexpat, name)))
+
 class Module(MixedModule):
     "Python wrapper for Expat parser."
 
@@ -21,6 +37,10 @@
         'version_info':  'interp_pyexpat.get_expat_version_info(space)',
         }
 
+    submodules = {
+        'errors': ErrorsModule,
+    }
+
     for name in ['XML_PARAM_ENTITY_PARSING_NEVER',
                  'XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE',
                  'XML_PARAM_ENTITY_PARSING_ALWAYS']:
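
As a short usage sketch (assuming a pypy-c built with the pyexpat module
enabled), the new errors submodule exposes each XML_ERROR_* constant as the
message string obtained from Expat's XML_ErrorString, so the exact wording
comes from the linked Expat library:

    import pyexpat
    from pyexpat import errors

    assert isinstance(errors.XML_ERROR_SYNTAX, str)
    print errors.XML_ERROR_SYNTAX        # e.g. 'syntax error'
    print errors.XML_ERROR_NO_ELEMENTS   # e.g. 'no element found'
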
diff --git a/pypy/module/pyexpat/interp_pyexpat.py b/pypy/module/pyexpat/interp_pyexpat.py
--- a/pypy/module/pyexpat/interp_pyexpat.py
+++ b/pypy/module/pyexpat/interp_pyexpat.py
@@ -31,6 +31,48 @@
 XML_Content_Ptr = lltype.Ptr(lltype.ForwardReference())
 XML_Parser = rffi.COpaquePtr(typedef='XML_Parser')
 
+xml_error_list = [
+    "XML_ERROR_NO_MEMORY",
+    "XML_ERROR_SYNTAX",
+    "XML_ERROR_NO_ELEMENTS",
+    "XML_ERROR_INVALID_TOKEN",
+    "XML_ERROR_UNCLOSED_TOKEN",
+    "XML_ERROR_PARTIAL_CHAR",
+    "XML_ERROR_TAG_MISMATCH",
+    "XML_ERROR_DUPLICATE_ATTRIBUTE",
+    "XML_ERROR_JUNK_AFTER_DOC_ELEMENT",
+    "XML_ERROR_PARAM_ENTITY_REF",
+    "XML_ERROR_UNDEFINED_ENTITY",
+    "XML_ERROR_RECURSIVE_ENTITY_REF",
+    "XML_ERROR_ASYNC_ENTITY",
+    "XML_ERROR_BAD_CHAR_REF",
+    "XML_ERROR_BINARY_ENTITY_REF",
+    "XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF",
+    "XML_ERROR_MISPLACED_XML_PI",
+    "XML_ERROR_UNKNOWN_ENCODING",
+    "XML_ERROR_INCORRECT_ENCODING",
+    "XML_ERROR_UNCLOSED_CDATA_SECTION",
+    "XML_ERROR_EXTERNAL_ENTITY_HANDLING",
+    "XML_ERROR_NOT_STANDALONE",
+    "XML_ERROR_UNEXPECTED_STATE",
+    "XML_ERROR_ENTITY_DECLARED_IN_PE",
+    "XML_ERROR_FEATURE_REQUIRES_XML_DTD",
+    "XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING",
+    # Added in Expat 1.95.7.
+    "XML_ERROR_UNBOUND_PREFIX",
+    # Added in Expat 1.95.8.
+    "XML_ERROR_UNDECLARING_PREFIX",
+    "XML_ERROR_INCOMPLETE_PE",
+    "XML_ERROR_XML_DECL",
+    "XML_ERROR_TEXT_DECL",
+    "XML_ERROR_PUBLICID",
+    "XML_ERROR_SUSPENDED",
+    "XML_ERROR_NOT_SUSPENDED",
+    "XML_ERROR_ABORTED",
+    "XML_ERROR_FINISHED",
+    "XML_ERROR_SUSPEND_PE",
+    ]
+
 class CConfigure:
     _compilation_info_ = eci
     XML_Content = rffi_platform.Struct('XML_Content', [
@@ -56,6 +98,9 @@
     XML_FALSE = rffi_platform.ConstantInteger('XML_FALSE')
     XML_TRUE = rffi_platform.ConstantInteger('XML_TRUE')
 
+    for name in xml_error_list:
+        locals()[name] = rffi_platform.ConstantInteger(name)
+
 for k, v in rffi_platform.configure(CConfigure).items():
     globals()[k] = v
 
@@ -298,7 +343,8 @@
 XML_GetErrorCode = expat_external(
     'XML_GetErrorCode', [XML_Parser], rffi.INT)
 XML_ErrorString = expat_external(
-    'XML_ErrorString', [rffi.INT], rffi.CCHARP)
+    'XML_ErrorString', [rffi.INT],
+    rffi.CCHARP)
 XML_GetCurrentLineNumber = expat_external(
     'XML_GetCurrentLineNumber', [XML_Parser], rffi.INT)
 XML_GetErrorLineNumber = XML_GetCurrentLineNumber
@@ -691,7 +737,7 @@
     elif space.is_true(space.isinstance(w_encoding, space.w_str)):
         encoding = space.str_w(w_encoding)
     else:
-        type_name = space.type(w_encoding).getname(space, '?')
+        type_name = space.type(w_encoding).getname(space)
         raise OperationError(
             space.w_TypeError,
             space.wrap('ParserCreate() argument 1 must be string or None,'
@@ -711,7 +757,7 @@
                 space.wrap('namespace_separator must be at most one character,'
                            ' omitted, or None'))
     else:
-        type_name = space.type(w_namespace_separator).getname(space, '?')
+        type_name = space.type(w_namespace_separator).getname(space)
         raise OperationError(
             space.w_TypeError,
             space.wrap('ParserCreate() argument 2 must be string or None,'
diff --git a/pypy/module/pyexpat/test/test_parser.py b/pypy/module/pyexpat/test/test_parser.py
--- a/pypy/module/pyexpat/test/test_parser.py
+++ b/pypy/module/pyexpat/test/test_parser.py
@@ -38,7 +38,7 @@
         parser = pyexpat.ParserCreate()
         raises(pyexpat.ExpatError, "parser.Parse(xml, True)")
 
-    def test_encoding(self):
+    def test_encoding_argument(self):
         import pyexpat
         for encoding_arg in (None, 'utf-8', 'iso-8859-1'):
             for namespace_arg in (None, '{'):
@@ -68,7 +68,7 @@
         assert p.buffer_size == 150
         raises(TypeError, setattr, p, 'buffer_size', sys.maxint + 1)
 
-    def test_encoding(self):
+    def test_encoding_xml(self):
         # use one of the few encodings built-in in expat
         xml = "<?xml version='1.0' encoding='iso-8859-1'?><s>caf\xe9</s>"
         import pyexpat
@@ -120,3 +120,14 @@
             return True
         p.ExternalEntityRefHandler = handler
         p.Parse(xml)
+
+    def test_errors(self):
+        import types
+        import pyexpat
+        assert isinstance(pyexpat.errors, types.ModuleType)
+        # check a few random errors
+        assert pyexpat.errors.XML_ERROR_SYNTAX == 'syntax error'
+        assert (pyexpat.errors.XML_ERROR_INCORRECT_ENCODING ==
+               'encoding specified in XML declaration is incorrect')
+        assert (pyexpat.errors.XML_ERROR_XML_DECL ==
+                'XML declaration not well-formed')
diff --git a/pypy/module/pypyjit/__init__.py b/pypy/module/pypyjit/__init__.py
--- a/pypy/module/pypyjit/__init__.py
+++ b/pypy/module/pypyjit/__init__.py
@@ -8,6 +8,7 @@
         'set_param':    'interp_jit.set_param',
         'residual_call': 'interp_jit.residual_call',
         'set_compile_hook': 'interp_jit.set_compile_hook',
+        'DebugMergePoint': 'interp_resop.W_DebugMergePoint',
     }
 
     def setup_after_space_initialization(self):
diff --git a/pypy/module/pypyjit/interp_jit.py b/pypy/module/pypyjit/interp_jit.py
--- a/pypy/module/pypyjit/interp_jit.py
+++ b/pypy/module/pypyjit/interp_jit.py
@@ -17,10 +17,11 @@
 from opcode import opmap
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.nonconst import NonConstant
+from pypy.jit.metainterp.resoperation import rop
+from pypy.module.pypyjit.interp_resop import debug_merge_point_from_boxes
 
 PyFrame._virtualizable2_ = ['last_instr', 'pycode',
-                            'valuestackdepth', 'valuestack_w[*]',
-                            'fastlocals_w[*]',
+                            'valuestackdepth', 'locals_stack_w[*]',
                             'last_exception',
                             'lastblock',
                             'is_being_profiled',
@@ -47,6 +48,16 @@
     return (bytecode.co_flags & CO_GENERATOR) != 0
 
 
+def wrap_oplist(space, logops, operations):
+    list_w = []
+    for op in operations:
+        if op.getopnum() == rop.DEBUG_MERGE_POINT:
+            list_w.append(space.wrap(debug_merge_point_from_boxes(
+                op.getarglist())))
+        else:
+            list_w.append(space.wrap(logops.repr_of_resop(op)))
+    return list_w
+
 class PyPyJitDriver(JitDriver):
     reds = ['frame', 'ec']
     greens = ['next_instr', 'is_being_profiled', 'pycode']
@@ -62,8 +73,7 @@
             return
         if space.is_true(cache.w_compile_hook):
             logops = logger._make_log_operations()
-            list_w = [space.wrap(logops.repr_of_resop(op))
-                      for op in operations]
+            list_w = wrap_oplist(space, logops, operations)
             pycode = cast_base_ptr_to_instance(PyCode, ll_pycode)
             cache.in_recursion = True
             try:
@@ -85,8 +95,7 @@
             return
         if space.is_true(cache.w_compile_hook):
             logops = logger._make_log_operations()
-            list_w = [space.wrap(logops.repr_of_resop(op))
-                      for op in operations]
+            list_w = wrap_oplist(space, logops, operations)
             cache.in_recursion = True
             try:
                 space.call_function(cache.w_compile_hook,
@@ -167,6 +176,8 @@
     '''Configure the tunable JIT parameters.
         * set_param(name=value, ...)            # as keyword arguments
         * set_param("name=value,name=value")    # as a user-supplied string
+        * set_param("off")                      # disable the jit
+        * set_param("default")                  # restore all defaults
     '''
     # XXXXXXXXX
     args_w, kwds_w = __args__.unpack()
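
A tiny sketch of the two new string forms (assuming the code runs on a
pypy-c with the JIT, where the pypyjit module is importable):

    import pypyjit

    pypyjit.set_param('off')           # turn the JIT off entirely
    try:
        pass                           # run code that should not be traced
    finally:
        pypyjit.set_param('default')   # restore all tunable parameters
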
diff --git a/pypy/module/pypyjit/interp_resop.py b/pypy/module/pypyjit/interp_resop.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/pypyjit/interp_resop.py
@@ -0,0 +1,41 @@
+
+from pypy.interpreter.typedef import TypeDef, interp_attrproperty
+from pypy.interpreter.baseobjspace import Wrappable, ObjSpace, W_Root
+from pypy.interpreter.gateway import unwrap_spec, interp2app
+from pypy.interpreter.pycode import PyCode
+from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.annlowlevel import cast_base_ptr_to_instance
+from pypy.rpython.lltypesystem.rclass import OBJECT
+
+class W_DebugMergePoint(Wrappable):
+    """ A class representing debug_merge_point JIT operation
+    """
+
+    def __init__(self, mp_no, offset, pycode):
+        self.mp_no = mp_no
+        self.offset = offset
+        self.pycode = pycode
+
+    def descr_repr(self, space):
+        return space.wrap('DebugMergePoint()')
+
+@unwrap_spec(mp_no=int, offset=int, pycode=PyCode)
+def new_debug_merge_point(space, w_tp, mp_no, offset, pycode):
+    return W_DebugMergePoint(mp_no, offset, pycode)
+
+def debug_merge_point_from_boxes(boxes):
+    mp_no = boxes[0].getint()
+    offset = boxes[2].getint()
+    llcode = lltype.cast_opaque_ptr(lltype.Ptr(OBJECT),
+                                    boxes[4].getref_base())
+    pycode = cast_base_ptr_to_instance(PyCode, llcode)
+    assert pycode is not None
+    return W_DebugMergePoint(mp_no, offset, pycode)
+
+W_DebugMergePoint.typedef = TypeDef(
+    'DebugMergePoint',
+    __new__ = interp2app(new_debug_merge_point),
+    __doc__ = W_DebugMergePoint.__doc__,
+    __repr__ = interp2app(W_DebugMergePoint.descr_repr),
+    code = interp_attrproperty('pycode', W_DebugMergePoint),
+)
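
A hedged app-level sketch of what a compile hook sees after this change: the
wrapped operation list mixes plain resop strings with
pypyjit.DebugMergePoint instances carrying the originating code object.  The
exact hook signature and argument positions follow the tests below, so the
sketch only inspects its arguments generically:

    import pypyjit

    def hook(*args):
        for arg in args:
            if not isinstance(arg, list):
                continue
            for op in arg:
                if isinstance(op, pypyjit.DebugMergePoint):
                    print 'merge point in', op.code.co_name
                else:
                    print op    # repr string of the resoperation

    pypyjit.set_compile_hook(hook)
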
diff --git a/pypy/module/pypyjit/policy.py b/pypy/module/pypyjit/policy.py
--- a/pypy/module/pypyjit/policy.py
+++ b/pypy/module/pypyjit/policy.py
@@ -14,7 +14,8 @@
             modname, _ = modname.split('.', 1)
         if modname in ['pypyjit', 'signal', 'micronumpy', 'math', 'exceptions',
                        'imp', 'sys', 'array', '_ffi', 'itertools', 'operator',
-                       'posix', '_socket', '_sre', '_lsprof', '_weakref']:
+                       'posix', '_socket', '_sre', '_lsprof', '_weakref',
+                       '__pypy__', 'cStringIO']:
             return True
         return False
 
diff --git a/pypy/module/pypyjit/test/test_jit_hook.py b/pypy/module/pypyjit/test/test_jit_hook.py
--- a/pypy/module/pypyjit/test/test_jit_hook.py
+++ b/pypy/module/pypyjit/test/test_jit_hook.py
@@ -8,12 +8,13 @@
 from pypy.jit.metainterp.logger import Logger
 from pypy.rpython.annlowlevel import (cast_instance_to_base_ptr,
                                       cast_base_ptr_to_instance)
+from pypy.rpython.lltypesystem import lltype, llmemory
 from pypy.module.pypyjit.interp_jit import pypyjitdriver
 from pypy.jit.tool.oparser import parse
 from pypy.jit.metainterp.typesystem import llhelper
 
 class MockSD(object):
-    class cpu:
+    class cpu(object):
         ts = llhelper
 
 class AppTestJitHook(object):
@@ -27,14 +28,17 @@
             pass
         return f
         """)
+        cls.w_f = w_f
         ll_code = cast_instance_to_base_ptr(w_f.code)
+        code_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, ll_code)
         logger = Logger(MockSD())
 
         oplist = parse("""
         [i1, i2]
         i3 = int_add(i1, i2)
+        debug_merge_point(0, 0, 0, 0, ConstPtr(ptr0))
         guard_true(i3) []
-        """).operations
+        """, namespace={'ptr0': code_gcref}).operations
 
         def interp_on_compile():
             pypyjitdriver.on_compile(logger, LoopToken(), oplist, 'loop',
@@ -63,7 +67,7 @@
         assert all[0][0][0].co_name == 'f'
         assert all[0][0][1] == 0
         assert all[0][0][2] == False
-        assert len(all[0][1]) == 2
+        assert len(all[0][1]) == 3
         assert 'int_add' in all[0][1][0]
         self.on_compile_bridge()
         assert len(all) == 2
@@ -103,3 +107,20 @@
         self.on_compile_bridge()
         assert len(l) == 2 # and did not crash
         
+    def test_on_compile_types(self):
+        import pypyjit
+        l = []
+
+        def hook(*args):
+            l.append(args)
+
+        pypyjit.set_compile_hook(hook)
+        self.on_compile()
+        dmp = l[0][3][1]
+        assert isinstance(dmp, pypyjit.DebugMergePoint)
+        assert dmp.code is self.f.func_code
+
+    def test_creation(self):
+        import pypyjit
+        dmp = pypyjit.DebugMergePoint(0, 0, self.f.func_code)
+        assert dmp.code is self.f.func_code
diff --git a/pypy/module/pypyjit/test/test_jit_setup.py b/pypy/module/pypyjit/test/test_jit_setup.py
--- a/pypy/module/pypyjit/test/test_jit_setup.py
+++ b/pypy/module/pypyjit/test/test_jit_setup.py
@@ -9,21 +9,42 @@
         # this just checks that the module is setting up things correctly, and
         # the resulting code makes sense on top of CPython.
         import pypyjit
-        pypyjit.set_param(threshold=5, inlining=1)
-        pypyjit.set_param("trace_eagerness=3,inlining=0")
+        try:
+            pypyjit.set_param(threshold=5, inlining=1)
+            pypyjit.set_param("trace_eagerness=3,inlining=0")
 
-        def f(x, y):
-            return x*y+1
+            def f(x, y):
+                return x*y+1
 
-        assert f(6, 7) == 43
+            assert f(6, 7) == 43
 
-        def gen(x):
-            i = 0
-            while i < x:
-                yield i*i
-                i += 1
+            def gen(x):
+                i = 0
+                while i < x:
+                    yield i*i
+                    i += 1
 
-        assert list(gen(3)) == [0, 1, 4]
+            assert list(gen(3)) == [0, 1, 4]
+        finally:
+            pypyjit.set_param('default')
+
+    def test_no_jit(self):
+        import pypyjit
+        was_called = []
+        def should_not_be_called(*args, **kwds):
+            was_called.append((args, kwds))
+        try:
+            pypyjit.set_param('off')
+            pypyjit.set_compile_hook(should_not_be_called)
+            def f():
+                pass
+            for i in range(2500):
+                f()
+            assert not was_called
+        finally:
+            pypyjit.set_compile_hook(None)
+            pypyjit.set_param('default')
+
 
 def test_interface_residual_call():
     space = gettestobjspace(usemodules=['pypyjit'])
diff --git a/pypy/module/pypyjit/test_pypy_c/model.py b/pypy/module/pypyjit/test_pypy_c/model.py
--- a/pypy/module/pypyjit/test_pypy_c/model.py
+++ b/pypy/module/pypyjit/test_pypy_c/model.py
@@ -2,6 +2,7 @@
 import sys
 import re
 import os.path
+from _pytest.assertion import newinterpret
 from pypy.tool.jitlogparser.parser import SimpleParser, Function, TraceForOpcode
 from pypy.tool.jitlogparser.storage import LoopStorage
 
@@ -194,7 +195,7 @@
             # transform self._assert(x, 'foo') into assert x, 'foo'
             source = source.replace('self._assert(', 'assert ')
             source = source[:-1] # remove the trailing ')'
-            self.msg = py.code._reinterpret(source, f, should_fail=True)
+            self.msg = newinterpret.interpret(source, f, should_fail=True)
         else:
             self.msg = "<could not determine information>"
 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
--- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
@@ -58,6 +58,8 @@
         stdout, stderr = pipe.communicate()
         if stderr.startswith('SKIP:'):
             py.test.skip(stderr)
+        if stderr.startswith('debug_alloc.h:'):   # lldebug builds
+            stderr = ''
         assert not stderr
         #
         # parse the JIT log
diff --git a/pypy/module/pypyjit/test_pypy_c/test_array.py b/pypy/module/pypyjit/test_pypy_c/test_array.py
--- a/pypy/module/pypyjit/test_pypy_c/test_array.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_array.py
@@ -46,7 +46,7 @@
             guard_no_overflow(descr=<Guard4>)
             i18 = int_add(i7, 1)
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, p6, i18, i16, i9, i10, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, p5, i18, i16, p8, i9, i10, descr=<Loop0>)
         """)
 
     def test_array_intimg(self):
@@ -83,7 +83,7 @@
             setarrayitem_raw(i11, i8, _, descr=<.*ArrayNoLengthDescr>)
             i28 = int_add(i8, 1)
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, p6, p7, i28, i15, i10, i11, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, p5, p6, i28, i15, p9, i10, i11, descr=<Loop0>)
         """)
 
 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py
--- a/pypy/module/pypyjit/test_pypy_c/test_call.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_call.py
@@ -11,21 +11,14 @@
                 return 1 + rec(n-1)
             #
             # this loop is traced and then aborted, because the trace is too
-            # long. But then "rec" is marked as "don't inline"
-            i = 0
-            j = 0
-            while i < 20:
-                i += 1
-                j += rec(100)
-            #
-            # next time we try to trace "rec", instead of inlining we compile
-            # it separately and generate a call_assembler
+            # long. But then "rec" is marked as "don't inline". Since the
+            # function was already traced from its start (because of the
+            # number of calls), we can now call it via call_assembler
             i = 0
             j = 0
             while i < 20:
                 i += 1
                 j += rec(100) # ID: call_rec
-                a = 0
             return j
         #
         log = self.run(fn, [], threshold=18)
@@ -38,6 +31,20 @@
             ...
         """)
 
+    def test_fib(self):
+        def fib(n):
+            if n == 0 or n == 1:
+                return 1
+            return fib(n - 1) + fib(n - 2) # ID: call_rec
+
+        log = self.run(fib, [7], function_threshold=15)
+        loop, = log.loops_by_filename(self.filepath, is_entry_bridge='*')
+        #assert loop.match_by_id('call_rec', '''
+        #...
+        #p1 = call_assembler(..., descr=...)
+        #...
+        #''')
+
     def test_simple_call(self):
         src = """
             OFFSET = 0
@@ -59,13 +66,13 @@
         ops = entry_bridge.ops_by_id('cond', opcode='LOAD_GLOBAL')
         assert log.opnames(ops) == ["guard_value",
                                     "getfield_gc", "guard_value",
-                                    "getfield_gc", "guard_isnull",
+                                    "getfield_gc", "guard_value",
                                     "getfield_gc", "guard_nonnull_class"]
         # LOAD_GLOBAL of OFFSET but in different function partially folded
         # away
         # XXX could be improved
         ops = entry_bridge.ops_by_id('add', opcode='LOAD_GLOBAL')
-        assert log.opnames(ops) == ["guard_value", "getfield_gc", "guard_isnull"]
+        assert log.opnames(ops) == ["guard_value", "getfield_gc", "guard_value"]
         #
         # two LOAD_GLOBAL of f, the second is folded away
         ops = entry_bridge.ops_by_id('call', opcode='LOAD_GLOBAL')
@@ -180,7 +187,7 @@
             guard_no_overflow(descr=<Guard5>)
             i18 = force_token()
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, i8, p7, i17, i9, p10, p11, p12, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, i8, p7, i17, p8, i9, p10, p11, p12, descr=<Loop0>)
         """)
 
     def test_default_and_kw(self):
@@ -202,6 +209,26 @@
             i16 = force_token()
         """)
 
+    def test_kwargs_empty(self):
+        def main(x):
+            def g(**args):
+                return len(args) + 1
+            #
+            s = 0
+            d = {}
+            i = 0
+            while i < x:
+                s += g(**d)       # ID: call
+                i += 1
+            return s
+        #
+        log = self.run(main, [1000])
+        assert log.result == 1000
+        loop, = log.loops_by_id('call')
+        ops = log.opnames(loop.ops_by_id('call'))
+        guards = [op for op in ops if op.startswith('guard')]
+        assert guards == ["guard_no_overflow"]
+
     def test_kwargs(self):
         # this is not a very precise test, could be improved
         def main(x):
@@ -209,20 +236,24 @@
                 return len(args)
             #
             s = 0
-            d = {}
-            for i in range(x):
+            d = {"a": 1}
+            i = 0
+            while i < x:
                 s += g(**d)       # ID: call
                 d[str(i)] = i
                 if i % 100 == 99:
-                    d = {}
+                    d = {"a": 1}
+                i += 1
             return s
         #
         log = self.run(main, [1000])
-        assert log.result == 49500
+        assert log.result == 50500
         loop, = log.loops_by_id('call')
+        print loop.ops_by_id('call')
         ops = log.opnames(loop.ops_by_id('call'))
         guards = [ops for ops in ops if ops.startswith('guard')]
-        assert len(guards) <= 5
+        print guards
+        assert len(guards) <= 20
 
     def test_stararg_virtual(self):
         def main(x):
@@ -270,12 +301,12 @@
             i21 = force_token()
             setfield_gc(p4, i20, descr=<.* .*W_AbstractSeqIterObject.inst_index .*>)
             i23 = int_add_ovf(i9, 3)
-            guard_no_overflow(descr=<Guard37>)
+            guard_no_overflow(descr=...)
         """)
         assert loop1.match_by_id('h2', """
             i25 = force_token()
             i27 = int_add_ovf(i23, 2)
-            guard_no_overflow(descr=<Guard38>)
+            guard_no_overflow(descr=...)
         """)
 
     def test_stararg(self):
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py
@@ -0,0 +1,25 @@
+
+import py, sys
+from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
+
+
+class TestDicts(BaseTestPyPyC):
+    def test_strdict(self):
+        def fn(n):
+            import sys
+            d = {}
+            class A(object):
+                pass
+            a = A()
+            a.x = 1
+            for s in sys.modules.keys() * 1000:
+                inc = a.x # ID: look
+                d[s] = d.get(s, 0) + inc
+            return sum(d.values())
+        #
+        log = self.run(fn, [1000])
+        assert log.result % 1000 == 0
+        loop, = log.loops_by_filename(self.filepath)
+        ops = loop.ops_by_id('look')
+        assert log.opnames(ops) == ['setfield_gc',
+                                    'guard_not_invalidated']
diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
--- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
@@ -115,7 +115,6 @@
         # ----------------------
         loop, = log.loops_by_filename(self.filepath)
         assert loop.match("""
-            i8 = getfield_gc_pure(p5, descr=<SignedFieldDescr .*W_IntObject.inst_intval.*>)
             i9 = int_lt(i8, i7)
             guard_true(i9, descr=.*)
             guard_not_invalidated(descr=.*)
@@ -125,7 +124,7 @@
             p20 = new_with_vtable(ConstClass(W_IntObject))
             setfield_gc(p20, i11, descr=<SignedFieldDescr.*W_IntObject.inst_intval .*>)
             setfield_gc(ConstPtr(ptr21), p20, descr=<GcPtrFieldDescr .*TypeCell.inst_w_value .*>)
-            jump(p0, p1, p2, p3, p4, p20, p6, i7, descr=<Loop.>)
+            jump(p0, p1, p2, p3, p4, p20, p6, i11, i7, descr=<Loop.>)
         """)
 
     def test_oldstyle_newstyle_mix(self):
diff --git a/pypy/module/pypyjit/test_pypy_c/test_intbound.py b/pypy/module/pypyjit/test_pypy_c/test_intbound.py
--- a/pypy/module/pypyjit/test_pypy_c/test_intbound.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_intbound.py
@@ -97,7 +97,7 @@
             guard_no_overflow(descr=...)
             i17 = int_add(i8, 1)
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, i14, i12, i17, i9, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, i14, i12, i17, p8, i9, descr=<Loop0>)
         """)
 
     def test_intbound_sub_lt(self):
@@ -149,7 +149,7 @@
             guard_no_overflow(descr=...)
             i19 = int_add(i8, 1)
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, i16, i14, i19, i9, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, i16, i14, i19, p8, i9, descr=<Loop0>)
         """)
 
     def test_intbound_addmul_ge(self):
@@ -177,7 +177,7 @@
             guard_no_overflow(descr=...)
             i21 = int_add(i8, 1)
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, i18, i14, i21, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, i18, i14, i21, p8, descr=<Loop0>)
         """)
 
     def test_intbound_eq(self):
@@ -209,7 +209,7 @@
             guard_no_overflow(descr=...)
             i16 = int_add(i8, 1)
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, p6, i14, i16, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, p6, i14, i16, p8, descr=<Loop0>)
         """)
 
     def test_intbound_mul(self):
diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py
--- a/pypy/module/pypyjit/test_pypy_c/test_misc.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py
@@ -167,7 +167,7 @@
             guard_false(i16, descr=<Guard3>)
             p17 = getarrayitem_gc(p15, i12, descr=<GcPtrArrayDescr>)
             i19 = int_add(i12, 1)
-            setfield_gc(p4, i19, descr=<SignedFieldDescr .*W_AbstractSeqIterObject.inst_index .*>)
+            setfield_gc(p9, i19, descr=<SignedFieldDescr .*W_AbstractSeqIterObject.inst_index .*>)
             guard_nonnull_class(p17, 146982464, descr=<Guard4>)
             i21 = getfield_gc(p17, descr=<SignedFieldDescr .*W_ArrayTypei.inst_len .*>)
             i23 = int_lt(0, i21)
@@ -179,7 +179,7 @@
             i28 = int_add_ovf(i10, i25)
             guard_no_overflow(descr=<Guard7>)
             --TICK--
-            jump(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, i28, i25, i19, i13, p14, p15, descr=<Loop0>)
+            jump(p0, p1, p2, p3, p4, p5, p6, i28, i25, p9, p10, p11, i19, i13, p14, p15, descr=<Loop0>)
         """)
 
 
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/pypyjit/test_pypy_c/test_string.py
@@ -0,0 +1,42 @@
+from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
+
+class TestString(BaseTestPyPyC):
+    def test_lookup_default_encoding(self):
+        def main(n):
+            import string
+            i = 0
+            letters = string.letters
+            uletters = unicode(string.letters)
+            while i < n:
+                i += letters[i % len(letters)] == uletters[i % len(letters)]
+            return i
+
+        log = self.run(main, [300])
+        assert log.result == 300
+        loop, = log.loops_by_filename(self.filepath)
+        assert loop.match("""
+            i14 = int_lt(i6, i9)
+            guard_true(i14, descr=<Guard42>)
+            i15 = int_mod(i6, i10)
+            i17 = int_rshift(i15, 63)
+            i18 = int_and(i10, i17)
+            i19 = int_add(i15, i18)
+            i21 = int_lt(i19, 0)
+            guard_false(i21, descr=<Guard43>)
+            i22 = int_ge(i19, i10)
+            guard_false(i22, descr=<Guard44>)
+            i23 = strgetitem(p11, i19)
+            i24 = int_ge(i19, i12)
+            guard_false(i24, descr=<Guard45>)
+            i25 = unicodegetitem(p13, i19)
+            guard_not_invalidated(descr=<Guard46>)
+            p27 = newstr(1)
+            strsetitem(p27, 0, i23)
+            p30 = call(ConstClass(ll_str2unicode__rpy_stringPtr), p27, descr=<GcPtrCallDescr>)
+            guard_no_exception(descr=<Guard47>)
+            i32 = call(ConstClass(_ll_2_str_eq_checknull_char__rpy_unicodePtr_UniChar), p30, i25, descr=<SignedCallDescr>)
+            guard_true(i32, descr=<Guard48>)
+            i34 = int_add(i6, 1)
+            --TICK--
+            jump(p0, p1, p2, p3, p4, p5, i34, p7, p8, i9, i10, p11, i12, p13, descr=<Loop4>)
+        """)
\ No newline at end of file
diff --git a/pypy/module/signal/interp_signal.py b/pypy/module/signal/interp_signal.py
--- a/pypy/module/signal/interp_signal.py
+++ b/pypy/module/signal/interp_signal.py
@@ -80,7 +80,7 @@
 
 pypysig_getaddr_occurred = external('pypysig_getaddr_occurred', [],
                                     lltype.Ptr(LONG_STRUCT), _nowrapper=True,
-                                    pure_function=True)
+                                    elidable_function=True)
 c_alarm = external('alarm', [rffi.INT], rffi.INT)
 c_pause = external('pause', [], rffi.INT)
 c_siginterrupt = external('siginterrupt', [rffi.INT, rffi.INT], rffi.INT)
diff --git a/pypy/module/sys/__init__.py b/pypy/module/sys/__init__.py
--- a/pypy/module/sys/__init__.py
+++ b/pypy/module/sys/__init__.py
@@ -7,6 +7,8 @@
 
 class Module(MixedModule):
     """Sys Builtin Module. """
+    _immutable_fields_ = ["defaultencoding?"]
+
     def __init__(self, space, w_name):
         """NOT_RPYTHON""" # because parent __init__ isn't
         if space.config.translating:
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -57,7 +57,8 @@
         raise OperationError(space.w_ValueError,
                              space.wrap("recursion limit must be positive"))
     space.sys.recursionlimit = new_limit
-    _stack_set_length_fraction(new_limit * 0.001)
+    if space.config.translation.type_system == 'lltype':
+        _stack_set_length_fraction(new_limit * 0.001)
 
 def getrecursionlimit(space):
     """Return the last value set by setrecursionlimit().
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_commethods.py b/pypy/module/test_lib_pypy/ctypes_tests/test_commethods.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_commethods.py
@@ -0,0 +1,82 @@
+# unittest for SOME ctypes com function calls.
+# Can't resist implementing some kind of mini-comtypes
+# theller ;-)
+
+import py
+import sys
+if sys.platform != "win32":
+    py.test.skip('windows only test')
+
+import ctypes, new, unittest
+from ctypes.wintypes import HRESULT
+from _ctypes import COMError
+
+oleaut32 = ctypes.OleDLL("oleaut32")
+
+class UnboundMethod(object):
+    def __init__(self, func, index, name):
+        self.func = func
+        self.index = index
+        self.name = name
+        self.__doc__ = func.__doc__
+
+    def __repr__(self):
+        return "<Unbound COM method index %d: %s at %x>" % (self.index, self.name, id(self))
+
+    def __get__(self, instance, owner):
+        if instance is None:
+            return self
+        return new.instancemethod(self.func, instance, owner)
+    
+def commethod(index, restype, *argtypes):
+    """A decorator that generates COM methods.  The decorated function
+    itself is not used except for its name."""
+    def make_commethod(func):
+        comfunc = ctypes.WINFUNCTYPE(restype, *argtypes)(index, func.__name__)
+        comfunc.__name__ = func.__name__
+        comfunc.__doc__ = func.__doc__
+        return UnboundMethod(comfunc, index, func.__name__)
+    return make_commethod
+
+class ICreateTypeLib2(ctypes.c_void_p):
+
+    @commethod(1, ctypes.c_long)
+    def AddRef(self):
+        pass
+
+    @commethod(2, ctypes.c_long)
+    def Release(self):
+        pass
+
+    @commethod(4, HRESULT, ctypes.c_wchar_p)
+    def SetName(self):
+        """Set the name of the library."""
+
+    @commethod(12, HRESULT)
+    def SaveAllChanges(self):
+        pass
+
+
+CreateTypeLib2 = oleaut32.CreateTypeLib2
+CreateTypeLib2.argtypes = (ctypes.c_int, ctypes.c_wchar_p, ctypes.POINTER(ICreateTypeLib2))
+
+################################################################
+
+def test_basic_comtypes():
+    punk = ICreateTypeLib2()
+    hr = CreateTypeLib2(0, "foobar.tlb", punk)
+    assert hr == 0
+
+    assert 2 == punk.AddRef()
+    assert 3 == punk.AddRef()
+    assert 4 == punk.AddRef()
+
+    punk.SetName("TypeLib_ByPYPY")
+    py.test.raises(COMError, lambda: punk.SetName(None))
+
+    # This would save the typelib to disk.
+    ## punk.SaveAllChanges()
+
+    assert 3 == punk.Release()
+    assert 2 == punk.Release()
+    assert 1 == punk.Release()
diff --git a/pypy/module/test_lib_pypy/test_pwd.py b/pypy/module/test_lib_pypy/test_pwd.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/test_lib_pypy/test_pwd.py
@@ -0,0 +1,12 @@
+from pypy.conftest import gettestobjspace
+
+class AppTestPwd:
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=('_ffi', '_rawffi'))
+        cls.space.appexec((), "(): import pwd")
+
+    def test_getpwuid(self):
+        import os, pwd
+        passwd_info = pwd.getpwuid(os.getuid())
+        assert type(passwd_info).__name__ == 'struct_passwd'
+        assert repr(passwd_info).startswith("pwd.struct_passwd(pw_name=")
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -416,7 +416,7 @@
             # obscure circumstances.
             return default_identity_hash(space, w_obj)
         if space.is_w(w_hash, space.w_None):
-            typename = space.type(w_obj).getname(space, '?')
+            typename = space.type(w_obj).getname(space)
             raise operationerrfmt(space.w_TypeError,
                                   "'%s' objects are unhashable", typename)
         w_result = space.get_and_call_function(w_hash, w_obj)
diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py
--- a/pypy/objspace/flow/flowcontext.py
+++ b/pypy/objspace/flow/flowcontext.py
@@ -384,8 +384,9 @@
     # hack for unrolling iterables, don't use this
     def replace_in_stack(self, oldvalue, newvalue):
         w_new = Constant(newvalue)
-        stack_items_w = self.crnt_frame.valuestack_w
-        for i in range(self.crnt_frame.valuestackdepth-1, -1, -1):
+        f = self.crnt_frame
+        stack_items_w = f.locals_stack_w
+        for i in range(f.valuestackdepth-1, f.nlocals-1, -1):
             w_v = stack_items_w[i]
             if isinstance(w_v, Constant):
                 if w_v.value is oldvalue:
diff --git a/pypy/objspace/flow/framestate.py b/pypy/objspace/flow/framestate.py
--- a/pypy/objspace/flow/framestate.py
+++ b/pypy/objspace/flow/framestate.py
@@ -10,7 +10,7 @@
     def __init__(self, state):
         if isinstance(state, PyFrame):
             # getfastscope() can return real None, for undefined locals
-            data = state.getfastscope() + state.savevaluestack()
+            data = state.save_locals_stack()
             if state.last_exception is None:
                 data.append(Constant(None))
                 data.append(Constant(None))
@@ -36,11 +36,9 @@
 
     def restoreframe(self, frame):
         if isinstance(frame, PyFrame):
-            fastlocals = len(frame.fastlocals_w)
             data = self.mergeable[:]
             recursively_unflatten(frame.space, data)
-            frame.setfastscope(data[:fastlocals])  # Nones == undefined locals
-            frame.restorevaluestack(data[fastlocals:-2])
+            frame.restore_locals_stack(data[:-2])  # Nones == undefined locals
             if data[-2] == Constant(None):
                 assert data[-1] == Constant(None)
                 frame.last_exception = None
diff --git a/pypy/objspace/flow/operation.py b/pypy/objspace/flow/operation.py
--- a/pypy/objspace/flow/operation.py
+++ b/pypy/objspace/flow/operation.py
@@ -143,9 +143,6 @@
 def mod_ovf(x, y):
     return ovfcheck(x % y)
 
-##def pow_ovf(*two_or_three_args):
-##    return ovfcheck(pow(*two_or_three_args))
-
 def lshift_ovf(x, y):
     return ovfcheck_lshift(x, y)
 
diff --git a/pypy/objspace/flow/test/test_framestate.py b/pypy/objspace/flow/test/test_framestate.py
--- a/pypy/objspace/flow/test/test_framestate.py
+++ b/pypy/objspace/flow/test/test_framestate.py
@@ -25,7 +25,7 @@
         dummy = Constant(None)
         #dummy.dummy = True
         arg_list = ([Variable() for i in range(formalargcount)] +
-                    [dummy] * (len(frame.fastlocals_w) - formalargcount))
+                    [dummy] * (frame.nlocals - formalargcount))
         frame.setfastscope(arg_list)
         return frame
 
@@ -42,7 +42,7 @@
     def test_neq_hacked_framestate(self):
         frame = self.getframe(self.func_simple)
         fs1 = FrameState(frame)
-        frame.fastlocals_w[-1] = Variable()
+        frame.locals_stack_w[frame.nlocals-1] = Variable()
         fs2 = FrameState(frame)
         assert fs1 != fs2
 
@@ -55,7 +55,7 @@
     def test_union_on_hacked_framestates(self):
         frame = self.getframe(self.func_simple)
         fs1 = FrameState(frame)
-        frame.fastlocals_w[-1] = Variable()
+        frame.locals_stack_w[frame.nlocals-1] = Variable()
         fs2 = FrameState(frame)
         assert fs1.union(fs2) == fs2  # fs2 is more general
         assert fs2.union(fs1) == fs2  # fs2 is more general
@@ -63,7 +63,7 @@
     def test_restore_frame(self):
         frame = self.getframe(self.func_simple)
         fs1 = FrameState(frame)
-        frame.fastlocals_w[-1] = Variable()
+        frame.locals_stack_w[frame.nlocals-1] = Variable()
         fs1.restoreframe(frame)
         assert fs1 == FrameState(frame)
 
@@ -82,25 +82,26 @@
     def test_getoutputargs(self):
         frame = self.getframe(self.func_simple)
         fs1 = FrameState(frame)
-        frame.fastlocals_w[-1] = Variable()
+        frame.locals_stack_w[frame.nlocals-1] = Variable()
         fs2 = FrameState(frame)
         outputargs = fs1.getoutputargs(fs2)
         # 'x' -> 'x' is a Variable
-        # fastlocals_w[-1] -> fastlocals_w[-1] is Constant(None)
-        assert outputargs == [frame.fastlocals_w[0], Constant(None)]
+        # locals_w[n-1] -> locals_w[n-1] is Constant(None)
+        assert outputargs == [frame.locals_stack_w[0], Constant(None)]
 
     def test_union_different_constants(self):
         frame = self.getframe(self.func_simple)
         fs1 = FrameState(frame)
-        frame.fastlocals_w[-1] = Constant(42)
+        frame.locals_stack_w[frame.nlocals-1] = Constant(42)
         fs2 = FrameState(frame)
         fs3 = fs1.union(fs2)
         fs3.restoreframe(frame)
-        assert isinstance(frame.fastlocals_w[-1], Variable) # generalized
+        assert isinstance(frame.locals_stack_w[frame.nlocals-1], Variable)
+                                 # ^^^ generalized
 
     def test_union_spectag(self):
         frame = self.getframe(self.func_simple)
         fs1 = FrameState(frame)
-        frame.fastlocals_w[-1] = Constant(SpecTag())
+        frame.locals_stack_w[frame.nlocals-1] = Constant(SpecTag())
         fs2 = FrameState(frame)
         assert fs1.union(fs2) is None   # UnionError
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -374,7 +374,7 @@
             raise operationerrfmt(
                 space.w_TypeError,
                 "sequence item %d: expected string, %s "
-                "found", i, space.type(w_s).getname(space, '?'))
+                "found", i, space.type(w_s).getname(space))
 
         if data and i != 0:
             newdata.extend(data)
diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py
--- a/pypy/objspace/std/celldict.py
+++ b/pypy/objspace/std/celldict.py
@@ -4,8 +4,9 @@
 speed up global lookups a lot."""
 
 from pypy.objspace.std.dictmultiobject import IteratorImplementation
-from pypy.objspace.std.dictmultiobject import W_DictMultiObject, _is_sane_hash
-from pypy.rlib import jit
+from pypy.objspace.std.dictmultiobject import DictStrategy, _never_equal_to_string
+from pypy.objspace.std.dictmultiobject import ObjectDictStrategy
+from pypy.rlib import jit, rerased
 
 class ModuleCell(object):
     def __init__(self, w_value=None):
@@ -19,49 +20,59 @@
     def __repr__(self):
         return "<ModuleCell: %s>" % (self.w_value, )
 
-class ModuleDictImplementation(W_DictMultiObject):
+class ModuleDictStrategy(DictStrategy):
+
+    erase, unerase = rerased.new_erasing_pair("modulecell")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
+
     def __init__(self, space):
         self.space = space
-        self.content = {}
 
-    def getcell(self, key, makenew):
+    def get_empty_storage(self):
+        return self.erase({})
+
+    def getcell(self, w_dict, key, makenew):
         if makenew or jit.we_are_jitted():
             # when we are jitting, we always go through the pure function
             # below, to ensure that we have no residual dict lookup
-            self = jit.hint(self, promote=True)
-            return self._getcell_makenew(key)
-        return self.content.get(key, None)
+            w_dict = jit.promote(w_dict)
+            self = jit.promote(self)
+            return self._getcell_makenew(w_dict, key)
+        return self.unerase(w_dict.dstorage).get(key, None)
 
-    @jit.purefunction
-    def _getcell_makenew(self, key):
-        return self.content.setdefault(key, ModuleCell())
+    @jit.elidable
+    def _getcell_makenew(self, w_dict, key):
+        return self.unerase(w_dict.dstorage).setdefault(key, ModuleCell())
 
-    def impl_setitem(self, w_key, w_value):
+    def setitem(self, w_dict, w_key, w_value):
         space = self.space
         if space.is_w(space.type(w_key), space.w_str):
-            self.impl_setitem_str(self.space.str_w(w_key), w_value)
+            self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
         else:
-            self._as_rdict().impl_fallback_setitem(w_key, w_value)
+            self.switch_to_object_strategy(w_dict)
+            w_dict.setitem(w_key, w_value)
 
-    def impl_setitem_str(self, name, w_value):
-        self.getcell(name, True).w_value = w_value
+    def setitem_str(self, w_dict, key, w_value):
+        self.getcell(w_dict, key, True).w_value = w_value
 
-    def impl_setdefault(self, w_key, w_default):
+    def setdefault(self, w_dict, w_key, w_default):
         space = self.space
         if space.is_w(space.type(w_key), space.w_str):
-            cell = self.getcell(space.str_w(w_key), True)
+            cell = self.getcell(w_dict, space.str_w(w_key), True)
             if cell.w_value is None:
                 cell.w_value = w_default
             return cell.w_value
         else:
-            return self._as_rdict().impl_fallback_setdefault(w_key, w_default)
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.setdefault(w_key, w_default)
 
-    def impl_delitem(self, w_key):
+    def delitem(self, w_dict, w_key):
         space = self.space
         w_key_type = space.type(w_key)
         if space.is_w(w_key_type, space.w_str):
             key = space.str_w(w_key)
-            cell = self.getcell(key, False)
+            cell = self.getcell(w_dict, key, False)
             if cell is None or cell.w_value is None:
                 raise KeyError
             # note that we don't remove the cell from self.content, to make
@@ -69,75 +80,91 @@
             # maps to the same cell later (even if this cell no longer
             # represents a key)
             cell.invalidate()
-        elif _is_sane_hash(space, w_key_type):
+        elif _never_equal_to_string(space, w_key_type):
             raise KeyError
         else:
-            self._as_rdict().impl_fallback_delitem(w_key)
-        
-    def impl_length(self):
+            self.switch_to_object_strategy(w_dict)
+            w_dict.delitem(w_key)
+
+    def length(self, w_dict):
         # inefficient, but do we care?
         res = 0
-        for cell in self.content.itervalues():
+        for cell in self.unerase(w_dict.dstorage).itervalues():
             if cell.w_value is not None:
                 res += 1
         return res
 
-    def impl_getitem(self, w_lookup):
+    def getitem(self, w_dict, w_key):
         space = self.space
-        w_lookup_type = space.type(w_lookup)
+        w_lookup_type = space.type(w_key)
         if space.is_w(w_lookup_type, space.w_str):
-            return self.impl_getitem_str(space.str_w(w_lookup))
+            return self.getitem_str(w_dict, space.str_w(w_key))
 
-        elif _is_sane_hash(space, w_lookup_type):
+        elif _never_equal_to_string(space, w_lookup_type):
             return None
         else:
-            return self._as_rdict().impl_fallback_getitem(w_lookup)
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.getitem(w_key)
 
-    def impl_getitem_str(self, lookup):
-        res = self.getcell(lookup, False)
+    def getitem_str(self, w_dict, key):
+        res = self.getcell(w_dict, key, False)
         if res is None:
             return None
         # note that even if the res.w_value is None, the next line is fine
         return res.w_value
 
-    def impl_iter(self):
-        return ModuleDictIteratorImplementation(self.space, self)
+    def iter(self, w_dict):
+        return ModuleDictIteratorImplementation(self.space, self, w_dict)
 
-    def impl_keys(self):
+    def keys(self, w_dict):
         space = self.space
-        return [space.wrap(key) for key, cell in self.content.iteritems()
+        iterator = self.unerase(w_dict.dstorage).iteritems
+        return [space.wrap(key) for key, cell in iterator()
                     if cell.w_value is not None]
 
-    def impl_values(self):
-        return [cell.w_value for cell in self.content.itervalues()
+    def values(self, w_dict):
+        iterator = self.unerase(w_dict.dstorage).itervalues
+        return [cell.w_value for cell in iterator()
                     if cell.w_value is not None]
 
-    def impl_items(self):
+    def items(self, w_dict):
         space = self.space
+        iterator = self.unerase(w_dict.dstorage).iteritems
         return [space.newtuple([space.wrap(key), cell.w_value])
-                    for (key, cell) in self.content.iteritems()
+                    for (key, cell) in iterator()
                         if cell.w_value is not None]
 
-    def impl_clear(self):
-        for k, cell in self.content.iteritems():
+    def clear(self, w_dict):
+        iterator = self.unerase(w_dict.dstorage).iteritems
+        for k, cell in iterator():
             cell.invalidate()
 
-    def _as_rdict(self):
-        r_dict_content = self.initialize_as_rdict()
-        for k, cell in self.content.iteritems():
+    def popitem(self, w_dict):
+        # This is O(n) if called repeatedly; you probably shouldn't do that
+        # on a module's dict anyway
+        for k, cell in self.unerase(w_dict.dstorage).iteritems():
             if cell.w_value is not None:
-                r_dict_content[self.space.wrap(k)] = cell.w_value
-            cell.invalidate()
-        self._clear_fields()
-        return self
+                w_value = cell.w_value
+                cell.invalidate()
+                return self.space.wrap(k), w_value
+        else:
+            raise KeyError
 
-    def _clear_fields(self):
-        self.content = None
+    def switch_to_object_strategy(self, w_dict):
+        d = self.unerase(w_dict.dstorage)
+        strategy = self.space.fromcache(ObjectDictStrategy)
+        d_new = strategy.unerase(strategy.get_empty_storage())
+        for key, cell in d.iteritems():
+            if cell.w_value is not None:
+                d_new[self.space.wrap(key)] = cell.w_value
+        w_dict.strategy = strategy
+        w_dict.dstorage = strategy.erase(d_new)
 
 class ModuleDictIteratorImplementation(IteratorImplementation):
-    def __init__(self, space, dictimplementation):
+    def __init__(self, space, strategy, dictimplementation):
         IteratorImplementation.__init__(self, space, dictimplementation)
-        self.iterator = dictimplementation.content.iteritems()
+        dict_w = strategy.unerase(dictimplementation.dstorage)
+        self.iterator = dict_w.iteritems()
 
     def next_entry(self):
         for key, cell in self.iterator:
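
The same storage/strategy split is introduced for dicts in general in the
next file.  A simplified, self-contained sketch of the pattern (plain Python
stand-ins; Dict, EmptyStrategy and StringStrategy are illustrative names,
not the real W_DictMultiObject/rerased API):

    class EmptyStrategy(object):
        # an empty dict carries no real storage; the first write picks a
        # concrete strategy and re-dispatches to it
        def get_empty_storage(self):
            return None
        def getitem(self, d, key):
            return None
        def setitem(self, d, key, value):
            d.strategy = StringStrategy()
            d.dstorage = {}
            d.strategy.setitem(d, key, value)

    class StringStrategy(object):
        # the storage is an ordinary dict; in RPython it would be "erased"
        # behind an opaque pointer, as celldict does with rerased above
        def getitem(self, d, key):
            return d.dstorage.get(key)
        def setitem(self, d, key, value):
            d.dstorage[key] = value

    class Dict(object):
        # every operation is forwarded to the current strategy, which may
        # swap itself (and the storage) out, like switch_to_object_strategy
        def __init__(self):
            self.strategy = EmptyStrategy()
            self.dstorage = self.strategy.get_empty_storage()
        def getitem(self, key):
            return self.strategy.getitem(self, key)
        def setitem(self, key, value):
            self.strategy.setitem(self, key, value)

    d = Dict()
    assert d.getitem('a') is None
    d.setitem('a', 1)
    assert isinstance(d.strategy, StringStrategy)
    assert d.getitem('a') == 1
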
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -1,18 +1,20 @@
 import py, sys
 from pypy.objspace.std.model import registerimplementation, W_Object
 from pypy.objspace.std.register_all import register_all
+from pypy.objspace.std.settype import set_typedef as settypedef
 from pypy.interpreter import gateway
 from pypy.interpreter.argument import Signature
 from pypy.interpreter.error import OperationError, operationerrfmt
-from pypy.module.__builtin__.__init__ import BUILTIN_TO_INDEX, OPTIMIZED_BUILTINS
 
-from pypy.rlib.objectmodel import r_dict, we_are_translated
-from pypy.objspace.std.settype import set_typedef as settypedef
+from pypy.rlib.objectmodel import r_dict, we_are_translated, specialize
+from pypy.rlib.debug import mark_dict_non_null
+
+from pypy.rlib import rerased
 
 def _is_str(space, w_key):
     return space.is_w(space.type(w_key), space.w_str)
 
-def _is_sane_hash(space, w_lookup_type):
+def _never_equal_to_string(space, w_lookup_type):
     """ Handles the case of a non string key lookup.
     Types that have a sane hash/eq function should allow us to return True
     directly to signal that the key is not in the dict in any case.
@@ -28,48 +30,38 @@
 class W_DictMultiObject(W_Object):
     from pypy.objspace.std.dicttype import dict_typedef as typedef
 
-    r_dict_content = None
-
     @staticmethod
     def allocate_and_init_instance(space, w_type=None, module=False,
                                    instance=False, classofinstance=None,
                                    strdict=False):
+
         if space.config.objspace.std.withcelldict and module:
-            from pypy.objspace.std.celldict import ModuleDictImplementation
+            from pypy.objspace.std.celldict import ModuleDictStrategy
             assert w_type is None
-            return ModuleDictImplementation(space)
-        elif space.config.objspace.opcodes.CALL_LIKELY_BUILTIN and module:
-            assert w_type is None
-            return WaryDictImplementation(space)
-        elif space.config.objspace.std.withdictmeasurement:
-            assert w_type is None
-            return MeasuringDictImplementation(space)
+            strategy = space.fromcache(ModuleDictStrategy)
+
         elif instance or strdict or module:
             assert w_type is None
-            return StrDictImplementation(space)
+            strategy = space.fromcache(StringDictStrategy)
+
         else:
-            if w_type is None:
-                w_type = space.w_dict
-            w_self = space.allocate_instance(W_DictMultiObject, w_type)
-            W_DictMultiObject.__init__(w_self, space)
-            return w_self
+            strategy = space.fromcache(EmptyDictStrategy)
 
-    def __init__(self, space):
+        if w_type is None:
+            w_type = space.w_dict
+        storage = strategy.get_empty_storage()
+        w_self = space.allocate_instance(W_DictMultiObject, w_type)
+        W_DictMultiObject.__init__(w_self, space, strategy, storage)
+        return w_self
+
+    def __init__(self, space, strategy, storage):
         self.space = space
-
-    def initialize_as_rdict(self):
-        assert self.r_dict_content is None
-        self.r_dict_content = r_dict(self.space.eq_w, self.space.hash_w)
-        return self.r_dict_content
-
-
-    def initialize_content(w_self, list_pairs_w):
-        for w_k, w_v in list_pairs_w:
-            w_self.setitem(w_k, w_v)
+        self.strategy = strategy
+        self.dstorage = storage
 
     def __repr__(w_self):
         """ representation for debugging purposes """
-        return "%s()" % (w_self.__class__.__name__, )
+        return "%s(%s)" % (w_self.__class__.__name__, w_self.strategy)
 
     def unwrap(w_dict, space):
         result = {}
@@ -88,51 +80,38 @@
         else:
             return None
 
-    # _________________________________________________________________
-    # implementation methods
-    def impl_getitem(self, w_key):
-        #return w_value or None
-        # in case the key is unhashable, try to hash it
-        self.space.hash(w_key)
-        # return None anyway
-        return None
+    def initialize_content(w_self, list_pairs_w):
+        for w_k, w_v in list_pairs_w:
+            w_self.setitem(w_k, w_v)
 
-    def impl_getitem_str(self, key):
-        #return w_value or None
-        return None
+def _add_indirections():
+    dict_methods = "setitem setitem_str getitem \
+                    getitem_str delitem length \
+                    clear keys values \
+                    items iter setdefault \
+                    popitem".split()
 
-    def impl_setdefault(self, w_key, w_default):
-        # here the dict is always empty
-        self._as_rdict().impl_fallback_setitem(w_key, w_default)
-        return w_default
+    def make_method(method):
+        def f(self, *args):
+            return getattr(self.strategy, method)(self, *args)
+        f.func_name = method
+        return f
 
-    def impl_setitem(self, w_key, w_value):
-        self._as_rdict().impl_fallback_setitem(w_key, w_value)
+    for method in dict_methods:
+        setattr(W_DictMultiObject, method, make_method(method))
 
-    def impl_setitem_str(self, key, w_value):
-        self._as_rdict().impl_fallback_setitem_str(key, w_value)
+_add_indirections()
 
-    def impl_delitem(self, w_key):
-        # in case the key is unhashable, try to hash it
-        self.space.hash(w_key)
-        raise KeyError
+class DictStrategy(object):
 
-    def impl_length(self):
-        return 0
+    def __init__(self, space):
+        self.space = space
 
-    def impl_iter(self):
-        # XXX I guess it's not important to be fast in this case?
-        return self._as_rdict().impl_fallback_iter()
+    def get_empty_storage(self):
+        raise NotImplementedError
 
-    def impl_clear(self):
-        self.r_dict_content = None
-
-    def _as_rdict(self):
-        r_dict_content = self.initialize_as_rdict()
-        return self
-
-    def impl_keys(self):
-        iterator = self.impl_iter()
+    def keys(self, w_dict):
+        iterator = self.iter(w_dict)
         result = []
         while 1:
             w_key, w_value = iterator.next()
@@ -140,8 +119,9 @@
                 result.append(w_key)
             else:
                 return result
-    def impl_values(self):
-        iterator = self.impl_iter()
+
+    def values(self, w_dict):
+        iterator = self.iter(w_dict)
         result = []
         while 1:
             w_key, w_value = iterator.next()
@@ -149,8 +129,9 @@
                 result.append(w_value)
             else:
                 return result
-    def impl_items(self):
-        iterator = self.impl_iter()
+
+    def items(self, w_dict):
+        iterator = self.iter(w_dict)
         result = []
         while 1:
             w_key, w_value = iterator.next()
@@ -159,106 +140,90 @@
             else:
                 return result
 
-    # the following method only makes sense when the option to use the
-    # CALL_LIKELY_BUILTIN opcode is set. Otherwise it won't even be seen
-    # by the annotator
-    def impl_get_builtin_indexed(self, i):
-        key = OPTIMIZED_BUILTINS[i]
-        return self.impl_getitem_str(key)
+    def clear(self, w_dict):
+        strategy = self.space.fromcache(EmptyDictStrategy)
+        storage = strategy.get_empty_storage()
+        w_dict.strategy = strategy
+        w_dict.dstorage = storage
 
-    def impl_popitem(self):
-        # default implementation
-        space = self.space
-        iterator = self.impl_iter()
-        w_key, w_value = iterator.next()
-        if w_key is None:
-            raise KeyError
-        self.impl_delitem(w_key)
-        return w_key, w_value
 
-    # _________________________________________________________________
-    # fallback implementation methods
+class EmptyDictStrategy(DictStrategy):
 
-    def impl_fallback_setdefault(self, w_key, w_default):
-        return self.r_dict_content.setdefault(w_key, w_default)
+    erase, unerase = rerased.new_erasing_pair("empty")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
 
-    def impl_fallback_setitem(self, w_key, w_value):
-        self.r_dict_content[w_key] = w_value
+    def get_empty_storage(self):
+        return self.erase(None)
 
-    def impl_fallback_setitem_str(self, key, w_value):
-        return self.impl_fallback_setitem(self.space.wrap(key), w_value)
+    def switch_to_correct_strategy(self, w_dict, w_key):
+        #XXX implement other strategies later
+        if type(w_key) is self.space.StringObjectCls:
+            self.switch_to_string_strategy(w_dict)
+        elif self.space.is_w(self.space.type(w_key), self.space.w_int):
+            self.switch_to_int_strategy(w_dict)
+        else:
+            self.switch_to_object_strategy(w_dict)
 
-    def impl_fallback_delitem(self, w_key):
-        del self.r_dict_content[w_key]
+    def switch_to_string_strategy(self, w_dict):
+        strategy = self.space.fromcache(StringDictStrategy)
+        storage = strategy.get_empty_storage()
+        w_dict.strategy = strategy
+        w_dict.dstorage = storage
 
-    def impl_fallback_length(self):
-        return len(self.r_dict_content)
+    def switch_to_int_strategy(self, w_dict):
+        strategy = self.space.fromcache(IntDictStrategy)
+        storage = strategy.get_empty_storage()
+        w_dict.strategy = strategy
+        w_dict.dstorage = storage
 
-    def impl_fallback_getitem(self, w_key):
-        return self.r_dict_content.get(w_key, None)
+    def switch_to_object_strategy(self, w_dict):
+        strategy = self.space.fromcache(ObjectDictStrategy)
+        storage = strategy.get_empty_storage()
+        w_dict.strategy = strategy
+        w_dict.dstorage = storage
 
-    def impl_fallback_getitem_str(self, key):
-        return self.r_dict_content.get(self.space.wrap(key), None)
+    def getitem(self, w_dict, w_key):
+        #return w_value or None
+        # in case the key is unhashable, try to hash it
+        self.space.hash(w_key)
+        # return None anyway
+        return None
 
-    def impl_fallback_iter(self):
-        return RDictIteratorImplementation(self.space, self)
+    def getitem_str(self, w_dict, key):
+        #return w_value or None
+        return None
 
-    def impl_fallback_keys(self):
-        return self.r_dict_content.keys()
-    def impl_fallback_values(self):
-        return self.r_dict_content.values()
-    def impl_fallback_items(self):
-        return [self.space.newtuple([w_key, w_val])
-                    for w_key, w_val in self.r_dict_content.iteritems()]
+    def setdefault(self, w_dict, w_key, w_default):
+        # here the dict is always empty
+        self.switch_to_correct_strategy(w_dict, w_key)
+        w_dict.setitem(w_key, w_default)
+        return w_default
 
-    def impl_fallback_clear(self):
-        self.r_dict_content.clear()
+    def setitem(self, w_dict, w_key, w_value):
+        self.switch_to_correct_strategy(w_dict, w_key)
+        w_dict.setitem(w_key, w_value)
 
-    def impl_fallback_get_builtin_indexed(self, i):
-        key = OPTIMIZED_BUILTINS[i]
-        return self.impl_fallback_getitem_str(key)
+    def setitem_str(self, w_dict, key, w_value):
+        self.switch_to_string_strategy(w_dict)
+        w_dict.setitem_str(key, w_value)
 
-    def impl_fallback_popitem(self):
-        return self.r_dict_content.popitem()
+    def delitem(self, w_dict, w_key):
+        # in case the key is unhashable, try to hash it
+        self.space.hash(w_key)
+        raise KeyError
 
+    def length(self, w_dict):
+        return 0
 
-implementation_methods = [
-    ("getitem", 1),
-    ("getitem_str", 1),
-    ("length", 0),
-    ("setitem_str", 2),
-    ("setitem", 2),
-    ("setdefault", 2),
-    ("delitem", 1),
-    ("iter", 0),
-    ("items", 0),
-    ("values", 0),
-    ("keys", 0),
-    ("clear", 0),
-    ("get_builtin_indexed", 1),
-    ("popitem", 0),
-]
+    def iter(self, w_dict):
+        return EmptyIteratorImplementation(self.space, w_dict)
 
+    def clear(self, w_dict):
+        return
 
-def _make_method(name, implname, fallback, numargs):
-    args = ", ".join(["a" + str(i) for i in range(numargs)])
-    code = """def %s(self, %s):
-        if self.r_dict_content is not None:
-            return self.%s(%s)
-        return self.%s(%s)""" % (name, args, fallback, args, implname, args)
-    d = {}
-    exec py.code.Source(code).compile() in d
-    implementation_method = d[name]
-    implementation_method.func_defaults = getattr(W_DictMultiObject, implname).func_defaults
-    return implementation_method
-
-def _install_methods():
-    for name, numargs in implementation_methods:
-        implname = "impl_" + name
-        fallbackname = "impl_fallback_" + name
-        func = _make_method(name, implname, fallbackname, numargs)
-        setattr(W_DictMultiObject, name, func)
-_install_methods()
+    def popitem(self, w_dict):
+        raise KeyError
 
 registerimplementation(W_DictMultiObject)
 
@@ -300,319 +265,255 @@
             return self.len - self.pos
         return 0
 
+class EmptyIteratorImplementation(IteratorImplementation):
+    def next(self):
+        return (None, None)
+
 
 
 # concrete subclasses of the above
 
-class StrDictImplementation(W_DictMultiObject):
-    def __init__(self, space):
-        self.space = space
-        self.content = {}
+class AbstractTypedStrategy(object):
+    _mixin_ = True
 
-    def impl_setitem(self, w_key, w_value):
+    @staticmethod
+    def erase(storage):
+        raise NotImplementedError("abstract base class")
+
+    @staticmethod
+    def unerase(obj):
+        raise NotImplementedError("abstract base class")
+
+    def wrap(self, unwrapped):
+        raise NotImplementedError
+
+    def unwrap(self, wrapped):
+        raise NotImplementedError
+
+    def is_correct_type(self, w_obj):
+        raise NotImplementedError("abstract base class")
+
+    def get_empty_storage(self):
+        raise NotImplementedError("abstract base class")
+
+    def _never_equal_to(self, w_lookup_type):
+        raise NotImplementedError("abstract base class")
+
+    def setitem(self, w_dict, w_key, w_value):
         space = self.space
-        if space.is_w(space.type(w_key), space.w_str):
-            self.impl_setitem_str(self.space.str_w(w_key), w_value)
+        if self.is_correct_type(w_key):
+            self.unerase(w_dict.dstorage)[self.unwrap(w_key)] = w_value
+            return
         else:
-            self._as_rdict().impl_fallback_setitem(w_key, w_value)
+            self.switch_to_object_strategy(w_dict)
+            w_dict.setitem(w_key, w_value)
 
-    def impl_setitem_str(self, key, w_value):
-        self.content[key] = w_value
+    def setitem_str(self, w_dict, key, w_value):
+        self.switch_to_object_strategy(w_dict)
+        w_dict.setitem(self.space.wrap(key), w_value)
 
-    def impl_setdefault(self, w_key, w_default):
+    def setdefault(self, w_dict, w_key, w_default):
         space = self.space
-        if space.is_w(space.type(w_key), space.w_str):
-            return self.content.setdefault(space.str_w(w_key), w_default)
+        if self.is_correct_type(w_key):
+            return self.unerase(w_dict.dstorage).setdefault(self.unwrap(w_key), w_default)
         else:
-            return self._as_rdict().impl_fallback_setdefault(w_key, w_default)
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.setdefault(w_key, w_default)
 
-
-    def impl_delitem(self, w_key):
+    def delitem(self, w_dict, w_key):
         space = self.space
         w_key_type = space.type(w_key)
-        if space.is_w(w_key_type, space.w_str):
-            del self.content[space.str_w(w_key)]
+        if self.is_correct_type(w_key):
+            del self.unerase(w_dict.dstorage)[self.unwrap(w_key)]
             return
-        elif _is_sane_hash(space, w_key_type):
-            raise KeyError
         else:
-            self._as_rdict().impl_fallback_delitem(w_key)
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.delitem(w_key)
 
-    def impl_length(self):
-        return len(self.content)
+    def length(self, w_dict):
+        return len(self.unerase(w_dict.dstorage))
 
-    def impl_getitem_str(self, key):
-        return self.content.get(key, None)
+    def getitem_str(self, w_dict, key):
+        return self.getitem(w_dict, self.space.wrap(key))
 
-    def impl_getitem(self, w_key):
+    def getitem(self, w_dict, w_key):
+        space = self.space
+
+        if self.is_correct_type(w_key):
+            return self.unerase(w_dict.dstorage).get(self.unwrap(w_key), None)
+        elif self._never_equal_to(space.type(w_key)):
+            return None
+        else:
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.getitem(w_key)
+
+    def keys(self, w_dict):
+        return [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()]
+
+    def values(self, w_dict):
+        return self.unerase(w_dict.dstorage).values()
+
+    def items(self, w_dict):
+        space = self.space
+        dict_w = self.unerase(w_dict.dstorage)
+        return [space.newtuple([self.wrap(key), w_value])
+                    for (key, w_value) in dict_w.iteritems()]
+
+    def popitem(self, w_dict):
+        key, value = self.unerase(w_dict.dstorage).popitem()
+        return (self.wrap(key), value)
+
+    def clear(self, w_dict):
+        self.unerase(w_dict.dstorage).clear()
+
+    def switch_to_object_strategy(self, w_dict):
+        d = self.unerase(w_dict.dstorage)
+        strategy = self.space.fromcache(ObjectDictStrategy)
+        d_new = strategy.unerase(strategy.get_empty_storage())
+        for key, value in d.iteritems():
+            d_new[self.wrap(key)] = value
+        w_dict.strategy = strategy
+        w_dict.dstorage = strategy.erase(d_new)
+
+class ObjectDictStrategy(AbstractTypedStrategy, DictStrategy):
+
+    erase, unerase = rerased.new_erasing_pair("object")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
+
+    def wrap(self, unwrapped):
+        return unwrapped
+
+    def unwrap(self, wrapped):
+        return wrapped
+
+    def is_correct_type(self, w_obj):
+        return True
+
+    def get_empty_storage(self):
+        new_dict = r_dict(self.space.eq_w, self.space.hash_w,
+                          force_non_null=True)
+        return self.erase(new_dict)
+
+    def _never_equal_to(self, w_lookup_type):
+        return False
+
+    def iter(self, w_dict):
+        return ObjectIteratorImplementation(self.space, self, w_dict)
+
+    def keys(self, w_dict):
+        return self.unerase(w_dict.dstorage).keys()
+
+class StringDictStrategy(AbstractTypedStrategy, DictStrategy):
+
+    erase, unerase = rerased.new_erasing_pair("string")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
+
+    def wrap(self, unwrapped):
+        return self.space.wrap(unwrapped)
+
+    def unwrap(self, wrapped):
+        return self.space.str_w(wrapped)
+
+    def is_correct_type(self, w_obj):
+        space = self.space
+        return space.is_w(space.type(w_obj), space.w_str)
+
+    def get_empty_storage(self):
+        res = {}
+        mark_dict_non_null(res)
+        return self.erase(res)
+
+    def _never_equal_to(self, w_lookup_type):
+        return _never_equal_to_string(self.space, w_lookup_type)
+
+    def setitem_str(self, w_dict, key, w_value):
+        assert key is not None
+        self.unerase(w_dict.dstorage)[key] = w_value
+
+    def getitem(self, w_dict, w_key):
         space = self.space
         # -- This is called extremely often.  Hack for performance --
         if type(w_key) is space.StringObjectCls:
-            return self.impl_getitem_str(w_key.unwrap(space))
+            return self.getitem_str(w_dict, w_key.unwrap(space))
         # -- End of performance hack --
-        w_lookup_type = space.type(w_key)
-        if space.is_w(w_lookup_type, space.w_str):
-            return self.impl_getitem_str(space.str_w(w_key))
-        elif _is_sane_hash(space, w_lookup_type):
-            return None
-        else:
-            return self._as_rdict().impl_fallback_getitem(w_key)
+        return AbstractTypedStrategy.getitem(self, w_dict, w_key)
 
-    def impl_iter(self):
-        return StrIteratorImplementation(self.space, self)
+    def getitem_str(self, w_dict, key):
+        assert key is not None
+        return self.unerase(w_dict.dstorage).get(key, None)
 
-    def impl_keys(self):
-        space = self.space
-        return [space.wrap(key) for key in self.content.iterkeys()]
+    def iter(self, w_dict):
+        return StrIteratorImplementation(self.space, self, w_dict)
 
-    def impl_values(self):
-        return self.content.values()
-
-    def impl_items(self):
-        space = self.space
-        return [space.newtuple([space.wrap(key), w_value])
-                    for (key, w_value) in self.content.iteritems()]
-
-    def impl_clear(self):
-        self.content.clear()
-
-
-    def _as_rdict(self):
-        r_dict_content = self.initialize_as_rdict()
-        for k, w_v in self.content.items():
-            r_dict_content[self.space.wrap(k)] = w_v
-        self._clear_fields()
-        return self
-
-    def _clear_fields(self):
-        self.content = None
 
 class StrIteratorImplementation(IteratorImplementation):
-    def __init__(self, space, dictimplementation):
+    def __init__(self, space, strategy, dictimplementation):
         IteratorImplementation.__init__(self, space, dictimplementation)
-        self.iterator = dictimplementation.content.iteritems()
+        self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
 
     def next_entry(self):
         # note that this 'for' loop only runs once, at most
-        for str, w_value in self.iterator:
-            return self.space.wrap(str), w_value
+        for key, w_value in self.iterator:
+            return self.space.wrap(key), w_value
         else:
             return None, None
 
 
-class WaryDictImplementation(StrDictImplementation):
-    def __init__(self, space):
-        StrDictImplementation.__init__(self, space)
-        self.shadowed = [None] * len(BUILTIN_TO_INDEX)
+class IntDictStrategy(AbstractTypedStrategy, DictStrategy):
+    erase, unerase = rerased.new_erasing_pair("int")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
 
-    def impl_setitem_str(self, key, w_value):
-        i = BUILTIN_TO_INDEX.get(key, -1)
-        if i != -1:
-            self.shadowed[i] = w_value
-        self.content[key] = w_value
+    def wrap(self, unwrapped):
+        return self.space.wrap(unwrapped)
 
-    def impl_delitem(self, w_key):
+    def unwrap(self, wrapped):
+        return self.space.int_w(wrapped)
+
+    def get_empty_storage(self):
+        return self.erase({})
+
+    def is_correct_type(self, w_obj):
         space = self.space
-        w_key_type = space.type(w_key)
-        if space.is_w(w_key_type, space.w_str):
-            key = space.str_w(w_key)
-            del self.content[key]
-            i = BUILTIN_TO_INDEX.get(key, -1)
-            if i != -1:
-                self.shadowed[i] = None
-        elif _is_sane_hash(space, w_key_type):
-            raise KeyError
-        else:
-            self._as_rdict().impl_fallback_delitem(w_key)
+        return space.is_w(space.type(w_obj), space.w_int)
 
-    def impl_get_builtin_indexed(self, i):
-        return self.shadowed[i]
+    def _never_equal_to(self, w_lookup_type):
+        space = self.space
+        # XXX there are many more types
+        return (space.is_w(w_lookup_type, space.w_NoneType) or
+                space.is_w(w_lookup_type, space.w_str) or
+                space.is_w(w_lookup_type, space.w_unicode)
+                )
 
+    def iter(self, w_dict):
+        return IntIteratorImplementation(self.space, self, w_dict)
 
-class RDictIteratorImplementation(IteratorImplementation):
-    def __init__(self, space, dictimplementation):
+class IntIteratorImplementation(IteratorImplementation):
+    def __init__(self, space, strategy, dictimplementation):
         IteratorImplementation.__init__(self, space, dictimplementation)
-        self.iterator = dictimplementation.r_dict_content.iteritems()
+        self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
 
     def next_entry(self):
         # note that this 'for' loop only runs once, at most
-        for item in self.iterator:
-            return item
+        for key, w_value in self.iterator:
+            return self.space.wrap(key), w_value
         else:
             return None, None
 
 
+class ObjectIteratorImplementation(IteratorImplementation):
+    def __init__(self, space, strategy, dictimplementation):
+        IteratorImplementation.__init__(self, space, dictimplementation)
+        self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
 
-# XXX fix this thing
-import time
-
-class DictInfo(object):
-    _dict_infos = []
-    def __init__(self):
-        self.id = len(self._dict_infos)
-
-        self.setitem_strs = 0; self.setitems = 0;  self.delitems = 0
-        self.lengths = 0;   self.gets = 0
-        self.iteritems = 0; self.iterkeys = 0; self.itervalues = 0
-        self.keys = 0;      self.values = 0;   self.items = 0
-
-        self.maxcontents = 0
-
-        self.reads = 0
-        self.hits = self.misses = 0
-        self.writes = 0
-        self.iterations = 0
-        self.listings = 0
-
-        self.seen_non_string_in_write = 0
-        self.seen_non_string_in_read_first = 0
-        self.size_on_non_string_seen_in_read = -1
-        self.size_on_non_string_seen_in_write = -1
-
-        self.createtime = time.time()
-        self.lifetime = -1.0
-
-        if not we_are_translated():
-            # very probable stack from here:
-            # 0 - us
-            # 1 - MeasuringDictImplementation.__init__
-            # 2 - W_DictMultiObject.__init__
-            # 3 - space.newdict
-            # 4 - newdict's caller.  let's look at that
-            try:
-                frame = sys._getframe(4)
-            except ValueError:
-                pass # might be at import time
-            else:
-                self.sig = '(%s:%s)%s'%(frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name)
-
-        self._dict_infos.append(self)
-    def __repr__(self):
-        args = []
-        for k in sorted(self.__dict__):
-            v = self.__dict__[k]
-            if v != 0:
-                args.append('%s=%r'%(k, v))
-        return '<DictInfo %s>'%(', '.join(args),)
-
-class OnTheWayOut:
-    def __init__(self, info):
-        self.info = info
-    def __del__(self):
-        self.info.lifetime = time.time() - self.info.createtime
-
-class MeasuringDictImplementation(W_DictMultiObject):
-    def __init__(self, space):
-        self.space = space
-        self.content = r_dict(space.eq_w, space.hash_w)
-        self.info = DictInfo()
-        self.thing_with_del = OnTheWayOut(self.info)
-
-    def __repr__(self):
-        return "%s<%s>" % (self.__class__.__name__, self.content)
-
-    def _is_str(self, w_key):
-        space = self.space
-        return space.is_true(space.isinstance(w_key, space.w_str))
-    def _read(self, w_key):
-        self.info.reads += 1
-        if not self.info.seen_non_string_in_write \
-               and not self.info.seen_non_string_in_read_first \
-               and not self._is_str(w_key):
-            self.info.seen_non_string_in_read_first = True
-            self.info.size_on_non_string_seen_in_read = len(self.content)
-        hit = w_key in self.content
-        if hit:
-            self.info.hits += 1
+    def next_entry(self):
+        # note that this 'for' loop only runs once, at most
+        for w_key, w_value in self.iterator:
+            return w_key, w_value
         else:
-            self.info.misses += 1
-
-    def impl_setitem(self, w_key, w_value):
-        if not self.info.seen_non_string_in_write and not self._is_str(w_key):
-            self.info.seen_non_string_in_write = True
-            self.info.size_on_non_string_seen_in_write = len(self.content)
-        self.info.setitems += 1
-        self.info.writes += 1
-        self.content[w_key] = w_value
-        self.info.maxcontents = max(self.info.maxcontents, len(self.content))
-    def impl_setitem_str(self, key, w_value):
-        self.info.setitem_strs += 1
-        self.impl_setitem(self.space.wrap(key), w_value)
-    def impl_delitem(self, w_key):
-        if not self.info.seen_non_string_in_write \
-               and not self.info.seen_non_string_in_read_first \
-               and not self._is_str(w_key):
-            self.info.seen_non_string_in_read_first = True
-            self.info.size_on_non_string_seen_in_read = len(self.content)
-        self.info.delitems += 1
-        self.info.writes += 1
-        del self.content[w_key]
-
-    def impl_length(self):
-        self.info.lengths += 1
-        return len(self.content)
-    def impl_getitem_str(self, key):
-        return self.impl_getitem(self.space.wrap(key))
-    def impl_getitem(self, w_key):
-        self.info.gets += 1
-        self._read(w_key)
-        return self.content.get(w_key, None)
-
-    def impl_iteritems(self):
-        self.info.iteritems += 1
-        self.info.iterations += 1
-        return RDictItemIteratorImplementation(self.space, self)
-    def impl_iterkeys(self):
-        self.info.iterkeys += 1
-        self.info.iterations += 1
-        return RDictKeyIteratorImplementation(self.space, self)
-    def impl_itervalues(self):
-        self.info.itervalues += 1
-        self.info.iterations += 1
-        return RDictValueIteratorImplementation(self.space, self)
-
-    def impl_keys(self):
-        self.info.keys += 1
-        self.info.listings += 1
-        return self.content.keys()
-    def impl_values(self):
-        self.info.values += 1
-        self.info.listings += 1
-        return self.content.values()
-    def impl_items(self):
-        self.info.items += 1
-        self.info.listings += 1
-        return [self.space.newtuple([w_key, w_val])
-                    for w_key, w_val in self.content.iteritems()]
-
-
-_example = DictInfo()
-del DictInfo._dict_infos[-1]
-tmpl = 'os.write(fd, "%(attr)s" + ": " + str(info.%(attr)s) + "\\n")'
-bodySrc = []
-for attr in sorted(_example.__dict__):
-    if attr == 'sig':
-        continue
-    bodySrc.append(tmpl%locals())
-exec py.code.Source('''
-from pypy.rlib.objectmodel import current_object_addr_as_int
-def _report_one(fd, info):
-    os.write(fd, "_address" + ": " + str(current_object_addr_as_int(info))
-                 + "\\n")
-    %s
-'''%'\n    '.join(bodySrc)).compile()
-
-def report():
-    if not DictInfo._dict_infos:
-        return
-    os.write(2, "Starting multidict report.\n")
-    fd = os.open('dictinfo.txt', os.O_CREAT|os.O_WRONLY|os.O_TRUNC, 0644)
-    for info in DictInfo._dict_infos:
-        os.write(fd, '------------------\n')
-        _report_one(fd, info)
-    os.close(fd)
-    os.write(2, "Reporting done.\n")
-
+            return None, None
 
 
 init_signature = Signature(['seq_or_map'], None, 'kwargs')
@@ -919,7 +820,7 @@
 def repr__DictViewKeys(space, w_dictview):
     w_seq = space.call_function(space.w_list, w_dictview)
     w_repr = space.repr(w_seq)
-    return space.wrap("%s(%s)" % (space.type(w_dictview).getname(space, "?"),
+    return space.wrap("%s(%s)" % (space.type(w_dictview).getname(space),
                                   space.str_w(w_repr)))
 repr__DictViewItems  = repr__DictViewKeys
 repr__DictViewValues = repr__DictViewKeys
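
The dictmultiobject.py changes above replace the per-implementation subclasses of W_DictMultiObject with a single class carrying a strategy object plus an erased dstorage field; _add_indirections() generates forwarding methods, and EmptyDictStrategy installs a concrete strategy on the first insertion. The sketch below imitates that mechanism in plain Python (no rerased, no object space); the class and method names are simplified stand-ins, not the PyPy implementation.

class W_Dict(object):
    def __init__(self, strategy, storage):
        self.strategy = strategy
        self.dstorage = storage

def _add_indirections():
    # every dict method simply forwards to the current strategy, passing the
    # dict itself so the strategy can reach dstorage
    dict_methods = "setitem getitem length".split()
    def make_method(method):
        def f(self, *args):
            return getattr(self.strategy, method)(self, *args)
        f.func_name = method
        return f
    for method in dict_methods:
        setattr(W_Dict, method, make_method(method))
_add_indirections()

class EmptyStrategy(object):
    def getitem(self, w_dict, key):
        return None
    def length(self, w_dict):
        return 0
    def setitem(self, w_dict, key, value):
        # first insertion picks a concrete strategy, then retries the
        # operation through the indirection on w_dict
        if isinstance(key, str):
            w_dict.strategy = StringStrategy()
        else:
            w_dict.strategy = ObjectStrategy()
        w_dict.dstorage = {}
        w_dict.setitem(key, value)

class StringStrategy(object):
    def getitem(self, w_dict, key):
        return w_dict.dstorage.get(key, None)
    def length(self, w_dict):
        return len(w_dict.dstorage)
    def setitem(self, w_dict, key, value):
        w_dict.dstorage[key] = value

ObjectStrategy = StringStrategy   # same shape in this toy version

d = W_Dict(EmptyStrategy(), None)
assert d.length() == 0 and d.getitem("x") is None
d.setitem("x", 1)
assert isinstance(d.strategy, StringStrategy)
assert d.getitem("x") == 1 and d.length() == 1
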
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -1,96 +1,98 @@
 from pypy.objspace.std.model import registerimplementation, W_Object
 from pypy.objspace.std.register_all import register_all
 from pypy.objspace.std.dictmultiobject import W_DictMultiObject, IteratorImplementation
+from pypy.objspace.std.dictmultiobject import DictStrategy
 from pypy.objspace.std.typeobject import unwrap_cell
 from pypy.interpreter.error import OperationError
 
+from pypy.rlib import rerased
 
-class W_DictProxyObject(W_DictMultiObject):
-    def __init__(w_self, space, w_type):
-        W_DictMultiObject.__init__(w_self, space)
-        w_self.w_type = w_type
 
-    def impl_getitem(self, w_lookup):
+class DictProxyStrategy(DictStrategy):
+
+    erase, unerase = rerased.new_erasing_pair("dictproxy")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
+
+    def __init__(w_self, space):
+        DictStrategy.__init__(w_self, space)
+
+    def getitem(self, w_dict, w_key):
         space = self.space
-        w_lookup_type = space.type(w_lookup)
+        w_lookup_type = space.type(w_key)
         if space.is_w(w_lookup_type, space.w_str):
-            return self.impl_getitem_str(space.str_w(w_lookup))
+            return self.getitem_str(w_dict, space.str_w(w_key))
         else:
             return None
 
-    def impl_getitem_str(self, lookup):
-        return self.w_type.getdictvalue(self.space, lookup)
+    def getitem_str(self, w_dict, key):
+        return self.unerase(w_dict.dstorage).getdictvalue(self.space, key)
 
-    def impl_setitem(self, w_key, w_value):
+    def setitem(self, w_dict, w_key, w_value):
         space = self.space
         if space.is_w(space.type(w_key), space.w_str):
-            self.impl_setitem_str(self.space.str_w(w_key), w_value)
+            self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
         else:
             raise OperationError(space.w_TypeError, space.wrap("cannot add non-string keys to dict of a type"))
 
-    def impl_setitem_str(self, name, w_value):
+    def setitem_str(self, w_dict, key, w_value):
+        w_type = self.unerase(w_dict.dstorage)
         try:
-            self.w_type.setdictvalue(self.space, name, w_value)
+            w_type.setdictvalue(self.space, key, w_value)
         except OperationError, e:
             if not e.match(self.space, self.space.w_TypeError):
                 raise
-            w_type = self.w_type
             if not w_type.is_cpytype():
                 raise
             # xxx obscure workaround: allow cpyext to write to type->tp_dict.
             # xxx like CPython, we assume that this is only done early after
             # xxx the type is created, and we don't invalidate any cache.
-            w_type.dict_w[name] = w_value
+            w_type.dict_w[key] = w_value
 
-    def impl_setdefault(self, w_key, w_default):
+    def setdefault(self, w_dict, w_key, w_default):
         space = self.space
-        w_result = self.impl_getitem(w_key)
+        w_result = self.getitem(w_dict, w_key)
         if w_result is not None:
             return w_result
-        self.impl_setitem(w_key, w_default)
+        self.setitem(w_dict, w_key, w_default)
         return w_default
 
-    def impl_delitem(self, w_key):
+    def delitem(self, w_dict, w_key):
         space = self.space
         w_key_type = space.type(w_key)
         if space.is_w(w_key_type, space.w_str):
-            if not self.w_type.deldictvalue(space, w_key):
+            if not self.unerase(w_dict.dstorage).deldictvalue(space, w_key):
                 raise KeyError
         else:
             raise KeyError
 
-    def impl_length(self):
-        return len(self.w_type.dict_w)
+    def length(self, w_dict):
+        return len(self.unerase(w_dict.dstorage).dict_w)
 
-    def impl_iter(self):
-        return DictProxyIteratorImplementation(self.space, self)
+    def iter(self, w_dict):
+        return DictProxyIteratorImplementation(self.space, self, w_dict)
 
-    def impl_keys(self):
+    def keys(self, w_dict):
         space = self.space
-        return [space.wrap(key) for key in self.w_type.dict_w.iterkeys()]
+        return [space.wrap(key) for key in self.unerase(w_dict.dstorage).dict_w.iterkeys()]
 
-    def impl_values(self):
-        return [unwrap_cell(self.space, w_value) for w_value in self.w_type.dict_w.itervalues()]
+    def values(self, w_dict):
+        return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
 
-    def impl_items(self):
+    def items(self, w_dict):
         space = self.space
         return [space.newtuple([space.wrap(key), unwrap_cell(self.space, w_value)])
-                    for (key, w_value) in self.w_type.dict_w.iteritems()]
+                    for (key, w_value) in self.unerase(w_dict.dstorage).dict_w.iteritems()]
 
-    def impl_clear(self):
-        self.w_type.dict_w.clear()
-        self.w_type.mutated()
-
-    def _as_rdict(self):
-        assert 0, "should be unreachable"
-
-    def _clear_fields(self):
-        assert 0, "should be unreachable"
+    def clear(self, w_dict):
+        self.unerase(w_dict.dstorage).dict_w.clear()
+        self.unerase(w_dict.dstorage).mutated()
 
 class DictProxyIteratorImplementation(IteratorImplementation):
-    def __init__(self, space, dictimplementation):
+    def __init__(self, space, strategy, dictimplementation):
         IteratorImplementation.__init__(self, space, dictimplementation)
-        self.iterator = dictimplementation.w_type.dict_w.iteritems()
+        w_type = strategy.unerase(dictimplementation.dstorage)
+        self.iterator = w_type.dict_w.iteritems()
 
     def next_entry(self):
         for key, w_value in self.iterator:
diff --git a/pypy/objspace/std/frame.py b/pypy/objspace/std/frame.py
--- a/pypy/objspace/std/frame.py
+++ b/pypy/objspace/std/frame.py
@@ -6,7 +6,7 @@
 from pypy.interpreter import pyopcode, function
 from pypy.interpreter.pyframe import PyFrame
 from pypy.interpreter.error import OperationError, operationerrfmt
-from pypy.module.__builtin__ import OPTIMIZED_BUILTINS, Module
+from pypy.module.__builtin__ import Module
 from pypy.objspace.std import intobject, smallintobject
 from pypy.objspace.std.multimethod import FailedToImplement
 from pypy.objspace.std.dictmultiobject import W_DictMultiObject
@@ -66,41 +66,6 @@
         w_result = f.space.getitem(w_1, w_2)
     f.pushvalue(w_result)
 
-def CALL_LIKELY_BUILTIN(f, oparg, next_instr):
-    w_globals = f.w_globals
-    num = oparg >> 8
-    assert isinstance(w_globals, W_DictMultiObject)
-    w_value = w_globals.get_builtin_indexed(num)
-    if w_value is None:
-        builtins = f.get_builtin()
-        assert isinstance(builtins, Module)
-        w_builtin_dict = builtins.getdict(f.space)
-        assert isinstance(w_builtin_dict, W_DictMultiObject)
-        w_value = w_builtin_dict.get_builtin_indexed(num)
-    if w_value is None:
-        varname = OPTIMIZED_BUILTINS[num]
-        message = "global name '%s' is not defined"
-        raise operationerrfmt(f.space.w_NameError,
-                              message, varname)
-    nargs = oparg & 0xff
-    w_function = w_value
-    try:
-        w_result = call_likely_builtin(f, w_function, nargs)
-    finally:
-        f.dropvalues(nargs)
-    f.pushvalue(w_result)
-
-def call_likely_builtin(f, w_function, nargs):
-    if isinstance(w_function, function.Function):
-        executioncontext = f.space.getexecutioncontext()
-        executioncontext.c_call_trace(f, w_function)
-        res = w_function.funccall_valuestack(nargs, f)
-        executioncontext.c_return_trace(f, w_function)
-        return res
-    args = f.make_arguments(nargs)
-    return f.space.call_args(w_function, args)
-
-
 compare_table = [
     "lt",   # "<"
     "le",   # "<="
@@ -145,8 +110,6 @@
             StdObjSpaceFrame.BINARY_ADD = int_BINARY_ADD
     if space.config.objspace.std.optimized_list_getitem:
         StdObjSpaceFrame.BINARY_SUBSCR = list_BINARY_SUBSCR
-    if space.config.objspace.opcodes.CALL_LIKELY_BUILTIN:
-        StdObjSpaceFrame.CALL_LIKELY_BUILTIN = CALL_LIKELY_BUILTIN
     if space.config.objspace.opcodes.CALL_METHOD:
         from pypy.objspace.std.callmethod import LOOKUP_METHOD, CALL_METHOD
         StdObjSpaceFrame.LOOKUP_METHOD = LOOKUP_METHOD
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -4,9 +4,9 @@
 from pypy.rlib import rerased
 
 from pypy.interpreter.baseobjspace import W_Root
-from pypy.objspace.std.dictmultiobject import W_DictMultiObject
+from pypy.objspace.std.dictmultiobject import W_DictMultiObject, DictStrategy, ObjectDictStrategy
 from pypy.objspace.std.dictmultiobject import IteratorImplementation
-from pypy.objspace.std.dictmultiobject import _is_sane_hash
+from pypy.objspace.std.dictmultiobject import _never_equal_to_string
 from pypy.objspace.std.objectobject import W_ObjectObject
 from pypy.objspace.std.typeobject import TypeCell
 
@@ -53,7 +53,7 @@
         else:
             return self._index_indirection(selector)
 
-    @jit.purefunction
+    @jit.elidable
     def _index_jit_pure(self, name, index):
         return self._index_indirection((name, index))
 
@@ -113,14 +113,14 @@
     def set_terminator(self, obj, terminator):
         raise NotImplementedError("abstract base class")
 
-    @jit.purefunction
+    @jit.elidable
     def size_estimate(self):
         return self._size_estimate >> NUM_DIGITS
 
     def search(self, attrtype):
         return None
 
-    @jit.purefunction
+    @jit.elidable
     def _get_new_attr(self, name, index):
         selector = name, index
         cache = self.cache_attrs
@@ -154,7 +154,7 @@
         obj._set_mapdict_map(attr)
         obj._mapdict_write_storage(attr.position, w_value)
 
-    def materialize_r_dict(self, space, obj, w_d):
+    def materialize_r_dict(self, space, obj, dict_w):
         raise NotImplementedError("abstract base class")
 
     def remove_dict_entries(self, obj):
@@ -205,7 +205,7 @@
         Terminator.__init__(self, space, w_cls)
         self.devolved_dict_terminator = DevolvedDictTerminator(space, w_cls)
 
-    def materialize_r_dict(self, space, obj, w_d):
+    def materialize_r_dict(self, space, obj, dict_w):
         result = Object()
         result.space = space
         result._init_empty(self.devolved_dict_terminator)
@@ -297,11 +297,11 @@
             return self
         return self.back.search(attrtype)
 
-    def materialize_r_dict(self, space, obj, w_d):
-        new_obj = self.back.materialize_r_dict(space, obj, w_d)
+    def materialize_r_dict(self, space, obj, dict_w):
+        new_obj = self.back.materialize_r_dict(space, obj, dict_w)
         if self.selector[1] == DICT:
             w_attr = space.wrap(self.selector[0])
-            w_d.r_dict_content[w_attr] = obj._mapdict_read_storage(self.position)
+            dict_w[w_attr] = obj._mapdict_read_storage(self.position)
         else:
             self._copy_attr(obj, new_obj)
         return new_obj
@@ -357,7 +357,7 @@
         self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
 
     def _get_mapdict_map(self):
-        return jit.hint(self.map, promote=True)
+        return jit.promote(self.map)
     def _set_mapdict_map(self, map):
         self.map = map
     # _____________________________________________
@@ -382,7 +382,10 @@
         if w_dict is not None:
             assert isinstance(w_dict, W_DictMultiObject)
             return w_dict
-        w_dict = MapDictImplementation(space, self)
+
+        strategy = space.fromcache(MapDictStrategy)
+        storage = strategy.erase(self)
+        w_dict = W_DictMultiObject(space, strategy, storage)
         flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict)
         assert flag
         return w_dict
@@ -392,8 +395,8 @@
         w_dict = check_new_dictionary(space, w_dict)
         w_olddict = self.getdict(space)
         assert isinstance(w_dict, W_DictMultiObject)
-        if w_olddict.r_dict_content is None:
-            w_olddict._as_rdict()
+        if type(w_olddict.strategy) is not ObjectDictStrategy:
+            w_olddict.strategy.switch_to_object_strategy(w_olddict)
         flag = self._get_mapdict_map().write(self, ("dict", SPECIAL), w_dict)
         assert flag
 
@@ -575,105 +578,121 @@
 # ____________________________________________________________
 # dict implementation
 
+class MapDictStrategy(DictStrategy):
 
-class MapDictImplementation(W_DictMultiObject):
-    def __init__(self, space, w_obj):
+    erase, unerase = rerased.new_erasing_pair("map")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
+
+    def __init__(self, space):
         self.space = space
-        self.w_obj = w_obj
 
-    def impl_getitem(self, w_lookup):
+    def switch_to_object_strategy(self, w_dict):
+        w_obj = self.unerase(w_dict.dstorage)
+        strategy = self.space.fromcache(ObjectDictStrategy)
+        dict_w = strategy.unerase(strategy.get_empty_storage())
+        w_dict.strategy = strategy
+        w_dict.dstorage = strategy.erase(dict_w)
+        assert w_obj.getdict(self.space) is w_dict
+        materialize_r_dict(self.space, w_obj, dict_w)
+
+    def getitem(self, w_dict, w_key):
         space = self.space
-        w_lookup_type = space.type(w_lookup)
+        w_lookup_type = space.type(w_key)
         if space.is_w(w_lookup_type, space.w_str):
-            return self.impl_getitem_str(space.str_w(w_lookup))
-        elif _is_sane_hash(space, w_lookup_type):
+            return self.getitem_str(w_dict, space.str_w(w_key))
+        elif _never_equal_to_string(space, w_lookup_type):
             return None
         else:
-            return self._as_rdict().impl_fallback_getitem(w_lookup)
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.getitem(w_key)
 
-    def impl_getitem_str(self, key):
-        return self.w_obj.getdictvalue(self.space, key)
+    def getitem_str(self, w_dict, key):
+        w_obj = self.unerase(w_dict.dstorage)
+        return w_obj.getdictvalue(self.space, key)
 
-    def impl_setitem_str(self,  key, w_value):
-        flag = self.w_obj.setdictvalue(self.space, key, w_value)
+    def setitem_str(self, w_dict, key, w_value):
+        w_obj = self.unerase(w_dict.dstorage)
+        flag = w_obj.setdictvalue(self.space, key, w_value)
         assert flag
 
-    def impl_setitem(self,  w_key, w_value):
+    def setitem(self, w_dict, w_key, w_value):
         space = self.space
         if space.is_w(space.type(w_key), space.w_str):
-            self.impl_setitem_str(self.space.str_w(w_key), w_value)
+            self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
         else:
-            self._as_rdict().impl_fallback_setitem(w_key, w_value)
+            self.switch_to_object_strategy(w_dict)
+            w_dict.setitem(w_key, w_value)
 
-    def impl_setdefault(self,  w_key, w_default):
+    def setdefault(self, w_dict, w_key, w_default):
         space = self.space
         if space.is_w(space.type(w_key), space.w_str):
             key = space.str_w(w_key)
-            w_result = self.impl_getitem_str(key)
+            w_result = self.getitem_str(w_dict, key)
             if w_result is not None:
                 return w_result
-            self.impl_setitem_str(key, w_default)
+            self.setitem_str(w_dict, key, w_default)
             return w_default
         else:
-            return self._as_rdict().impl_fallback_setdefault(w_key, w_default)
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.setdefault(w_key, w_default)
 
-    def impl_delitem(self, w_key):
+    def delitem(self, w_dict, w_key):
         space = self.space
         w_key_type = space.type(w_key)
+        w_obj = self.unerase(w_dict.dstorage)
         if space.is_w(w_key_type, space.w_str):
-            flag = self.w_obj.deldictvalue(space, w_key)
+            flag = w_obj.deldictvalue(space, w_key)
             if not flag:
                 raise KeyError
-        elif _is_sane_hash(space, w_key_type):
+        elif _never_equal_to_string(space, w_key_type):
             raise KeyError
         else:
-            self._as_rdict().impl_fallback_delitem(w_key)
+            self.switch_to_object_strategy(w_dict)
+            w_dict.delitem(w_key)
 
-    def impl_length(self):
+    def length(self, w_dict):
         res = 0
-        curr = self.w_obj._get_mapdict_map().search(DICT)
+        curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT)
         while curr is not None:
             curr = curr.back
             curr = curr.search(DICT)
             res += 1
         return res
 
-    def impl_iter(self):
-        return MapDictIteratorImplementation(self.space, self)
+    def iter(self, w_dict):
+        return MapDictIteratorImplementation(self.space, self, w_dict)
 
-    def impl_clear(self):
-        w_obj = self.w_obj
+    def clear(self, w_dict):
+        w_obj = self.unerase(w_dict.dstorage)
         new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj)
         _become(w_obj, new_obj)
 
-    def _clear_fields(self):
-        self.w_obj = None
+    def popitem(self, w_dict):
+        curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT)
+        if curr is None:
+            raise KeyError
+        key = curr.selector[0]
+        w_value = self.getitem_str(w_dict, key)
+        w_key = self.space.wrap(key)
+        self.delitem(w_dict, w_key)
+        return (w_key, w_value)
 
-    def _as_rdict(self):
-        self.initialize_as_rdict()
-        space = self.space
-        w_obj = self.w_obj
-        materialize_r_dict(space, w_obj, self)
-        self._clear_fields()
-        return self
-
-
-def materialize_r_dict(space, obj, w_d):
+def materialize_r_dict(space, obj, dict_w):
     map = obj._get_mapdict_map()
-    assert obj.getdict(space) is w_d
-    new_obj = map.materialize_r_dict(space, obj, w_d)
+    new_obj = map.materialize_r_dict(space, obj, dict_w)
     _become(obj, new_obj)
 
 class MapDictIteratorImplementation(IteratorImplementation):
-    def __init__(self, space, dictimplementation):
+    def __init__(self, space, strategy, dictimplementation):
         IteratorImplementation.__init__(self, space, dictimplementation)
-        w_obj = dictimplementation.w_obj
+        w_obj = strategy.unerase(dictimplementation.dstorage)
         self.w_obj = w_obj
         self.orig_map = self.curr_map = w_obj._get_mapdict_map()
 
     def next_entry(self):
         implementation = self.dictimplementation
-        assert isinstance(implementation, MapDictImplementation)
+        assert isinstance(implementation.strategy, MapDictStrategy)
         if self.orig_map is not self.w_obj._get_mapdict_map():
             return None, None
         if self.curr_map:
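
MapDictStrategy above walks the object's attribute map to count and pop DICT entries (see length() and popitem()). Below is a toy reduction of that chain walk, with Attr and Terminator standing in for the real mapdict attribute classes; it is an illustration only.

DICT = "dict"

class Terminator(object):
    # end of the attribute chain; finds nothing
    def search(self, kind):
        return None

class Attr(object):
    def __init__(self, name, kind, back):
        self.selector = (name, kind)
        self.back = back          # next older attribute (or the Terminator)
    def search(self, kind):
        if self.selector[1] == kind:
            return self
        return self.back.search(kind)

def dict_length(attr):
    # same loop shape as MapDictStrategy.length above
    res = 0
    curr = attr.search(DICT)
    while curr is not None:
        curr = curr.back
        curr = curr.search(DICT)
        res += 1
    return res

m = Attr("a", DICT, Attr("x", "slot", Attr("b", DICT, Terminator())))
assert dict_length(m) == 2
assert m.search(DICT).selector[0] == "a"   # popitem above pops this entry first
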
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -11,7 +11,7 @@
 from pypy.rlib.debug import make_sure_not_resized
 from pypy.rlib.rarithmetic import base_int, widen
 from pypy.rlib.objectmodel import we_are_translated
-from pypy.rlib.jit import hint
+from pypy.rlib import jit
 from pypy.rlib.rbigint import rbigint
 from pypy.tool.sourcetools import func_with_new_name
 
@@ -255,7 +255,7 @@
             w_result = self.wrap_exception_cls(x)
             if w_result is not None:
                 return w_result
-        from fake import fake_object
+        from pypy.objspace.std.fake import fake_object
         return fake_object(self, x)
 
     def wrap_exception_cls(self, x):
@@ -311,6 +311,10 @@
                 classofinstance=classofinstance,
                 strdict=strdict)
 
+    def newset(self):
+        from pypy.objspace.std.setobject import newset
+        return W_SetObject(self, newset(self))
+
     def newslice(self, w_start, w_end, w_step):
         return W_SliceObject(w_start, w_end, w_step)
 
@@ -318,7 +322,7 @@
         return W_SeqIterObject(w_obj)
 
     def type(self, w_obj):
-        hint(w_obj.__class__, promote=True)
+        jit.promote(w_obj.__class__)
         return w_obj.getclass(self)
 
     def lookup(self, w_obj, name):
diff --git a/pypy/objspace/std/ropeunicodeobject.py b/pypy/objspace/std/ropeunicodeobject.py
--- a/pypy/objspace/std/ropeunicodeobject.py
+++ b/pypy/objspace/std/ropeunicodeobject.py
@@ -986,7 +986,7 @@
 ##        return space.wrap(0)
 ##    return space.wrap(result)
 
-import unicodetype
+from pypy.objspace.std import unicodetype
 register_all(vars(), unicodetype)
 
 # str.strip(unicode) needs to convert self to unicode and call unicode.strip we
@@ -997,7 +997,7 @@
 # methods?
 
 class str_methods:
-    import stringtype
+    from pypy.objspace.std import stringtype
     W_RopeUnicodeObject = W_RopeUnicodeObject
     from pypy.objspace.std.ropeobject import W_RopeObject
     def str_strip__Rope_RopeUnicode(space, w_self, w_chars):
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -112,7 +112,7 @@
 # some helper functions
 
 def newset(space):
-    return r_dict(space.eq_w, space.hash_w)
+    return r_dict(space.eq_w, space.hash_w, force_non_null=True)
 
 def make_setdata_from_w_iterable(space, w_iterable=None):
     """Return a new r_dict with the content of w_iterable."""
@@ -466,12 +466,11 @@
     return space.wrap(hash)
 
 def set_pop__Set(space, w_left):
-    for w_key in w_left.setdata:
-        break
-    else:
+    try:
+        w_key, _ = w_left.setdata.popitem()
+    except KeyError:
         raise OperationError(space.w_KeyError,
                                 space.wrap('pop from an empty set'))
-    del w_left.setdata[w_key]
     return w_key
 
 def and__Set_Set(space, w_left, w_other):
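
set_pop__Set above now uses r_dict.popitem() rather than iterating to find a key and deleting it. The same shape in plain Python, with an ordinary dict of key -> None standing in for the set's r_dict storage:

def set_pop(setdata):
    # pop an arbitrary entry and turn an empty-dict KeyError into the
    # "pop from an empty set" error, as in the diff above
    try:
        w_key, _ = setdata.popitem()
    except KeyError:
        raise KeyError('pop from an empty set')
    return w_key

s = {1: None, 2: None}      # sets store their elements as dict keys
assert set_pop(s) in (1, 2)
assert len(s) == 1
set_pop(s)
try:
    set_pop(s)
except KeyError, e:
    assert 'empty set' in str(e)
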
diff --git a/pypy/objspace/std/test/test_celldict.py b/pypy/objspace/std/test/test_celldict.py
--- a/pypy/objspace/std/test/test_celldict.py
+++ b/pypy/objspace/std/test/test_celldict.py
@@ -1,6 +1,7 @@
 import py
 from pypy.conftest import gettestobjspace, option
-from pypy.objspace.std.celldict import ModuleCell, ModuleDictImplementation
+from pypy.objspace.std.dictmultiobject import W_DictMultiObject
+from pypy.objspace.std.celldict import ModuleCell, ModuleDictStrategy
 from pypy.objspace.std.test.test_dictmultiobject import FakeSpace
 from pypy.interpreter import gateway
 
@@ -8,7 +9,15 @@
 
 class TestCellDict(object):
     def test_basic_property(self):
-        d = ModuleDictImplementation(space)
+        strategy = ModuleDictStrategy(space)
+        storage = strategy.get_empty_storage()
+        d = W_DictMultiObject(space, strategy, storage)
+
+        # replace getcell with getcell from strategy
+        def f(key, makenew):
+            return strategy.getcell(d, key, makenew)
+        d.getcell = f
+
         d.setitem("a", 1)
         assert d.getcell("a", False) is d.getcell("a", False)
         acell = d.getcell("a", False)
@@ -29,3 +38,33 @@
         assert d.getitem("a") is None
         assert d.getcell("a", False) is acell
         assert d.length() == 0
+
+class AppTestCellDict(object):
+    OPTIONS = {"objspace.std.withcelldict": True}
+
+    def setup_class(cls):
+        if option.runappdirect:
+            py.test.skip("__repr__ doesn't work on appdirect")
+        strategy = ModuleDictStrategy(cls.space)
+        storage = strategy.get_empty_storage()
+        cls.w_d = W_DictMultiObject(cls.space, strategy, storage)
+
+    def test_popitem(self):
+        import __pypy__
+
+        d = self.d
+        assert "ModuleDict" in __pypy__.internal_repr(d)
+        raises(KeyError, d.popitem)
+        d["a"] = 3
+        x = d.popitem()
+        assert x == ("a", 3)
+
+    def test_degenerate(self):
+        import __pypy__
+
+        d = self.d
+        assert "ModuleDict" in __pypy__.internal_repr(d)
+        d["a"] = 3
+        del d["a"]
+        d[object()] = 5
+        assert d.values() == [5]
\ No newline at end of file
diff --git a/pypy/objspace/std/test/test_dictmultiobject.py b/pypy/objspace/std/test/test_dictmultiobject.py
--- a/pypy/objspace/std/test/test_dictmultiobject.py
+++ b/pypy/objspace/std/test/test_dictmultiobject.py
@@ -1,12 +1,13 @@
+import py
 import sys
 from pypy.interpreter.error import OperationError
 from pypy.objspace.std.dictmultiobject import \
      W_DictMultiObject, setitem__DictMulti_ANY_ANY, getitem__DictMulti_ANY, \
-     StrDictImplementation
+     StringDictStrategy, ObjectDictStrategy
 
-from pypy.objspace.std.celldict import ModuleDictImplementation
+from pypy.objspace.std.celldict import ModuleDictStrategy
 from pypy.conftest import gettestobjspace
-
+from pypy.conftest import option
 
 class TestW_DictObject:
 
@@ -17,7 +18,7 @@
         space = self.space
         d = self.space.newdict()
         assert not self.space.is_true(d)
-        assert d.r_dict_content is None
+        assert type(d.strategy) is not ObjectDictStrategy
 
     def test_nonempty(self):
         space = self.space
@@ -137,31 +138,31 @@
         cls.w_on_pypy = cls.space.wrap("__pypy__" in sys.builtin_module_names)
 
     def test_equality(self):
-        d = {1:2}
-        f = {1:2}
+        d = {1: 2}
+        f = {1: 2}
         assert d == f
-        assert d != {1:3}
+        assert d != {1: 3}
 
     def test_clear(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         d.clear()
         assert len(d) == 0
 
     def test_copy(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         dd = d.copy()
         assert d == dd
         assert not d is dd
 
     def test_get(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         assert d.get(1) == 2
-        assert d.get(1,44) == 2
+        assert d.get(1, 44) == 2
         assert d.get(33) == None
-        assert d.get(33,44) == 44
+        assert d.get(33, 44) == 44
 
     def test_pop(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         dd = d.copy()
         result = dd.pop(1)
         assert result == 2
@@ -176,18 +177,18 @@
         raises(KeyError, dd.pop, 33)
 
     def test_has_key(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         assert d.has_key(1)
         assert not d.has_key(33)
 
     def test_items(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         its = d.items()
         its.sort()
-        assert its == [(1,2),(3,4)]
+        assert its == [(1, 2), (3, 4)]
 
     def test_iteritems(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         dd = d.copy()
         for k, v in d.iteritems():
             assert v == dd[k]
@@ -195,33 +196,33 @@
         assert not dd
 
     def test_iterkeys(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         dd = d.copy()
         for k in d.iterkeys():
             del dd[k]
         assert not dd
 
     def test_itervalues(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         values = []
         for k in d.itervalues():
             values.append(k)
         assert values == d.values()
 
     def test_keys(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         kys = d.keys()
         kys.sort()
-        assert kys == [1,3]
+        assert kys == [1, 3]
 
     def test_popitem(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         it = d.popitem()
         assert len(d) == 1
-        assert it==(1,2) or it==(3,4)
+        assert it == (1, 2) or it == (3, 4)
         it1 = d.popitem()
         assert len(d) == 0
-        assert (it!=it1) and (it1==(1,2) or it1==(3,4))
+        assert (it != it1) and (it1 == (1, 2) or it1 == (3, 4))
         raises(KeyError, d.popitem)
 
     def test_popitem_2(self):
@@ -233,8 +234,33 @@
         assert it1 == ('x', 5)
         raises(KeyError, d.popitem)
 
+    def test_popitem3(self):
+        # dict with mixed (object) keys
+        d = {"a": 1, 2: 2, "c": 3}
+        l = []
+        while True:
+            try:
+                l.append(d.popitem())
+            except KeyError:
+                break
+        assert ("a", 1) in l
+        assert (2, 2) in l
+        assert ("c", 3) in l
+
+        # dict with string-only keys
+        d = {"a": 1, "b":2, "c":3}
+        l = []
+        while True:
+            try:
+                l.append(d.popitem())
+            except KeyError:
+                break
+        assert ("a", 1) in l
+        assert ("b", 2) in l
+        assert ("c", 3) in l
+
     def test_setdefault(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         dd = d.copy()
         x = dd.setdefault(1, 99)
         assert d == dd
@@ -267,12 +293,12 @@
             assert k.calls == 1
 
     def test_update(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         dd = d.copy()
         d.update({})
         assert d == dd
-        d.update({3:5, 6:7})
-        assert d == {1:2, 3:5, 6:7}
+        d.update({3: 5, 6: 7})
+        assert d == {1: 2, 3: 5, 6: 7}
 
     def test_update_iterable(self):
         d = {}
@@ -297,15 +323,15 @@
         assert d == {'foo': 'bar', 'baz': 1}
 
     def test_values(self):
-        d = {1:2, 3:4}
+        d = {1: 2, 3: 4}
         vals = d.values()
         vals.sort()
         assert vals == [2,4]
 
     def test_eq(self):
-        d1 = {1:2, 3:4}
-        d2 = {1:2, 3:4}
-        d3 = {1:2}
+        d1 = {1: 2, 3: 4}
+        d2 = {1: 2, 3: 4}
+        d3 = {1: 2}
         bool = d1 == d2
         assert bool == True
         bool = d1 == d3
@@ -316,10 +342,10 @@
         assert bool == True
 
     def test_lt(self):
-        d1 = {1:2, 3:4}
-        d2 = {1:2, 3:4}
-        d3 = {1:2, 3:5}
-        d4 = {1:2}
+        d1 = {1: 2, 3: 4}
+        d2 = {1: 2, 3: 4}
+        d3 = {1: 2, 3: 5}
+        d4 = {1: 2}
         bool = d1 < d2
         assert bool == False
         bool = d1 < d3
@@ -366,21 +392,17 @@
     def test_new(self):
         d = dict()
         assert d == {}
-        args = [['a',2], [23,45]]
+        args = [['a', 2], [23, 45]]
         d = dict(args)
-        assert d == {'a':2, 23:45}
+        assert d == {'a': 2, 23: 45}
         d = dict(args, a=33, b=44)
-        assert d == {'a':33, 'b':44, 23:45}
+        assert d == {'a': 33, 'b': 44, 23: 45}
         d = dict(a=33, b=44)
-        assert d == {'a':33, 'b':44}
-        d = dict({'a':33, 'b':44})
-        assert d == {'a':33, 'b':44}
-        try: d = dict(23)
-        except (TypeError, ValueError): pass
-        else: self.fail("dict(23) should raise!")
-        try: d = dict([[1,2,3]])
-        except (TypeError, ValueError): pass
-        else: self.fail("dict([[1,2,3]]) should raise!")
+        assert d == {'a': 33, 'b': 44}
+        d = dict({'a': 33, 'b': 44})
+        assert d == {'a': 33, 'b': 44}
+        raises((TypeError, ValueError), dict, 23)
+        raises((TypeError, ValueError), dict, [[1, 2, 3]])
 
     def test_fromkeys(self):
         assert {}.fromkeys([1, 2], 1) == {1: 1, 2: 1}
@@ -527,6 +549,12 @@
             __missing__ = SpecialDescr(missing)
         assert X()['hi'] == 42
 
+    def test_empty_dict(self):
+        d = {}
+        raises(KeyError, d.popitem)
+        assert d.items() == []
+        assert d.values() == []
+        assert d.keys() == []
 
 class AppTest_DictMultiObject(AppTest_DictObject):
 
@@ -706,10 +734,12 @@
 class AppTestModuleDict(object):
     def setup_class(cls):
         cls.space = gettestobjspace(**{"objspace.std.withcelldict": True})
+        if option.runappdirect:
+            py.test.skip("__repr__ doesn't work on appdirect")
 
     def w_impl_used(self, obj):
         import __pypy__
-        assert "ModuleDictImplementation" in __pypy__.internal_repr(obj)
+        assert "ModuleDictStrategy" in __pypy__.internal_repr(obj)
 
     def test_check_module_uses_module_dict(self):
         m = type(__builtins__)("abc")
@@ -719,6 +749,64 @@
         d = type(__builtins__)("abc").__dict__
         raises(KeyError, "d['def']")
 
+    def test_fallback_evil_key(self):
+        class F(object):
+            def __hash__(self):
+                return hash("s")
+            def __eq__(self, other):
+                return other == "s"
+        d = type(__builtins__)("abc").__dict__
+        d["s"] = 12
+        assert d["s"] == 12
+        assert d[F()] == d["s"]
+
+        d = type(__builtins__)("abc").__dict__
+        x = d.setdefault("s", 12)
+        assert x == 12
+        x = d.setdefault(F(), 12)
+        assert x == 12
+
+        d = type(__builtins__)("abc").__dict__
+        x = d.setdefault(F(), 12)
+        assert x == 12
+
+        d = type(__builtins__)("abc").__dict__
+        d["s"] = 12
+        del d[F()]
+
+        assert "s" not in d
+        assert F() not in d
+
+class AppTestStrategies(object):
+    def setup_class(cls):
+        if option.runappdirect:
+            py.test.skip("__repr__ doesn't work on appdirect")
+
+    def w_get_strategy(self, obj):
+        import __pypy__
+        r = __pypy__.internal_repr(obj)
+        return r[r.find("(") + 1: r.find(")")]
+
+    def test_empty_to_string(self):
+        d = {}
+        assert "EmptyDictStrategy" in self.get_strategy(d)
+        d["a"] = 1
+        assert "StringDictStrategy" in self.get_strategy(d)
+
+        class O(object):
+            pass
+        o = O()
+        d = o.__dict__ = {}
+        assert "EmptyDictStrategy" in self.get_strategy(d)
+        o.a = 1
+        assert "StringDictStrategy" in self.get_strategy(d)
+
+    def test_empty_to_int(self):
+        import sys
+        d = {}
+        d[1] = "hi"
+        assert "IntDictStrategy" in self.get_strategy(d)
+        assert d[1L] == "hi"
 
 
 class FakeString(str):
@@ -759,6 +847,10 @@
         assert isinstance(string, str)
         return string
 
+    def int_w(self, integer):
+        assert isinstance(integer, int)
+        return integer
+
     def wrap(self, obj):
         return obj
 
@@ -790,6 +882,10 @@
 
     w_StopIteration = StopIteration
     w_None = None
+    w_NoneType = type(None)
+    w_int = int
+    w_bool = bool
+    w_float = float
     StringObjectCls = FakeString
     w_dict = W_DictMultiObject
     iter = iter
@@ -799,12 +895,9 @@
 class Config:
     class objspace:
         class std:
-            withdictmeasurement = False
             withsmalldicts = False
             withcelldict = False
             withmethodcache = False
-        class opcodes:
-            CALL_LIKELY_BUILTIN = False
 
 FakeSpace.config = Config()
 
@@ -834,14 +927,20 @@
         self.impl = self.get_impl()
 
     def get_impl(self):
-        return self.ImplementionClass(self.fakespace)
+        strategy = self.StrategyClass(self.fakespace)
+        storage = strategy.get_empty_storage()
+        w_dict = self.fakespace.allocate_instance(W_DictMultiObject, None)
+        W_DictMultiObject.__init__(w_dict, self.fakespace, strategy, storage)
+        return w_dict
 
     def fill_impl(self):
         self.impl.setitem(self.string, 1000)
         self.impl.setitem(self.string2, 2000)
 
     def check_not_devolved(self):
-        assert self.impl.r_dict_content is None
+        #XXX check if strategy changed!?
+        assert type(self.impl.strategy) is self.StrategyClass
+        #assert self.impl.r_dict_content is None
 
     def test_setitem(self):
         self.impl.setitem(self.string, 1000)
@@ -913,7 +1012,7 @@
         for x in xrange(100):
             impl.setitem(self.fakespace.str_w(str(x)), x)
             impl.setitem(x, x)
-        assert impl.r_dict_content is not None
+        assert type(impl.strategy) is ObjectDictStrategy
 
     def test_setdefault_fast(self):
         on_pypy = "__pypy__" in sys.builtin_module_names
@@ -928,8 +1027,38 @@
         if on_pypy:
             assert key.hash_count == 2
 
+    def test_fallback_evil_key(self):
+        class F(object):
+            def __hash__(self):
+                return hash("s")
+            def __eq__(self, other):
+                return other == "s"
+
+        d = self.get_impl()
+        d.setitem("s", 12)
+        assert d.getitem("s") == 12
+        assert d.getitem(F()) == d.getitem("s")
+
+        d = self.get_impl()
+        x = d.setdefault("s", 12)
+        assert x == 12
+        x = d.setdefault(F(), 12)
+        assert x == 12
+
+        d = self.get_impl()
+        x = d.setdefault(F(), 12)
+        assert x == 12
+
+        d = self.get_impl()
+        d.setitem("s", 12)
+        d.delitem(F())
+
+        assert "s" not in d.keys()
+        assert F() not in d.keys()
+
 class TestStrDictImplementation(BaseTestRDictImplementation):
-    ImplementionClass = StrDictImplementation
+    StrategyClass = StringDictStrategy
+    #ImplementionClass = StrDictImplementation
 
     def test_str_shortcut(self):
         self.fill_impl()
@@ -942,10 +1071,10 @@
 ##     DevolvedClass = MeasuringDictImplementation
 
 class TestModuleDictImplementation(BaseTestRDictImplementation):
-    ImplementionClass = ModuleDictImplementation
+    StrategyClass = ModuleDictStrategy
 
 class TestModuleDictImplementationWithBuiltinNames(BaseTestRDictImplementation):
-    ImplementionClass = ModuleDictImplementation
+    StrategyClass = ModuleDictStrategy
 
     string = "int"
     string2 = "isinstance"
@@ -954,19 +1083,19 @@
 class BaseTestDevolvedDictImplementation(BaseTestRDictImplementation):
     def fill_impl(self):
         BaseTestRDictImplementation.fill_impl(self)
-        self.impl._as_rdict()
+        self.impl.strategy.switch_to_object_strategy(self.impl)
 
     def check_not_devolved(self):
         pass
 
 class TestDevolvedStrDictImplementation(BaseTestDevolvedDictImplementation):
-    ImplementionClass = StrDictImplementation
+    StrategyClass = StringDictStrategy
 
 class TestDevolvedModuleDictImplementation(BaseTestDevolvedDictImplementation):
-    ImplementionClass = ModuleDictImplementation
+    StrategyClass = ModuleDictStrategy
 
 class TestDevolvedModuleDictImplementationWithBuiltinNames(BaseTestDevolvedDictImplementation):
-    ImplementionClass = ModuleDictImplementation
+    StrategyClass = ModuleDictStrategy
 
     string = "int"
     string2 = "isinstance"
@@ -975,5 +1104,4 @@
 def test_module_uses_strdict():
     fakespace = FakeSpace()
     d = fakespace.newdict(module=True)
-    assert isinstance(d, StrDictImplementation)
-
+    assert type(d.strategy) is StringDictStrategy
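
The tests above exercise the new dict-strategy machinery: a dict starts out
with an empty strategy and switches to a string- or object-keyed strategy on
the first insertion, devolving to the generic object strategy as soon as a
non-string key appears.  Below is a minimal pure-Python sketch of that
switching; the class bodies are invented for illustration and are not the
PyPy implementation.

    class EmptyDictStrategy(object):
        def get_empty_storage(self):
            return None
        def setitem(self, w_dict, key, value):
            if type(key) is str:
                new = StringDictStrategy()
            else:
                new = ObjectDictStrategy()
            w_dict.strategy = new
            w_dict.dstorage = new.get_empty_storage()
            new.setitem(w_dict, key, value)

    class ObjectDictStrategy(object):
        def get_empty_storage(self):
            return {}
        def setitem(self, w_dict, key, value):
            w_dict.dstorage[key] = value

    class StringDictStrategy(ObjectDictStrategy):
        def setitem(self, w_dict, key, value):
            if type(key) is not str:
                # devolve: move the contents over to the generic strategy
                new = ObjectDictStrategy()
                w_dict.strategy, w_dict.dstorage = new, dict(w_dict.dstorage)
                new.setitem(w_dict, key, value)
            else:
                w_dict.dstorage[key] = value

    class W_Dict(object):
        def __init__(self):
            self.strategy = EmptyDictStrategy()
            self.dstorage = self.strategy.get_empty_storage()

    d = W_Dict()
    d.strategy.setitem(d, "a", 1)
    assert type(d.strategy) is StringDictStrategy
    d.strategy.setitem(d, object(), 2)
    assert type(d.strategy) is ObjectDictStrategy
    assert d.dstorage["a"] == 1
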
diff --git a/pypy/objspace/std/test/test_mapdict.py b/pypy/objspace/std/test/test_mapdict.py
--- a/pypy/objspace/std/test/test_mapdict.py
+++ b/pypy/objspace/std/test/test_mapdict.py
@@ -250,13 +250,18 @@
 
     class FakeDict(W_DictMultiObject):
         def __init__(self, d):
-            self.r_dict_content = d
+            self.dstorage = d
+
+        class strategy:
+            def unerase(self, x):
+                return d
+        strategy = strategy()
 
     d = {}
     w_d = FakeDict(d)
     flag = obj.map.write(obj, ("dict", SPECIAL), w_d)
     assert flag
-    materialize_r_dict(space, obj, w_d)
+    materialize_r_dict(space, obj, d)
     assert d == {"a": 5, "b": 6, "c": 7}
     assert obj.storage == [50, 60, 70, w_d]
 
@@ -291,18 +296,18 @@
     w_obj = cls.instantiate(self.fakespace)
     return w_obj.getdict(self.fakespace)
 class TestMapDictImplementation(BaseTestRDictImplementation):
-    ImplementionClass = MapDictImplementation
+    StrategyClass = MapDictStrategy
     get_impl = get_impl
 class TestDevolvedMapDictImplementation(BaseTestDevolvedDictImplementation):
     get_impl = get_impl
-    ImplementionClass = MapDictImplementation
+    StrategyClass = MapDictStrategy
 
 # ___________________________________________________________
 # tests that check the obj interface after the dict has devolved
 
 def devolve_dict(space, obj):
     w_d = obj.getdict(space)
-    w_d._as_rdict()
+    w_d.strategy.switch_to_object_strategy(w_d)
 
 def test_get_setdictvalue_after_devolve():
     cls = Class()
@@ -463,6 +468,20 @@
         d['dd'] = 43
         assert a.dd == 41
 
+    def test_popitem(self):
+        class A(object):
+            pass
+        a = A()
+        a.x = 5
+        a.y = 6
+        it1 = a.__dict__.popitem()
+        assert it1 == ("y", 6)
+        it2 = a.__dict__.popitem()
+        assert it2 == ("x", 5)
+        assert a.__dict__ == {}
+        raises(KeyError, a.__dict__.popitem)
+
+
 
     def test_slot_name_conflict(self):
         class A(object):
@@ -604,6 +623,14 @@
         assert a.__dict__ is d
         assert isinstance(a, B)
 
+    def test_setdict(self):
+        class A(object):
+            pass
+
+        a = A()
+        a.__dict__ = {}
+        a.__dict__ = {}
+
 
 class AppTestWithMapDictAndCounters(object):
     def setup_class(cls):
diff --git a/pypy/objspace/std/test/test_setobject.py b/pypy/objspace/std/test/test_setobject.py
--- a/pypy/objspace/std/test/test_setobject.py
+++ b/pypy/objspace/std/test/test_setobject.py
@@ -50,6 +50,10 @@
         u = self.space.wrap(set('simsalabim'))
         assert self.space.eq_w(s,u)
 
+    def test_space_newset(self):
+        s = self.space.newset()
+        assert self.space.str_w(self.space.repr(s)) == 'set([])'
+
 class AppTestAppSetTest:
     def test_subtype(self):
         class subset(set):pass
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -9,8 +9,8 @@
 from pypy.objspace.std.objecttype import object_typedef
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.objectmodel import current_object_addr_as_int, compute_hash
-from pypy.rlib.jit import hint, purefunction_promote, we_are_jitted
-from pypy.rlib.jit import purefunction, dont_look_inside, unroll_safe
+from pypy.rlib.jit import promote, elidable_promote, we_are_jitted
+from pypy.rlib.jit import elidable, dont_look_inside, unroll_safe
 from pypy.rlib.rarithmetic import intmask, r_uint
 
 class TypeCell(W_Root):
@@ -177,7 +177,7 @@
         # prebuilt objects cannot get their version_tag changed
         return w_self._pure_version_tag()
 
-    @purefunction_promote()
+    @elidable_promote()
     def _pure_version_tag(w_self):
         return w_self._version_tag
 
@@ -247,7 +247,7 @@
                     return w_value
         return w_value
 
-    @purefunction
+    @elidable
     def _pure_getdictvalue_no_unwrapping(w_self, space, version_tag, attr):
         return w_self._getdictvalue_no_unwrapping(space, attr)
 
@@ -351,16 +351,16 @@
 
     def lookup_where_with_method_cache(w_self, name):
         space = w_self.space
-        w_self = hint(w_self, promote=True)
+        promote(w_self)
         assert space.config.objspace.std.withmethodcache
-        version_tag = hint(w_self.version_tag(), promote=True)
+        version_tag = promote(w_self.version_tag())
         if version_tag is None:
             tup = w_self._lookup_where(name)
             return tup
         w_class, w_value = w_self._pure_lookup_where_with_method_cache(name, version_tag)
         return w_class, unwrap_cell(space, w_value)
 
-    @purefunction
+    @elidable
     def _pure_lookup_where_with_method_cache(w_self, name, version_tag):
         space = w_self.space
         cache = space.fromcache(MethodCache)
@@ -423,10 +423,13 @@
         return False
 
     def getdict(w_self, space): # returning a dict-proxy!
-        from pypy.objspace.std.dictproxyobject import W_DictProxyObject
+        from pypy.objspace.std.dictproxyobject import DictProxyStrategy
+        from pypy.objspace.std.dictmultiobject import W_DictMultiObject
         if w_self.lazyloaders:
             w_self._freeze_()    # force un-lazification
-        return W_DictProxyObject(space, w_self)
+        strategy = space.fromcache(DictProxyStrategy)
+        storage = strategy.erase(w_self)
+        return W_DictMultiObject(space, strategy, storage)
 
     def unwrap(w_self, space):
         if w_self.instancetypedef.fakedcpytype is not None:
@@ -447,8 +450,8 @@
         w_self.flag_abstract = bool(abstract)
 
     def issubtype(w_self, w_type):
-        w_self = hint(w_self, promote=True)
-        w_type = hint(w_type, promote=True)
+        promote(w_self)
+        promote(w_type)
         if w_self.space.config.objspace.std.withtypeversion and we_are_jitted():
             version_tag1 = w_self.version_tag()
             version_tag2 = w_type.version_tag()
@@ -774,7 +777,7 @@
 # ____________________________________________________________
 
 def call__Type(space, w_type, __args__):
-    w_type = hint(w_type, promote=True)
+    promote(w_type)
     # special case for type(x)
     if space.is_w(w_type, space.w_type):
         try:
@@ -820,7 +823,7 @@
 def _issubtype(w_sub, w_type):
     return w_type in w_sub.mro_w
 
- at purefunction_promote()
+ at elidable_promote()
 def _pure_issubtype(w_sub, w_type, version_tag1, version_tag2):
     return _issubtype(w_sub, w_type)
 
diff --git a/pypy/objspace/std/unicodeobject.py b/pypy/objspace/std/unicodeobject.py
--- a/pypy/objspace/std/unicodeobject.py
+++ b/pypy/objspace/std/unicodeobject.py
@@ -937,7 +937,7 @@
     return formatter.format_string(space.unicode_w(w_unicode))
 
 
-import unicodetype
+from pypy.objspace.std import unicodetype
 register_all(vars(), unicodetype)
 
 # str.strip(unicode) needs to convert self to unicode and call unicode.strip we
@@ -948,7 +948,7 @@
 # methods?
 
 class str_methods:
-    import stringtype
+    from pypy.objspace.std import stringtype
     W_UnicodeObject = W_UnicodeObject
     from pypy.objspace.std.stringobject import W_StringObject
     from pypy.objspace.std.ropeobject import W_RopeObject
diff --git a/pypy/objspace/taint.py b/pypy/objspace/taint.py
--- a/pypy/objspace/taint.py
+++ b/pypy/objspace/taint.py
@@ -92,8 +92,8 @@
     w_realtype = space.type(w_obj)
     if not space.is_w(w_realtype, w_expectedtype):
         #msg = "expected an object of type '%s'" % (
-        #    w_expectedtype.getname(space, '?'),)
-        #    #w_realtype.getname(space, '?'))
+        #    w_expectedtype.getname(space),)
+        #    #w_realtype.getname(space))
         raise OperationError(space.w_TaintError, space.w_None)
     return w_obj
 app_untaint = gateway.interp2app(untaint)
diff --git a/pypy/pytest.ini b/pypy/pytest.ini
new file mode 100644
--- /dev/null
+++ b/pypy/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = --assertmode=old
\ No newline at end of file
diff --git a/pypy/rlib/clibffi.py b/pypy/rlib/clibffi.py
--- a/pypy/rlib/clibffi.py
+++ b/pypy/rlib/clibffi.py
@@ -10,6 +10,7 @@
 from pypy.rlib.rmmap import alloc
 from pypy.rlib.rdynload import dlopen, dlclose, dlsym, dlsym_byordinal
 from pypy.rlib.rdynload import DLOpenError, DLLHANDLE
+from pypy.rlib import jit
 from pypy.tool.autopath import pypydir
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
 from pypy.translator.platform import platform
@@ -270,6 +271,7 @@
 elif _MSVC:
     get_libc_handle = external('pypy_get_libc_handle', [], DLLHANDLE)
 
+    @jit.dont_look_inside
     def get_libc_name():
         return rwin32.GetModuleFileName(get_libc_handle())
 
diff --git a/pypy/rlib/debug.py b/pypy/rlib/debug.py
--- a/pypy/rlib/debug.py
+++ b/pypy/rlib/debug.py
@@ -262,6 +262,28 @@
         return hop.inputarg(hop.args_r[0], arg=0)
 
 
+def mark_dict_non_null(d):
+    """ Mark dictionary as having non-null keys and values. A warning would
+    be emitted (not an error!) in case annotation disagrees.
+    """
+    assert isinstance(d, dict)
+    return d
+
+
+class DictMarkEntry(ExtRegistryEntry):
+    _about_ = mark_dict_non_null
+
+    def compute_result_annotation(self, s_dict):
+        from pypy.annotation.model import SomeDict, s_None
+
+        assert isinstance(s_dict, SomeDict)
+        s_dict.dictdef.force_non_null = True
+        return s_dict
+
+    def specialize_call(self, hop):
+        hop.exception_cannot_occur()
+        return hop.inputarg(hop.args_r[0], arg=0)
+
 class IntegerCanBeNegative(Exception):
     pass
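
mark_dict_non_null() is purely an annotation-time hint: untranslated it just
returns the dict, and during annotation it sets force_non_null on the dictdef
so the low-level dict (see lltypesystem/rdict.py below) can drop its null
key/value markers.  A small usage sketch, assuming a PyPy source checkout on
sys.path:

    from pypy.rlib.debug import mark_dict_non_null

    def build_table():
        d = {"key": "value"}     # keys and values are never None here
        mark_dict_non_null(d)    # hint only; untranslated it returns d as-is
        return d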
 
diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py
--- a/pypy/rlib/jit.py
+++ b/pypy/rlib/jit.py
@@ -6,21 +6,26 @@
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.rlib.nonconst import NonConstant
 
-def purefunction(func):
-    """ Decorate a function as pure. Pure means precisely that:
+def elidable(func):
+    """ Decorate a function as "trace-elidable". This means precisely that:
 
     (1) the result of the call should not change if the arguments are
         the same (same numbers or same pointers)
     (2) it's fine to remove the call completely if we can guess the result
     according to rule 1
 
-    Most importantly it doesn't mean that pure function has no observable
-    side effect, but those side effects can be ommited (ie caching).
+    Most importantly, it does not mean that an elidable function has no observable
+    side effects; rather, those side effects must be idempotent (e.g. caching).
     For now, such a function should never raise an exception.
     """
-    func._pure_function_ = True
+    func._elidable_function_ = True
     return func
 
+def purefunction(*args, **kwargs):
+    import warnings
+    warnings.warn("purefunction is deprecated, use elidable instead", DeprecationWarning)
+    return elidable(*args, **kwargs)
+
 def hint(x, **kwds):
     """ Hint for the JIT
 
@@ -36,6 +41,10 @@
     """
     return x
 
+ at specialize.argtype(0)
+def promote(x):
+    return hint(x, promote=True)
+
 def dont_look_inside(func):
     """ Make sure the JIT does not trace inside decorated function
     (it becomes a call instead)
@@ -60,13 +69,13 @@
     func._jit_loop_invariant_ = True
     return func
 
-def purefunction_promote(promote_args='all'):
+def elidable_promote(promote_args='all'):
     """ A decorator that promotes all arguments and then calls the supplied
     function
     """
     def decorator(func):
         import inspect
-        purefunction(func)
+        elidable(func)
         args, varargs, varkw, defaults = inspect.getargspec(func)
         args = ["v%s" % (i, ) for i in range(len(args))]
         assert varargs is None and varkw is None
@@ -85,6 +94,12 @@
         return result
     return decorator
 
+def purefunction_promote(*args, **kwargs):
+    import warnings
+    warnings.warn("purefunction_promote is deprecated, use elidable_promote instead", DeprecationWarning)
+    return elidable_promote(*args, **kwargs)
+
+
 def oopspec(spec):
     def decorator(func):
         func.oopspec = spec
@@ -273,15 +288,17 @@
 class JitHintError(Exception):
     """Inconsistency in the JIT hints."""
 
-PARAMETERS = {'threshold': 1000,
+PARAMETERS = {'threshold': 1032, # just above 1024
+              'function_threshold': 1617, # slightly more than one 'threshold' above
               'trace_eagerness': 200,
               'trace_limit': 12000,
-              'inlining': 0,
+              'inlining': 1,
               'loop_longevity': 1000,
               'retrace_limit': 5,
-              'enable_opts': None, # patched later by optimizeopt/__init__.py
+              'enable_opts': 'all',
               }
 unroll_parameters = unrolling_iterable(PARAMETERS.items())
+DEFAULT = object()
 
 # ____________________________________________________________
 
@@ -336,22 +353,33 @@
     def _set_param(self, name, value):
         # special-cased by ExtRegistryEntry
         # (internal, must receive a constant 'name')
+        # if value is DEFAULT, sets the default value.
         assert name in PARAMETERS
 
     @specialize.arg(0, 1)
     def set_param(self, name, value):
         """Set one of the tunable JIT parameter."""
-        for name1, _ in unroll_parameters:
-            if name1 == name:
-                self._set_param(name1, value)
-                return
-        raise ValueError("no such parameter")
+        self._set_param(name, value)
+
+    @specialize.arg(0, 1)
+    def set_param_to_default(self, name):
+        """Reset one of the tunable JIT parameters to its default value."""
+        self._set_param(name, DEFAULT)
 
     def set_user_param(self, text):
         """Set the tunable JIT parameters from a user-supplied string
-        following the format 'param=value,param=value'.  For programmatic
-        setting of parameters, use directly JitDriver.set_param().
+        following the format 'param=value,param=value', or 'off' to
+        disable the JIT.  For programmatic setting of parameters, use
+        JitDriver.set_param() directly.
         """
+        if text == 'off':
+            self.set_param('threshold', -1)
+            self.set_param('function_threshold', -1)
+            return
+        if text == 'default':
+            for name1, _ in unroll_parameters:
+                self.set_param_to_default(name1)
+            return
         for s in text.split(','):
             s = s.strip(' ')
             parts = s.split('=')
@@ -574,15 +602,17 @@
     def compute_result_annotation(self, s_name, s_value):
         from pypy.annotation import model as annmodel
         assert s_name.is_constant()
-        if s_name.const == 'enable_opts':
-            assert annmodel.SomeString(can_be_None=True).contains(s_value)
-        else:
-            assert annmodel.SomeInteger().contains(s_value)
+        if not self.bookkeeper.immutablevalue(DEFAULT).contains(s_value):
+            if s_name.const == 'enable_opts':
+                assert annmodel.SomeString(can_be_None=True).contains(s_value)
+            else:
+                assert annmodel.SomeInteger().contains(s_value)
         return annmodel.s_None
 
     def specialize_call(self, hop):
         from pypy.rpython.lltypesystem import lltype
         from pypy.rpython.lltypesystem.rstr import string_repr
+        from pypy.objspace.flow.model import Constant
 
         hop.exception_cannot_occur()
         driver = self.instance.im_self
@@ -591,7 +621,12 @@
             repr = string_repr
         else:
             repr = lltype.Signed
-        v_value = hop.inputarg(repr, arg=1)
+        if (isinstance(hop.args_v[1], Constant) and
+            hop.args_v[1].value is DEFAULT):
+            value = PARAMETERS[name]
+            v_value = hop.inputconst(repr, value)
+        else:
+            v_value = hop.inputarg(repr, arg=1)
         vlist = [hop.inputconst(lltype.Void, "set_param"),
                  hop.inputconst(lltype.Void, driver),
                  hop.inputconst(lltype.Void, name),
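
A usage sketch for the new DEFAULT handling and the 'off'/'default' strings
accepted by set_user_param(); the JitDriver below is a dummy with invented
green/red lists, and this assumes a PyPy source checkout (untranslated, these
calls essentially only validate the parameter names):

    from pypy.rlib.jit import JitDriver

    driver = JitDriver(greens=[], reds=['i'])           # dummy driver
    driver.set_param('trace_limit', 20000)              # one parameter
    driver.set_param_to_default('trace_limit')          # back to PARAMETERS[...]
    driver.set_user_param('threshold=2000,inlining=0')  # 'param=value,...' form
    driver.set_user_param('off')                        # both thresholds -> -1
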
diff --git a/pypy/rlib/libffi.py b/pypy/rlib/libffi.py
--- a/pypy/rlib/libffi.py
+++ b/pypy/rlib/libffi.py
@@ -40,7 +40,7 @@
         del cls._import
 
     @staticmethod
-    @jit.purefunction
+    @jit.elidable
     def getkind(ffi_type):
         """Returns 'v' for void, 'f' for float, 'i' for signed integer,
         and 'u' for unsigned integer.
@@ -74,7 +74,7 @@
         raise KeyError
 
     @staticmethod
-    @jit.purefunction
+    @jit.elidable
     def is_struct(ffi_type):
         return intmask(ffi_type.c_type) == intmask(FFI_TYPE_STRUCT)
 
@@ -253,7 +253,7 @@
         # the optimizer will fail to recognize the pattern and won't turn it
         # into a fast CALL.  Note that "arg = arg.next" is optimized away,
         # assuming that archain is completely virtual.
-        self = jit.hint(self, promote=True)
+        self = jit.promote(self)
         if argchain.numargs != len(self.argtypes):
             raise TypeError, 'Wrong number of arguments: %d expected, got %d' %\
                 (argchain.numargs, len(self.argtypes))
diff --git a/pypy/rlib/longlong2float.py b/pypy/rlib/longlong2float.py
--- a/pypy/rlib/longlong2float.py
+++ b/pypy/rlib/longlong2float.py
@@ -49,9 +49,9 @@
 longlong2float = rffi.llexternal(
     "pypy__longlong2float", [rffi.LONGLONG], rffi.DOUBLE,
     _callable=longlong2float_emulator, compilation_info=eci,
-    _nowrapper=True, pure_function=True)
+    _nowrapper=True, elidable_function=True)
 
 float2longlong = rffi.llexternal(
     "pypy__float2longlong", [rffi.DOUBLE], rffi.LONGLONG,
     _callable=float2longlong_emulator, compilation_info=eci,
-    _nowrapper=True, pure_function=True)
+    _nowrapper=True, elidable_function=True)
diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py
--- a/pypy/rlib/objectmodel.py
+++ b/pypy/rlib/objectmodel.py
@@ -448,10 +448,11 @@
     The functions key_eq() and key_hash() are used by the key comparison
     algorithm."""
 
-    def __init__(self, key_eq, key_hash):
+    def __init__(self, key_eq, key_hash, force_non_null=False):
         self._dict = {}
         self.key_eq = key_eq
         self.key_hash = key_hash
+        self.force_non_null = force_non_null
 
     def __getitem__(self, key):
         return self._dict[_r_dictkey(self, key)]
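
The new force_non_null flag mirrors mark_dict_non_null() for r_dict: it is
forwarded to the rtyper (see rtype_r_dict and DictRepr in
lltypesystem/rdict.py below) so the low-level dict can drop its null markers.
A usage sketch with illustrative eq/hash functions, assuming a PyPy source
checkout:

    from pypy.rlib.objectmodel import r_dict

    def eq(key1, key2):
        return key1.lower() == key2.lower()

    def hsh(key):
        return hash(key.lower())

    # case-insensitive string-keyed dict whose keys and values are never None
    d = r_dict(eq, hsh, force_non_null=True)
    d['Foo'] = 1
    assert d['FOO'] == 1
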
diff --git a/pypy/rlib/rbigint.py b/pypy/rlib/rbigint.py
--- a/pypy/rlib/rbigint.py
+++ b/pypy/rlib/rbigint.py
@@ -124,7 +124,7 @@
         return len(self._digits)
 
     @staticmethod
-    @jit.purefunction
+    @jit.elidable
     def fromint(intval):
         # This function is marked as pure, so you must not call it and
         # then modify the result.
@@ -156,7 +156,7 @@
         return v
 
     @staticmethod
-    @jit.purefunction
+    @jit.elidable
     def frombool(b):
         # This function is marked as pure, so you must not call it and
         # then modify the result.
@@ -179,7 +179,7 @@
             raise OverflowError
 
     @staticmethod
-    @jit.purefunction
+    @jit.elidable
     def _fromfloat_finite(dval):
         sign = 1
         if dval < 0.0:
@@ -201,7 +201,7 @@
         return v
 
     @staticmethod
-    @jit.purefunction
+    @jit.elidable
     @specialize.argtype(0)
     def fromrarith_int(i):
         # This function is marked as pure, so you must not call it and
@@ -209,7 +209,7 @@
         return rbigint(*args_from_rarith_int(i))
 
     @staticmethod
-    @jit.purefunction
+    @jit.elidable
     def fromdecimalstr(s):
         # This function is marked as pure, so you must not call it and
         # then modify the result.
diff --git a/pypy/rlib/rgc.py b/pypy/rlib/rgc.py
--- a/pypy/rlib/rgc.py
+++ b/pypy/rlib/rgc.py
@@ -272,7 +272,9 @@
     if isinstance(TP.OF, lltype.Ptr) and TP.OF.TO._gckind == 'gc':
         # perform a write barrier that copies necessary flags from
         # source to dest
-        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest):
+        if not llop.gc_writebarrier_before_copy(lltype.Bool, source, dest,
+                                                source_start, dest_start,
+                                                length):
             # if the write barrier is not supported, copy by hand
             for i in range(length):
                 dest[i + dest_start] = source[i + source_start]
diff --git a/pypy/rlib/rmd5.py b/pypy/rlib/rmd5.py
--- a/pypy/rlib/rmd5.py
+++ b/pypy/rlib/rmd5.py
@@ -51,7 +51,7 @@
     _rotateLeft = rffi.llexternal(
         "pypy__rotateLeft", [lltype.Unsigned, lltype.Signed], lltype.Unsigned,
         _callable=_rotateLeft_emulator, compilation_info=eci,
-        _nowrapper=True, pure_function=True)
+        _nowrapper=True, elidable_function=True)
     # we expect the function _rotateLeft to be actually inlined
 
 
diff --git a/pypy/rlib/ropenssl.py b/pypy/rlib/ropenssl.py
--- a/pypy/rlib/ropenssl.py
+++ b/pypy/rlib/ropenssl.py
@@ -151,7 +151,7 @@
                 [rffi.INT, rffi.INT, rffi.CCHARP, rffi.INT], lltype.Void))],
              lltype.Void)
 ssl_external('CRYPTO_set_id_callback',
-             [lltype.Ptr(lltype.FuncType([], rffi.INT))],
+             [lltype.Ptr(lltype.FuncType([], rffi.LONG))],
              lltype.Void)
              
 if HAVE_OPENSSL_RAND:
diff --git a/pypy/rlib/rsdl/RMix.py b/pypy/rlib/rsdl/RMix.py
--- a/pypy/rlib/rsdl/RMix.py
+++ b/pypy/rlib/rsdl/RMix.py
@@ -52,7 +52,8 @@
                                ChunkPtr)
 
 def LoadWAV(filename_ccharp):
-    return LoadWAV_RW(RSDL.RWFromFile(filename_ccharp, rffi.str2charp('rb')), 1)
+    with rffi.scoped_str2charp('rb') as mode:
+        return LoadWAV_RW(RSDL.RWFromFile(filename_ccharp, mode), 1)
 
 
 PlayChannelTimed    = external('Mix_PlayChannelTimed',
@@ -64,4 +65,4 @@
 
 """Returns zero if the channel is not playing. 
 Otherwise if you passed in -1, the number of channels playing is returned"""
-ChannelPlaying  = external('Mix_Playing', [ rffi.INT])
\ No newline at end of file
+ChannelPlaying  = external('Mix_Playing', [rffi.INT], rffi.INT)
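
The LoadWAV change above switches to rffi.scoped_str2charp, which frees the
raw 'rb' buffer when the with-block exits instead of leaking it on every
call.  The pattern in isolation (a sketch, assuming a PyPy source checkout):

    from pypy.rpython.lltypesystem import rffi

    with rffi.scoped_str2charp('rb') as mode:
        # 'mode' is a raw NUL-terminated char* valid only inside this block;
        # it is freed automatically on exit
        pass
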
diff --git a/pypy/rlib/test/test_debug.py b/pypy/rlib/test/test_debug.py
--- a/pypy/rlib/test/test_debug.py
+++ b/pypy/rlib/test/test_debug.py
@@ -1,11 +1,12 @@
 
 import py
-from pypy.rlib.debug import check_annotation, make_sure_not_resized
-from pypy.rlib.debug import debug_print, debug_start, debug_stop
-from pypy.rlib.debug import have_debug_prints, debug_offset, debug_flush
-from pypy.rlib.debug import check_nonneg, IntegerCanBeNegative
+from pypy.rlib.debug import (check_annotation, make_sure_not_resized,
+                             debug_print, debug_start, debug_stop,
+                             have_debug_prints, debug_offset, debug_flush,
+                             check_nonneg, IntegerCanBeNegative,
+                             mark_dict_non_null)
 from pypy.rlib import debug
-from pypy.rpython.test.test_llinterp import interpret
+from pypy.rpython.test.test_llinterp import interpret, gengraph
 
 def test_check_annotation():
     class Error(Exception):
@@ -52,8 +53,17 @@
     py.test.raises(ListChangeUnallowed, interpret, f, [], 
                    list_comprehension_operations=True)
 
+def test_mark_dict_non_null():
+    def f():
+        d = {"ac": "bx"}
+        mark_dict_non_null(d)
+        return d
 
-class DebugTests:
+    t, typer, graph = gengraph(f, [])
+    assert sorted(graph.returnblock.inputargs[0].concretetype.TO.entries.TO.OF._flds.keys()) == ['key', 'value']
+
+
+class DebugTests(object):
 
     def test_debug_print_start_stop(self):
         def f(x):
diff --git a/pypy/rlib/test/test_jit.py b/pypy/rlib/test/test_jit.py
--- a/pypy/rlib/test/test_jit.py
+++ b/pypy/rlib/test/test_jit.py
@@ -1,6 +1,6 @@
 import py
 from pypy.conftest import option
-from pypy.rlib.jit import hint, we_are_jitted, JitDriver, purefunction_promote
+from pypy.rlib.jit import hint, we_are_jitted, JitDriver, elidable_promote
 from pypy.rlib.jit import JitHintError, oopspec
 from pypy.translator.translator import TranslationContext, graphof
 from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin
@@ -31,8 +31,8 @@
         res = self.interpret(f, [4])
         assert res == 5
 
-    def test_purefunction_promote(self):
-        @purefunction_promote()
+    def test_elidable_promote(self):
+        @elidable_promote()
         def g(func):
             return func + 1
         def f(x):
@@ -40,8 +40,8 @@
         res = self.interpret(f, [2])
         assert res == 5
 
-    def test_purefunction_promote_args(self):
-        @purefunction_promote(promote_args='0')
+    def test_elidable_promote_args(self):
+        @elidable_promote(promote_args='0')
         def g(func, x):
             return func + 1
         def f(x):
diff --git a/pypy/rpython/llinterp.py b/pypy/rpython/llinterp.py
--- a/pypy/rpython/llinterp.py
+++ b/pypy/rpython/llinterp.py
@@ -737,9 +737,12 @@
     def op_zero_gc_pointers_inside(self, obj):
         raise NotImplementedError("zero_gc_pointers_inside")
 
-    def op_gc_writebarrier_before_copy(self, source, dest):
+    def op_gc_writebarrier_before_copy(self, source, dest,
+                                       source_start, dest_start, length):
         if hasattr(self.heap, 'writebarrier_before_copy'):
-            return self.heap.writebarrier_before_copy(source, dest)
+            return self.heap.writebarrier_before_copy(source, dest,
+                                                      source_start, dest_start,
+                                                      length)
         else:
             return True
 
diff --git a/pypy/rpython/lltypesystem/ll2ctypes.py b/pypy/rpython/lltypesystem/ll2ctypes.py
--- a/pypy/rpython/lltypesystem/ll2ctypes.py
+++ b/pypy/rpython/lltypesystem/ll2ctypes.py
@@ -37,7 +37,9 @@
     if far_regions:
         import random
         pieces = far_regions._ll2ctypes_pieces
-        num = random.randrange(len(pieces))
+        num = random.randrange(len(pieces)+1)
+        if num == len(pieces):
+            return ctype()
         i1, stop = pieces[num]
         i2 = i1 + ((ctypes.sizeof(ctype) or 1) + 7) & ~7
         if i2 > stop:
diff --git a/pypy/rpython/lltypesystem/ll_str.py b/pypy/rpython/lltypesystem/ll_str.py
--- a/pypy/rpython/lltypesystem/ll_str.py
+++ b/pypy/rpython/lltypesystem/ll_str.py
@@ -1,12 +1,13 @@
 from pypy.rpython.lltypesystem.lltype import GcArray, Array, Char, malloc
 from pypy.rpython.annlowlevel import llstr
 from pypy.rlib.rarithmetic import r_uint, r_longlong, r_ulonglong
+from pypy.rlib import jit
 
 CHAR_ARRAY = GcArray(Char)
 
+ at jit.elidable
 def ll_int_str(repr, i):
     return ll_int2dec(i)
-ll_int_str._pure_function_ = True
 
 def ll_unsigned(i):
     if isinstance(i, r_longlong) or isinstance(i, r_ulonglong):
@@ -14,6 +15,7 @@
     else:
         return r_uint(i)
 
+ at jit.elidable
 def ll_int2dec(i):
     from pypy.rpython.lltypesystem.rstr import mallocstr
     temp = malloc(CHAR_ARRAY, 20)
@@ -44,13 +46,13 @@
         result.chars[j] = temp[len-j-1]
         j += 1
     return result
-ll_int2dec._pure_function_ = True
 
 hex_chars = malloc(Array(Char), 16, immortal=True)
 
 for i in range(16):
     hex_chars[i] = "%x"%i
 
+ at jit.elidable
 def ll_int2hex(i, addPrefix):
     from pypy.rpython.lltypesystem.rstr import mallocstr
     temp = malloc(CHAR_ARRAY, 20)
@@ -86,8 +88,8 @@
         result.chars[j] = temp[len-j-1]
         j += 1
     return result
-ll_int2hex._pure_function_ = True
 
+ at jit.elidable
 def ll_int2oct(i, addPrefix):
     from pypy.rpython.lltypesystem.rstr import mallocstr
     if i == 0:
@@ -123,9 +125,8 @@
         result.chars[j] = temp[len-j-1]
         j += 1
     return result
-ll_int2oct._pure_function_ = True
 
+ at jit.elidable
 def ll_float_str(repr, f):
     from pypy.rlib.rfloat import formatd
     return llstr(formatd(f, 'f', 6))
-ll_float_str._pure_function_ = True
diff --git a/pypy/rpython/lltypesystem/module/ll_math.py b/pypy/rpython/lltypesystem/module/ll_math.py
--- a/pypy/rpython/lltypesystem/module/ll_math.py
+++ b/pypy/rpython/lltypesystem/module/ll_math.py
@@ -58,7 +58,7 @@
 math_log10 = llexternal('log10', [rffi.DOUBLE], rffi.DOUBLE)
 math_copysign = llexternal(underscore + 'copysign',
                            [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE,
-                           pure_function=True)
+                           elidable_function=True)
 math_atan2 = llexternal('atan2', [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE)
 math_frexp = llexternal('frexp', [rffi.DOUBLE, rffi.INTP], rffi.DOUBLE)
 math_modf  = llexternal('modf',  [rffi.DOUBLE, rffi.DOUBLEP], rffi.DOUBLE)
@@ -67,11 +67,11 @@
 math_fmod  = llexternal('fmod',  [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE)
 math_hypot = llexternal(underscore + 'hypot',
                         [rffi.DOUBLE, rffi.DOUBLE], rffi.DOUBLE)
-math_floor = llexternal('floor', [rffi.DOUBLE], rffi.DOUBLE, pure_function=True)
+math_floor = llexternal('floor', [rffi.DOUBLE], rffi.DOUBLE, elidable_function=True)
 
 math_sqrt = llexternal('sqrt', [rffi.DOUBLE], rffi.DOUBLE)
 
- at jit.purefunction
+ at jit.elidable
 def sqrt_nonneg(x):
     return math_sqrt(x)
 sqrt_nonneg.oopspec = "math.sqrt_nonneg(x)"
diff --git a/pypy/rpython/lltypesystem/opimpl.py b/pypy/rpython/lltypesystem/opimpl.py
--- a/pypy/rpython/lltypesystem/opimpl.py
+++ b/pypy/rpython/lltypesystem/opimpl.py
@@ -473,12 +473,16 @@
     checkadr(addr2)
     return addr1 - addr2
 
-def op_gc_writebarrier_before_copy(source, dest):
+def op_gc_writebarrier_before_copy(source, dest,
+                                   source_start, dest_start, length):
     A = lltype.typeOf(source)
     assert A == lltype.typeOf(dest)
     assert isinstance(A.TO, lltype.GcArray)
     assert isinstance(A.TO.OF, lltype.Ptr)
     assert A.TO.OF.TO._gckind == 'gc'
+    assert type(source_start) is int
+    assert type(dest_start) is int
+    assert type(length) is int
     return True
 
 def op_getfield(p, name):
diff --git a/pypy/rpython/lltypesystem/rdict.py b/pypy/rpython/lltypesystem/rdict.py
--- a/pypy/rpython/lltypesystem/rdict.py
+++ b/pypy/rpython/lltypesystem/rdict.py
@@ -9,6 +9,7 @@
 from pypy.rpython import robject
 from pypy.rlib import objectmodel, jit
 from pypy.rpython import rmodel
+from pypy.rpython.error import TyperError
 
 HIGHEST_BIT = intmask(1 << (LONG_BIT - 1))
 MASK = intmask(HIGHEST_BIT - 1)
@@ -42,7 +43,7 @@
 class DictRepr(AbstractDictRepr):
 
     def __init__(self, rtyper, key_repr, value_repr, dictkey, dictvalue,
-                 custom_eq_hash=None):
+                 custom_eq_hash=None, force_non_null=False):
         self.rtyper = rtyper
         self.DICT = lltype.GcForwardReference()
         self.lowleveltype = lltype.Ptr(self.DICT)
@@ -61,6 +62,7 @@
         self.dictvalue = dictvalue
         self.dict_cache = {}
         self._custom_eq_hash_repr = custom_eq_hash
+        self.force_non_null = force_non_null
         # setup() needs to be called to finish this initialization
 
     def _externalvsinternal(self, rtyper, item_repr):
@@ -97,6 +99,13 @@
             s_value = self.dictvalue.s_value
             nullkeymarker = not self.key_repr.can_ll_be_null(s_key)
             nullvaluemarker = not self.value_repr.can_ll_be_null(s_value)
+            if self.force_non_null:
+                if not nullkeymarker:
+                    rmodel.warning("%s can be null, but forcing non-null in dict key" % s_key)
+                    nullkeymarker = True
+                if not nullvaluemarker:
+                    rmodel.warning("%s can be null, but forcing non-null in dict value" % s_value)
+                    nullvaluemarker = True
             dummykeyobj = self.key_repr.get_ll_dummyval_obj(self.rtyper,
                                                             s_key)
             dummyvalueobj = self.value_repr.get_ll_dummyval_obj(self.rtyper,
@@ -206,7 +215,7 @@
         if dictobj is None:
             return lltype.nullptr(self.DICT)
         if not isinstance(dictobj, (dict, objectmodel.r_dict)):
-            raise TyperError("expected a dict: %r" % (dictobj,))
+            raise TypeError("expected a dict: %r" % (dictobj,))
         try:
             key = Constant(dictobj)
             return self.dict_cache[key]
@@ -640,12 +649,15 @@
     pass
 
 
-def rtype_r_dict(hop):
+def rtype_r_dict(hop, i_force_non_null=None):
     r_dict = hop.r_result
     if not r_dict.custom_eq_hash:
         raise TyperError("r_dict() call does not return an r_dict instance")
-    v_eqfn, v_hashfn = hop.inputargs(r_dict.r_rdict_eqfn,
-                                     r_dict.r_rdict_hashfn)
+    v_eqfn = hop.inputarg(r_dict.r_rdict_eqfn, arg=0)
+    v_hashfn = hop.inputarg(r_dict.r_rdict_hashfn, arg=1)
+    if i_force_non_null is not None:
+        assert i_force_non_null == 2
+        hop.inputarg(lltype.Void, arg=2)
     cDICT = hop.inputconst(lltype.Void, r_dict.DICT)
     hop.exception_cannot_occur()
     v_result = hop.gendirectcall(ll_newdict, cDICT)
@@ -833,10 +845,16 @@
 POPITEMINDEX = lltype.Struct('PopItemIndex', ('nextindex', lltype.Signed))
 global_popitem_index = lltype.malloc(POPITEMINDEX, zero=True, immortal=True)
 
-def ll_popitem(ELEM, dic):
+def _ll_getnextitem(dic):
     entries = dic.entries
+    ENTRY = lltype.typeOf(entries).TO.OF
     dmask = len(entries) - 1
-    base = global_popitem_index.nextindex
+    if hasattr(ENTRY, 'f_hash'):
+        if entries.valid(0):
+            return 0
+        base = entries[0].f_hash
+    else:
+        base = global_popitem_index.nextindex
     counter = 0
     while counter <= dmask:
         i = (base + counter) & dmask
@@ -845,8 +863,16 @@
             break
     else:
         raise KeyError
-    global_popitem_index.nextindex += counter
-    entry = entries[i]
+    if hasattr(ENTRY, 'f_hash'):
+        entries[0].f_hash = base + counter
+    else:
+        global_popitem_index.nextindex = base + counter
+    return i
+
+ at jit.dont_look_inside
+def ll_popitem(ELEM, dic):
+    i = _ll_getnextitem(dic)
+    entry = dic.entries[i]
     r = lltype.malloc(ELEM.TO)
     r.item0 = recast(ELEM.TO.item0, entry.key)
     r.item1 = recast(ELEM.TO.item1, entry.value)
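
The refactored popitem above works by remembering where the previous call
stopped: entry types that store a hash reuse entry 0's f_hash field as that
cursor once entry 0 itself has been popped (and pop entry 0 first while it is
still live), while the other variants fall back to the single
global_popitem_index counter; the scan then walks the table from the
remembered base index modulo its power-of-two size.  A pure-Python sketch of
that scan, simplified to always use one shared counter:

    class Entry(object):
        def __init__(self):
            self.key = self.value = None
            self.valid = False

    nextindex = [0]                       # stand-in for global_popitem_index

    def getnextitem(entries):
        dmask = len(entries) - 1          # table size is a power of two
        base = nextindex[0]
        for counter in range(dmask + 1):
            i = (base + counter) & dmask
            if entries[i].valid:
                nextindex[0] = base + counter + 1   # resume here next time
                return i
        raise KeyError

    entries = [Entry() for _ in range(8)]
    entries[3].key, entries[3].value, entries[3].valid = 'a', 1, True
    entries[5].key, entries[5].value, entries[5].valid = 'b', 2, True
    i = getnextitem(entries); entries[i].valid = False  # pops index 3 ('a')
    j = getnextitem(entries); entries[j].valid = False  # resumes at 4, pops 5
    assert (i, j) == (3, 5)
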
diff --git a/pypy/rpython/lltypesystem/rffi.py b/pypy/rpython/lltypesystem/rffi.py
--- a/pypy/rpython/lltypesystem/rffi.py
+++ b/pypy/rpython/lltypesystem/rffi.py
@@ -55,7 +55,7 @@
                compilation_info=ExternalCompilationInfo(),
                sandboxsafe=False, threadsafe='auto',
                _nowrapper=False, calling_conv='c',
-               oo_primitive=None, pure_function=False,
+               oo_primitive=None, elidable_function=False,
                macro=None):
     """Build an external function that will invoke the C function 'name'
     with the given 'args' types and 'result' type.
@@ -87,8 +87,8 @@
                 name, macro, ext_type, compilation_info)
         else:
             _callable = ll2ctypes.LL2CtypesCallable(ext_type, calling_conv)
-    if pure_function:
-        _callable._pure_function_ = True
+    if elidable_function:
+        _callable._elidable_function_ = True
     kwds = {}
     if oo_primitive:
         kwds['oo_primitive'] = oo_primitive
diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py
--- a/pypy/rpython/lltypesystem/rlist.py
+++ b/pypy/rpython/lltypesystem/rlist.py
@@ -250,12 +250,11 @@
     length = l.length
     l.length = length + 1
     l.ll_setitem_fast(length, newitem)
-ll_append_noresize.oopspec = 'list.append(l, newitem)'
 
 
 def ll_both_none(lst1, lst2):
     return not lst1 and not lst2
-        
+
 
 # ____________________________________________________________
 #
diff --git a/pypy/rpython/lltypesystem/rstr.py b/pypy/rpython/lltypesystem/rstr.py
--- a/pypy/rpython/lltypesystem/rstr.py
+++ b/pypy/rpython/lltypesystem/rstr.py
@@ -4,7 +4,7 @@
 from pypy.rlib.objectmodel import malloc_zero_filled, we_are_translated
 from pypy.rlib.objectmodel import _hash_string, enforceargs
 from pypy.rlib.debug import ll_assert
-from pypy.rlib.jit import purefunction, we_are_jitted
+from pypy.rlib.jit import elidable, we_are_jitted, dont_look_inside
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rpython.robject import PyObjRepr, pyobj_repr
 from pypy.rpython.rmodel import inputconst, IntegerRepr
@@ -57,6 +57,8 @@
                 llmemory.itemoffsetof(TP.chars, 0) +
                 llmemory.sizeof(CHAR_TP) * item)
 
+    # It'd be nice to be able to look inside this function.
+    @dont_look_inside
     @enforceargs(None, None, int, int, int)
     def copy_string_contents(src, dst, srcstart, dststart, length):
         assert srcstart >= 0
@@ -142,7 +144,7 @@
         self.ll = LLHelpers
         self.malloc = mallocunicode
 
-    @purefunction
+    @elidable
     def ll_str(self, s):
         # XXX crazy that this is here, but I don't want to break
         #     rmodel logic
@@ -157,7 +159,7 @@
             result.chars[i] = cast_primitive(Char, c)
         return result
 
-    @purefunction
+    @elidable
     def ll_encode_latin1(self, s):
         length = len(s.chars)
         result = mallocstr(length)
@@ -256,7 +258,7 @@
 
 
 class LLHelpers(AbstractLLHelpers):
-    @purefunction
+    @elidable
     def ll_str_mul(s, times):
         if times < 0:
             times = 0
@@ -278,7 +280,7 @@
             i += j
         return newstr
 
-    @purefunction
+    @elidable
     def ll_char_mul(ch, times):
         if typeOf(ch) is Char:
             malloc = mallocstr
@@ -323,6 +325,7 @@
         return s
     ll_str2unicode.oopspec = 'str.str2unicode(str)'
 
+    @elidable
     def ll_strhash(s):
         # unlike CPython, there is no reason to avoid to return -1
         # but our malloc initializes the memory to zero, so we use zero as the
@@ -334,12 +337,11 @@
                 x = 29872897
             s.hash = x
         return x
-    ll_strhash._pure_function_ = True # it's pure but it does not look like it
 
     def ll_strfasthash(s):
         return s.hash     # assumes that the hash is already computed
 
-    @purefunction
+    @elidable
     def ll_strconcat(s1, s2):
         len1 = len(s1.chars)
         len2 = len(s2.chars)
@@ -349,7 +351,7 @@
         return newstr
     ll_strconcat.oopspec = 'stroruni.concat(s1, s2)'
 
-    @purefunction
+    @elidable
     def ll_strip(s, ch, left, right):
         s_len = len(s.chars)
         if s_len == 0:
@@ -367,7 +369,7 @@
         s.copy_contents(s, result, lpos, 0, r_len)
         return result
 
-    @purefunction
+    @elidable
     def ll_upper(s):
         s_chars = s.chars
         s_len = len(s_chars)
@@ -384,7 +386,7 @@
             i += 1
         return result
 
-    @purefunction
+    @elidable
     def ll_lower(s):
         s_chars = s.chars
         s_len = len(s_chars)
@@ -425,7 +427,7 @@
             i += 1
         return result
 
-    @purefunction
+    @elidable
     def ll_strcmp(s1, s2):
         if not s1 and not s2:
             return True
@@ -448,7 +450,7 @@
             i += 1
         return len1 - len2
 
-    @purefunction
+    @elidable
     def ll_streq(s1, s2):
         if s1 == s2:       # also if both are NULLs
             return True
@@ -468,7 +470,7 @@
         return True
     ll_streq.oopspec = 'stroruni.equal(s1, s2)'
 
-    @purefunction
+    @elidable
     def ll_startswith(s1, s2):
         len1 = len(s1.chars)
         len2 = len(s2.chars)
@@ -484,7 +486,7 @@
 
         return True
 
-    @purefunction
+    @elidable
     def ll_endswith(s1, s2):
         len1 = len(s1.chars)
         len2 = len(s2.chars)
@@ -501,7 +503,7 @@
 
         return True
 
-    @purefunction
+    @elidable
     def ll_find_char(s, ch, start, end):
         i = start
         if end > len(s.chars):
@@ -513,7 +515,7 @@
         return -1
     ll_find_char._annenforceargs_ = [None, None, int, int]
 
-    @purefunction
+    @elidable
     def ll_rfind_char(s, ch, start, end):
         if end > len(s.chars):
             end = len(s.chars)
@@ -524,7 +526,7 @@
                 return i
         return -1
 
-    @purefunction
+    @elidable
     def ll_count_char(s, ch, start, end):
         count = 0
         i = start
@@ -592,7 +594,7 @@
             res = 0
         return res
 
-    @purefunction
+    @elidable
     def ll_search(s1, s2, start, end, mode):
         count = 0
         n = end - start
@@ -715,7 +717,7 @@
             i += 1
         return result
 
-    @purefunction
+    @elidable
     def _ll_stringslice(s1, start, stop):
         lgt = stop - start
         assert start >= 0
@@ -813,7 +815,7 @@
         item.copy_contents(s, item, j, 0, i - j)
         return res
 
-    @purefunction
+    @elidable
     def ll_replace_chr_chr(s, c1, c2):
         length = len(s.chars)
         newstr = s.malloc(length)
@@ -828,7 +830,7 @@
             j += 1
         return newstr
 
-    @purefunction
+    @elidable
     def ll_contains(s, c):
         chars = s.chars
         strlen = len(chars)
@@ -839,7 +841,7 @@
             i += 1
         return False
 
-    @purefunction
+    @elidable
     def ll_int(s, base):
         if not 2 <= base <= 36:
             raise ValueError
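
ll_strhash above is a concrete instance of the elidable contract documented
in pypy/rlib/jit.py earlier in this diff: it does have a side effect (it
stores the computed hash in s.hash), but the effect is idempotent, so the JIT
may drop or constant-fold repeated calls.  A standalone pure-Python sketch of
the same idea, with a stand-in decorator that, like the untranslated helper,
only tags the function:

    def elidable(func):
        # stand-in for pypy.rlib.jit.elidable
        func._elidable_function_ = True
        return func

    class FakeStr(object):
        def __init__(self, chars):
            self.chars = chars
            self.hash = 0            # 0 means "not computed yet"

    @elidable
    def strhash(s):
        x = s.hash
        if x == 0:                   # idempotent side effect: fill the cache
            x = hash(s.chars) or 29872897
            s.hash = x
        return x

    s = FakeStr("hello")
    assert strhash(s) == strhash(s)  # same argument, same result; cached once
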
diff --git a/pypy/rpython/memory/gc/generation.py b/pypy/rpython/memory/gc/generation.py
--- a/pypy/rpython/memory/gc/generation.py
+++ b/pypy/rpython/memory/gc/generation.py
@@ -517,7 +517,8 @@
             objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
             self.last_generation_root_objects.append(addr_struct)
 
-    def writebarrier_before_copy(self, source_addr, dest_addr):
+    def writebarrier_before_copy(self, source_addr, dest_addr,
+                                 source_start, dest_start, length):
         """ This has the same effect as calling writebarrier over
         each element in dest copied from source, except it might reset
         one of the following flags a bit too eagerly, which means we'll have
diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py
--- a/pypy/rpython/memory/gc/minimark.py
+++ b/pypy/rpython/memory/gc/minimark.py
@@ -75,10 +75,16 @@
 
 first_gcflag = 1 << (LONG_BIT//2)
 
-# The following flag is never set on young objects.  It is initially set
-# on all prebuilt and old objects, and gets cleared by the write_barrier()
-# when we write in them a pointer to a young object.
-GCFLAG_NO_YOUNG_PTRS = first_gcflag << 0
+# The following flag is set on objects if we need to do something to
+# track the young pointers that it might contain.  The flag is not set
+# on young objects (unless they are large arrays, see below), and we
+# simply assume that any young object can point to any other young object.
+# For old and prebuilt objects, the flag is usually set, and is cleared
+# when we write a young pointer to it.  For large arrays with
+# GCFLAG_HAS_CARDS, we rely on card marking to track where the
+# young pointers are; the flag GCFLAG_TRACK_YOUNG_PTRS is set in this
+# case too, to speed up the write barrier.
+GCFLAG_TRACK_YOUNG_PTRS = first_gcflag << 0
 
 # The following flag is set on some prebuilt objects.  The flag is set
 # unless the object is already listed in 'prebuilt_root_objects'.
@@ -246,17 +252,23 @@
         self.ac = ArenaCollectionClass(arena_size, page_size,
                                        small_request_threshold)
         #
-        # Used by minor collection: a list of non-young objects that
+        # Used by minor collection: a list of (mostly non-young) objects that
         # (may) contain a pointer to a young object.  Populated by
-        # the write barrier.
-        self.old_objects_pointing_to_young = self.AddressStack()
+        # the write barrier: when we clear GCFLAG_TRACK_YOUNG_PTRS, we
+        # add it to this list.
+        class Cls(self.AddressStack):
+            def append(self2, addr):
+                assert addr not in self2.tolist()
+                self.AddressStack.append(self2, addr)
+        self.objects_pointing_to_young = self.AddressStack()
         #
-        # Similar to 'old_objects_pointing_to_young', but lists objects
+        # Similar to 'objects_pointing_to_young', but lists objects
         # that have the GCFLAG_CARDS_SET bit.  For large arrays.  Note
         # that it is possible for an object to be listed both in here
-        # and in 'old_objects_pointing_to_young', in which case we
+        # and in 'objects_pointing_to_young', in which case we
         # should just clear the cards and trace it fully, as usual.
-        self.old_objects_with_cards_set = self.AddressStack()
+        # Note also that young array objects may be added to this list.
+        self.objects_with_cards_set = self.AddressStack()
         #
         # A list of all prebuilt GC objects that contain pointers to the heap
         self.prebuilt_root_objects = self.AddressStack()
@@ -625,7 +637,7 @@
             # if 'can_make_young'.  The interesting case of 'can_make_young'
             # is for large objects, bigger than the 'large_objects' threshold,
             # which are raw-malloced but still young.
-            extra_flags = GCFLAG_NO_YOUNG_PTRS
+            extra_flags = GCFLAG_TRACK_YOUNG_PTRS
             #
         else:
             # No, so proceed to allocate it externally with raw_malloc().
@@ -643,7 +655,7 @@
                 # Reserve N extra words containing card bits before the object.
                 extra_words = self.card_marking_words_for_length(length)
                 cardheadersize = WORD * extra_words
-                extra_flags = GCFLAG_HAS_CARDS
+                extra_flags = GCFLAG_HAS_CARDS | GCFLAG_TRACK_YOUNG_PTRS
                 # note that if 'can_make_young', then card marking will only
                 # be used later, after (and if) the object becomes old
             #
@@ -686,7 +698,7 @@
                 self.young_rawmalloced_objects.add(result + size_gc_header)
             else:
                 self.old_rawmalloced_objects.append(result + size_gc_header)
-                extra_flags |= GCFLAG_NO_YOUNG_PTRS
+                extra_flags |= GCFLAG_TRACK_YOUNG_PTRS
         #
         # Common code to fill the header and length of the object.
         self.init_gc_object(result, typeid, extra_flags)
@@ -777,7 +789,7 @@
     def init_gc_object_immortal(self, addr, typeid16, flags=0):
         # For prebuilt GC objects, the flags must contain
         # GCFLAG_NO_xxx_PTRS, at least initially.
-        flags |= GCFLAG_NO_HEAP_PTRS | GCFLAG_NO_YOUNG_PTRS
+        flags |= GCFLAG_NO_HEAP_PTRS | GCFLAG_TRACK_YOUNG_PTRS
         self.init_gc_object(addr, typeid16, flags)
 
     def is_in_nursery(self, addr):
@@ -870,8 +882,8 @@
         ll_assert(not self.is_in_nursery(obj),
                   "object in nursery after collection")
         # similarly, all objects should have this flag:
-        ll_assert(self.header(obj).tid & GCFLAG_NO_YOUNG_PTRS,
-                  "missing GCFLAG_NO_YOUNG_PTRS")
+        ll_assert(self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS,
+                  "missing GCFLAG_TRACK_YOUNG_PTRS")
         # the GCFLAG_VISITED should not be set between collections
         ll_assert(self.header(obj).tid & GCFLAG_VISITED == 0,
                   "unexpected GCFLAG_VISITED")
@@ -910,7 +922,7 @@
     # for the JIT: a minimal description of the write_barrier() method
     # (the JIT assumes it is of the shape
     #  "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()")
-    JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS
+    JIT_WB_IF_FLAG = GCFLAG_TRACK_YOUNG_PTRS
 
     @classmethod
     def JIT_max_size_of_young_obj(cls):
@@ -921,11 +933,11 @@
         return cls.minimal_size_in_nursery
 
     def write_barrier(self, newvalue, addr_struct):
-        if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
+        if self.header(addr_struct).tid & GCFLAG_TRACK_YOUNG_PTRS:
             self.remember_young_pointer(addr_struct, newvalue)
 
     def write_barrier_from_array(self, newvalue, addr_array, index):
-        if self.header(addr_array).tid & GCFLAG_NO_YOUNG_PTRS:
+        if self.header(addr_array).tid & GCFLAG_TRACK_YOUNG_PTRS:
             if self.card_page_indices > 0:     # <- constant-folded
                 self.remember_young_pointer_from_array2(addr_array, index)
             else:
@@ -943,20 +955,23 @@
         def remember_young_pointer(addr_struct, newvalue):
             # 'addr_struct' is the address of the object in which we write.
             # 'newvalue' is the address that we are going to write in there.
+            # We know that 'addr_struct' has GCFLAG_TRACK_YOUNG_PTRS so far.
+            #
             if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
-                ll_assert(self.debug_is_old_object(addr_struct),
-                          "young object with GCFLAG_NO_YOUNG_PTRS")
+                ll_assert(self.debug_is_old_object(addr_struct) or
+                          self.header(addr_struct).tid & GCFLAG_HAS_CARDS != 0,
+                      "young object with GCFLAG_TRACK_YOUNG_PTRS and no cards")
             #
-            # If it seems that what we are writing is a pointer to the nursery
+            # If it seems that what we are writing is a pointer to a young obj
             # (as checked with appears_to_be_young()), then we need
-            # to remove the flag GCFLAG_NO_YOUNG_PTRS and add the old object
-            # to the list 'old_objects_pointing_to_young'.  We know that
+            # to remove the flag GCFLAG_TRACK_YOUNG_PTRS and add the object
+            # to the list 'objects_pointing_to_young'.  We know that
             # 'addr_struct' cannot be in the nursery, because nursery objects
-            # never have the flag GCFLAG_NO_YOUNG_PTRS to start with.
+            # never have the flag GCFLAG_TRACK_YOUNG_PTRS to start with.
             objhdr = self.header(addr_struct)
             if self.appears_to_be_young(newvalue):
-                self.old_objects_pointing_to_young.append(addr_struct)
-                objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
+                self.objects_pointing_to_young.append(addr_struct)
+                objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
             #
             # Second part: if 'addr_struct' is actually a prebuilt GC
             # object and it's the first time we see a write to it, we
@@ -980,16 +995,18 @@
             # 'addr_array' is the address of the object in which we write,
             # which must have an array part;  'index' is the index of the
             # item that is (or contains) the pointer that we write.
-            if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
-                ll_assert(self.debug_is_old_object(addr_array),
-                          "young array with GCFLAG_NO_YOUNG_PTRS")
+            # We know that 'addr_array' has GCFLAG_TRACK_YOUNG_PTRS so far.
+            #
             objhdr = self.header(addr_array)
             if objhdr.tid & GCFLAG_HAS_CARDS == 0:
                 #
+                if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
+                    ll_assert(self.debug_is_old_object(addr_array),
+                        "young array with no card but GCFLAG_TRACK_YOUNG_PTRS")
+                #
                 # no cards, use default logic.  Mostly copied from above.
-                self.old_objects_pointing_to_young.append(addr_array)
-                objhdr = self.header(addr_array)
-                objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
+                self.objects_pointing_to_young.append(addr_array)
+                objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
                 if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
                     objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
                     self.prebuilt_root_objects.append(addr_array)
@@ -1002,9 +1019,7 @@
             bitmask = 1 << (bitindex & 7)
             #
             # If the bit is already set, leave now.
-            size_gc_header = self.gcheaderbuilder.size_gc_header
-            addr_byte = addr_array - size_gc_header
-            addr_byte = llarena.getfakearenaaddress(addr_byte) + (~byteindex)
+            addr_byte = self.get_card(addr_array, byteindex)
             byte = ord(addr_byte.char[0])
             if byte & bitmask:
                 return
@@ -1016,7 +1031,7 @@
             addr_byte.char[0] = chr(byte | bitmask)
             #
             if objhdr.tid & GCFLAG_CARDS_SET == 0:
-                self.old_objects_with_cards_set.append(addr_array)
+                self.objects_with_cards_set.append(addr_array)
                 objhdr.tid |= GCFLAG_CARDS_SET
 
         remember_young_pointer_from_array2._dont_inline_ = True
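The array barrier above only sets one card bit and pushes the array onto objects_with_cards_set the first time; the actual tracing is deferred to collect_cardrefs_to_nursery(). The duplicated address computation is factored out into get_card() further down: the card bytes sit immediately before the GC header and are addressed backwards through ~byteindex. A small sketch with integer offsets standing in for llarena addresses (a size_gc_header of 8 is an assumption for the example):

    # Where get_card() finds card byte N, expressed as an offset from the
    # object address (assumption: plain integers, not llarena addresses).
    def card_byte_offset(size_gc_header, byteindex):
        # ~byteindex == -(byteindex + 1): card byte 0 is the byte right
        # below the GC header, byte 1 the one below that, and so on.
        return -size_gc_header + ~byteindex

    assert card_byte_offset(8, 0) == -9
    assert card_byte_offset(8, 1) == -10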
@@ -1026,9 +1041,6 @@
 
         # xxx trying it out for the JIT: a 3-arguments version of the above
         def remember_young_pointer_from_array3(addr_array, index, newvalue):
-            if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
-                ll_assert(self.debug_is_old_object(addr_array),
-                          "young array with GCFLAG_NO_YOUNG_PTRS")
             objhdr = self.header(addr_array)
             #
             # a single check for the common case of neither GCFLAG_HAS_CARDS
@@ -1044,8 +1056,8 @@
             else:
                 # case with cards.
                 #
-                # If the newly written address does not actually point to the
-                # nursery, leave now.
+                # If the newly written address does not actually point to a
+                # young object, leave now.
                 if not self.appears_to_be_young(newvalue):
                     return
                 #
@@ -1056,46 +1068,53 @@
                 bitmask = 1 << (bitindex & 7)
                 #
                 # If the bit is already set, leave now.
-                size_gc_header = self.gcheaderbuilder.size_gc_header
-                addr_byte = addr_array - size_gc_header
-                addr_byte = llarena.getfakearenaaddress(addr_byte) + \
-                            (~byteindex)
+                addr_byte = self.get_card(addr_array, byteindex)
                 byte = ord(addr_byte.char[0])
                 if byte & bitmask:
                     return
                 addr_byte.char[0] = chr(byte | bitmask)
                 #
                 if objhdr.tid & GCFLAG_CARDS_SET == 0:
-                    self.old_objects_with_cards_set.append(addr_array)
+                    self.objects_with_cards_set.append(addr_array)
                     objhdr.tid |= GCFLAG_CARDS_SET
                 return
             #
             # Logic for the no-cards case, put here to minimize the number
             # of checks done at the start of the function
+            if DEBUG:   # note: PYPY_GC_DEBUG=1 does not enable this
+                ll_assert(self.debug_is_old_object(addr_array),
+                        "young array with no card but GCFLAG_TRACK_YOUNG_PTRS")
+            #
             if self.appears_to_be_young(newvalue):
-                self.old_objects_pointing_to_young.append(addr_array)
-                objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
+                self.objects_pointing_to_young.append(addr_array)
+                objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
 
         remember_young_pointer_from_array3._dont_inline_ = True
         assert self.card_page_indices > 0
         self.remember_young_pointer_from_array3 = (
             remember_young_pointer_from_array3)
 
+    def get_card(self, obj, byteindex):
+        size_gc_header = self.gcheaderbuilder.size_gc_header
+        addr_byte = obj - size_gc_header
+        return llarena.getfakearenaaddress(addr_byte) + (~byteindex)
+
 
     def assume_young_pointers(self, addr_struct):
         """Called occasionally by the JIT to mean ``assume that 'addr_struct'
         may now contain young pointers.''
         """
         objhdr = self.header(addr_struct)
-        if objhdr.tid & GCFLAG_NO_YOUNG_PTRS:
-            self.old_objects_pointing_to_young.append(addr_struct)
-            objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
+        if objhdr.tid & GCFLAG_TRACK_YOUNG_PTRS:
+            self.objects_pointing_to_young.append(addr_struct)
+            objhdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
             #
             if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
                 objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
                 self.prebuilt_root_objects.append(addr_struct)
 
-    def writebarrier_before_copy(self, source_addr, dest_addr):
+    def writebarrier_before_copy(self, source_addr, dest_addr,
+                                 source_start, dest_start, length):
         """ This has the same effect as calling writebarrier over
         each element in dest copied from source, except it might reset
         one of the following flags a bit too eagerly, which means we'll have
@@ -1103,15 +1122,36 @@
         """
         source_hdr = self.header(source_addr)
         dest_hdr = self.header(dest_addr)
-        if dest_hdr.tid & GCFLAG_NO_YOUNG_PTRS == 0:
+        if dest_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
             return True
         # ^^^ a fast path of write-barrier
         #
-        if (source_hdr.tid & GCFLAG_NO_YOUNG_PTRS == 0 or
-            source_hdr.tid & GCFLAG_CARDS_SET != 0):
+        if source_hdr.tid & GCFLAG_HAS_CARDS != 0:
+            #
+            if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
+                # The source object may have random young pointers.
+                # Return False to mean "do it manually in ll_arraycopy".
+                return False
+            #
+            if source_hdr.tid & GCFLAG_CARDS_SET == 0:
+                # The source object has no young pointers at all.  Done.
+                return True
+            #
+            if dest_hdr.tid & GCFLAG_HAS_CARDS == 0:
+                # The dest object doesn't have cards.  Do it manually.
+                return False
+            #
+            if source_start != 0 or dest_start != 0:
+                # Misaligned.  Do it manually.
+                return False
+            #
+            self.manually_copy_card_bits(source_addr, dest_addr, length)
+            return True
+        #
+        if source_hdr.tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
             # there might be in source a pointer to a young object
-            self.old_objects_pointing_to_young.append(dest_addr)
-            dest_hdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
+            self.objects_pointing_to_young.append(dest_addr)
+            dest_hdr.tid &= ~GCFLAG_TRACK_YOUNG_PTRS
         #
         if dest_hdr.tid & GCFLAG_NO_HEAP_PTRS:
             if source_hdr.tid & GCFLAG_NO_HEAP_PTRS == 0:
@@ -1119,6 +1159,22 @@
                 self.prebuilt_root_objects.append(dest_addr)
         return True
 
+    def manually_copy_card_bits(self, source_addr, dest_addr, length):
+        # manually copy the individual card marks from source to dest
+        bytes = self.card_marking_bytes_for_length(length)
+        #
+        i = 0
+        while i < bytes:
+            addr_srcbyte = self.get_card(source_addr, i)
+            addr_dstbyte = self.get_card(dest_addr, i)
+            byte = ord(addr_srcbyte.char[0])
+            addr_dstbyte.char[0] = chr(ord(addr_dstbyte.char[0]) | byte)
+            i += 1
+        #
+        dest_hdr = self.header(dest_addr)
+        if dest_hdr.tid & GCFLAG_CARDS_SET == 0:
+            self.objects_with_cards_set.append(dest_addr)
+            dest_hdr.tid |= GCFLAG_CARDS_SET
 
     # ----------
     # Nursery collection
@@ -1135,20 +1191,28 @@
         # Note that during this step, we ignore references to further
         # young objects; only objects directly referenced by roots
         # are copied out or flagged.  They are also added to the list
-        # 'old_objects_pointing_to_young'.
+        # 'objects_pointing_to_young'.
         self.collect_roots_in_nursery()
         #
-        # If we are using card marking, do a partial trace of the arrays
-        # that are flagged with GCFLAG_CARDS_SET.
-        if self.card_page_indices > 0:
-            self.collect_cardrefs_to_nursery()
-        #
-        # Now trace objects from 'old_objects_pointing_to_young'.
-        # All nursery objects they reference are copied out of the
-        # nursery, and again added to 'old_objects_pointing_to_young'.
-        # All young raw-malloced object found is flagged GCFLAG_VISITED.
-        # We proceed until 'old_objects_pointing_to_young' is empty.
-        self.collect_oldrefs_to_nursery()
+        while True:
+            # If we are using card marking, do a partial trace of the arrays
+            # that are flagged with GCFLAG_CARDS_SET.
+            if self.card_page_indices > 0:
+                self.collect_cardrefs_to_nursery()
+            #
+            # Now trace objects from 'objects_pointing_to_young'.
+            # All nursery objects they reference are copied out of the
+            # nursery, and again added to 'objects_pointing_to_young'.
+            # All young raw-malloced objects found are flagged GCFLAG_VISITED.
+            # We proceed until 'objects_pointing_to_young' is empty.
+            self.collect_oldrefs_to_nursery()
+            #
+            # We have to loop back if collect_oldrefs_to_nursery caused
+            # new objects to show up in objects_with_cards_set
+            if self.card_page_indices > 0:
+                if self.objects_with_cards_set.non_empty():
+                    continue
+            break
         #
         # Now all live nursery objects should be out.  Update the young
         # weakrefs' targets.
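The single pass of the old code becomes a loop because collect_oldrefs_to_nursery() can promote young raw-malloced arrays whose young pointers are only described by card marks (see the _trace_drag_out hunk below); that refills objects_with_cards_set, so both drains are repeated until neither list can produce more work. The shape of the loop as a sketch, with the drain callables standing in for the two collect_* methods:

    # Two-worklist fixpoint mirroring the loop above (assumption: the drain
    # callables stand in for collect_cardrefs_to_nursery and
    # collect_oldrefs_to_nursery; plain lists stand in for AddressStacks).
    def drain_until_stable(cards, oldrefs, drain_cards, drain_oldrefs):
        while True:
            drain_cards(cards)       # partial traces; may grow 'oldrefs'
            drain_oldrefs(oldrefs)   # full traces; may grow 'cards' again
            if not cards:
                break                # nothing left that could add more work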
@@ -1181,7 +1245,7 @@
         # we don't need to trace prebuilt GcStructs during a minor collect:
         # if a prebuilt GcStruct contains a pointer to a young object,
         # then the write_barrier must have ensured that the prebuilt
-        # GcStruct is in the list self.old_objects_pointing_to_young.
+        # GcStruct is in the list self.objects_pointing_to_young.
         self.root_walker.walk_roots(
             MiniMarkGC._trace_drag_out1,  # stack roots
             MiniMarkGC._trace_drag_out1,  # static in prebuilt non-gc
@@ -1189,7 +1253,7 @@
 
     def collect_cardrefs_to_nursery(self):
         size_gc_header = self.gcheaderbuilder.size_gc_header
-        oldlist = self.old_objects_with_cards_set
+        oldlist = self.objects_with_cards_set
         while oldlist.non_empty():
             obj = oldlist.pop()
             #
@@ -1205,11 +1269,11 @@
             bytes = self.card_marking_bytes_for_length(length)
             p = llarena.getfakearenaaddress(obj - size_gc_header)
             #
-            # If the object doesn't have GCFLAG_NO_YOUNG_PTRS, then it
-            # means that it is in 'old_objects_pointing_to_young' and
+            # If the object doesn't have GCFLAG_TRACK_YOUNG_PTRS, then it
+            # means that it is in 'objects_pointing_to_young' and
             # will be fully traced by collect_oldrefs_to_nursery() just
             # afterwards.
-            if self.header(obj).tid & GCFLAG_NO_YOUNG_PTRS == 0:
+            if self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
                 #
                 # In that case, we just have to reset all card bits.
                 while bytes > 0:
@@ -1245,19 +1309,30 @@
 
 
     def collect_oldrefs_to_nursery(self):
-        # Follow the old_objects_pointing_to_young list and move the
+        # Follow the objects_pointing_to_young list and move the
         # young objects they point to out of the nursery.
-        oldlist = self.old_objects_pointing_to_young
+        oldlist = self.objects_pointing_to_young
         while oldlist.non_empty():
             obj = oldlist.pop()
             #
-            # Add the flag GCFLAG_NO_YOUNG_PTRS.  All live objects should have
-            # this flag set after a nursery collection.
-            self.header(obj).tid |= GCFLAG_NO_YOUNG_PTRS
+            # Check (somehow) that the flags are correct: we must not have
+            # GCFLAG_TRACK_YOUNG_PTRS so far.  But in a rare case, it's
+            # possible that the same obj is appended twice to the list
+            # (see _trace_drag_out, GCFLAG_VISITED case).  Filter it out
+            # here.
+            if self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS != 0:
+                ll_assert(self.header(obj).tid & GCFLAG_VISITED != 0,
+                          "objects_pointing_to_young contains obj with "
+                          "GCFLAG_TRACK_YOUNG_PTRS and not GCFLAG_VISITED")
+                continue
+            #
+            # Add the flag GCFLAG_TRACK_YOUNG_PTRS.  All live objects should
+            # have this flag set after a nursery collection.
+            self.header(obj).tid |= GCFLAG_TRACK_YOUNG_PTRS
             #
             # Trace the 'obj' to replace pointers to nursery with pointers
             # outside the nursery, possibly forcing nursery objects out
-            # and adding them to 'old_objects_pointing_to_young' as well.
+            # and adding them to 'objects_pointing_to_young' as well.
             self.trace_and_drag_out_of_nursery(obj)
 
     def trace_and_drag_out_of_nursery(self, obj):
@@ -1296,7 +1371,19 @@
                 # 'obj' points to a young, raw-malloced object
                 if (self.header(obj).tid & GCFLAG_VISITED) == 0:
                     self.header(obj).tid |= GCFLAG_VISITED
-                    self.old_objects_pointing_to_young.append(obj)
+                    #
+                    # we just made 'obj' old, so we may need to add it
+                    # in the correct list:
+                    if self.header(obj).tid & GCFLAG_TRACK_YOUNG_PTRS == 0:
+                        # common case: GCFLAG_TRACK_YOUNG_PTRS is not set, so
+                        # the object may contain young pointers anywhere
+                        self.objects_pointing_to_young.append(obj)
+                    else:
+                        # large array case: the object contains card marks
+                        # that tell us where young pointers are, and it
+                        # is already in objects_with_cards_set.
+                        ll_assert(self.header(obj).tid & GCFLAG_HAS_CARDS != 0,
+                                  "neither YOUNG_PTRS nor HAS_CARDS??")
             return
         #
         # If 'obj' was already forwarded, change it to its forwarding address.
@@ -1343,11 +1430,11 @@
         # Change the original pointer to this object.
         root.address[0] = newobj
         #
-        # Add the newobj to the list 'old_objects_pointing_to_young',
+        # Add the newobj to the list 'objects_pointing_to_young',
         # because it can contain further pointers to other young objects.
         # We will fix such references to point to the copy of the young
-        # objects when we walk 'old_objects_pointing_to_young'.
-        self.old_objects_pointing_to_young.append(newobj)
+        # objects when we walk 'objects_pointing_to_young'.
+        self.objects_pointing_to_young.append(newobj)
 
 
     def _malloc_out_of_nursery(self, totalsize):
diff --git a/pypy/rpython/memory/gc/test/test_direct.py b/pypy/rpython/memory/gc/test/test_direct.py
--- a/pypy/rpython/memory/gc/test/test_direct.py
+++ b/pypy/rpython/memory/gc/test/test_direct.py
@@ -522,5 +522,78 @@
             self.stackroots.pop()
     test_card_marker.GC_PARAMS = {"card_page_indices": 4}
 
+    def test_writebarrier_before_copy(self):
+        from pypy.rpython.memory.gc import minimark
+        largeobj_size =  self.gc.nonlarge_max + 1
+        p_src = self.malloc(VAR, largeobj_size)
+        p_dst = self.malloc(VAR, largeobj_size)
+        # make them old
+        self.stackroots.append(p_src)
+        self.stackroots.append(p_dst)
+        self.gc.collect()
+        p_dst = self.stackroots.pop()
+        p_src = self.stackroots.pop()
+        #
+        addr_src = llmemory.cast_ptr_to_adr(p_src)
+        addr_dst = llmemory.cast_ptr_to_adr(p_dst)
+        hdr_src = self.gc.header(addr_src)
+        hdr_dst = self.gc.header(addr_dst)
+        #
+        assert hdr_src.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
+        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
+        #
+        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
+        assert res
+        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS
+        #
+        hdr_src.tid &= ~minimark.GCFLAG_TRACK_YOUNG_PTRS  # pretend we have young ptrs
+        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
+        assert res # we optimized it
+        assert hdr_dst.tid & minimark.GCFLAG_TRACK_YOUNG_PTRS == 0 # and we copied the flag
+        #
+        hdr_src.tid |= minimark.GCFLAG_TRACK_YOUNG_PTRS
+        hdr_dst.tid |= minimark.GCFLAG_TRACK_YOUNG_PTRS
+        hdr_src.tid |= minimark.GCFLAG_HAS_CARDS
+        hdr_src.tid |= minimark.GCFLAG_CARDS_SET
+        # hdr_dst.tid does not have minimark.GCFLAG_HAS_CARDS
+        res = self.gc.writebarrier_before_copy(addr_src, addr_dst, 0, 0, 10)
+        assert not res # there might be young ptrs, let ll_arraycopy find them
+
+    def test_writebarrier_before_copy_preserving_cards(self):
+        from pypy.rpython.lltypesystem import llarena
+        from pypy.rpython.memory.gc import minimark
+        tid = self.get_type_id(VAR)
+        largeobj_size =  self.gc.nonlarge_max + 1
+        addr_src = self.gc.external_malloc(tid, largeobj_size)
+        addr_dst = self.gc.external_malloc(tid, largeobj_size)
+        hdr_src = self.gc.header(addr_src)
+        hdr_dst = self.gc.header(addr_dst)
+        #
+        assert hdr_src.tid & minimark.GCFLAG_HAS_CARDS
+        assert hdr_dst.tid & minimark.GCFLAG_HAS_CARDS
+        #
+        young_p = self.malloc(S)
+        self.gc.write_barrier_from_array(young_p, addr_src, 0)
+        index_in_third_page = int(2.5 * self.gc.card_page_indices)
+        assert index_in_third_page < largeobj_size
+        self.gc.write_barrier_from_array(young_p, addr_src,
+                                         index_in_third_page)
+        #
+        assert hdr_src.tid & minimark.GCFLAG_CARDS_SET
+        addr_byte = self.gc.get_card(addr_src, 0)
+        assert ord(addr_byte.char[0]) == 0x01 | 0x04  # bits 0 and 2
+        #
+        res = self.gc.writebarrier_before_copy(addr_src, addr_dst,
+                                             0, 0, 2*self.gc.card_page_indices)
+        assert res
+        #
+        assert hdr_dst.tid & minimark.GCFLAG_CARDS_SET
+        addr_byte = self.gc.get_card(addr_dst, 0)
+        assert ord(addr_byte.char[0]) == 0x01 | 0x04  # bits 0 and 2
+
+    test_writebarrier_before_copy_preserving_cards.GC_PARAMS = {
+        "card_page_indices": 4}
+
+
 class TestMiniMarkGCFull(DirectGCTest):
     from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass
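The expected card byte in test_writebarrier_before_copy_preserving_cards follows from the arithmetic with card_page_indices = 4: index 0 marks card bit 0 // 4 = 0 and index int(2.5 * 4) = 10 marks card bit 10 // 4 = 2, and both bits land in the first card byte, giving 0x01 | 0x04 == 0x05. A short check of that arithmetic (plain Python; one card bit per card_page_indices items and eight bits per byte are the assumptions):

    # Worked check of the card byte expected by the test above.
    card_page_indices = 4
    byte0 = 0
    for index in (0, int(2.5 * card_page_indices)):   # indices 0 and 10
        bit = index // card_page_indices               # card number
        assert bit // 8 == 0                           # both land in byte 0
        byte0 |= 1 << (bit % 8)
    assert byte0 == 0x01 | 0x04                        # == 0x05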
diff --git a/pypy/rpython/memory/gctransform/framework.py b/pypy/rpython/memory/gctransform/framework.py
--- a/pypy/rpython/memory/gctransform/framework.py
+++ b/pypy/rpython/memory/gctransform/framework.py
@@ -317,7 +317,8 @@
         if hasattr(GCClass, 'writebarrier_before_copy'):
             self.wb_before_copy_ptr = \
                     getfn(GCClass.writebarrier_before_copy.im_func,
-                    [s_gc] + [annmodel.SomeAddress()] * 2, annmodel.SomeBool())
+                    [s_gc] + [annmodel.SomeAddress()] * 2 +
+                    [annmodel.SomeInteger()] * 3, annmodel.SomeBool())
         elif GCClass.needs_write_barrier:
             raise NotImplementedError("GC needs write barrier, but does not provide writebarrier_before_copy functionality")
 
@@ -886,7 +887,7 @@
         dest_addr = hop.genop('cast_ptr_to_adr', [op.args[1]],
                                 resulttype=llmemory.Address)
         hop.genop('direct_call', [self.wb_before_copy_ptr, self.c_const_gc,
-                                  source_addr, dest_addr],
+                                  source_addr, dest_addr] + op.args[2:],
                   resultvar=op.result)
 
     def gct_weakref_create(self, hop):
diff --git a/pypy/rpython/memory/gctransform/test/test_framework.py b/pypy/rpython/memory/gctransform/test/test_framework.py
--- a/pypy/rpython/memory/gctransform/test/test_framework.py
+++ b/pypy/rpython/memory/gctransform/test/test_framework.py
@@ -163,7 +163,8 @@
     GC_PARAMS = {}
     class GCClass(MarkSweepGC):
         needs_write_barrier = True
-        def writebarrier_before_copy(self, source, dest):
+        def writebarrier_before_copy(self, source, dest,
+                                     source_start, dest_start, length):
             return True
 
 def write_barrier_check(spaceop, needs_write_barrier=True):
diff --git a/pypy/rpython/memory/gcwrapper.py b/pypy/rpython/memory/gcwrapper.py
--- a/pypy/rpython/memory/gcwrapper.py
+++ b/pypy/rpython/memory/gcwrapper.py
@@ -136,11 +136,14 @@
         ptr = lltype.cast_opaque_ptr(llmemory.GCREF, ptr)
         return self.gc.id(ptr)
 
-    def writebarrier_before_copy(self, source, dest):
+    def writebarrier_before_copy(self, source, dest,
+                                 source_start, dest_start, length):
         if self.gc.needs_write_barrier:
             source_addr = llmemory.cast_ptr_to_adr(source)
             dest_addr   = llmemory.cast_ptr_to_adr(dest)
-            return self.gc.writebarrier_before_copy(source_addr, dest_addr)
+            return self.gc.writebarrier_before_copy(source_addr, dest_addr,
+                                                    source_start, dest_start,
+                                                    length)
         else:
             return True
 
diff --git a/pypy/rpython/memory/support.py b/pypy/rpython/memory/support.py
--- a/pypy/rpython/memory/support.py
+++ b/pypy/rpython/memory/support.py
@@ -140,6 +140,14 @@
             self.foreach(_add_in_dict, result)
             return result
 
+        def tolist(self):
+            """NOT_RPYTHON.  Returns the content as a list."""
+            lst = []
+            def _add(obj, lst):
+                lst.append(obj)
+            self.foreach(_add, lst)
+            return lst
+
         def remove(self, addr):
             """Remove 'addr' from the stack.  The addr *must* be in the list,
             and preferably near the top.
diff --git a/pypy/rpython/ootypesystem/rclass.py b/pypy/rpython/ootypesystem/rclass.py
--- a/pypy/rpython/ootypesystem/rclass.py
+++ b/pypy/rpython/ootypesystem/rclass.py
@@ -264,7 +264,8 @@
 
         for name, attrdef in selfattrs.iteritems():
             if not attrdef.readonly and self.is_quasi_immutable(name):
-                ootype.addFields(self.lowleveltype, {'mutable_'+name: OBJECT})
+                name = mangle('mutable_' + name, self.rtyper.getconfig())
+                ootype.addFields(self.lowleveltype, {name: OBJECT})
 
         classattributes = {}
         baseInstance = self.lowleveltype._superclass
diff --git a/pypy/rpython/ootypesystem/rdict.py b/pypy/rpython/ootypesystem/rdict.py
--- a/pypy/rpython/ootypesystem/rdict.py
+++ b/pypy/rpython/ootypesystem/rdict.py
@@ -18,7 +18,7 @@
 
 class DictRepr(AbstractDictRepr):
     def __init__(self, rtyper, key_repr, value_repr, dictkey, dictvalue,
-                 custom_eq_hash=None):
+                 custom_eq_hash=None, force_non_null=False):
         self.rtyper = rtyper
         self.custom_eq_hash = custom_eq_hash is not None
 
diff --git a/pypy/rpython/ootypesystem/test/test_oopbc.py b/pypy/rpython/ootypesystem/test/test_oopbc.py
--- a/pypy/rpython/ootypesystem/test/test_oopbc.py
+++ b/pypy/rpython/ootypesystem/test/test_oopbc.py
@@ -81,3 +81,18 @@
     res = interpret(f, [1], type_system='ootype')
     assert res == 2
 
+def test_quasi_immutable():
+    class A(object):
+        _immutable_fields_ = ['x?']
+        def __init__(self):
+            self.x = 3
+        def foo(self):
+            return self.x
+
+    a = A()
+
+    def f():
+        return a.foo()
+    
+    res = interpret(f, [], type_system='ootype')
+    assert res == 3
diff --git a/pypy/rpython/rdict.py b/pypy/rpython/rdict.py
--- a/pypy/rpython/rdict.py
+++ b/pypy/rpython/rdict.py
@@ -15,6 +15,7 @@
         dictvalue = self.dictdef.dictvalue
         s_key     = dictkey  .s_value
         s_value   = dictvalue.s_value
+        force_non_null = self.dictdef.force_non_null
         if (s_key.__class__ is annmodel.SomeObject and s_key.knowntype == object and
             s_value.__class__ is annmodel.SomeObject and s_value.knowntype == object):
             return robject.pyobj_repr
@@ -29,7 +30,8 @@
                                                      lambda: rtyper.getrepr(s_value),
                                                      dictkey,
                                                      dictvalue,
-                                                     custom_eq_hash)
+                                                     custom_eq_hash,
+                                                     force_non_null)
 
     def rtyper_makekey(self):
         self.dictdef.dictkey  .dont_change_any_more = True
diff --git a/pypy/rpython/test/test_rdict.py b/pypy/rpython/test/test_rdict.py
--- a/pypy/rpython/test/test_rdict.py
+++ b/pypy/rpython/test/test_rdict.py
@@ -598,6 +598,29 @@
         res = self.interpret(func, [])
         assert res in [5263, 6352]
 
+    def test_dict_popitem_hash(self):
+        def deq(n, m):
+            return n == m
+        def dhash(n):
+            return ~n
+        def func():
+            d = r_dict(deq, dhash)
+            d[5] = 2
+            d[6] = 3
+            k1, v1 = d.popitem()
+            assert len(d) == 1
+            k2, v2 = d.popitem()
+            try:
+                d.popitem()
+            except KeyError:
+                pass
+            else:
+                assert 0, "should have raised KeyError"
+            assert len(d) == 0
+            return k1*1000 + v1*100 + k2*10 + v2
+
+        res = self.interpret(func, [])
+        assert res in [5263, 6352]
 
 class TestLLtype(BaseTestRdict, LLRtypeMixin):
     def test_dict_but_not_with_char_keys(self):
@@ -860,6 +883,25 @@
         res = f()
         assert res == 1
 
+    def test_nonnull_hint(self):
+        def eq(a, b):
+            return a == b
+        def rhash(a):
+            return 3
+        
+        def func(i):
+            d = r_dict(eq, rhash, force_non_null=True)
+            if not i:
+                d[None] = i
+            else:
+                d[str(i)] = i
+            return "12" in d, d
+
+        llres = self.interpret(func, [12])
+        assert llres.item0 == 1
+        DICT = lltype.typeOf(llres.item1)
+        assert sorted(DICT.TO.entries.TO.OF._flds) == ['f_hash', 'key', 'value']
+
     # ____________________________________________________________
 
 
diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
--- a/pypy/tool/jitlogparser/parser.py
+++ b/pypy/tool/jitlogparser/parser.py
@@ -98,7 +98,7 @@
     def __init__(self, operations, storage):
         if operations[0].name == 'debug_merge_point':
             self.inline_level = int(operations[0].args[0])
-            m = re.search('<code object ([<>\w]+), file \'(.+?)\', line (\d+)> #(\d+) (\w+)',
+            m = re.search('<code object ([<>\w]+)\. file \'(.+?)\'\. line (\d+)> #(\d+) (\w+)',
                          operations[0].getarg(1))
             if m is None:
                 # a non-code loop, like StrLiteralSearch or something
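The debug_merge_point text changed from comma-separated to dot-separated fields between the code object name, the file name and the line number, and the parser regex above follows suit; the tests below are updated to the new format. A standalone check of the new pattern (the regex is the one from this hunk, the sample string follows the updated tests):

    # Standalone check of the updated debug_merge_point regex.
    import re
    pat = re.compile(
        r"<code object ([<>\w]+)\. file \'(.+?)\'\. line (\d+)> #(\d+) (\w+)")
    m = pat.search("<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
    assert m.groups() == ('stuff', '/I/dont/exist.py', '200', '10', 'ADD')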
@@ -121,6 +121,9 @@
     def getcode(self):
         return self.code
 
+    def has_valid_code(self):
+        return self.code is not None
+
     def getopcode(self):
         return self.code.map[self.bytecode_no]
 
@@ -220,6 +223,12 @@
         return self._lineset
     lineset = property(getlineset)
 
+    def has_valid_code(self):
+        for chunk in self.chunks:
+            if not chunk.has_valid_code():
+                return False
+        return True
+
     def _compute_linerange(self):
         self._lineset = set()
         minline = sys.maxint
diff --git a/pypy/tool/jitlogparser/test/test_parser.py b/pypy/tool/jitlogparser/test/test_parser.py
--- a/pypy/tool/jitlogparser/test/test_parser.py
+++ b/pypy/tool/jitlogparser/test/test_parser.py
@@ -38,10 +38,10 @@
 def test_split():
     ops = parse('''
     [i0]
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 200> #10 ADD")
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 200> #11 SUB")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #11 SUB")
     i1 = int_add(i0, 1)
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 200> #11 SUB")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #11 SUB")
     i2 = int_add(i1, 1)
     ''')
     res = Function.from_operations(ops.operations, LoopStorage())
@@ -54,12 +54,12 @@
 def test_inlined_call():
     ops = parse("""
     []
-    debug_merge_point(0, '<code object inlined_call, file 'source.py', line 12> #28 CALL_FUNCTION')
+    debug_merge_point(0, '<code object inlined_call. file 'source.py'. line 12> #28 CALL_FUNCTION')
     i18 = getfield_gc(p0, descr=<BoolFieldDescr pypy.interpreter.pyframe.PyFrame.inst_is_being_profiled 89>)
-    debug_merge_point(1, '<code object inner, file 'source.py', line 9> #0 LOAD_FAST')
-    debug_merge_point(1, '<code object inner, file 'source.py', line 9> #3 LOAD_CONST')
-    debug_merge_point(1, '<code object inner, file 'source.py', line 9> #7 RETURN_VALUE')
-    debug_merge_point(0, '<code object inlined_call, file 'source.py', line 12> #31 STORE_FAST')
+    debug_merge_point(1, '<code object inner. file 'source.py'. line 9> #0 LOAD_FAST')
+    debug_merge_point(1, '<code object inner. file 'source.py'. line 9> #3 LOAD_CONST')
+    debug_merge_point(1, '<code object inner. file 'source.py'. line 9> #7 RETURN_VALUE')
+    debug_merge_point(0, '<code object inlined_call. file 'source.py'. line 12> #31 STORE_FAST')
     """)
     res = Function.from_operations(ops.operations, LoopStorage())
     assert len(res.chunks) == 3 # two chunks + inlined call
@@ -72,10 +72,10 @@
 def test_name():
     ops = parse('''
     [i0]
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 200> #10 ADD")
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 201> #11 SUB")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 201> #11 SUB")
     i1 = int_add(i0, 1)
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 202> #11 SUB")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 202> #11 SUB")
     i2 = int_add(i1, 1)
     ''')
     res = Function.from_operations(ops.operations, LoopStorage())
@@ -89,10 +89,10 @@
     ops = parse('''
     [i0]
     i3 = int_add(i0, 1)
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 200> #10 ADD")
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 201> #11 SUB")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 200> #10 ADD")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 201> #11 SUB")
     i1 = int_add(i0, 1)
-    debug_merge_point(0, "<code object stuff, file '/I/dont/exist.py', line 202> #11 SUB")
+    debug_merge_point(0, "<code object stuff. file '/I/dont/exist.py'. line 202> #11 SUB")
     i2 = int_add(i1, 1)
     ''')
     res = Function.from_operations(ops.operations, LoopStorage())
@@ -102,10 +102,10 @@
     fname = str(py.path.local(__file__).join('..', 'x.py'))
     ops = parse('''
     [i0, i1]
-    debug_merge_point(0, "<code object f, file '%(fname)s', line 2> #0 LOAD_FAST")
-    debug_merge_point(0, "<code object f, file '%(fname)s', line 2> #3 LOAD_FAST")
-    debug_merge_point(0, "<code object f, file '%(fname)s', line 2> #6 BINARY_ADD")
-    debug_merge_point(0, "<code object f, file '%(fname)s', line 2> #7 RETURN_VALUE")
+    debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #0 LOAD_FAST")
+    debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #3 LOAD_FAST")
+    debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #6 BINARY_ADD")
+    debug_merge_point(0, "<code object f. file '%(fname)s'. line 2> #7 RETURN_VALUE")
     ''' % locals())
     res = Function.from_operations(ops.operations, LoopStorage())
     assert res.chunks[1].lineno == 3
@@ -114,11 +114,11 @@
     fname = str(py.path.local(__file__).join('..', 'x.py'))
     ops = parse('''
     [i0, i1]
-    debug_merge_point(0, "<code object g, file '%(fname)s', line 5> #9 LOAD_FAST")
-    debug_merge_point(0, "<code object g, file '%(fname)s', line 5> #12 LOAD_CONST")
-    debug_merge_point(0, "<code object g, file '%(fname)s', line 5> #22 LOAD_CONST")
-    debug_merge_point(0, "<code object g, file '%(fname)s', line 5> #28 LOAD_CONST")
-    debug_merge_point(0, "<code object g, file '%(fname)s', line 5> #6 SETUP_LOOP")
+    debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #9 LOAD_FAST")
+    debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #12 LOAD_CONST")
+    debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #22 LOAD_CONST")
+    debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #28 LOAD_CONST")
+    debug_merge_point(0, "<code object g. file '%(fname)s'. line 5> #6 SETUP_LOOP")
     ''' % locals())
     res = Function.from_operations(ops.operations, LoopStorage())
     assert res.linerange == (7, 9)
@@ -128,7 +128,7 @@
     fname = str(py.path.local(__file__).join('..', 'x.py'))
     ops = parse("""
     [p6, p1]
-    debug_merge_point(0, '<code object h, file '%(fname)s', line 11> #17 FOR_ITER')
+    debug_merge_point(0, '<code object h. file '%(fname)s'. line 11> #17 FOR_ITER')
     guard_class(p6, 144264192, descr=<Guard2>)
     p12 = getfield_gc(p6, descr=<GcPtrFieldDescr pypy.objspace.std.iterobject.W_AbstractSeqIterObject.inst_w_seq 12>)
     """ % locals())
@@ -168,7 +168,7 @@
     []
     int_add(0, 1)
     ''')
-    loops = LoopStorage().reconnect_loops([main, bridge])
+    LoopStorage().reconnect_loops([main, bridge])
     assert adjust_bridges(main, {})[1].name == 'guard_true'
     assert adjust_bridges(main, {'loop-13': True})[1].name == 'int_add'
 
diff --git a/pypy/tool/pytest/appsupport.py b/pypy/tool/pytest/appsupport.py
--- a/pypy/tool/pytest/appsupport.py
+++ b/pypy/tool/pytest/appsupport.py
@@ -1,8 +1,13 @@
 import autopath
 import py
-from pypy.interpreter import gateway
+from pypy.interpreter import gateway, pycode
 from pypy.interpreter.error import OperationError
 
+try:
+    from _pytest.assertion.newinterpret import interpret
+except ImportError:
+    from _pytest.assertion.oldinterpret import interpret
+
 # ____________________________________________________________
 
 class AppCode(object):
@@ -51,13 +56,11 @@
         space = self.space
         for key, w_value in vars.items():
             space.setitem(self.w_locals, space.wrap(key), w_value)
-        return space.eval(code, self.w_globals, self.w_locals)
-
-    def exec_(self, code, **vars):
-        space = self.space
-        for key, w_value in vars.items():
-            space.setitem(self.w_locals, space.wrap(key), w_value)
-        space.exec_(code, self.w_globals, self.w_locals)
+        if isinstance(code, str):
+            return space.eval(code, self.w_globals, self.w_locals)
+        pyc = pycode.PyCode._from_code(space, code)
+        return pyc.exec_host_bytecode(self.w_globals, self.w_locals)
+    exec_ = eval
 
     def repr(self, w_value):
         return self.space.unwrap(self.space.repr(w_value))
@@ -80,7 +83,7 @@
     def __init__(self, space, operr):
         self.space = space
         self.operr = operr
-        self.typename = operr.w_type.getname(space, "?")
+        self.typename = operr.w_type.getname(space)
         self.traceback = AppTraceback(space, self.operr.get_traceback())
         debug_excs = getattr(operr, 'debug_excs', [])
         if debug_excs:
@@ -163,8 +166,8 @@
             except py.error.ENOENT: 
                 source = None
             from pypy import conftest
-            if source and not py.test.config.option.nomagic:
-                msg = py.code._reinterpret_old(source, runner, should_fail=True)
+            if source and py.test.config._assertstate.mode != "off":
+                msg = interpret(source, runner, should_fail=True)
                 space.setattr(w_self, space.wrap('args'),
                             space.newtuple([space.wrap(msg)]))
                 w_msg = space.wrap(msg)
diff --git a/pypy/tool/pytest/test/test_pytestsupport.py b/pypy/tool/pytest/test/test_pytestsupport.py
--- a/pypy/tool/pytest/test/test_pytestsupport.py
+++ b/pypy/tool/pytest/test/test_pytestsupport.py
@@ -4,7 +4,7 @@
 from pypy.interpreter.pycode import PyCode
 from pypy.interpreter.pyframe import PyFrame
 from pypy.tool.pytest.appsupport import (AppFrame, build_pytest_assertion,
-    AppExceptionInfo)
+    AppExceptionInfo, interpret)
 import py
 from pypy.tool.udir import udir
 import os
@@ -22,8 +22,8 @@
     co = PyCode._from_code(space, somefunc.func_code)
     pyframe = PyFrame(space, co, space.newdict(), None)
     runner = AppFrame(space, pyframe)
-    py.code._reinterpret_old("f = lambda x: x+1", runner, should_fail=False)
-    msg = py.code._reinterpret_old("assert isinstance(f(2), float)", runner)
+    interpret("f = lambda x: x+1", runner, should_fail=False)
+    msg = interpret("assert isinstance(f(2), float)", runner)
     assert msg.startswith("assert isinstance(3, float)\n"
                           " +  where 3 = ")
 
@@ -58,6 +58,12 @@
     except AssertionError, e:
         assert e.msg == "Failed"
 
+def app_test_comparison():
+    try:
+        assert 3 > 4
+    except AssertionError, e:
+        assert "3 > 4" in e.msg
+
 
 def test_appexecinfo(space):
     try:
diff --git a/pypy/translator/c/gcc/test/msvc/track_and_esp.s b/pypy/translator/c/gcc/test/msvc/track_and_esp.s
--- a/pypy/translator/c/gcc/test/msvc/track_and_esp.s
+++ b/pypy/translator/c/gcc/test/msvc/track_and_esp.s
@@ -153,6 +153,7 @@
 	push	OFFSET _pypy_g_pypy_rpython_memory_gc_semispace_SemiSpaceGC
 $block12$88259:
 	call	_pypy_g_SemiSpaceGC_obtain_free_space
+    ;; expected {4(%ebp) | 16(%esp), 12(%esp), 8(%esp), (%ebp) | }
 
 ; 58362: 	l_v21669 = (&pypy_g_ExcData)->ed_exc_type;
 ; 58363: 	l_v21670 = (l_v21669 == NULL);
@@ -225,6 +226,7 @@
 	push	1
 $block14$88247:
 	call	_pypy_g__ll_malloc_varsize_no_length__Signed_Signed_Sign
+    ;; expected {4(%ebp) | 20(%esp), 16(%esp), 12(%esp), (%ebp) | }
 	mov	esi, eax
 
 ; 58377: 	OP_TRACK_ALLOC_START(l_v21672, /* nothing */);
@@ -232,6 +234,7 @@
 	push	OFFSET ??_C@_0BN@BIPHFGBC@pypy_g_ll_math_ll_math_frexp?$AA@
 	push	esi
 	call	_pypy_debug_alloc_start
+    ;; expected {4(%ebp) | 28(%esp), 24(%esp), 20(%esp), (%ebp) | }
 	add	esp, 20					; 00000014H
 
 ; 58378: 	l_exp_p_0 = (long *)l_v21672;
@@ -283,6 +286,7 @@
 	sub	esp, 8
 	fstp	QWORD PTR [esp]
 	call	_pypy_g_frexp__Float_arrayPtr_star_2
+    ;; expected {4(%ebp) | 20(%esp), 16(%esp), 12(%esp), (%ebp) | }
 
 ; 58387: 	l_v21675 = (&pypy_g_ExcData)->ed_exc_type;
 ; 58388: 	l_v21676 = (l_v21675 == NULL);
@@ -331,11 +335,13 @@
 	mov	DWORD PTR _pypy_g_ExcData+4, eax
 	mov	DWORD PTR _pypy_g_ExcData, eax
 	call	_pypy_debug_alloc_stop
+    ;; expected {4(%ebp) | 12(%esp), 8(%esp), 4(%esp), (%ebp) | }
 
 ; 58413: 	OP_RAW_FREE(l_v21688, /* nothing */);
 
 	push	esi
 	call	_PyObject_Free
+    ;; expected {4(%ebp) | 16(%esp), 12(%esp), 8(%esp), (%ebp) | }
 
 ; 58414: 	l_v21691 = (struct pypy_object0 *)l_v21687;
 ; 58415: 	pypy_g_RPyReRaiseException(l_v21683, l_v21691);
@@ -376,11 +382,13 @@
 
 	push	esi
 	call	_pypy_debug_alloc_stop
+    ;; expected {4(%ebp) | 12(%esp), 8(%esp), 4(%esp), (%ebp) | }
 
 ; 58399: 	OP_RAW_FREE(l_v21679, /* nothing */);
 
 	push	esi
 	call	_PyObject_Free
+    ;; expected {4(%ebp) | 16(%esp), 12(%esp), 8(%esp), (%ebp) | }
 
 ; 58400: 	l_v21637 = l_v21678;
 ; 58401: 	l_v21638 = l_mantissa_0;
diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py
--- a/pypy/translator/c/gcc/trackgcroot.py
+++ b/pypy/translator/c/gcc/trackgcroot.py
@@ -527,8 +527,9 @@
         target = match.group("target")
         if target == self.ESP:
             # only for  andl $-16, %esp  used to align the stack in main().
-            # main() should not be seen at all.
-            raise AssertionError("instruction unexpected outside of main()")
+            # main() should not be seen at all.  But on e.g. MSVC we see
+            # the instruction somewhere else too...
+            return InsnCannotFollowEsp()
         else:
             return self.binary_insn(line)
 
@@ -1176,7 +1177,7 @@
     r_gcroot_marker = re.compile(r"$1") # never matches
     r_gcroot_marker_var = re.compile(r"DWORD PTR .+_constant_always_one_.+pypy_asm_gcroot")
     r_gcnocollect_marker = re.compile(r"\spypy_asm_gc_nocollect\(("+OPERAND+")\);")
-    r_bottom_marker = re.compile(r"; .+\tpypy_asm_stack_bottom\(\);")
+    r_bottom_marker = re.compile(r"; .+\spypy_asm_stack_bottom\(\);")
 
     FUNCTIONS_NOT_RETURNING = {
         '__exit': None,
diff --git a/pypy/translator/c/genc.py b/pypy/translator/c/genc.py
--- a/pypy/translator/c/genc.py
+++ b/pypy/translator/c/genc.py
@@ -570,7 +570,10 @@
             mk.definition('ASMFILES', sfiles)
             mk.definition('ASMLBLFILES', lblsfiles)
             mk.definition('GCMAPFILES', gcmapfiles)
-            mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g')
+            if sys.platform == 'win32':
+                mk.definition('DEBUGFLAGS', '/Zi')
+            else:
+                mk.definition('DEBUGFLAGS', '-O2 -fomit-frame-pointer -g')
 
             if self.config.translation.shared:
                 mk.definition('PYPY_MAIN_FUNCTION', "pypy_main_startup")
@@ -623,7 +626,10 @@
                 mk.rule('.PRECIOUS', '%.s', "# don't remove .s files if Ctrl-C'ed")
 
         else:
-            mk.definition('DEBUGFLAGS', '-O1 -g')
+            if sys.platform == 'win32':
+                mk.definition('DEBUGFLAGS', '/Zi')
+            else:
+                mk.definition('DEBUGFLAGS', '-O1 -g')
         mk.write()
         #self.translator.platform,
         #                           ,
diff --git a/pypy/translator/c/node.py b/pypy/translator/c/node.py
--- a/pypy/translator/c/node.py
+++ b/pypy/translator/c/node.py
@@ -1031,7 +1031,7 @@
             if (issubclass(value, BaseException) and
                 value.__module__ == 'exceptions'):
                 return 'PyExc_' + value.__name__
-            if value is py.code._AssertionError:
+            if issubclass(value, AssertionError):
                 return 'PyExc_AssertionError'
             if value is _StackOverflow:
                 return 'PyExc_RuntimeError'
diff --git a/pypy/translator/c/src/main.h b/pypy/translator/c/src/main.h
--- a/pypy/translator/c/src/main.h
+++ b/pypy/translator/c/src/main.h
@@ -79,6 +79,7 @@
     fprintf(stderr, "Fatal error during initialization: %s\n", errmsg);
 #endif
     abort();
+    return 1;
 }
 
 int PYPY_MAIN_FUNCTION(int argc, char *argv[])
diff --git a/pypy/translator/c/test/test_newgc.py b/pypy/translator/c/test/test_newgc.py
--- a/pypy/translator/c/test/test_newgc.py
+++ b/pypy/translator/c/test/test_newgc.py
@@ -1117,6 +1117,7 @@
         S = lltype.GcStruct('S', ('u', lltype.Ptr(U)))
         A = lltype.GcArray(lltype.Ptr(S))
         filename = self.filename_dump_typeids_z
+        open_flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
 
         def fn():
             s = lltype.malloc(S)
@@ -1128,7 +1129,7 @@
             #
             p = rgc.get_typeids_z()
             s = ''.join([p[i] for i in range(len(p))])
-            fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0666)
+            fd = os.open(filename, open_flags, 0666)
             os.write(fd, s)
             os.close(fd)
             return 0
@@ -1137,7 +1138,7 @@
 
     def test_write_typeids_z(self):
         self.run("write_typeids_z")
-        f = open(self.filename_dump_typeids_z)
+        f = open(self.filename_dump_typeids_z, 'rb')
         data_z = f.read()
         f.close()
         import zlib
diff --git a/pypy/translator/goal/app_main.py b/pypy/translator/goal/app_main.py
--- a/pypy/translator/goal/app_main.py
+++ b/pypy/translator/goal/app_main.py
@@ -143,6 +143,7 @@
     for key, value in items:
         print '  --jit %s=N %slow-level JIT parameter (default %s)' % (
             key, ' '*(18-len(key)), value)
+    print '  --jit off                  turn off the JIT'
 
 def print_version(*args):
     print "Python", sys.version
diff --git a/pypy/translator/goal/translate.py b/pypy/translator/goal/translate.py
--- a/pypy/translator/goal/translate.py
+++ b/pypy/translator/goal/translate.py
@@ -103,6 +103,8 @@
     specname = os.path.splitext(os.path.basename(targetspec))[0]
     sys.path.insert(0, os.path.dirname(targetspec))
     mod = __import__(specname)
+    if 'target' not in mod.__dict__:
+        raise Exception("file %r is not a valid targetxxx.py." % (targetspec,))
     return mod.__dict__
 
 def parse_options_and_load_target():
@@ -184,7 +186,7 @@
             print "\n\nTarget specific help:\n\n"
             targetspec_dic['print_help'](config)
         print "\n\nFor detailed descriptions of the command line options see"
-        print "http://codespeak.net/pypy/dist/pypy/doc/config/commandline.html"
+        print "http://pypy.readthedocs.org/en/latest/config/commandline.html"
         sys.exit(0)
     
     return targetspec_dic, translateconfig, config, args
diff --git a/pypy/translator/jvm/opcodes.py b/pypy/translator/jvm/opcodes.py
--- a/pypy/translator/jvm/opcodes.py
+++ b/pypy/translator/jvm/opcodes.py
@@ -98,6 +98,7 @@
     'jit_marker':               Ignore,
     'jit_force_virtualizable':  Ignore,
     'jit_force_virtual':        DoNothing,
+    'jit_force_quasi_immutable': Ignore,
 
     'debug_assert':              [], # TODO: implement?
     'debug_start_traceback':    Ignore,
diff --git a/pypy/translator/jvm/src/pypy/PyPy.java b/pypy/translator/jvm/src/pypy/PyPy.java
--- a/pypy/translator/jvm/src/pypy/PyPy.java
+++ b/pypy/translator/jvm/src/pypy/PyPy.java
@@ -964,12 +964,15 @@
         return a + File.separator + b;
     }
 
-    public String ll_strtod_formatd(String format, double d)
+    public String ll_strtod_formatd(double d, char code, int precision, int flags)
     {
         // XXX: this is really a quick hack to make things work.
         // it should disappear, because this function is not
         // supported by ootypesystem.
-        return Double.toString(d); // XXX: we are ignoring "format"
+        DecimalFormat format = new DecimalFormat("0.###");
+        format.setMinimumFractionDigits(precision);
+        format.setMaximumFractionDigits(precision);
+        return format.format(d);
     }
 
     // ----------------------------------------------------------------------
diff --git a/pypy/translator/jvm/test/test_float.py b/pypy/translator/jvm/test/test_float.py
--- a/pypy/translator/jvm/test/test_float.py
+++ b/pypy/translator/jvm/test/test_float.py
@@ -22,3 +22,14 @@
 
     def test_r_singlefloat(self):
         py.test.skip("not implemented: single-precision floats")
+
+    def test_format_float(self):
+        from pypy.rlib.rfloat import _formatd
+        def fn(precision):
+            return _formatd(10.01, 'd', precision, 0)
+
+        res = self.interpret(fn, [2])
+        assert res == "10.01"
+
+        res = self.interpret(fn, [1])
+        assert res == "10.0"
diff --git a/pypy/translator/platform/darwin.py b/pypy/translator/platform/darwin.py
--- a/pypy/translator/platform/darwin.py
+++ b/pypy/translator/platform/darwin.py
@@ -68,12 +68,10 @@
 
 class Darwin_i386(Darwin):
     name = "darwin_i386"
-    link_flags = ('-arch', 'i386', '-mmacosx-version-min=10.4')
-    cflags = ('-arch', 'i386', '-O3', '-fomit-frame-pointer',
-              '-mmacosx-version-min=10.4')
+    link_flags = ('-arch', 'i386')
+    cflags = ('-arch', 'i386', '-O3', '-fomit-frame-pointer')
 
 class Darwin_x86_64(Darwin):
     name = "darwin_x86_64"
-    link_flags = ('-arch', 'x86_64', '-mmacosx-version-min=10.4')
-    cflags = ('-arch', 'x86_64', '-O3', '-fomit-frame-pointer',
-              '-mmacosx-version-min=10.4')
+    link_flags = ('-arch', 'x86_64')
+    cflags = ('-arch', 'x86_64', '-O3', '-fomit-frame-pointer')
diff --git a/pytest.py b/pytest.py
old mode 100644
new mode 100755
--- a/pytest.py
+++ b/pytest.py
@@ -1,7 +1,6 @@
+#!/usr/bin/env python
 """
 unit and functional testing with Python.
-(pypy version of startup script)
-see http://pytest.org for details.
 """
 __all__ = ['main']
 
@@ -9,23 +8,6 @@
 from _pytest import core as cmdline
 from _pytest import __version__
 
-# This pytest.py script is located in the pypy source tree
-# which has a copy of pytest and py within its source tree.
-# If the environment also has an installed version of pytest/py
-# we are bound to get warnings so we disable them.
-# XXX eventually pytest and py should not be inlined shipped
-# with the pypy source code but become a requirement for installation.
-
-import warnings
-warnings.filterwarnings("ignore",
-    "Module py was already imported", category=UserWarning)
-warnings.filterwarnings("ignore",
-    "Module _pytest was already imported",
-    category=UserWarning)
-warnings.filterwarnings("ignore",
-    "Module pytest was already imported",
-    category=UserWarning)
-
 if __name__ == '__main__': # if run as a script or by 'python -m pytest'
     raise SystemExit(main())
 else:

