[pypy-commit] pypy py3k: hg merge default

antocuni noreply at buildbot.pypy.org
Mon Apr 16 14:01:22 CEST 2012


Author: Antonio Cuni <anto.cuni at gmail.com>
Branch: py3k
Changeset: r54409:f28e32b3135a
Date: 2012-04-16 13:50 +0200
http://bitbucket.org/pypy/pypy/changeset/f28e32b3135a/

Log:	hg merge default

diff too long, truncating to 10000 out of 14415 lines

diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.1.0.dev4'
+__version__ = '2.2.4.dev2'
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -2,35 +2,25 @@
 support for presenting detailed information in failing assertions.
 """
 import py
-import imp
-import marshal
-import struct
 import sys
 import pytest
 from _pytest.monkeypatch import monkeypatch
-from _pytest.assertion import reinterpret, util
-
-try:
-    from _pytest.assertion.rewrite import rewrite_asserts
-except ImportError:
-    rewrite_asserts = None
-else:
-    import ast
+from _pytest.assertion import util
 
 def pytest_addoption(parser):
     group = parser.getgroup("debugconfig")
-    group.addoption('--assertmode', action="store", dest="assertmode",
-                    choices=("on", "old", "off", "default"), default="default",
-                    metavar="on|old|off",
+    group.addoption('--assert', action="store", dest="assertmode",
+                    choices=("rewrite", "reinterp", "plain",),
+                    default="rewrite", metavar="MODE",
                     help="""control assertion debugging tools.
-'off' performs no assertion debugging.
-'old' reinterprets the expressions in asserts to glean information.
-'on' (the default) rewrites the assert statements in test modules to provide
-sub-expression results.""")
+'plain' performs no assertion debugging.
+'reinterp' reinterprets assert statements after they fail, to provide assertion expression information.
+'rewrite' (the default) rewrites assert statements in test modules on import
+to provide assert expression information. """)
     group.addoption('--no-assert', action="store_true", default=False,
-        dest="noassert", help="DEPRECATED equivalent to --assertmode=off")
+        dest="noassert", help="DEPRECATED equivalent to --assert=plain")
     group.addoption('--nomagic', action="store_true", default=False,
-        dest="nomagic", help="DEPRECATED equivalent to --assertmode=off")
+        dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
 
 class AssertionState:
     """State for the assertion plugin."""
@@ -40,89 +30,90 @@
         self.trace = config.trace.root.get("assertion")
 
 def pytest_configure(config):
-    warn_about_missing_assertion()
     mode = config.getvalue("assertmode")
     if config.getvalue("noassert") or config.getvalue("nomagic"):
-        if mode not in ("off", "default"):
-            raise pytest.UsageError("assertion options conflict")
-        mode = "off"
-    elif mode == "default":
-        mode = "on"
-    if mode != "off":
-        def callbinrepr(op, left, right):
-            hook_result = config.hook.pytest_assertrepr_compare(
-                config=config, op=op, left=left, right=right)
-            for new_expl in hook_result:
-                if new_expl:
-                    return '\n~'.join(new_expl)
+        mode = "plain"
+    if mode == "rewrite":
+        try:
+            import ast
+        except ImportError:
+            mode = "reinterp"
+        else:
+            if sys.platform.startswith('java'):
+                mode = "reinterp"
+    if mode != "plain":
+        _load_modules(mode)
         m = monkeypatch()
         config._cleanup.append(m.undo)
         m.setattr(py.builtin.builtins, 'AssertionError',
                   reinterpret.AssertionError)
-        m.setattr(util, '_reprcompare', callbinrepr)
-    if mode == "on" and rewrite_asserts is None:
-        mode = "old"
+    hook = None
+    if mode == "rewrite":
+        hook = rewrite.AssertionRewritingHook()
+        sys.meta_path.append(hook)
+    warn_about_missing_assertion(mode)
     config._assertstate = AssertionState(config, mode)
+    config._assertstate.hook = hook
     config._assertstate.trace("configured with mode set to %r" % (mode,))
 
-def _write_pyc(co, source_path):
-    if hasattr(imp, "cache_from_source"):
-        # Handle PEP 3147 pycs.
-        pyc = py.path.local(imp.cache_from_source(str(source_path)))
-        pyc.ensure()
-    else:
-        pyc = source_path + "c"
-    mtime = int(source_path.mtime())
-    fp = pyc.open("wb")
-    try:
-        fp.write(imp.get_magic())
-        fp.write(struct.pack("<l", mtime))
-        marshal.dump(co, fp)
-    finally:
-        fp.close()
-    return pyc
+def pytest_unconfigure(config):
+    hook = config._assertstate.hook
+    if hook is not None:
+        sys.meta_path.remove(hook)
 
-def before_module_import(mod):
-    if mod.config._assertstate.mode != "on":
-        return
-    # Some deep magic: load the source, rewrite the asserts, and write a
-    # fake pyc, so that it'll be loaded when the module is imported.
-    source = mod.fspath.read()
-    try:
-        tree = ast.parse(source)
-    except SyntaxError:
-        # Let this pop up again in the real import.
-        mod.config._assertstate.trace("failed to parse: %r" % (mod.fspath,))
-        return
-    rewrite_asserts(tree)
-    try:
-        co = compile(tree, str(mod.fspath), "exec")
-    except SyntaxError:
-        # It's possible that this error is from some bug in the assertion
-        # rewriting, but I don't know of a fast way to tell.
-        mod.config._assertstate.trace("failed to compile: %r" % (mod.fspath,))
-        return
-    mod._pyc = _write_pyc(co, mod.fspath)
-    mod.config._assertstate.trace("wrote pyc: %r" % (mod._pyc,))
+def pytest_collection(session):
+    # this hook is only called when test modules are collected
+    # so for example not in the master process of pytest-xdist
+    # (which does not collect test modules)
+    hook = session.config._assertstate.hook
+    if hook is not None:
+        hook.set_session(session)
 
-def after_module_import(mod):
-    if not hasattr(mod, "_pyc"):
-        return
-    state = mod.config._assertstate
-    try:
-        mod._pyc.remove()
-    except py.error.ENOENT:
-        state.trace("couldn't find pyc: %r" % (mod._pyc,))
-    else:
-        state.trace("removed pyc: %r" % (mod._pyc,))
+def pytest_runtest_setup(item):
+    def callbinrepr(op, left, right):
+        hook_result = item.ihook.pytest_assertrepr_compare(
+            config=item.config, op=op, left=left, right=right)
+        for new_expl in hook_result:
+            if new_expl:
+                res = '\n~'.join(new_expl)
+                if item.config.getvalue("assertmode") == "rewrite":
+                    # The result will be fed back to a Python % formatting
+                    # operation, which will fail if there are extraneous
+                    # '%'s in the string. Escape them here.
+                    res = res.replace("%", "%%")
+                return res
+    util._reprcompare = callbinrepr
 
-def warn_about_missing_assertion():
+def pytest_runtest_teardown(item):
+    util._reprcompare = None
+
+def pytest_sessionfinish(session):
+    hook = session.config._assertstate.hook
+    if hook is not None:
+        hook.session = None
+
+def _load_modules(mode):
+    """Lazily import assertion related code."""
+    global rewrite, reinterpret
+    from _pytest.assertion import reinterpret
+    if mode == "rewrite":
+        from _pytest.assertion import rewrite
+
+def warn_about_missing_assertion(mode):
     try:
         assert False
     except AssertionError:
         pass
     else:
-        sys.stderr.write("WARNING: failing tests may report as passing because "
-        "assertions are turned off!  (are you using python -O?)\n")
+        if mode == "rewrite":
+            specifically = ("assertions which are not in test modules "
+                            "will be ignored")
+        else:
+            specifically = "failing tests may report as passing"
+
+        sys.stderr.write("WARNING: " + specifically +
+                        " because assert statements are not executed "
+                        "by the underlying Python interpreter "
+                        "(are you using python -O?)\n")
 
 pytest_assertrepr_compare = util.assertrepr_compare
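
For context: the plugin above now installs assertion rewriting as a PEP 302 meta-path importer (the sys.meta_path.append(hook) call) instead of the old before/after module-import callbacks. A minimal, self-contained sketch of that find_module/load_module protocol follows; the module name and body are purely illustrative and not part of pytest:

    import imp
    import sys

    class ExampleHook(object):
        """Claim a module in find_module(), then build it in load_module().
        AssertionRewritingHook follows the same two-step protocol, but
        compiles a rewritten AST for matching test modules instead."""

        def find_module(self, name, path=None):
            if name != "example_mod":      # made-up module name
                return None                # let the normal import machinery handle it
            return self                    # act as the loader as well

        def load_module(self, name):
            mod = sys.modules.setdefault(name, imp.new_module(name))
            mod.__file__ = "<%s>" % name
            mod.__loader__ = self
            exec("ANSWER = 42", mod.__dict__)
            return mod

    sys.meta_path.append(ExampleHook())
    import example_mod
    assert example_mod.ANSWER == 42
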
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -53,7 +53,7 @@
     if should_fail:
         return ("(assertion failed, but when it was re-run for "
                 "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --no-assert)")
+                "compute assert expression before the assert or use --assert=plain)")
 
 def run(offending_line, frame=None):
     if frame is None:
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -1,8 +1,7 @@
 import py
 import sys, inspect
 from compiler import parse, ast, pycodegen
-from _pytest.assertion.util import format_explanation
-from _pytest.assertion.reinterpret import BuiltinAssertionError
+from _pytest.assertion.util import format_explanation, BuiltinAssertionError
 
 passthroughex = py.builtin._sysex
 
@@ -482,7 +481,7 @@
     if should_fail:
         return ("(assertion failed, but when it was re-run for "
                 "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --nomagic)")
+                "compute assert expression before the assert or use --assert=plain)")
     else:
         return None
 
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,7 +1,6 @@
 import sys
 import py
-
-BuiltinAssertionError = py.builtin.builtins.AssertionError
+from _pytest.assertion.util import BuiltinAssertionError
 
 class AssertionError(BuiltinAssertionError):
     def __init__(self, *args):
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -1,14 +1,258 @@
 """Rewrite assertion AST to produce nice error messages"""
 
 import ast
-import collections
+import errno
 import itertools
+import imp
+import marshal
+import os
+import struct
 import sys
+import types
 
 import py
 from _pytest.assertion import util
 
 
+# Windows gives ENOENT in places *nix gives ENOTDIR.
+if sys.platform.startswith("win"):
+    PATH_COMPONENT_NOT_DIR = errno.ENOENT
+else:
+    PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
+
+# py.test caches rewritten pycs in __pycache__.
+if hasattr(imp, "get_tag"):
+    PYTEST_TAG = imp.get_tag() + "-PYTEST"
+else:
+    if hasattr(sys, "pypy_version_info"):
+        impl = "pypy"
+    elif sys.platform == "java":
+        impl = "jython"
+    else:
+        impl = "cpython"
+    ver = sys.version_info
+    PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
+    del ver, impl
+
+PYC_EXT = ".py" + "c" if __debug__ else "o"
+PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
+
+REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+
+class AssertionRewritingHook(object):
+    """Import hook which rewrites asserts."""
+
+    def __init__(self):
+        self.session = None
+        self.modules = {}
+
+    def set_session(self, session):
+        self.fnpats = session.config.getini("python_files")
+        self.session = session
+
+    def find_module(self, name, path=None):
+        if self.session is None:
+            return None
+        sess = self.session
+        state = sess.config._assertstate
+        state.trace("find_module called for: %s" % name)
+        names = name.rsplit(".", 1)
+        lastname = names[-1]
+        pth = None
+        if path is not None and len(path) == 1:
+            pth = path[0]
+        if pth is None:
+            try:
+                fd, fn, desc = imp.find_module(lastname, path)
+            except ImportError:
+                return None
+            if fd is not None:
+                fd.close()
+            tp = desc[2]
+            if tp == imp.PY_COMPILED:
+                if hasattr(imp, "source_from_cache"):
+                    fn = imp.source_from_cache(fn)
+                else:
+                    fn = fn[:-1]
+            elif tp != imp.PY_SOURCE:
+                # Don't know what this is.
+                return None
+        else:
+            fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
+        fn_pypath = py.path.local(fn)
+        # Is this a test file?
+        if not sess.isinitpath(fn):
+            # We have to be very careful here because imports in this code can
+            # trigger a cycle.
+            self.session = None
+            try:
+                for pat in self.fnpats:
+                    if fn_pypath.fnmatch(pat):
+                        state.trace("matched test file %r" % (fn,))
+                        break
+                else:
+                    return None
+            finally:
+                self.session = sess
+        else:
+            state.trace("matched test file (was specified on cmdline): %r" % (fn,))
+        # The requested module looks like a test file, so rewrite it. This is
+        # the most magical part of the process: load the source, rewrite the
+        # asserts, and load the rewritten source. We also cache the rewritten
+        # module code in a special pyc. We must be aware of the possibility of
+        # concurrent py.test processes rewriting and loading pycs. To avoid
+        # tricky race conditions, we maintain the following invariant: The
+        # cached pyc is always a complete, valid pyc. Operations on it must be
+        # atomic. POSIX's atomic rename comes in handy.
+        write = not sys.dont_write_bytecode
+        cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
+        if write:
+            try:
+                os.mkdir(cache_dir)
+            except OSError:
+                e = sys.exc_info()[1].errno
+                if e == errno.EEXIST:
+                    # Either the __pycache__ directory already exists (the
+                    # common case) or it's blocked by a non-dir node. In the
+                    # latter case, we'll ignore it in _write_pyc.
+                    pass
+                elif e == PATH_COMPONENT_NOT_DIR:
+                    # One of the path components was not a directory, likely
+                    # because we're in a zip file.
+                    write = False
+                elif e == errno.EACCES:
+                    state.trace("read only directory: %r" % (fn_pypath.dirname,))
+                    write = False
+                else:
+                    raise
+        cache_name = fn_pypath.basename[:-3] + PYC_TAIL
+        pyc = os.path.join(cache_dir, cache_name)
+        # Notice that even if we're in a read-only directory, I'm going to check
+        # for a cached pyc. This may not be optimal...
+        co = _read_pyc(fn_pypath, pyc)
+        if co is None:
+            state.trace("rewriting %r" % (fn,))
+            co = _rewrite_test(state, fn_pypath)
+            if co is None:
+                # Probably a SyntaxError in the test.
+                return None
+            if write:
+                _make_rewritten_pyc(state, fn_pypath, pyc, co)
+        else:
+            state.trace("found cached rewritten pyc for %r" % (fn,))
+        self.modules[name] = co, pyc
+        return self
+
+    def load_module(self, name):
+        co, pyc = self.modules.pop(name)
+        # I wish I could just call imp.load_compiled here, but __file__ has to
+        # be set properly. In Python 3.2+, this all would be handled correctly
+        # by load_compiled.
+        mod = sys.modules[name] = imp.new_module(name)
+        try:
+            mod.__file__ = co.co_filename
+            # Normally, this attribute is 3.2+.
+            mod.__cached__ = pyc
+            py.builtin.exec_(co, mod.__dict__)
+        except:
+            del sys.modules[name]
+            raise
+        return sys.modules[name]
+
+def _write_pyc(co, source_path, pyc):
+    # Technically, we don't have to have the same pyc format as (C)Python, since
+    # these "pycs" should never be seen by builtin import. However, there's
+    # little reason to deviate, and I hope sometime to be able to use
+    # imp.load_compiled to load them. (See the comment in load_module above.)
+    mtime = int(source_path.mtime())
+    try:
+        fp = open(pyc, "wb")
+    except IOError:
+        err = sys.exc_info()[1].errno
+        if err == PATH_COMPONENT_NOT_DIR:
+            # This happens when we get an EEXIST in find_module creating the
+            # __pycache__ directory and __pycache__ is blocked by some non-dir node.
+            return False
+        raise
+    try:
+        fp.write(imp.get_magic())
+        fp.write(struct.pack("<l", mtime))
+        marshal.dump(co, fp)
+    finally:
+        fp.close()
+    return True
+
+RN = "\r\n".encode("utf-8")
+N = "\n".encode("utf-8")
+
+def _rewrite_test(state, fn):
+    """Try to read and rewrite *fn* and return the code object."""
+    try:
+        source = fn.read("rb")
+    except EnvironmentError:
+        return None
+    # On Python versions which are not 2.7 and less than or equal to 3.1, the
+    # parser expects *nix newlines.
+    if REWRITE_NEWLINES:
+        source = source.replace(RN, N) + N
+    try:
+        tree = ast.parse(source)
+    except SyntaxError:
+        # Let this pop up again in the real import.
+        state.trace("failed to parse: %r" % (fn,))
+        return None
+    rewrite_asserts(tree)
+    try:
+        co = compile(tree, fn.strpath, "exec")
+    except SyntaxError:
+        # It's possible that this error is from some bug in the
+        # assertion rewriting, but I don't know of a fast way to tell.
+        state.trace("failed to compile: %r" % (fn,))
+        return None
+    return co
+
+def _make_rewritten_pyc(state, fn, pyc, co):
+    """Try to dump rewritten code to *pyc*."""
+    if sys.platform.startswith("win"):
+        # Windows grants exclusive access to open files and doesn't have atomic
+        # rename, so just write into the final file.
+        _write_pyc(co, fn, pyc)
+    else:
+        # When not on windows, assume rename is atomic. Dump the code object
+        # into a file specific to this process and atomically replace it.
+        proc_pyc = pyc + "." + str(os.getpid())
+        if _write_pyc(co, fn, proc_pyc):
+            os.rename(proc_pyc, pyc)
+
+def _read_pyc(source, pyc):
+    """Possibly read a py.test pyc containing rewritten code.
+
+    Return rewritten code if successful or None if not.
+    """
+    try:
+        fp = open(pyc, "rb")
+    except IOError:
+        return None
+    try:
+        try:
+            mtime = int(source.mtime())
+            data = fp.read(8)
+        except EnvironmentError:
+            return None
+        # Check for invalid or out of date pyc file.
+        if (len(data) != 8 or
+            data[:4] != imp.get_magic() or
+            struct.unpack("<l", data[4:])[0] != mtime):
+            return None
+        co = marshal.load(fp)
+        if not isinstance(co, types.CodeType):
+            # That's interesting....
+            return None
+        return co
+    finally:
+        fp.close()
+
+
 def rewrite_asserts(mod):
     """Rewrite the assert statements in mod."""
     AssertionRewriter().run(mod)
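
For reference, the cached pyc written by _write_pyc above and validated by _read_pyc uses the standard layout: four bytes of imp.get_magic(), four bytes of little-endian source mtime, then the marshalled code object. A stripped-down sketch of the same round trip (helper names and paths are illustrative):

    import imp, marshal, struct

    def dump_code(co, mtime, pyc_path):
        # layout written by _write_pyc: magic + mtime + marshalled code object
        fp = open(pyc_path, "wb")
        try:
            fp.write(imp.get_magic())
            fp.write(struct.pack("<l", int(mtime)))
            marshal.dump(co, fp)
        finally:
            fp.close()

    def load_code(mtime, pyc_path):
        # checks performed by _read_pyc before trusting the cache
        fp = open(pyc_path, "rb")
        try:
            data = fp.read(8)
            if len(data) != 8 or data[:4] != imp.get_magic():
                return None                      # truncated or wrong interpreter
            if struct.unpack("<l", data[4:])[0] != int(mtime):
                return None                      # source changed since caching
            return marshal.load(fp)
        finally:
            fp.close()
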
@@ -17,13 +261,8 @@
 _saferepr = py.io.saferepr
 from _pytest.assertion.util import format_explanation as _format_explanation
 
-def _format_boolop(operands, explanations, is_or):
-    show_explanations = []
-    for operand, expl in zip(operands, explanations):
-        show_explanations.append(expl)
-        if operand == is_or:
-            break
-    return "(" + (is_or and " or " or " and ").join(show_explanations) + ")"
+def _format_boolop(explanations, is_or):
+    return "(" + (is_or and " or " or " and ").join(explanations) + ")"
 
 def _call_reprcompare(ops, results, expls, each_obj):
     for i, res, expl in zip(range(len(ops)), results, expls):
@@ -109,8 +348,8 @@
                     return
                 lineno += len(doc) - 1
                 expect_docstring = False
-            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 and
-                  item.identifier != "__future__"):
+            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
+                  item.module != "__future__"):
                 lineno = item.lineno
                 break
             pos += 1
@@ -118,9 +357,9 @@
                    for alias in aliases]
         mod.body[pos:pos] = imports
         # Collect asserts.
-        nodes = collections.deque([mod])
+        nodes = [mod]
         while nodes:
-            node = nodes.popleft()
+            node = nodes.pop()
             for name, field in ast.iter_fields(node):
                 if isinstance(field, list):
                     new = []
@@ -143,7 +382,7 @@
         """Get a new variable."""
         # Use a character invalid in python identifiers to avoid clashing.
         name = "@py_assert" + str(next(self.variable_counter))
-        self.variables.add(name)
+        self.variables.append(name)
         return name
 
     def assign(self, expr):
@@ -198,7 +437,8 @@
             # There's already a message. Don't mess with it.
             return [assert_]
         self.statements = []
-        self.variables = set()
+        self.cond_chain = ()
+        self.variables = []
         self.variable_counter = itertools.count()
         self.stack = []
         self.on_failure = []
@@ -220,11 +460,11 @@
         else:
             raise_ = ast.Raise(exc, None, None)
         body.append(raise_)
-        # Delete temporary variables.
-        names = [ast.Name(name, ast.Del()) for name in self.variables]
-        if names:
-            delete = ast.Delete(names)
-            self.statements.append(delete)
+        # Clear temporary variables by setting them to None.
+        if self.variables:
+            variables = [ast.Name(name, ast.Store()) for name in self.variables]
+            clear = ast.Assign(variables, ast.Name("None", ast.Load()))
+            self.statements.append(clear)
         # Fix line numbers.
         for stmt in self.statements:
             set_location(stmt, assert_.lineno, assert_.col_offset)
@@ -240,21 +480,38 @@
         return name, self.explanation_param(expr)
 
     def visit_BoolOp(self, boolop):
-        operands = []
-        explanations = []
+        res_var = self.variable()
+        expl_list = self.assign(ast.List([], ast.Load()))
+        app = ast.Attribute(expl_list, "append", ast.Load())
+        is_or = int(isinstance(boolop.op, ast.Or))
+        body = save = self.statements
+        fail_save = self.on_failure
+        levels = len(boolop.values) - 1
         self.push_format_context()
-        for operand in boolop.values:
-            res, explanation = self.visit(operand)
-            operands.append(res)
-            explanations.append(explanation)
-        expls = ast.Tuple([ast.Str(expl) for expl in explanations], ast.Load())
-        is_or = ast.Num(isinstance(boolop.op, ast.Or))
-        expl_template = self.helper("format_boolop",
-                                    ast.Tuple(operands, ast.Load()), expls,
-                                    is_or)
+        # Process each operand, short-circuiting if needed.
+        for i, v in enumerate(boolop.values):
+            if i:
+                fail_inner = []
+                self.on_failure.append(ast.If(cond, fail_inner, []))
+                self.on_failure = fail_inner
+            self.push_format_context()
+            res, expl = self.visit(v)
+            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
+            expl_format = self.pop_format_context(ast.Str(expl))
+            call = ast.Call(app, [expl_format], [], None, None)
+            self.on_failure.append(ast.Expr(call))
+            if i < levels:
+                cond = res
+                if is_or:
+                    cond = ast.UnaryOp(ast.Not(), cond)
+                inner = []
+                self.statements.append(ast.If(cond, inner, []))
+                self.statements = body = inner
+        self.statements = save
+        self.on_failure = fail_save
+        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
         expl = self.pop_format_context(expl_template)
-        res = self.assign(ast.BoolOp(boolop.op, operands))
-        return res, self.explanation_param(expl)
+        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
 
     def visit_UnaryOp(self, unary):
         pattern = unary_map[unary.op.__class__]
@@ -288,7 +545,7 @@
             new_star, expl = self.visit(call.starargs)
             arg_expls.append("*" + expl)
         if call.kwargs:
-            new_kwarg, expl = self.visit(call.kwarg)
+            new_kwarg, expl = self.visit(call.kwargs)
             arg_expls.append("**" + expl)
         expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
         new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
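
To illustrate what the AssertionRewriter above produces, here is a small hand-driven sketch of the same parse/rewrite/compile pipeline that the import hook runs; the test source is made up:

    import ast
    from _pytest.assertion.rewrite import rewrite_asserts

    source = "def test_answer():\n    assert 6 * 7 == 43\n"
    tree = ast.parse(source)
    rewrite_asserts(tree)          # inserts the @py_assert temporaries and the
                                   # explanation-building failure branch
    co = compile(tree, "<example>", "exec")
    ns = {}
    exec(co, ns)
    # calling ns["test_answer"]() now raises an AssertionError whose message
    # includes the sub-expression values (e.g. "assert 42 == 43") instead of
    # a bare failed assert.
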
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -2,6 +2,7 @@
 
 import py
 
+BuiltinAssertionError = py.builtin.builtins.AssertionError
 
 # The _reprcompare attribute on the util module is used by the new assertion
 # interpretation code and assertion rewriter to detect this plugin was
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -11,22 +11,22 @@
     group._addoption('-s', action="store_const", const="no", dest="capture",
         help="shortcut for --capture=no.")
 
+@pytest.mark.tryfirst
+def pytest_cmdline_parse(pluginmanager, args):
+    # we want to perform capturing already for plugin/conftest loading
+    if '-s' in args or "--capture=no" in args:
+        method = "no"
+    elif hasattr(os, 'dup') and '--capture=sys' not in args:
+        method = "fd"
+    else:
+        method = "sys"
+    capman = CaptureManager(method)
+    pluginmanager.register(capman, "capturemanager")
+
 def addouterr(rep, outerr):
-    repr = getattr(rep, 'longrepr', None)
-    if not hasattr(repr, 'addsection'):
-        return
     for secname, content in zip(["out", "err"], outerr):
         if content:
-            repr.addsection("Captured std%s" % secname, content.rstrip())
-
-def pytest_unconfigure(config):
-    # registered in config.py during early conftest.py loading
-    capman = config.pluginmanager.getplugin('capturemanager')
-    while capman._method2capture:
-        name, cap = capman._method2capture.popitem()
-        # XXX logging module may wants to close it itself on process exit
-        # otherwise we could do finalization here and call "reset()".
-        cap.suspend()
+            rep.sections.append(("Captured std%s" % secname, content))
 
 class NoCapture:
     def startall(self):
@@ -39,8 +39,9 @@
         return "", ""
 
 class CaptureManager:
-    def __init__(self):
+    def __init__(self, defaultmethod=None):
         self._method2capture = {}
+        self._defaultmethod = defaultmethod
 
     def _maketempfile(self):
         f = py.std.tempfile.TemporaryFile()
@@ -65,14 +66,6 @@
         else:
             raise ValueError("unknown capturing method: %r" % method)
 
-    def _getmethod_preoptionparse(self, args):
-        if '-s' in args or "--capture=no" in args:
-            return "no"
-        elif hasattr(os, 'dup') and '--capture=sys' not in args:
-            return "fd"
-        else:
-            return "sys"
-
     def _getmethod(self, config, fspath):
         if config.option.capture:
             method = config.option.capture
@@ -85,16 +78,22 @@
             method = "sys"
         return method
 
+    def reset_capturings(self):
+        for name, cap in self._method2capture.items():
+            cap.reset()
+
     def resumecapture_item(self, item):
         method = self._getmethod(item.config, item.fspath)
         if not hasattr(item, 'outerr'):
             item.outerr = ('', '') # we accumulate outerr on the item
         return self.resumecapture(method)
 
-    def resumecapture(self, method):
+    def resumecapture(self, method=None):
         if hasattr(self, '_capturing'):
             raise ValueError("cannot resume, already capturing with %r" %
                 (self._capturing,))
+        if method is None:
+            method = self._defaultmethod
         cap = self._method2capture.get(method)
         self._capturing = method
         if cap is None:
@@ -164,17 +163,6 @@
     def pytest_runtest_teardown(self, item):
         self.resumecapture_item(item)
 
-    def pytest__teardown_final(self, __multicall__, session):
-        method = self._getmethod(session.config, None)
-        self.resumecapture(method)
-        try:
-            rep = __multicall__.execute()
-        finally:
-            outerr = self.suspendcapture()
-        if rep:
-            addouterr(rep, outerr)
-        return rep
-
     def pytest_keyboard_interrupt(self, excinfo):
         if hasattr(self, '_capturing'):
             self.suspendcapture()
diff --git a/_pytest/config.py b/_pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -8,13 +8,15 @@
 def pytest_cmdline_parse(pluginmanager, args):
     config = Config(pluginmanager)
     config.parse(args)
-    if config.option.debug:
-        config.trace.root.setwriter(sys.stderr.write)
     return config
 
 def pytest_unconfigure(config):
-    for func in config._cleanup:
-        func()
+    while 1:
+        try:
+            fin = config._cleanup.pop()
+        except IndexError:
+            break
+        fin()
 
 class Parser:
     """ Parser for command line arguments. """
@@ -81,6 +83,7 @@
         self._inidict[name] = (help, type, default)
         self._ininames.append(name)
 
+
 class OptionGroup:
     def __init__(self, name, description="", parser=None):
         self.name = name
@@ -256,11 +259,14 @@
         self.hook = self.pluginmanager.hook
         self._inicache = {}
         self._cleanup = []
-    
+
     @classmethod
     def fromdictargs(cls, option_dict, args):
         """ constructor useable for subprocesses. """
         config = cls()
+        # XXX slightly crude way to initialize capturing
+        import _pytest.capture
+        _pytest.capture.pytest_cmdline_parse(config.pluginmanager, args)
         config._preparse(args, addopts=False)
         config.option.__dict__.update(option_dict)
         for x in config.option.plugins:
@@ -285,11 +291,10 @@
 
     def _setinitialconftest(self, args):
         # capture output during conftest init (#issue93)
-        from _pytest.capture import CaptureManager
-        capman = CaptureManager()
-        self.pluginmanager.register(capman, 'capturemanager')
-        # will be unregistered in capture.py's unconfigure()
-        capman.resumecapture(capman._getmethod_preoptionparse(args))
+        # XXX introduce load_conftest hook to avoid needing to know
+        # about capturing plugin here
+        capman = self.pluginmanager.getplugin("capturemanager")
+        capman.resumecapture()
         try:
             try:
                 self._conftest.setinitial(args)
@@ -334,6 +339,7 @@
         # Note that this can only be called once per testing process.
         assert not hasattr(self, 'args'), (
                 "can only parse cmdline args at most once per Config object")
+        self._origargs = args
         self._preparse(args)
         self._parser.hints.extend(self.pluginmanager._hints)
         args = self._parser.parse_setoption(args, self.option)
@@ -341,6 +347,14 @@
             args.append(py.std.os.getcwd())
         self.args = args
 
+    def addinivalue_line(self, name, line):
+        """ add a line to an ini-file option. The option must have been
+        declared but might not yet be set in which case the line becomes the
+        the first line in its value. """
+        x = self.getini(name)
+        assert isinstance(x, list)
+        x.append(line) # modifies the cached list inline
+
     def getini(self, name):
         """ return configuration value from an ini file. If the
         specified name hasn't been registered through a prior ``parse.addini``
@@ -422,7 +436,7 @@
 
 
 def getcfg(args, inibasenames):
-    args = [x for x in args if str(x)[0] != "-"]
+    args = [x for x in args if not str(x).startswith("-")]
     if not args:
         args = [py.path.local()]
     for arg in args:
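
The addinivalue_line() method added to Config above is meant to be called from a pytest_configure hook to append to a 'linelist' ini option such as "markers" (the core plugin in the next file does exactly that). A hypothetical conftest.py could use it the same way:

    # conftest.py -- hypothetical example, not part of this changeset
    def pytest_configure(config):
        # "markers" is declared as a 'linelist' ini option, so getini()
        # returns a list and addinivalue_line() appends one more line to it.
        config.addinivalue_line(
            "markers", "slow: marks a test as slow (deselect with -m 'not slow')")
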
diff --git a/_pytest/core.py b/_pytest/core.py
--- a/_pytest/core.py
+++ b/_pytest/core.py
@@ -16,11 +16,10 @@
  "junitxml resultlog doctest").split()
 
 class TagTracer:
-    def __init__(self, prefix="[pytest] "):
+    def __init__(self):
         self._tag2proc = {}
         self.writer = None
         self.indent = 0
-        self.prefix = prefix
 
     def get(self, name):
         return TagTracerSub(self, (name,))
@@ -30,7 +29,7 @@
             if args:
                 indent = "  " * self.indent
                 content = " ".join(map(str, args))
-                self.writer("%s%s%s\n" %(self.prefix, indent, content))
+                self.writer("%s%s [%s]\n" %(indent, content, ":".join(tags)))
         try:
             self._tag2proc[tags](tags, args)
         except KeyError:
@@ -212,6 +211,14 @@
             self.register(mod, modname)
             self.consider_module(mod)
 
+    def pytest_configure(self, config):
+        config.addinivalue_line("markers",
+            "tryfirst: mark a hook implementation function such that the "
+            "plugin machinery will try to call it first/as early as possible.")
+        config.addinivalue_line("markers",
+            "trylast: mark a hook implementation function such that the "
+            "plugin machinery will try to call it last/as late as possible.")
+
     def pytest_plugin_registered(self, plugin):
         import pytest
         dic = self.call_plugin(plugin, "pytest_namespace", {}) or {}
@@ -432,10 +439,7 @@
 def _preloadplugins():
     _preinit.append(PluginManager(load=True))
 
-def main(args=None, plugins=None):
-    """ returned exit code integer, after an in-process testing run
-    with the given command line arguments, preloading an optional list
-    of passed in plugin objects. """
+def _prepareconfig(args=None, plugins=None):
     if args is None:
         args = sys.argv[1:]
     elif isinstance(args, py.path.local):
@@ -449,13 +453,19 @@
     else: # subsequent calls to main will create a fresh instance
         _pluginmanager = PluginManager(load=True)
     hook = _pluginmanager.hook
+    if plugins:
+        for plugin in plugins:
+            _pluginmanager.register(plugin)
+    return hook.pytest_cmdline_parse(
+            pluginmanager=_pluginmanager, args=args)
+
+def main(args=None, plugins=None):
+    """ returned exit code integer, after an in-process testing run
+    with the given command line arguments, preloading an optional list
+    of passed in plugin objects. """
     try:
-        if plugins:
-            for plugin in plugins:
-                _pluginmanager.register(plugin)
-        config = hook.pytest_cmdline_parse(
-                pluginmanager=_pluginmanager, args=args)
-        exitstatus = hook.pytest_cmdline_main(config=config)
+        config = _prepareconfig(args, plugins)
+        exitstatus = config.hook.pytest_cmdline_main(config=config)
     except UsageError:
         e = sys.exc_info()[1]
         sys.stderr.write("ERROR: %s\n" %(e.args[0],))
diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py
--- a/_pytest/helpconfig.py
+++ b/_pytest/helpconfig.py
@@ -1,7 +1,7 @@
 """ version info, help messages, tracing configuration.  """
 import py
 import pytest
-import inspect, sys
+import os, inspect, sys
 from _pytest.core import varnames
 
 def pytest_addoption(parser):
@@ -18,7 +18,29 @@
                help="trace considerations of conftest.py files."),
     group.addoption('--debug',
                action="store_true", dest="debug", default=False,
-               help="generate and show internal debugging information.")
+               help="store internal tracing debug information in 'pytestdebug.log'.")
+
+
+def pytest_cmdline_parse(__multicall__):
+    config = __multicall__.execute()
+    if config.option.debug:
+        path = os.path.abspath("pytestdebug.log")
+        f = open(path, 'w')
+        config._debugfile = f
+        f.write("versions pytest-%s, py-%s, python-%s\ncwd=%s\nargs=%s\n\n" %(
+            pytest.__version__, py.__version__, ".".join(map(str, sys.version_info)),
+            os.getcwd(), config._origargs))
+        config.trace.root.setwriter(f.write)
+        sys.stderr.write("writing pytestdebug information to %s\n" % path)
+    return config
+
+@pytest.mark.trylast
+def pytest_unconfigure(config):
+    if hasattr(config, '_debugfile'):
+        config._debugfile.close()
+        sys.stderr.write("wrote pytestdebug information to %s\n" %
+            config._debugfile.name)
+        config.trace.root.setwriter(None)
 
 
 def pytest_cmdline_main(config):
@@ -34,6 +56,7 @@
     elif config.option.help:
         config.pluginmanager.do_configure(config)
         showhelp(config)
+        config.pluginmanager.do_unconfigure(config)
         return 0
 
 def showhelp(config):
@@ -91,7 +114,7 @@
         verinfo = getpluginversioninfo(config)
         if verinfo:
             lines.extend(verinfo)
-            
+
     if config.option.traceconfig:
         lines.append("active plugins:")
         plugins = []
diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py
--- a/_pytest/hookspec.py
+++ b/_pytest/hookspec.py
@@ -121,16 +121,23 @@
 def pytest_itemstart(item, node=None):
     """ (deprecated, use pytest_runtest_logstart). """
 
-def pytest_runtest_protocol(item):
-    """ implements the standard runtest_setup/call/teardown protocol including
-    capturing exceptions and calling reporting hooks on the results accordingly.
+def pytest_runtest_protocol(item, nextitem):
+    """ implements the runtest_setup/call/teardown protocol for
+    the given test item, including capturing exceptions and calling
+    reporting hooks.
+
+    :arg item: test item for which the runtest protocol is performed.
+
+    :arg nextitem: the scheduled-to-be-next test item (or None if this
+                  is the end my friend).  This argument is passed on to
+                  :py:func:`pytest_runtest_teardown`.
 
     :return boolean: True if no further hook implementations should be invoked.
     """
 pytest_runtest_protocol.firstresult = True
 
 def pytest_runtest_logstart(nodeid, location):
-    """ signal the start of a test run. """
+    """ signal the start of running a single test item. """
 
 def pytest_runtest_setup(item):
     """ called before ``pytest_runtest_call(item)``. """
@@ -138,8 +145,14 @@
 def pytest_runtest_call(item):
     """ called to execute the test ``item``. """
 
-def pytest_runtest_teardown(item):
-    """ called after ``pytest_runtest_call``. """
+def pytest_runtest_teardown(item, nextitem):
+    """ called after ``pytest_runtest_call``.
+
+    :arg nextitem: the scheduled-to-be-next test item (None if no further
+                  test item is scheduled).  This argument can be used to
+                  perform exact teardowns, i.e. calling just enough finalizers
+                  so that nextitem only needs to call setup-functions.
+    """
 
 def pytest_runtest_makereport(item, call):
     """ return a :py:class:`_pytest.runner.TestReport` object
@@ -149,15 +162,8 @@
 pytest_runtest_makereport.firstresult = True
 
 def pytest_runtest_logreport(report):
-    """ process item test report. """
-
-# special handling for final teardown - somewhat internal for now
-def pytest__teardown_final(session):
-    """ called before test session finishes. """
-pytest__teardown_final.firstresult = True
-
-def pytest__teardown_final_logerror(report, session):
-    """ called if runtest_teardown_final failed. """
+    """ process a test setup/call/teardown report relating to
+    the respective phase of executing a test. """
 
 # -------------------------------------------------------------------------
 # test session related hooks
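
As an illustration of the nextitem argument documented above, a conftest.py could implement pytest_runtest_teardown to act only when the run is about to leave the current test module. This is a sketch, not part of the changeset:

    # conftest.py -- sketch
    def pytest_runtest_teardown(item, nextitem):
        # nextitem is None after the last test; otherwise it lets a plugin
        # tear down only as much state as the next test actually requires.
        if nextitem is None:
            print("last test finished: %s" % item.nodeid)
        elif nextitem.fspath != item.fspath:
            print("leaving module %s" % item.fspath)
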
diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py
--- a/_pytest/junitxml.py
+++ b/_pytest/junitxml.py
@@ -25,21 +25,39 @@
     long = int
 
 
+class Junit(py.xml.Namespace):
+    pass
+
+
 # We need to get the subset of the invalid unicode ranges according to
 # XML 1.0 which are valid in this python build.  Hence we calculate
 # this dynamically instead of hardcoding it.  The spec range of valid
 # chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
 #                    | [#x10000-#x10FFFF]
-_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19),
-                   (0xD800, 0xDFFF), (0xFDD0, 0xFFFF)]
-_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high))
-                  for (low, high) in _illegal_unichrs
+_legal_chars = (0x09, 0x0A, 0x0d)
+_legal_ranges = (
+    (0x20, 0xD7FF),
+    (0xE000, 0xFFFD),
+    (0x10000, 0x10FFFF),
+)
+_legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high))
+                  for (low, high) in _legal_ranges
                   if low < sys.maxunicode]
-illegal_xml_re = re.compile(unicode('[%s]') %
-                            unicode('').join(_illegal_ranges))
-del _illegal_unichrs
-del _illegal_ranges
+_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
+illegal_xml_re = re.compile(unicode('[^%s]') %
+                            unicode('').join(_legal_xml_re))
+del _legal_chars
+del _legal_ranges
+del _legal_xml_re
 
+def bin_xml_escape(arg):
+    def repl(matchobj):
+        i = ord(matchobj.group())
+        if i <= 0xFF:
+            return unicode('#x%02X') % i
+        else:
+            return unicode('#x%04X') % i
+    return illegal_xml_re.sub(repl, py.xml.escape(arg))
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting")
@@ -68,117 +86,97 @@
         logfile = os.path.expanduser(os.path.expandvars(logfile))
         self.logfile = os.path.normpath(logfile)
         self.prefix = prefix
-        self.test_logs = []
+        self.tests = []
         self.passed = self.skipped = 0
         self.failed = self.errors = 0
-        self._durations = {}
 
     def _opentestcase(self, report):
         names = report.nodeid.split("::")
         names[0] = names[0].replace("/", '.')
-        names = tuple(names)
-        d = {'time': self._durations.pop(report.nodeid, "0")}
         names = [x.replace(".py", "") for x in names if x != "()"]
         classnames = names[:-1]
         if self.prefix:
             classnames.insert(0, self.prefix)
-        d['classname'] = ".".join(classnames)
-        d['name'] = py.xml.escape(names[-1])
-        attrs = ['%s="%s"' % item for item in sorted(d.items())]
-        self.test_logs.append("\n<testcase %s>" % " ".join(attrs))
+        self.tests.append(Junit.testcase(
+            classname=".".join(classnames),
+            name=names[-1],
+            time=getattr(report, 'duration', 0)
+        ))
 
-    def _closetestcase(self):
-        self.test_logs.append("</testcase>")
-
-    def appendlog(self, fmt, *args):
-        def repl(matchobj):
-            i = ord(matchobj.group())
-            if i <= 0xFF:
-                return unicode('#x%02X') % i
-            else:
-                return unicode('#x%04X') % i
-        args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg))
-                      for arg in args])
-        self.test_logs.append(fmt % args)
+    def append(self, obj):
+        self.tests[-1].append(obj)
 
     def append_pass(self, report):
         self.passed += 1
-        self._opentestcase(report)
-        self._closetestcase()
 
     def append_failure(self, report):
-        self._opentestcase(report)
         #msg = str(report.longrepr.reprtraceback.extraline)
         if "xfail" in report.keywords:
-            self.appendlog(
-                '<skipped message="xfail-marked test passes unexpectedly"/>')
+            self.append(
+                Junit.skipped(message="xfail-marked test passes unexpectedly"))
             self.skipped += 1
         else:
-            self.appendlog('<failure message="test failure">%s</failure>',
-                report.longrepr)
+            sec = dict(report.sections)
+            fail = Junit.failure(message="test failure")
+            fail.append(str(report.longrepr))
+            self.append(fail)
+            for name in ('out', 'err'):
+                content = sec.get("Captured std%s" % name)
+                if content:
+                    tag = getattr(Junit, 'system-'+name)
+                    self.append(tag(bin_xml_escape(content)))
             self.failed += 1
-        self._closetestcase()
 
     def append_collect_failure(self, report):
-        self._opentestcase(report)
         #msg = str(report.longrepr.reprtraceback.extraline)
-        self.appendlog('<failure message="collection failure">%s</failure>',
-            report.longrepr)
-        self._closetestcase()
+        self.append(Junit.failure(str(report.longrepr),
+                                  message="collection failure"))
         self.errors += 1
 
     def append_collect_skipped(self, report):
-        self._opentestcase(report)
         #msg = str(report.longrepr.reprtraceback.extraline)
-        self.appendlog('<skipped message="collection skipped">%s</skipped>',
-            report.longrepr)
-        self._closetestcase()
+        self.append(Junit.skipped(str(report.longrepr),
+                                  message="collection skipped"))
         self.skipped += 1
 
     def append_error(self, report):
-        self._opentestcase(report)
-        self.appendlog('<error message="test setup failure">%s</error>',
-            report.longrepr)
-        self._closetestcase()
+        self.append(Junit.error(str(report.longrepr),
+                                message="test setup failure"))
         self.errors += 1
 
     def append_skipped(self, report):
-        self._opentestcase(report)
         if "xfail" in report.keywords:
-            self.appendlog(
-                '<skipped message="expected test failure">%s</skipped>',
-                report.keywords['xfail'])
+            self.append(Junit.skipped(str(report.keywords['xfail']),
+                                      message="expected test failure"))
         else:
             filename, lineno, skipreason = report.longrepr
             if skipreason.startswith("Skipped: "):
                 skipreason = skipreason[9:]
-            self.appendlog('<skipped type="pytest.skip" '
-                           'message="%s">%s</skipped>',
-                skipreason, "%s:%s: %s" % report.longrepr,
-                )
-        self._closetestcase()
+            self.append(
+                Junit.skipped("%s:%s: %s" % report.longrepr,
+                              type="pytest.skip",
+                              message=skipreason
+                ))
         self.skipped += 1
 
     def pytest_runtest_logreport(self, report):
         if report.passed:
-            self.append_pass(report)
+            if report.when == "call": # ignore setup/teardown
+                self._opentestcase(report)
+                self.append_pass(report)
         elif report.failed:
+            self._opentestcase(report)
             if report.when != "call":
                 self.append_error(report)
             else:
                 self.append_failure(report)
         elif report.skipped:
+            self._opentestcase(report)
             self.append_skipped(report)
 
-    def pytest_runtest_call(self, item, __multicall__):
-        start = time.time()
-        try:
-            return __multicall__.execute()
-        finally:
-            self._durations[item.nodeid] = time.time() - start
-
     def pytest_collectreport(self, report):
         if not report.passed:
+            self._opentestcase(report)
             if report.failed:
                 self.append_collect_failure(report)
             else:
@@ -187,10 +185,11 @@
     def pytest_internalerror(self, excrepr):
         self.errors += 1
         data = py.xml.escape(excrepr)
-        self.test_logs.append(
-            '\n<testcase classname="pytest" name="internal">'
-            '    <error message="internal error">'
-            '%s</error></testcase>' % data)
+        self.tests.append(
+            Junit.testcase(
+                    Junit.error(data, message="internal error"),
+                    classname="pytest",
+                    name="internal"))
 
     def pytest_sessionstart(self, session):
         self.suite_start_time = time.time()
@@ -204,17 +203,17 @@
         suite_stop_time = time.time()
         suite_time_delta = suite_stop_time - self.suite_start_time
         numtests = self.passed + self.failed
+
         logfile.write('<?xml version="1.0" encoding="utf-8"?>')
-        logfile.write('<testsuite ')
-        logfile.write('name="" ')
-        logfile.write('errors="%i" ' % self.errors)
-        logfile.write('failures="%i" ' % self.failed)
-        logfile.write('skips="%i" ' % self.skipped)
-        logfile.write('tests="%i" ' % numtests)
-        logfile.write('time="%.3f"' % suite_time_delta)
-        logfile.write(' >')
-        logfile.writelines(self.test_logs)
-        logfile.write('</testsuite>')
+        logfile.write(Junit.testsuite(
+            self.tests,
+            name="",
+            errors=self.errors,
+            failures=self.failed,
+            skips=self.skipped,
+            tests=numtests,
+            time="%.3f" % suite_time_delta,
+        ).unicode(indent=0))
         logfile.close()
 
     def pytest_terminal_summary(self, terminalreporter):
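
The rewritten plugin builds its report through py.xml.Namespace tag objects instead of hand-concatenated strings. A standalone sketch of that API as used here, with made-up values:

    import py

    class Junit(py.xml.Namespace):
        pass

    # Attribute access on the namespace yields tag factories; positional
    # arguments become child nodes, keyword arguments become XML attributes,
    # and unicode() serializes the whole tree.
    suite = Junit.testsuite(
        Junit.testcase(
            Junit.failure("assert 1 == 2", message="test failure"),
            classname="test_demo", name="test_answer", time="0.001"),
        name="", errors=0, failures=1, skips=0, tests=1, time="0.001")
    print(suite.unicode(indent=0))
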
diff --git a/_pytest/main.py b/_pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -2,7 +2,7 @@
 
 import py
 import pytest, _pytest
-import os, sys
+import os, sys, imp
 tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
 
 # exitcodes for the command line
@@ -11,6 +11,8 @@
 EXIT_INTERRUPTED = 2
 EXIT_INTERNALERROR = 3
 
+name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
+
 def pytest_addoption(parser):
     parser.addini("norecursedirs", "directory patterns to avoid for recursion",
         type="args", default=('.*', 'CVS', '_darcs', '{arch}'))
@@ -27,6 +29,9 @@
                action="store", type="int", dest="maxfail", default=0,
                help="exit after first num failures or errors.")
 
+    group._addoption('--strict', action="store_true",
+               help="run pytest in strict mode, warnings become errors.")
+
     group = parser.getgroup("collect", "collection")
     group.addoption('--collectonly',
         action="store_true", dest="collectonly",
@@ -48,7 +53,7 @@
 def pytest_namespace():
     collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
     return dict(collect=collect)
-        
+
 def pytest_configure(config):
     py.test.config = config # compatibility
     if config.option.exitfirst:
@@ -77,11 +82,11 @@
         session.exitstatus = EXIT_INTERNALERROR
         if excinfo.errisinstance(SystemExit):
             sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
+    if initstate >= 2:
+        config.hook.pytest_sessionfinish(session=session,
+            exitstatus=session.exitstatus or (session._testsfailed and 1))
     if not session.exitstatus and session._testsfailed:
         session.exitstatus = EXIT_TESTSFAILED
-    if initstate >= 2:
-        config.hook.pytest_sessionfinish(session=session,
-            exitstatus=session.exitstatus)
     if initstate >= 1:
         config.pluginmanager.do_unconfigure(config)
     return session.exitstatus
@@ -101,8 +106,12 @@
 def pytest_runtestloop(session):
     if session.config.option.collectonly:
         return True
-    for item in session.session.items:
-        item.config.hook.pytest_runtest_protocol(item=item)
+    for i, item in enumerate(session.items):
+        try:
+            nextitem = session.items[i+1]
+        except IndexError:
+            nextitem = None
+        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
         if session.shouldstop:
             raise session.Interrupted(session.shouldstop)
     return True
@@ -132,7 +141,7 @@
         return getattr(pytest, name)
     return property(fget, None, None,
         "deprecated attribute %r, use pytest.%s" % (name,name))
-    
+
 class Node(object):
     """ base class for all Nodes in the collection tree.
     Collector subclasses have children, Items are terminal nodes."""
@@ -143,13 +152,13 @@
 
         #: the parent collector node.
         self.parent = parent
-        
+
         #: the test config object
         self.config = config or parent.config
 
         #: the collection this node is part of
         self.session = session or parent.session
-        
+
         #: filesystem path where this node was collected from
         self.fspath = getattr(parent, 'fspath', None)
         self.ihook = self.session.gethookproxy(self.fspath)
@@ -224,13 +233,13 @@
     def listchain(self):
         """ return list of all parent collectors up to self,
             starting from root of collection tree. """
-        l = [self]
-        while 1:
-            x = l[0]
-            if x.parent is not None: # and x.parent.parent is not None:
-                l.insert(0, x.parent)
-            else:
-                return l
+        chain = []
+        item = self
+        while item is not None:
+            chain.append(item)
+            item = item.parent
+        chain.reverse()
+        return chain
 
     def listnames(self):
         return [x.name for x in self.listchain()]
@@ -325,6 +334,8 @@
     """ a basic test invocation item. Note that for a single function
     there might be multiple test invocation items.
     """
+    nextitem = None
+
     def reportinfo(self):
         return self.fspath, None, ""
 
@@ -399,6 +410,7 @@
         self._notfound = []
         self._initialpaths = set()
         self._initialparts = []
+        self.items = items = []
         for arg in args:
             parts = self._parsearg(arg)
             self._initialparts.append(parts)
@@ -414,7 +426,6 @@
         if not genitems:
             return rep.result
         else:
-            self.items = items = []
             if rep.passed:
                 for node in rep.result:
                     self.items.extend(self.genitems(node))
@@ -469,16 +480,29 @@
         return True
 
     def _tryconvertpyarg(self, x):
-        try:
-            mod = __import__(x, None, None, ['__doc__'])
-        except (ValueError, ImportError):
-            return x
-        p = py.path.local(mod.__file__)
-        if p.purebasename == "__init__":
-            p = p.dirpath()
-        else:
-            p = p.new(basename=p.purebasename+".py")
-        return str(p)
+        mod = None
+        path = [os.path.abspath('.')] + sys.path
+        for name in x.split('.'):
+            # ignore anything that's not a proper name here
+            # else something like --pyargs will mess up '.'
+            # since imp.find_module will actually sometimes work for it
+            # but it's supposed to be considered a filesystem path
+            # not a package
+            if name_re.match(name) is None:
+                return x
+            try:
+                fd, mod, type_ = imp.find_module(name, path)
+            except ImportError:
+                return x
+            else:
+                if fd is not None:
+                    fd.close()
+
+            if type_[2] != imp.PKG_DIRECTORY:
+                path = [os.path.dirname(mod)]
+            else:
+                path = [mod]
+        return mod
 
     def _parsearg(self, arg):
         """ return (fspath, names) tuple after checking the file exists. """
@@ -496,7 +520,7 @@
             raise pytest.UsageError(msg + arg)
         parts[0] = path
         return parts
-   
+
     def matchnodes(self, matching, names):
         self.trace("matchnodes", matching, names)
         self.trace.root.indent += 1
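
The new _tryconvertpyarg() above resolves a dotted --pyargs name to a filesystem path by walking it one component at a time with imp.find_module rather than importing it. The same idea in outline, as a sketch without the real code's error handling:

    import imp, os, sys

    def locate(dotted):
        """Illustrative helper: resolve 'pkg.module' to a path without importing it."""
        path = [os.path.abspath('.')] + sys.path
        found = None
        for name in dotted.split('.'):
            fd, found, desc = imp.find_module(name, path)
            if fd is not None:
                fd.close()
            if desc[2] == imp.PKG_DIRECTORY:
                # a package: search the next component inside its directory
                path = [found]
            else:
                # a plain module ends the walk
                path = [os.path.dirname(found)]
        return found
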
diff --git a/_pytest/mark.py b/_pytest/mark.py
--- a/_pytest/mark.py
+++ b/_pytest/mark.py
@@ -14,12 +14,37 @@
              "Terminate expression with ':' to make the first match match "
              "all subsequent tests (usually file-order). ")
 
+    group._addoption("-m",
+        action="store", dest="markexpr", default="", metavar="MARKEXPR",
+        help="only run tests matching given mark expression.  "
+             "example: -m 'mark1 and not mark2'."
+             )
+
+    group.addoption("--markers", action="store_true", help=
+        "show markers (builtin, plugin and per-project ones).")
+
+    parser.addini("markers", "markers for test functions", 'linelist')
+
+def pytest_cmdline_main(config):
+    if config.option.markers:
+        config.pluginmanager.do_configure(config)
+        tw = py.io.TerminalWriter()
+        for line in config.getini("markers"):
+            name, rest = line.split(":", 1)
+            tw.write("@pytest.mark.%s:" %  name, bold=True)
+            tw.line(rest)
+            tw.line()
+        config.pluginmanager.do_unconfigure(config)
+        return 0
+pytest_cmdline_main.tryfirst = True
+
 def pytest_collection_modifyitems(items, config):
     keywordexpr = config.option.keyword
-    if not keywordexpr:
+    matchexpr = config.option.markexpr
+    if not keywordexpr and not matchexpr:
         return
     selectuntil = False
-    if keywordexpr[-1] == ":":
+    if keywordexpr[-1:] == ":":
         selectuntil = True
         keywordexpr = keywordexpr[:-1]
 
@@ -29,21 +54,38 @@
         if keywordexpr and skipbykeyword(colitem, keywordexpr):
             deselected.append(colitem)
         else:
-            remaining.append(colitem)
             if selectuntil:
                 keywordexpr = None
+            if matchexpr:
+                if not matchmark(colitem, matchexpr):
+                    deselected.append(colitem)
+                    continue
+            remaining.append(colitem)
 
     if deselected:
         config.hook.pytest_deselected(items=deselected)
         items[:] = remaining
 
+class BoolDict:
+    def __init__(self, mydict):
+        self._mydict = mydict
+    def __getitem__(self, name):
+        return name in self._mydict
+
+def matchmark(colitem, matchexpr):
+    return eval(matchexpr, {}, BoolDict(colitem.obj.__dict__))
+
+def pytest_configure(config):
+    if config.option.strict:
+        pytest.mark._config = config
+
 def skipbykeyword(colitem, keywordexpr):
     """ return True if they given keyword expression means to
         skip this collector/item.
     """
     if not keywordexpr:
         return
-    
+
     itemkeywords = getkeywords(colitem)
     for key in filter(None, keywordexpr.split()):
         eor = key[:1] == '-'
@@ -77,15 +119,31 @@
          @py.test.mark.slowtest
          def test_function():
             pass
-  
+
     will set a 'slowtest' :class:`MarkInfo` object
     on the ``test_function`` object. """
 
     def __getattr__(self, name):
         if name[0] == "_":
             raise AttributeError(name)
+        if hasattr(self, '_config'):
+            self._check(name)
         return MarkDecorator(name)
 
+    def _check(self, name):
+        try:
+            if name in self._markers:
+                return
+        except AttributeError:
+            pass
+        self._markers = l = set()
+        for line in self._config.getini("markers"):
+            beginning = line.split(":", 1)
+            x = beginning[0].split("(", 1)[0]
+            l.add(x)
+        if name not in self._markers:
+            raise AttributeError("%r not a registered marker" % (name,))
+
 class MarkDecorator:
     """ A decorator for test functions and test classes.  When applied
     it will create :class:`MarkInfo` objects which may be
@@ -133,8 +191,7 @@
                         holder = MarkInfo(self.markname, self.args, self.kwargs)
                         setattr(func, self.markname, holder)
                     else:
-                        holder.kwargs.update(self.kwargs)
-                        holder.args += self.args
+                        holder.add(self.args, self.kwargs)
                 return func
         kw = self.kwargs.copy()
         kw.update(kwargs)
@@ -150,27 +207,20 @@
         self.args = args
         #: keyword argument dictionary, empty if nothing specified
         self.kwargs = kwargs
+        self._arglist = [(args, kwargs.copy())]
 
     def __repr__(self):
         return "<MarkInfo %r args=%r kwargs=%r>" % (
                 self.name, self.args, self.kwargs)
 
-def pytest_itemcollected(item):
-    if not isinstance(item, pytest.Function):
-        return
-    try:
-        func = item.obj.__func__
-    except AttributeError:
-        func = getattr(item.obj, 'im_func', item.obj)
-    pyclasses = (pytest.Class, pytest.Module)
-    for node in item.listchain():
-        if isinstance(node, pyclasses):
-            marker = getattr(node.obj, 'pytestmark', None)
-            if marker is not None:
-                if isinstance(marker, list):
-                    for mark in marker:
-                        mark(func)
-                else:
-                    marker(func)
-        node = node.parent
-    item.keywords.update(py.builtin._getfuncdict(func))
+    def add(self, args, kwargs):
+        """ add a MarkInfo with the given args and kwargs. """
+        self._arglist.append((args, kwargs))
+        self.args += args
+        self.kwargs.update(kwargs)
+
+    def __iter__(self):
+        """ yield MarkInfo objects each relating to a marking-call. """
+        for args, kwargs in self._arglist:
+            yield MarkInfo(self.name, args, kwargs)
+
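
For illustration, a test module using the new marker registration and "-m"
selection above might look like this (the marker name "webtest" and the test
names are invented for the sketch):

    # pytest.ini would carry:  markers = webtest: mark a test as a webtest.
    import pytest

    @pytest.mark.webtest        # attaches a 'webtest' MarkInfo to the function
    def test_send_http():
        pass

    def test_fast_unit():
        pass

    # "py.test --markers" lists the registered markers;
    # "py.test -m webtest" runs only test_send_http and reports the other
    # test as deselected.
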
diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py
--- a/_pytest/monkeypatch.py
+++ b/_pytest/monkeypatch.py
@@ -13,6 +13,7 @@
         monkeypatch.setenv(name, value, prepend=False)
         monkeypatch.delenv(name, value, raising=True)
         monkeypatch.syspath_prepend(path)
+        monkeypatch.chdir(path)
 
     All modifications will be undone after the requesting
     test function has finished. The ``raising``
@@ -30,6 +31,7 @@
     def __init__(self):
         self._setattr = []
         self._setitem = []
+        self._cwd = None
 
     def setattr(self, obj, name, value, raising=True):
         """ set attribute ``name`` on ``obj`` to ``value``, by default
@@ -83,6 +85,17 @@
             self._savesyspath = sys.path[:]
         sys.path.insert(0, str(path))
 
+    def chdir(self, path):
+        """ change the current working directory to the specified path
+        path can be a string or a py.path.local object
+        """
+        if self._cwd is None:
+            self._cwd = os.getcwd()
+        if hasattr(path, "chdir"):
+            path.chdir()
+        else:
+            os.chdir(path)
+
     def undo(self):
         """ undo previous changes.  This call consumes the
         undo stack.  Calling it a second time has no effect unless
@@ -95,9 +108,17 @@
         self._setattr[:] = []
         for dictionary, name, value in self._setitem:
             if value is notset:
-                del dictionary[name]
+                try:
+                    del dictionary[name]
+                except KeyError:
+                    pass # was already deleted, so we have the desired state
             else:
                 dictionary[name] = value
         self._setitem[:] = []
         if hasattr(self, '_savesyspath'):
             sys.path[:] = self._savesyspath
+            del self._savesyspath
+
+        if self._cwd is not None:
+            os.chdir(self._cwd)
+            self._cwd = None
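
As a small usage sketch of the new chdir() helper (relying on the standard
tmpdir and monkeypatch funcargs; the file name is made up):

    def test_writes_into_tmpdir(tmpdir, monkeypatch):
        monkeypatch.chdir(tmpdir)           # accepts a py.path.local or a string
        with open("output.txt", "w") as f:  # created relative to tmpdir now
            f.write("hello")
        assert tmpdir.join("output.txt").check()
        # monkeypatch.undo() runs at teardown and restores the original cwd
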
diff --git a/_pytest/nose.py b/_pytest/nose.py
--- a/_pytest/nose.py
+++ b/_pytest/nose.py
@@ -13,6 +13,7 @@
             call.excinfo = call2.excinfo
 
 
+@pytest.mark.trylast
 def pytest_runtest_setup(item):
     if isinstance(item, (pytest.Function)):
         if isinstance(item.parent, pytest.Generator):
diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py
--- a/_pytest/pastebin.py
+++ b/_pytest/pastebin.py
@@ -38,7 +38,11 @@
         del tr._tw.__dict__['write']
 
 def getproxy():
-    return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
+    if sys.version_info < (3, 0):
+        from xmlrpclib import ServerProxy
+    else:
+        from xmlrpc.client import ServerProxy
+    return ServerProxy(url.xmlrpc).pastes
 
 def pytest_terminal_summary(terminalreporter):
     if terminalreporter.config.option.pastebin != "failed":
diff --git a/_pytest/pdb.py b/_pytest/pdb.py
--- a/_pytest/pdb.py
+++ b/_pytest/pdb.py
@@ -19,11 +19,13 @@
 class pytestPDB:
     """ Pseudo PDB that defers to the real pdb. """
     item = None
+    collector = None
 
     def set_trace(self):
         """ invoke PDB set_trace debugging, dropping any IO capturing. """
         frame = sys._getframe().f_back
-        item = getattr(self, 'item', None)
+        item = self.item or self.collector
+
         if item is not None:
             capman = item.config.pluginmanager.getplugin("capturemanager")
             out, err = capman.suspendcapture()
@@ -38,6 +40,14 @@
     pytestPDB.item = item
 pytest_runtest_setup = pytest_runtest_call = pytest_runtest_teardown = pdbitem
 
+@pytest.mark.tryfirst
+def pytest_make_collect_report(__multicall__, collector):
+    try:
+        pytestPDB.collector = collector
+        return __multicall__.execute()
+    finally:
+        pytestPDB.collector = None
+
 def pytest_runtest_makereport():
     pytestPDB.item = None
     
@@ -60,7 +70,13 @@
         tw.sep(">", "traceback")
         rep.toterminal(tw)
         tw.sep(">", "entering PDB")
-        post_mortem(call.excinfo._excinfo[2])
+        # A doctest.UnexpectedException is not useful for post_mortem.
+        # Use the underlying exception instead:
+        if isinstance(call.excinfo.value, py.std.doctest.UnexpectedException):
+            tb = call.excinfo.value.exc_info[2]
+        else:
+            tb = call.excinfo._excinfo[2]
+        post_mortem(tb)
         rep._pdbshown = True
         return rep
 
diff --git a/_pytest/pytester.py b/_pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -25,6 +25,7 @@
         _pytest_fullpath
     except NameError:
         _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
 
 def pytest_funcarg___pytest(request):
     return PytestArg(request)
@@ -313,16 +314,6 @@
             result.extend(session.genitems(colitem))
         return result
 
-    def inline_genitems(self, *args):
-        #config = self.parseconfig(*args)
-        config = self.parseconfigure(*args)
-        rec = self.getreportrecorder(config)
-        session = Session(config)
-        config.hook.pytest_sessionstart(session=session)
-        session.perform_collect()
-        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
-        return session.items, rec
-
     def runitem(self, source):
         # used from runner functional tests
         item = self.getitem(source)
@@ -343,64 +334,57 @@
         l = list(args) + [p]
         reprec = self.inline_run(*l)
         reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 1, reports
-        return reports[0]
+        assert len(reports) == 3, reports # setup/call/teardown
+        return reports[1]
+
+    def inline_genitems(self, *args):
+        return self.inprocess_run(list(args) + ['--collectonly'])
 
     def inline_run(self, *args):
-        args = ("-s", ) + args # otherwise FD leakage
-        config = self.parseconfig(*args)
-        reprec = self.getreportrecorder(config)
-        #config.pluginmanager.do_configure(config)
-        config.hook.pytest_cmdline_main(config=config)
-        #config.pluginmanager.do_unconfigure(config)
-        return reprec
+        items, rec = self.inprocess_run(args)
+        return rec
 
-    def config_preparse(self):
-        config = self.Config()
-        for plugin in self.plugins:
-            if isinstance(plugin, str):
-                config.pluginmanager.import_plugin(plugin)
-            else:
-                if isinstance(plugin, dict):
-                    plugin = PseudoPlugin(plugin)
-                if not config.pluginmanager.isregistered(plugin):
-                    config.pluginmanager.register(plugin)
-        return config
+    def inprocess_run(self, args, plugins=None):
+        rec = []
+        items = []
+        class Collect:
+            def pytest_configure(x, config):
+                rec.append(self.getreportrecorder(config))
+            def pytest_itemcollected(self, item):
+                items.append(item)
+        if not plugins:
+            plugins = []
+        plugins.append(Collect())
+        ret = self.pytestmain(list(args), plugins=[Collect()])
+        reprec = rec[0]
+        reprec.ret = ret
+        assert len(rec) == 1
+        return items, reprec
 
     def parseconfig(self, *args):
-        if not args:
-            args = (self.tmpdir,)
-        config = self.config_preparse()
-        args = list(args)
+        args = [str(x) for x in args]
         for x in args:
             if str(x).startswith('--basetemp'):
                 break
         else:
             args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
-        config.parse(args)
+        import _pytest.core
+        config = _pytest.core._prepareconfig(args, self.plugins)
+        # the in-process pytest invocation needs to avoid leaking FDs
+        # so we register a "reset_capturings" call on the capturing manager
+        # and make sure it gets called
+        config._cleanup.append(
+            config.pluginmanager.getplugin("capturemanager").reset_capturings)
+        import _pytest.config
+        self.request.addfinalizer(
+            lambda: _pytest.config.pytest_unconfigure(config))
         return config
 
-    def reparseconfig(self, args=None):
-        """ this is used from tests that want to re-invoke parse(). """
-        if not args:
-            args = [self.tmpdir]
-        oldconfig = getattr(py.test, 'config', None)
-        try:
-            c = py.test.config = self.Config()
-            c.basetemp = py.path.local.make_numbered_dir(prefix="reparse",
-                keep=0, rootdir=self.tmpdir, lock_timeout=None)
-            c.parse(args)
-            c.pluginmanager.do_configure(c)
-            self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c))
-            return c
-        finally:
-            py.test.config = oldconfig
-
     def parseconfigure(self, *args):
         config = self.parseconfig(*args)
         config.pluginmanager.do_configure(config)
         self.request.addfinalizer(lambda:
-            config.pluginmanager.do_unconfigure(config))
+        config.pluginmanager.do_unconfigure(config))
         return config
 
     def getitem(self,  source, funcname="test_func"):
@@ -420,7 +404,6 @@
             self.makepyfile(__init__ = "#")
         self.config = config = self.parseconfigure(path, *configargs)
         node = self.getnode(config, path)
-        #config.pluginmanager.do_unconfigure(config)
         return node
 
     def collect_by_name(self, modcol, name):
@@ -437,9 +420,16 @@
         return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
 
     def pytestmain(self, *args, **kwargs):
-        ret = pytest.main(*args, **kwargs)
-        if ret == 2:
-            raise KeyboardInterrupt()
+        class ResetCapturing:
+            @pytest.mark.trylast
+            def pytest_unconfigure(self, config):
+                capman = config.pluginmanager.getplugin("capturemanager")
+                capman.reset_capturings()
+        plugins = kwargs.setdefault("plugins", [])
+        rc = ResetCapturing()
+        plugins.append(rc)
+        return pytest.main(*args, **kwargs)
+
     def run(self, *cmdargs):
         return self._run(*cmdargs)
 
@@ -528,6 +518,8 @@
         pexpect = py.test.importorskip("pexpect", "2.4")
         if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
             pytest.skip("pypy-64 bit not supported")
+        if sys.platform == "darwin":
+            pytest.xfail("pexpect does not work reliably on darwin?!")
         logfile = self.tmpdir.join("spawn.out")
         child = pexpect.spawn(cmd, logfile=logfile.open("w"))
         child.timeout = expect_timeout
@@ -540,10 +532,6 @@
             return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
                     py.io.saferepr(out),)
 
-class PseudoPlugin:
-    def __init__(self, vars):
-        self.__dict__.update(vars)
-
 class ReportRecorder(object):
     def __init__(self, hook):
         self.hook = hook
@@ -565,10 +553,17 @@
     def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
         return [x.report for x in self.getcalls(names)]
 
-    def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None):
+    def matchreport(self, inamepart="",
+        names="pytest_runtest_logreport pytest_collectreport", when=None):
         """ return a testreport whose dotted import path matches """
         l = []
         for rep in self.getreports(names=names):
+            try:
+                if not when and rep.when != "call" and rep.passed:
+                    # setup/teardown passing reports - let's ignore those
+                    continue
+            except AttributeError:
+                pass
             if when and getattr(rep, 'when', None) != when:
                 continue
             if not inamepart or inamepart in rep.nodeid.split("::"):
diff --git a/_pytest/python.py b/_pytest/python.py
--- a/_pytest/python.py
+++ b/_pytest/python.py
@@ -4,6 +4,7 @@
 import sys
 import pytest
 from py._code.code import TerminalRepr
+from _pytest.monkeypatch import monkeypatch
 
 import _pytest
 cutdir = py.path.local(_pytest.__file__).dirpath()
@@ -26,6 +27,24 @@
         showfuncargs(config)
         return 0
 
+
+def pytest_generate_tests(metafunc):
+    try:
+        param = metafunc.function.parametrize
+    except AttributeError:
+        return
+    for p in param:
+        metafunc.parametrize(*p.args, **p.kwargs)
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "parametrize(argnames, argvalues): call a test function multiple "
+        "times passing in multiple different argument value sets. Example: "
+        "@parametrize('arg1', [1,2]) would lead to two calls of the decorated "
+        "test function, one with arg1=1 and another with arg1=2."
+    )
+
+
 @pytest.mark.trylast
 def pytest_namespace():
     raises.Exception = pytest.fail.Exception
@@ -138,6 +157,7 @@
             obj = obj.place_as
 
         self._fslineno = py.code.getfslineno(obj)
+        assert isinstance(self._fslineno[1], int), obj
         return self._fslineno
 
     def reportinfo(self):
@@ -155,6 +175,7 @@
         else:
             fspath, lineno = self._getfslineno()
             modpath = self.getmodpath()
+        assert isinstance(lineno, int)
         return fspath, lineno, modpath
 
 class PyCollectorMixin(PyobjMixin, pytest.Collector):
@@ -200,6 +221,7 @@
         module = self.getparent(Module).obj
         clscol = self.getparent(Class)
         cls = clscol and clscol.obj or None
+        transfer_markers(funcobj, cls, module)
         metafunc = Metafunc(funcobj, config=self.config,
             cls=cls, module=module)
         gentesthook = self.config.hook.pytest_generate_tests
@@ -219,6 +241,19 @@
             l.append(function)
         return l
 
+def transfer_markers(funcobj, cls, mod):
+    # XXX this should rather be code in the mark plugin or the mark
+    # plugin should merge with the python plugin.
+    for holder in (cls, mod):
+        try:
+            pytestmark = holder.pytestmark
+        except AttributeError:
+            continue
+        if isinstance(pytestmark, list):
+            for mark in pytestmark:
+                mark(funcobj)
+        else:
+            pytestmark(funcobj)
 
 class Module(pytest.File, PyCollectorMixin):
     def _getobj(self):
@@ -226,13 +261,8 @@
 
     def _importtestmodule(self):
         # we assume we are only called once per module
-        from _pytest import assertion
-        assertion.before_module_import(self)
         try:
-            try:
-                mod = self.fspath.pyimport(ensuresyspath=True)
-            finally:
-                assertion.after_module_import(self)
+            mod = self.fspath.pyimport(ensuresyspath=True)
         except SyntaxError:
             excinfo = py.code.ExceptionInfo()
             raise self.CollectError(excinfo.getrepr(style="short"))
@@ -244,7 +274,8 @@
                 "  %s\n"
                 "which is not the same as the test file we want to collect:\n"
                 "  %s\n"
-                "HINT: use a unique basename for your test file modules"
+                "HINT: remove __pycache__ / .pyc files and/or use a "
+                "unique basename for your test file modules"
                  % e.args
             )
         #print "imported test module", mod
@@ -374,6 +405,7 @@
         tw.line()
         tw.line("%s:%d" % (self.filename, self.firstlineno+1))
 
+
 class Generator(FunctionMixin, PyCollectorMixin, pytest.Collector):
     def collect(self):
         # test generators are seen as collectors but they also
@@ -430,6 +462,7 @@
                 "yielded functions (deprecated) cannot have funcargs")
         else:
             if callspec is not None:
+                self.callspec = callspec
                 self.funcargs = callspec.funcargs or {}
                 self._genid = callspec.id
                 if hasattr(callspec, "param"):
@@ -506,15 +539,59 @@
     request._fillfuncargs()
 
 _notexists = object()
-class CallSpec:
-    def __init__(self, funcargs, id, param):
-        self.funcargs = funcargs
-        self.id = id
+
+class CallSpec2(object):
+    def __init__(self, metafunc):
+        self.metafunc = metafunc
+        self.funcargs = {}
+        self._idlist = []
+        self.params = {}
+        self._globalid = _notexists
+        self._globalid_args = set()
+        self._globalparam = _notexists
+
+    def copy(self, metafunc):
+        cs = CallSpec2(self.metafunc)
+        cs.funcargs.update(self.funcargs)
+        cs.params.update(self.params)
+        cs._idlist = list(self._idlist)
+        cs._globalid = self._globalid
+        cs._globalid_args = self._globalid_args
+        cs._globalparam = self._globalparam
+        return cs
+
+    def _checkargnotcontained(self, arg):
+        if arg in self.params or arg in self.funcargs:
+            raise ValueError("duplicate %r" %(arg,))
+
+    def getparam(self, name):
+        try:
+            return self.params[name]
+        except KeyError:
+            if self._globalparam is _notexists:
+                raise ValueError(name)
+            return self._globalparam
+
+    @property
+    def id(self):
+        return "-".join(map(str, filter(None, self._idlist)))
+
+    def setmulti(self, valtype, argnames, valset, id):
+        for arg,val in zip(argnames, valset):
+            self._checkargnotcontained(arg)
+            getattr(self, valtype)[arg] = val
+        self._idlist.append(id)
+
+    def setall(self, funcargs, id, param):
+        for x in funcargs:
+            self._checkargnotcontained(x)
+        self.funcargs.update(funcargs)
+        if id is not _notexists:
+            self._idlist.append(id)
         if param is not _notexists:
-            self.param = param
-    def __repr__(self):
-        return "<CallSpec id=%r param=%r funcargs=%r>" %(
-            self.id, getattr(self, 'param', '?'), self.funcargs)
+            assert self._globalparam is _notexists
+            self._globalparam = param
+
 
 class Metafunc:
     def __init__(self, function, config=None, cls=None, module=None):
@@ -528,31 +605,71 @@
         self._calls = []
         self._ids = py.builtin.set()
 
+    def parametrize(self, argnames, argvalues, indirect=False, ids=None):
+        """ Add new invocations to the underlying test function using the list
+        of argvalues for the given argnames.  Parametrization is performed
+        during the collection phase.  If you need to setup expensive resources
+        you may pass indirect=True and implement a funcarg factory which can
+        perform the expensive setup just before a test is actually run.
+
+        :arg argnames: an argument name or a list of argument names
+
+        :arg argvalues: a list of values for the argname or a list of tuples of
+            values for the list of argument names.
+
+        :arg indirect: if True each argvalue corresponding to an argument will
+            be passed as request.param to its respective funcarg factory so
+            that it can perform more expensive setups during the setup phase of
+            a test rather than at collection time.
+
+        :arg ids: list of string ids each corresponding to the argvalues so
+            that they are part of the test id. If no ids are provided they will
+            be generated automatically from the argvalues.
+        """
+        if not isinstance(argnames, (tuple, list)):
+            argnames = (argnames,)
+            argvalues = [(val,) for val in argvalues]
+        if not indirect:
+            #XXX should we also check for the opposite case?
+            for arg in argnames:
+                if arg not in self.funcargnames:
+                    raise ValueError("%r has no argument %r" %(self.function, arg))
+        valtype = indirect and "params" or "funcargs"
+        if not ids:
+            idmaker = IDMaker()
+            ids = list(map(idmaker, argvalues))
+        newcalls = []
+        for callspec in self._calls or [CallSpec2(self)]:
+            for i, valset in enumerate(argvalues):
+                assert len(valset) == len(argnames)
+                newcallspec = callspec.copy(self)
+                newcallspec.setmulti(valtype, argnames, valset, ids[i])
+                newcalls.append(newcallspec)
+        self._calls = newcalls
+
     def addcall(self, funcargs=None, id=_notexists, param=_notexists):
-        """ add a new call to the underlying test function during the
-        collection phase of a test run.  Note that request.addcall() is
-        called during the test collection phase prior and independently
-        to actual test execution.  Therefore you should perform setup
-        of resources in a funcarg factory which can be instrumented
-        with the ``param``.
+        """ (deprecated, use parametrize) Add a new call to the underlying
+        test function during the collection phase of a test run.  Note that
+        request.addcall() is called during the test collection phase prior and
+        independently to actual test execution.  You should only use addcall()
+        if you need to specify multiple arguments of a test function.
 
         :arg funcargs: argument keyword dictionary used when invoking
             the test function.
 
         :arg id: used for reporting and identification purposes.  If you
-            don't supply an `id` the length of the currently
-            list of calls to the test function will be used.
+            don't supply an `id` an automatic unique id will be generated.
 
-        :arg param: will be exposed to a later funcarg factory invocation
-            through the ``request.param`` attribute.  It allows to
-            defer test fixture setup activities to when an actual
-            test is run.
+        :arg param: a parameter which will be exposed to a later funcarg factory
+            invocation through the ``request.param`` attribute.
         """
         assert funcargs is None or isinstance(funcargs, dict)
         if funcargs is not None:
             for name in funcargs:
                 if name not in self.funcargnames:
                     pytest.fail("funcarg %r not used in this function." % name)
+        else:
+            funcargs = {}
         if id is None:
             raise ValueError("id=None not allowed")
         if id is _notexists:
@@ -561,11 +678,26 @@
         if id in self._ids:
             raise ValueError("duplicate id %r" % id)
         self._ids.add(id)
-        self._calls.append(CallSpec(funcargs, id, param))
+
+        cs = CallSpec2(self)
+        cs.setall(funcargs, id, param)
+        self._calls.append(cs)
+
+class IDMaker:
+    def __init__(self):
+        self.counter = 0
+    def __call__(self, valset):
+        l = []
+        for val in valset:
+            if not isinstance(val, (int, str)):
+                val = "."+str(self.counter)
+            self.counter += 1
+            l.append(str(val))
+        return "-".join(l)
 
 class FuncargRequest:
     """ A request for function arguments from a test function.
-        
+
         Note that there is an optional ``param`` attribute in case
         there was an invocation to metafunc.addcall(param=...).
         If no such call was done in a ``pytest_generate_tests``
@@ -637,7 +769,7 @@
 
 
     def applymarker(self, marker):
-        """ apply a marker to a single test function invocation.
+        """ Apply a marker to a single test function invocation.
         This method is useful if you don't want to have a keyword/marker
         on all function invocations.
 
@@ -649,7 +781,7 @@
         self._pyfuncitem.keywords[marker.markname] = marker
 
     def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
-        """ return a testing resource managed by ``setup`` &
+        """ Return a testing resource managed by ``setup`` &
         ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
         ``teardown`` function will be called so that subsequent calls to
         ``setup`` would recreate the resource.
@@ -698,11 +830,18 @@
             self._raiselookupfailed(argname)
         funcargfactory = self._name2factory[argname].pop()
         oldarg = self._currentarg
-        self._currentarg = argname
+        mp = monkeypatch()
+        mp.setattr(self, '_currentarg', argname)
+        try:
+            param = self._pyfuncitem.callspec.getparam(argname)
+        except (AttributeError, ValueError):
+            pass
+        else:
+            mp.setattr(self, 'param', param, raising=False)
         try:
             self._funcargs[argname] = res = funcargfactory(request=self)
         finally:
-            self._currentarg = oldarg
+            mp.undo()
         return res
 
     def _getscopeitem(self, scope):
@@ -817,8 +956,7 @@
         >>> raises(ZeroDivisionError, f, x=0)
         <ExceptionInfo ...>
 
-    A third possibility is to use a string which which will
-    be executed::
+    A third possibility is to use a string to be executed::
 
         >>> raises(ZeroDivisionError, "f(0)")
         <ExceptionInfo ...>
diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py
--- a/_pytest/resultlog.py
+++ b/_pytest/resultlog.py
@@ -63,6 +63,8 @@
         self.write_log_entry(testpath, lettercode, longrepr)
 
     def pytest_runtest_logreport(self, report):
+        if report.when != "call" and report.passed:
+            return
         res = self.config.hook.pytest_report_teststatus(report=report)
         code = res[1]
         if code == 'x':
@@ -89,5 +91,8 @@
             self.log_outcome(report, code, longrepr)
 
     def pytest_internalerror(self, excrepr):
-        path = excrepr.reprcrash.path
+        reprcrash = getattr(excrepr, 'reprcrash', None)
+        path = getattr(reprcrash, "path", None)
+        if path is None:
+            path = "cwd:%s" % py.path.local()
         self.write_log_entry(path, '!', str(excrepr))
diff --git a/_pytest/runner.py b/_pytest/runner.py
--- a/_pytest/runner.py
+++ b/_pytest/runner.py
@@ -1,6 +1,6 @@
 """ basic collect and runtest protocol implementations """
 
-import py, sys
+import py, sys, time
 from py._code.code import TerminalRepr
 
 def pytest_namespace():
@@ -14,33 +14,60 @@
 #
 # pytest plugin hooks
 
+def pytest_addoption(parser):
+    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group.addoption('--durations',
+         action="store", type="int", default=None, metavar="N",
+         help="show N slowest setup/test durations (N=0 for all)."),
+
+def pytest_terminal_summary(terminalreporter):
+    durations = terminalreporter.config.option.durations
+    if durations is None:
+        return
+    tr = terminalreporter
+    dlist = []
+    for replist in tr.stats.values():
+        for rep in replist:
+            if hasattr(rep, 'duration'):
+                dlist.append(rep)
+    if not dlist:
+        return
+    dlist.sort(key=lambda x: x.duration)
+    dlist.reverse()
+    if not durations:
+        tr.write_sep("=", "slowest test durations")
+    else:
+        tr.write_sep("=", "slowest %s test durations" % durations)
+        dlist = dlist[:durations]
+
+    for rep in dlist:
+        nodeid = rep.nodeid.replace("::()::", "::")
+        tr.write_line("%02.2fs %-8s %s" %
+            (rep.duration, rep.when, nodeid))
+
 def pytest_sessionstart(session):
     session._setupstate = SetupState()
-
-def pytest_sessionfinish(session, exitstatus):
-    hook = session.config.hook
-    rep = hook.pytest__teardown_final(session=session)
-    if rep:
-        hook.pytest__teardown_final_logerror(session=session, report=rep)
-        session.exitstatus = 1
+def pytest_sessionfinish(session):
+    session._setupstate.teardown_all()
 
 class NodeInfo:
     def __init__(self, location):
         self.location = location
 
-def pytest_runtest_protocol(item):
+def pytest_runtest_protocol(item, nextitem):
     item.ihook.pytest_runtest_logstart(
         nodeid=item.nodeid, location=item.location,
     )
-    runtestprotocol(item)
+    runtestprotocol(item, nextitem=nextitem)
     return True
 
-def runtestprotocol(item, log=True):
+def runtestprotocol(item, log=True, nextitem=None):
     rep = call_and_report(item, "setup", log)
     reports = [rep]
     if rep.passed:
         reports.append(call_and_report(item, "call", log))
-    reports.append(call_and_report(item, "teardown", log))
+    reports.append(call_and_report(item, "teardown", log,
+        nextitem=nextitem))
     return reports
 
 def pytest_runtest_setup(item):
@@ -49,16 +76,8 @@
 def pytest_runtest_call(item):
     item.runtest()
 
-def pytest_runtest_teardown(item):
-    item.session._setupstate.teardown_exact(item)
-
-def pytest__teardown_final(session):
-    call = CallInfo(session._setupstate.teardown_all, when="teardown")
-    if call.excinfo:
-        ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
-        call.excinfo.traceback = ntraceback.filter()
-        longrepr = call.excinfo.getrepr(funcargs=True)
-        return TeardownErrorReport(longrepr)
+def pytest_runtest_teardown(item, nextitem):
+    item.session._setupstate.teardown_exact(item, nextitem)
 
 def pytest_report_teststatus(report):
     if report.when in ("setup", "teardown"):
@@ -74,18 +93,18 @@
 #
 # Implementation
 
-def call_and_report(item, when, log=True):
-    call = call_runtest_hook(item, when)
+def call_and_report(item, when, log=True, **kwds):
+    call = call_runtest_hook(item, when, **kwds)
     hook = item.ihook
     report = hook.pytest_runtest_makereport(item=item, call=call)
-    if log and (when == "call" or not report.passed):
+    if log:
         hook.pytest_runtest_logreport(report=report)
     return report
 
-def call_runtest_hook(item, when):
+def call_runtest_hook(item, when, **kwds):
     hookname = "pytest_runtest_" + when
     ihook = getattr(item.ihook, hookname)
-    return CallInfo(lambda: ihook(item=item), when=when)
+    return CallInfo(lambda: ihook(item=item, **kwds), when=when)
 
 class CallInfo:
     """ Result/Exception info a function invocation. """
@@ -95,12 +114,16 @@
         #: context of invocation: one of "setup", "call",
         #: "teardown", "memocollect"
         self.when = when
+        self.start = time.time()
         try:
-            self.result = func()
-        except KeyboardInterrupt:
-            raise
-        except:
-            self.excinfo = py.code.ExceptionInfo()
+            try:
+                self.result = func()
+            except KeyboardInterrupt:
+                raise
+            except:
+                self.excinfo = py.code.ExceptionInfo()
+        finally:
+            self.stop = time.time()
 
     def __repr__(self):
         if self.excinfo:
@@ -120,6 +143,10 @@
         return s
 
 class BaseReport(object):
+
+    def __init__(self, **kw):
+        self.__dict__.update(kw)
+
     def toterminal(self, out):
         longrepr = self.longrepr
         if hasattr(self, 'node'):
@@ -139,6 +166,7 @@
 
 def pytest_runtest_makereport(item, call):
     when = call.when
+    duration = call.stop-call.start
     keywords = dict([(x,1) for x in item.keywords])
     excinfo = call.excinfo
     if not call.excinfo:
@@ -160,14 +188,15 @@
             else: # exception in setup or teardown
                 longrepr = item._repr_failure_py(excinfo)
     return TestReport(item.nodeid, item.location,
-        keywords, outcome, longrepr, when)
+                      keywords, outcome, longrepr, when,
+                      duration=duration)
 
 class TestReport(BaseReport):
     """ Basic test report object (also used for setup and teardown calls if
     they fail).
     """
     def __init__(self, nodeid, location,
-            keywords, outcome, longrepr, when):
+            keywords, outcome, longrepr, when, sections=(), duration=0, **extra):
         #: normalized collection node id
         self.nodeid = nodeid
 
@@ -179,16 +208,25 @@
         #: a name -> value dictionary containing all keywords and
         #: markers associated with a test invocation.
         self.keywords = keywords
-        
+
         #: test outcome, always one of "passed", "failed", "skipped".
         self.outcome = outcome
 
         #: None or a failure representation.
         self.longrepr = longrepr
-        
+
         #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
         self.when = when
 
+        #: list of (secname, data) extra information which needs to
+        #: be marshallable
+        self.sections = list(sections)
+
+        #: time it took to run just the test
+        self.duration = duration
+
+        self.__dict__.update(extra)
+
     def __repr__(self):
         return "<TestReport %r when=%r outcome=%r>" % (
             self.nodeid, self.when, self.outcome)
@@ -196,8 +234,10 @@
 class TeardownErrorReport(BaseReport):
     outcome = "failed"
     when = "teardown"
-    def __init__(self, longrepr):
+    def __init__(self, longrepr, **extra):
         self.longrepr = longrepr
+        self.sections = []
+        self.__dict__.update(extra)
 
 def pytest_make_collect_report(collector):
     call = CallInfo(collector._memocollect, "memocollect")
@@ -219,11 +259,13 @@
         getattr(call, 'result', None))
 
 class CollectReport(BaseReport):
-    def __init__(self, nodeid, outcome, longrepr, result):
+    def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
         self.nodeid = nodeid
         self.outcome = outcome
         self.longrepr = longrepr
         self.result = result or []
+        self.sections = list(sections)
+        self.__dict__.update(extra)
 
     @property
     def location(self):
@@ -277,20 +319,22 @@
         self._teardown_with_finalization(None)
         assert not self._finalizers
 
-    def teardown_exact(self, item):
-        if self.stack and item == self.stack[-1]:
+    def teardown_exact(self, item, nextitem):
+        needed_collectors = nextitem and nextitem.listchain() or []
+        self._teardown_towards(needed_collectors)
+
+    def _teardown_towards(self, needed_collectors):
+        while self.stack:
+            if self.stack == needed_collectors[:len(self.stack)]:
+                break
             self._pop_and_teardown()
-        else:
-            self._callfinalizers(item)
 
     def prepare(self, colitem):
         """ setup objects along the collector chain to the test-method
             and teardown previously setup objects."""
         needed_collectors = colitem.listchain()
-        while self.stack:
-            if self.stack == needed_collectors[:len(self.stack)]:
-                break
-            self._pop_and_teardown()
+        self._teardown_towards(needed_collectors)
+
         # check if the last collection node has raised an error
         for col in self.stack:
             if hasattr(col, '_prepare_exc'):
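
A quick way to exercise the new --durations reporting added above (the value
10 is arbitrary):

    # on the command line:  py.test --durations=10
    # or from Python:
    import pytest
    pytest.main(["--durations=10"])  # prints the 10 slowest setup/call/teardown phases
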
diff --git a/_pytest/skipping.py b/_pytest/skipping.py
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -9,6 +9,21 @@
            action="store_true", dest="runxfail", default=False,
            help="run tests even if they are marked xfail")
 
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "skipif(*conditions): skip the given test function if evaluation "
+        "of all conditions has a True value.  Evaluation happens within the "
+        "module global context. Example: skipif('sys.platform == \"win32\"') "
+        "skips the test if we are on the win32 platform. "
+    )
+    config.addinivalue_line("markers",
+        "xfail(*conditions, reason=None, run=True): mark the the test function "
+        "as an expected failure. Optionally specify a reason and run=False "
+        "if you don't even want to execute the test function. Any positional "
+        "condition strings will be evaluated (like with skipif) and if one is "
+        "False the marker will not be applied."
+    )
+
 def pytest_namespace():
     return dict(xfail=xfail)
 
@@ -117,6 +132,14 @@
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, pytest.Function):
         return
+    # unittest special case, see setting of _unexpectedsuccess
+    if hasattr(item, '_unexpectedsuccess'):
+        rep = __multicall__.execute()
+        if rep.when == "call":
+            # we need to translate into how py.test encodes xpass
+            rep.keywords['xfail'] = "reason: " + item._unexpectedsuccess
+            rep.outcome = "failed"
+        return rep
     if not (call.excinfo and
         call.excinfo.errisinstance(py.test.xfail.Exception)):
         evalxfail = getattr(item, '_evalxfail', None)
@@ -169,21 +192,23 @@
         elif char == "X":
             show_xpassed(terminalreporter, lines)
         elif char in "fF":
-            show_failed(terminalreporter, lines)
+            show_simple(terminalreporter, lines, 'failed', "FAIL %s")
         elif char in "sS":
             show_skipped(terminalreporter, lines)
+        elif char == "E":
+            show_simple(terminalreporter, lines, 'error', "ERROR %s")
     if lines:
         tr._tw.sep("=", "short test summary info")
         for line in lines:
             tr._tw.line(line)
 
-def show_failed(terminalreporter, lines):
+def show_simple(terminalreporter, lines, stat, format):
     tw = terminalreporter._tw
-    failed = terminalreporter.stats.get("failed")
+    failed = terminalreporter.stats.get(stat)
     if failed:
         for rep in failed:
             pos = rep.nodeid
-            lines.append("FAIL %s" %(pos, ))
+            lines.append(format %(pos, ))
 
 def show_xfailed(terminalreporter, lines):
     xfailed = terminalreporter.stats.get("xfailed")
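
For reference, the skipif/xfail markers documented by the addinivalue_line()
calls above are typically written like this (condition strings are evaluated
as described; the test names are made up):

    import sys
    import pytest

    @pytest.mark.skipif('sys.platform == "win32"')
    def test_posix_behaviour():
        pass

    @pytest.mark.xfail(reason="known limitation", run=True)
    def test_not_yet_passing():
        assert 0
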
diff --git a/_pytest/terminal.py b/_pytest/terminal.py
--- a/_pytest/terminal.py
+++ b/_pytest/terminal.py
@@ -15,7 +15,7 @@
     group._addoption('-r',
          action="store", dest="reportchars", default=None, metavar="chars",
          help="show extra test summary info as specified by chars (f)ailed, "
-              "(s)skipped, (x)failed, (X)passed.")
+              "(E)error, (s)skipped, (x)failed, (X)passed.")
     group._addoption('-l', '--showlocals',
          action="store_true", dest="showlocals", default=False,
          help="show locals in tracebacks (disabled by default).")
@@ -43,7 +43,8 @@
             pass
         else:
             stdout = os.fdopen(newfd, stdout.mode, 1)
-            config._toclose = stdout
+            config._cleanup.append(lambda: stdout.close())
+
     reporter = TerminalReporter(config, stdout)
     config.pluginmanager.register(reporter, 'terminalreporter')
     if config.option.debug or config.option.traceconfig:
@@ -52,11 +53,6 @@
             reporter.write_line("[traceconfig] " + msg)
         config.trace.root.setprocessor("pytest:config", mywriter)
 
-def pytest_unconfigure(config):
-    if hasattr(config, '_toclose'):
-        #print "closing", config._toclose, config._toclose.fileno()
-        config._toclose.close()
-
 def getreportopt(config):
     reportopts = ""
     optvalue = config.option.report
@@ -165,9 +161,6 @@
     def pytest_deselected(self, items):
         self.stats.setdefault('deselected', []).extend(items)
 
-    def pytest__teardown_final_logerror(self, report):
-        self.stats.setdefault("error", []).append(report)
-
     def pytest_runtest_logstart(self, nodeid, location):
         # ensure that the path is printed before the
         # 1st test of a module starts running
@@ -259,7 +252,7 @@
         msg = "platform %s -- Python %s" % (sys.platform, verinfo)
         if hasattr(sys, 'pypy_version_info'):
             verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
-            msg += "[pypy-%s]" % verinfo
+            msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
         msg += " -- pytest-%s" % (py.test.__version__)
         if self.verbosity > 0 or self.config.option.debug or \
            getattr(self.config.option, 'pastebin', None):
@@ -289,10 +282,18 @@
         # we take care to leave out Instances aka ()
         # because later versions are going to get rid of them anyway
         if self.config.option.verbose < 0:
-            for item in items:
-                nodeid = item.nodeid
-                nodeid = nodeid.replace("::()::", "::")
-                self._tw.line(nodeid)
+            if self.config.option.verbose < -1:
+                counts = {}
+                for item in items:
+                    name = item.nodeid.split('::', 1)[0]
+                    counts[name] = counts.get(name, 0) + 1
+                for name, count in sorted(counts.items()):
+                    self._tw.line("%s: %d" % (name, count))
+            else:
+                for item in items:
+                    nodeid = item.nodeid
+                    nodeid = nodeid.replace("::()::", "::")
+                    self._tw.line(nodeid)
             return
         stack = []
         indent = ""
@@ -318,12 +319,17 @@
             self.config.hook.pytest_terminal_summary(terminalreporter=self)
         if exitstatus == 2:
             self._report_keyboardinterrupt()
+            del self._keyboardinterrupt_memo
         self.summary_deselected()
         self.summary_stats()
 
     def pytest_keyboard_interrupt(self, excinfo):
         self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
 
+    def pytest_unconfigure(self):
+        if hasattr(self, '_keyboardinterrupt_memo'):
+            self._report_keyboardinterrupt()
+
     def _report_keyboardinterrupt(self):
         excrepr = self._keyboardinterrupt_memo
         msg = excrepr.reprcrash.message
@@ -388,7 +394,7 @@
                 else:
                     msg = self._getfailureheadline(rep)
                     self.write_sep("_", msg)
-                    rep.toterminal(self._tw)
+                    self._outrep_summary(rep)
 
     def summary_errors(self):
         if self.config.option.tbstyle != "no":
@@ -406,7 +412,15 @@
                 elif rep.when == "teardown":
                     msg = "ERROR at teardown of " + msg
                 self.write_sep("_", msg)
-                rep.toterminal(self._tw)
+                self._outrep_summary(rep)
+
+    def _outrep_summary(self, rep):
+        rep.toterminal(self._tw)
+        for secname, content in rep.sections:
+            self._tw.sep("-", secname)
+            if content[-1:] == "\n":
+                content = content[:-1]
+            self._tw.line(content)
 
     def summary_stats(self):
         session_duration = py.std.time.time() - self._sessionstarttime
@@ -417,9 +431,10 @@
                 keys.append(key)
         parts = []
         for key in keys:
-            val = self.stats.get(key, None)
-            if val:
-                parts.append("%d %s" %(len(val), key))
+            if key: # setup/teardown reports have an empty key, ignore them
+                val = self.stats.get(key, None)
+                if val:
+                    parts.append("%d %s" %(len(val), key))
         line = ", ".join(parts)
         # XXX coloring
         msg = "%s in %.2f seconds" %(line, session_duration)
@@ -430,8 +445,15 @@
 
     def summary_deselected(self):
         if 'deselected' in self.stats:
+            l = []
+            k = self.config.option.keyword
+            if k:
+                l.append("-k%s" % k)
+            m = self.config.option.markexpr
+            if m:
+                l.append("-m %r" % m)
             self.write_sep("=", "%d tests deselected by %r" %(
-                len(self.stats['deselected']), self.config.option.keyword), bold=True)
+                len(self.stats['deselected']), " ".join(l)), bold=True)
 
 def repr_pythonversion(v=None):
     if v is None:
diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py
--- a/_pytest/tmpdir.py
+++ b/_pytest/tmpdir.py
@@ -46,7 +46,7 @@
 
     def finish(self):
         self.trace("finish")
-        
+
 def pytest_configure(config):
     mp = monkeypatch()
     t = TempdirHandler(config)
@@ -64,5 +64,5 @@
     name = request._pyfuncitem.name
     name = py.std.re.sub("[\W]", "_", name)
     x = request.config._tmpdirhandler.mktemp(name, numbered=True)
-    return x.realpath()
+    return x
 
diff --git a/_pytest/unittest.py b/_pytest/unittest.py
--- a/_pytest/unittest.py
+++ b/_pytest/unittest.py
@@ -2,6 +2,9 @@
 import pytest, py
 import sys, pdb
 
+# for transferring markers
+from _pytest.python import transfer_markers
+
 def pytest_pycollect_makeitem(collector, name, obj):
     unittest = sys.modules.get('unittest')
     if unittest is None:
@@ -19,7 +22,14 @@
 class UnitTestCase(pytest.Class):
     def collect(self):
         loader = py.std.unittest.TestLoader()
+        module = self.getparent(pytest.Module).obj
+        cls = self.obj
         for name in loader.getTestCaseNames(self.obj):
+            x = getattr(self.obj, name)
+            funcobj = getattr(x, 'im_func', x)
+            transfer_markers(funcobj, cls, module)
+            if hasattr(funcobj, 'todo'):
+                pytest.mark.xfail(reason=str(funcobj.todo))(funcobj)
             yield TestCaseFunction(name, parent=self)
 
     def setup(self):
@@ -37,15 +47,13 @@
 class TestCaseFunction(pytest.Function):
     _excinfo = None
 
-    def __init__(self, name, parent):
-        super(TestCaseFunction, self).__init__(name, parent)
-        if hasattr(self._obj, 'todo'):
-            getattr(self._obj, 'im_func', self._obj).xfail = \
-                pytest.mark.xfail(reason=str(self._obj.todo))
-
     def setup(self):
         self._testcase = self.parent.obj(self.name)
         self._obj = getattr(self._testcase, self.name)
+        if hasattr(self._testcase, 'skip'):
+            pytest.skip(self._testcase.skip)
+        if hasattr(self._obj, 'skip'):
+            pytest.skip(self._obj.skip)
         if hasattr(self._testcase, 'setup_method'):
             self._testcase.setup_method(self._obj)
 
@@ -83,28 +91,37 @@
         self._addexcinfo(rawexcinfo)
     def addFailure(self, testcase, rawexcinfo):
         self._addexcinfo(rawexcinfo)
+
     def addSkip(self, testcase, reason):
         try:
             pytest.skip(reason)
         except pytest.skip.Exception:
             self._addexcinfo(sys.exc_info())
-    def addExpectedFailure(self, testcase, rawexcinfo, reason):
+
+    def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
         try:
             pytest.xfail(str(reason))
         except pytest.xfail.Exception:
             self._addexcinfo(sys.exc_info())
-    def addUnexpectedSuccess(self, testcase, reason):
-        pass
+
+    def addUnexpectedSuccess(self, testcase, reason=""):
+        self._unexpectedsuccess = reason
+
     def addSuccess(self, testcase):
         pass
+
     def stopTest(self, testcase):
         pass
+
     def runtest(self):
         self._testcase(result=self)
 
     def _prunetraceback(self, excinfo):
         pytest.Function._prunetraceback(self, excinfo)
-        excinfo.traceback = excinfo.traceback.filter(lambda x:not x.frame.f_globals.get('__unittest'))
+        traceback = excinfo.traceback.filter(
+            lambda x:not x.frame.f_globals.get('__unittest'))
+        if traceback:
+            excinfo.traceback = traceback
 
 @pytest.mark.tryfirst
 def pytest_runtest_makereport(item, call):
@@ -120,14 +137,19 @@
             ut = sys.modules['twisted.python.failure']
             Failure__init__ = ut.Failure.__init__.im_func
             check_testcase_implements_trial_reporter()
-            def excstore(self, exc_value=None, exc_type=None, exc_tb=None):
+            def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
+                captureVars=None):
                 if exc_value is None:
                     self._rawexcinfo = sys.exc_info()
                 else:
                     if exc_type is None:
                         exc_type = type(exc_value)
                     self._rawexcinfo = (exc_type, exc_value, exc_tb)
-                Failure__init__(self, exc_value, exc_type, exc_tb)
+                try:
+                    Failure__init__(self, exc_value, exc_type, exc_tb,
+                        captureVars=captureVars)
+                except TypeError:
+                    Failure__init__(self, exc_value, exc_type, exc_tb)
             ut.Failure.__init__ = excstore
             try:
                 return __multicall__.execute()
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -93,6 +93,7 @@
     return result
 
 def parse_plain(graph_id, plaincontent, links={}, fixedfont=False):
+    plaincontent = plaincontent.replace('\r\n', '\n')    # fix Windows EOL
     lines = plaincontent.splitlines(True)
     for i in range(len(lines)-2, -1, -1):
         if lines[i].endswith('\\\n'):   # line ending in '\'
diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py
--- a/lib-python/modified-2.7/test/test_set.py
+++ b/lib-python/modified-2.7/test/test_set.py
@@ -1568,7 +1568,7 @@
             for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
                 for g in (G, I, Ig, L, R):
                     expected = meth(data)
-                    actual = meth(G(data))
+                    actual = meth(g(data))
                     if isinstance(expected, bool):
                         self.assertEqual(actual, expected)
                     else:
diff --git a/lib_pypy/numpypy/core/fromnumeric.py b/lib_pypy/numpypy/core/fromnumeric.py
--- a/lib_pypy/numpypy/core/fromnumeric.py
+++ b/lib_pypy/numpypy/core/fromnumeric.py
@@ -411,7 +411,8 @@
             [3, 7]]])
 
     """
-    raise NotImplementedError('Waiting on interp level method')
+    swapaxes = a.swapaxes
+    return swapaxes(axis1, axis2)
 
 
 def transpose(a, axes=None):
diff --git a/py/__init__.py b/py/__init__.py
--- a/py/__init__.py
+++ b/py/__init__.py
@@ -8,7 +8,7 @@
 
 (c) Holger Krekel and others, 2004-2010
 """
-__version__ = '1.4.4.dev1'
+__version__ = '1.4.7'
 
 from py import _apipkg
 
@@ -70,6 +70,11 @@
         'getrawcode'        : '._code.code:getrawcode',
         'patch_builtins'    : '._code.code:patch_builtins',
         'unpatch_builtins'  : '._code.code:unpatch_builtins',
+        '_AssertionError'   : '._code.assertion:AssertionError',
+        '_reinterpret_old'  : '._code.assertion:reinterpret_old',
+        '_reinterpret'      : '._code.assertion:reinterpret',
+        '_reprcompare'      : '._code.assertion:_reprcompare',
+        '_format_explanation' : '._code.assertion:_format_explanation',
     },
 
     # backports and additions of builtins
diff --git a/py/_builtin.py b/py/_builtin.py
--- a/py/_builtin.py
+++ b/py/_builtin.py
@@ -113,9 +113,12 @@
 
     # some backward compatibility helpers
     _basestring = str
-    def _totext(obj, encoding=None):
+    def _totext(obj, encoding=None, errors=None):
         if isinstance(obj, bytes):
-            obj = obj.decode(encoding)
+            if errors is None:
+                obj = obj.decode(encoding)
+            else:
+                obj = obj.decode(encoding, errors)
         elif not isinstance(obj, str):
             obj = str(obj)
         return obj
@@ -142,7 +145,7 @@
             del back
         elif locs is None:
             locs = globs
-        fp = open(fn, "rb")
+        fp = open(fn, "r")
         try:
             source = fp.read()
         finally:
diff --git a/py/_code/_assertionnew.py b/py/_code/_assertionnew.py
new file mode 100644
--- /dev/null
+++ b/py/_code/_assertionnew.py
@@ -0,0 +1,339 @@
+"""
+Find intermediate evaluation results in assert statements through builtin AST.
+This should replace _assertionold.py eventually.
+"""
+
+import sys
+import ast
+
+import py
+from py._code.assertion import _format_explanation, BuiltinAssertionError
+
+
+if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+    # See http://bugs.jython.org/issue1497
+    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+              "List", "Tuple")
+    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+    _expr_nodes = set(getattr(ast, name) for name in _exprs)
+    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+    def _is_ast_expr(node):
+        return node.__class__ in _expr_nodes
+    def _is_ast_stmt(node):
+        return node.__class__ in _stmt_nodes
+else:
+    def _is_ast_expr(node):
+        return isinstance(node, ast.expr)
+    def _is_ast_stmt(node):
+        return isinstance(node, ast.stmt)
+
+
+class Failure(Exception):
+    """Error found while interpreting AST."""
+
+    def __init__(self, explanation=""):
+        self.cause = sys.exc_info()
+        self.explanation = explanation
+
+
+def interpret(source, frame, should_fail=False):
+    mod = ast.parse(source)
+    visitor = DebugInterpreter(frame)
+    try:
+        visitor.visit(mod)
+    except Failure:
+        failure = sys.exc_info()[1]
+        return getfailure(failure)
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --no-assert)")
+
+def run(offending_line, frame=None):
+    if frame is None:
+        frame = py.code.Frame(sys._getframe(1))
+    return interpret(offending_line, frame)
+
+def getfailure(failure):
+    explanation = _format_explanation(failure.explanation)
+    value = failure.cause[1]
+    if str(value):
+        lines = explanation.splitlines()
+        if not lines:
+            lines.append("")
+        lines[0] += " << %s" % (value,)
+        explanation = "\n".join(lines)
+    text = "%s: %s" % (failure.cause[0].__name__, explanation)
+    if text.startswith("AssertionError: assert "):
+        text = text[16:]
+    return text
+
+
+operator_map = {
+    ast.BitOr : "|",
+    ast.BitXor : "^",
+    ast.BitAnd : "&",
+    ast.LShift : "<<",
+    ast.RShift : ">>",
+    ast.Add : "+",
+    ast.Sub : "-",
+    ast.Mult : "*",
+    ast.Div : "/",
+    ast.FloorDiv : "//",
+    ast.Mod : "%",
+    ast.Eq : "==",
+    ast.NotEq : "!=",
+    ast.Lt : "<",
+    ast.LtE : "<=",
+    ast.Gt : ">",
+    ast.GtE : ">=",
+    ast.Pow : "**",
+    ast.Is : "is",
+    ast.IsNot : "is not",
+    ast.In : "in",
+    ast.NotIn : "not in"
+}
+
+unary_map = {
+    ast.Not : "not %s",
+    ast.Invert : "~%s",
+    ast.USub : "-%s",
+    ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+    """Interpret AST nodes to gleam useful debugging information. """
+
+    def __init__(self, frame):
+        self.frame = frame
+
+    def generic_visit(self, node):
+        # Fallback when we don't have a special implementation.
+        if _is_ast_expr(node):
+            mod = ast.Expression(node)
+            co = self._compile(mod)
+            try:
+                result = self.frame.eval(co)
+            except Exception:
+                raise Failure()
+            explanation = self.frame.repr(result)
+            return explanation, result
+        elif _is_ast_stmt(node):
+            mod = ast.Module([node])
+            co = self._compile(mod, "exec")
+            try:
+                self.frame.exec_(co)
+            except Exception:
+                raise Failure()
+            return None, None
+        else:
+            raise AssertionError("can't handle %s" %(node,))
+
+    def _compile(self, source, mode="eval"):
+        return compile(source, "<assertion interpretation>", mode)
+
+    def visit_Expr(self, expr):
+        return self.visit(expr.value)
+
+    def visit_Module(self, mod):
+        for stmt in mod.body:
+            self.visit(stmt)
+
+    def visit_Name(self, name):
+        explanation, result = self.generic_visit(name)
+        # See if the name is local.
+        source = "%r in locals() is not globals()" % (name.id,)
+        co = self._compile(source)
+        try:
+            local = self.frame.eval(co)
+        except Exception:
+            # have to assume it isn't
+            local = False
+        if not local:
+            return name.id, result
+        return explanation, result
+
+    def visit_Compare(self, comp):
+        left = comp.left
+        left_explanation, left_result = self.visit(left)
+        for op, next_op in zip(comp.ops, comp.comparators):
+            next_explanation, next_result = self.visit(next_op)
+            op_symbol = operator_map[op.__class__]
+            explanation = "%s %s %s" % (left_explanation, op_symbol,
+                                        next_explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+            co = self._compile(source)
+            try:
+                result = self.frame.eval(co, __exprinfo_left=left_result,
+                                         __exprinfo_right=next_result)
+            except Exception:
+                raise Failure(explanation)
+            try:
+                if not result:
+                    break
+            except KeyboardInterrupt:
+                raise
+            except:
+                break
+            left_explanation, left_result = next_explanation, next_result
+
+        rcomp = py.code._reprcompare
+        if rcomp:
+            res = rcomp(op_symbol, left_result, next_result)
+            if res:
+                explanation = res
+        return explanation, result
+
+    def visit_BoolOp(self, boolop):
+        is_or = isinstance(boolop.op, ast.Or)
+        explanations = []
+        for operand in boolop.values:
+            explanation, result = self.visit(operand)
+            explanations.append(explanation)
+            if result == is_or:
+                break
+        name = is_or and " or " or " and "
+        explanation = "(" + name.join(explanations) + ")"
+        return explanation, result
+
+    def visit_UnaryOp(self, unary):
+        pattern = unary_map[unary.op.__class__]
+        operand_explanation, operand_result = self.visit(unary.operand)
+        explanation = pattern % (operand_explanation,)
+        co = self._compile(pattern % ("__exprinfo_expr",))
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=operand_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_BinOp(self, binop):
+        left_explanation, left_result = self.visit(binop.left)
+        right_explanation, right_result = self.visit(binop.right)
+        symbol = operator_map[binop.op.__class__]
+        explanation = "(%s %s %s)" % (left_explanation, symbol,
+                                      right_explanation)
+        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_left=left_result,
+                                     __exprinfo_right=right_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_Call(self, call):
+        func_explanation, func = self.visit(call.func)
+        arg_explanations = []
+        ns = {"__exprinfo_func" : func}
+        arguments = []
+        for arg in call.args:
+            arg_explanation, arg_result = self.visit(arg)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            arguments.append(arg_name)
+            arg_explanations.append(arg_explanation)
+        for keyword in call.keywords:
+            arg_explanation, arg_result = self.visit(keyword.value)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            keyword_source = "%s=%%s" % (keyword.arg)
+            arguments.append(keyword_source % (arg_name,))
+            arg_explanations.append(keyword_source % (arg_explanation,))
+        if call.starargs:
+            arg_explanation, arg_result = self.visit(call.starargs)
+            arg_name = "__exprinfo_star"
+            ns[arg_name] = arg_result
+            arguments.append("*%s" % (arg_name,))
+            arg_explanations.append("*%s" % (arg_explanation,))
+        if call.kwargs:
+            arg_explanation, arg_result = self.visit(call.kwargs)
+            arg_name = "__exprinfo_kwds"
+            ns[arg_name] = arg_result
+            arguments.append("**%s" % (arg_name,))
+            arg_explanations.append("**%s" % (arg_explanation,))
+        args_explained = ", ".join(arg_explanations)
+        explanation = "%s(%s)" % (func_explanation, args_explained)
+        args = ", ".join(arguments)
+        source = "__exprinfo_func(%s)" % (args,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, **ns)
+        except Exception:
+            raise Failure(explanation)
+        pattern = "%s\n{%s = %s\n}"
+        rep = self.frame.repr(result)
+        explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def _is_builtin_name(self, name):
+        pattern = "%r not in globals() and %r not in locals()"
+        source = pattern % (name.id, name.id)
+        co = self._compile(source)
+        try:
+            return self.frame.eval(co)
+        except Exception:
+            return False
+
+    def visit_Attribute(self, attr):
+        if not isinstance(attr.ctx, ast.Load):
+            return self.generic_visit(attr)
+        source_explanation, source_result = self.visit(attr.value)
+        explanation = "%s.%s" % (source_explanation, attr.attr)
+        source = "__exprinfo_expr.%s" % (attr.attr,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            raise Failure(explanation)
+        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+                                              self.frame.repr(result),
+                                              source_explanation, attr.attr)
+        # Check if the attr is from an instance.
+        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+        source = source % (attr.attr,)
+        co = self._compile(source)
+        try:
+            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            from_instance = True
+        if from_instance:
+            rep = self.frame.repr(result)
+            pattern = "%s\n{%s = %s\n}"
+            explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def visit_Assert(self, assrt):
+        test_explanation, test_result = self.visit(assrt.test)
+        if test_explanation.startswith("False\n{False =") and \
+                test_explanation.endswith("\n"):
+            test_explanation = test_explanation[15:-2]
+        explanation = "assert %s" % (test_explanation,)
+        if not test_result:
+            try:
+                raise BuiltinAssertionError
+            except Exception:
+                raise Failure(explanation)
+        return explanation, test_result
+
+    def visit_Assign(self, assign):
+        value_explanation, value_result = self.visit(assign.value)
+        explanation = "... = %s" % (value_explanation,)
+        name = ast.Name("__exprinfo_expr", ast.Load(),
+                        lineno=assign.value.lineno,
+                        col_offset=assign.value.col_offset)
+        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+                                col_offset=assign.col_offset)
+        mod = ast.Module([new_assign])
+        co = self._compile(mod, "exec")
+        try:
+            self.frame.exec_(co, __exprinfo_expr=value_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, value_result
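
A rough usage sketch of the new _assertionnew module above (the helper
function demo and its locals are made up for illustration; everything else
follows the code in this file):

    from py._code import _assertionnew

    def demo():
        x, y = 1, 2
        # run() re-evaluates the failing assert in the caller's frame and
        # returns the reconstructed explanation text
        return _assertionnew.run("assert x == y")

    print(demo())   # -> assert 1 == 2
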
diff --git a/py/_code/_assertionold.py b/py/_code/_assertionold.py
new file mode 100644
--- /dev/null
+++ b/py/_code/_assertionold.py
@@ -0,0 +1,555 @@
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from py._code.assertion import BuiltinAssertionError, _format_explanation
+
+passthroughex = py.builtin._sysex
+
+class Failure:
+    def __init__(self, node):
+        self.exc, self.value, self.tb = sys.exc_info()
+        self.node = node
+
+class View(object):
+    """View base class.
+
+    If C is a subclass of View, then C(x) creates a proxy object around
+    the object x.  The actual class of the proxy is not C in general,
+    but a *subclass* of C determined by the rules below.  To avoid confusion
+    we call the class of the proxy the view class (a subclass of C, so of
+    View) and the class of x the object class.
+
+    Attributes and methods not found in the proxy are automatically read on x.
+    Other operations like setting attributes are performed on the proxy, as
+    determined by its view class.  The object x is available from the proxy
+    as its __obj__ attribute.
+
+    The view class selection is determined by the __view__ tuples and the
+    optional __viewkey__ method.  By default, the selected view class is the
+    most specific subclass of C whose __view__ mentions the class of x.
+    If no such subclass is found, the search proceeds with the parent
+    object classes.  For example, C(True) will first look for a subclass
+    of C with __view__ = (..., bool, ...) and only if it doesn't find any
+    look for one with __view__ = (..., int, ...), and then ..., object,...
+    If everything fails the class C itself is considered to be the default.
+
+    Alternatively, the view class selection can be driven by another aspect
+    of the object x, instead of the class of x, by overriding __viewkey__.
+    See last example at the end of this module.
+    """
+
+    _viewcache = {}
+    __view__ = ()
+
+    def __new__(rootclass, obj, *args, **kwds):
+        self = object.__new__(rootclass)
+        self.__obj__ = obj
+        self.__rootclass__ = rootclass
+        key = self.__viewkey__()
+        try:
+            self.__class__ = self._viewcache[key]
+        except KeyError:
+            self.__class__ = self._selectsubclass(key)
+        return self
+
+    def __getattr__(self, attr):
+        # attributes not found in the normal hierarchy rooted on View
+        # are looked up in the object's real class
+        return getattr(self.__obj__, attr)
+
+    def __viewkey__(self):
+        return self.__obj__.__class__
+
+    def __matchkey__(self, key, subclasses):
+        if inspect.isclass(key):
+            keys = inspect.getmro(key)
+        else:
+            keys = [key]
+        for key in keys:
+            result = [C for C in subclasses if key in C.__view__]
+            if result:
+                return result
+        return []
+
+    def _selectsubclass(self, key):
+        subclasses = list(enumsubclasses(self.__rootclass__))
+        for C in subclasses:
+            if not isinstance(C.__view__, tuple):
+                C.__view__ = (C.__view__,)
+        choices = self.__matchkey__(key, subclasses)
+        if not choices:
+            return self.__rootclass__
+        elif len(choices) == 1:
+            return choices[0]
+        else:
+            # combine the multiple choices
+            return type('?', tuple(choices), {})
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
+def enumsubclasses(cls):
+    for subcls in cls.__subclasses__():
+        for subsubclass in enumsubclasses(subcls):
+            yield subsubclass
+    yield cls
+
+
+class Interpretable(View):
+    """A parse tree node with a few extra methods."""
+    explanation = None
+
+    def is_builtin(self, frame):
+        return False
+
+    def eval(self, frame):
+        # fall-back for unknown expression nodes
+        try:
+            expr = ast.Expression(self.__obj__)
+            expr.filename = '<eval>'
+            self.__obj__.filename = '<eval>'
+            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
+            result = frame.eval(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.result = result
+        self.explanation = self.explanation or frame.repr(self.result)
+
+    def run(self, frame):
+        # fall-back for unknown statement nodes
+        try:
+            expr = ast.Module(None, ast.Stmt([self.__obj__]))
+            expr.filename = '<run>'
+            co = pycodegen.ModuleCodeGenerator(expr).getCode()
+            frame.exec_(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+    def nice_explanation(self):
+        return _format_explanation(self.explanation)
+
+
+class Name(Interpretable):
+    __view__ = ast.Name
+
+    def is_local(self, frame):
+        source = '%r in locals() is not globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_global(self, frame):
+        source = '%r in globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_builtin(self, frame):
+        source = '%r not in locals() and %r not in globals()' % (
+            self.name, self.name)
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        super(Name, self).eval(frame)
+        if not self.is_local(frame):
+            self.explanation = self.name
+
+class Compare(Interpretable):
+    __view__ = ast.Compare
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        for operation, expr2 in self.ops:
+            if hasattr(self, 'result'):
+                # shortcutting in chained expressions
+                if not frame.is_true(self.result):
+                    break
+            expr2 = Interpretable(expr2)
+            expr2.eval(frame)
+            self.explanation = "%s %s %s" % (
+                expr.explanation, operation, expr2.explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % operation
+            try:
+                self.result = frame.eval(source,
+                                         __exprinfo_left=expr.result,
+                                         __exprinfo_right=expr2.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+            expr = expr2
+
+class And(Interpretable):
+    __view__ = ast.And
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if not frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' and '.join(explanations) + ')'
+
+class Or(Interpretable):
+    __view__ = ast.Or
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' or '.join(explanations) + ')'
+
+
+# == Unary operations ==
+keepalive = []
+for astclass, astpattern in {
+    ast.Not    : 'not __exprinfo_expr',
+    ast.Invert : '(~__exprinfo_expr)',
+    }.items():
+
+    class UnaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            expr = Interpretable(self.expr)
+            expr.eval(frame)
+            self.explanation = astpattern.replace('__exprinfo_expr',
+                                                  expr.explanation)
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_expr=expr.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(UnaryArith)
+
+# == Binary operations ==
+for astclass, astpattern in {
+    ast.Add    : '(__exprinfo_left + __exprinfo_right)',
+    ast.Sub    : '(__exprinfo_left - __exprinfo_right)',
+    ast.Mul    : '(__exprinfo_left * __exprinfo_right)',
+    ast.Div    : '(__exprinfo_left / __exprinfo_right)',
+    ast.Mod    : '(__exprinfo_left % __exprinfo_right)',
+    ast.Power  : '(__exprinfo_left ** __exprinfo_right)',
+    }.items():
+
+    class BinaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            left = Interpretable(self.left)
+            left.eval(frame)
+            right = Interpretable(self.right)
+            right.eval(frame)
+            self.explanation = (astpattern
+                                .replace('__exprinfo_left',  left .explanation)
+                                .replace('__exprinfo_right', right.explanation))
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_left=left.result,
+                                         __exprinfo_right=right.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(BinaryArith)
+
+
+class CallFunc(Interpretable):
+    __view__ = ast.CallFunc
+
+    def is_bool(self, frame):
+        source = 'isinstance(__exprinfo_value, bool)'
+        try:
+            return frame.is_true(frame.eval(source,
+                                            __exprinfo_value=self.result))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        node = Interpretable(self.node)
+        node.eval(frame)
+        explanations = []
+        vars = {'__exprinfo_fn': node.result}
+        source = '__exprinfo_fn('
+        for a in self.args:
+            if isinstance(a, ast.Keyword):
+                keyword = a.name
+                a = a.expr
+            else:
+                keyword = None
+            a = Interpretable(a)
+            a.eval(frame)
+            argname = '__exprinfo_%d' % len(vars)
+            vars[argname] = a.result
+            if keyword is None:
+                source += argname + ','
+                explanations.append(a.explanation)
+            else:
+                source += '%s=%s,' % (keyword, argname)
+                explanations.append('%s=%s' % (keyword, a.explanation))
+        if self.star_args:
+            star_args = Interpretable(self.star_args)
+            star_args.eval(frame)
+            argname = '__exprinfo_star'
+            vars[argname] = star_args.result
+            source += '*' + argname + ','
+            explanations.append('*' + star_args.explanation)
+        if self.dstar_args:
+            dstar_args = Interpretable(self.dstar_args)
+            dstar_args.eval(frame)
+            argname = '__exprinfo_kwds'
+            vars[argname] = dstar_args.result
+            source += '**' + argname + ','
+            explanations.append('**' + dstar_args.explanation)
+        self.explanation = "%s(%s)" % (
+            node.explanation, ', '.join(explanations))
+        if source.endswith(','):
+            source = source[:-1]
+        source += ')'
+        try:
+            self.result = frame.eval(source, **vars)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        if not node.is_builtin(frame) or not self.is_bool(frame):
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+class Getattr(Interpretable):
+    __view__ = ast.Getattr
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        source = '__exprinfo_expr.%s' % self.attrname
+        try:
+            self.result = frame.eval(source, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
+        # if the attribute comes from the instance, its value is interesting
+        source = ('hasattr(__exprinfo_expr, "__dict__") and '
+                  '%r in __exprinfo_expr.__dict__' % self.attrname)
+        try:
+            from_instance = frame.is_true(
+                frame.eval(source, __exprinfo_expr=expr.result))
+        except passthroughex:
+            raise
+        except:
+            from_instance = True
+        if from_instance:
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
+class Assert(Interpretable):
+    __view__ = ast.Assert
+
+    def run(self, frame):
+        test = Interpretable(self.test)
+        test.eval(frame)
+        # simplify 'assert False where False = ...'
+        if (test.explanation.startswith('False\n{False = ') and
+            test.explanation.endswith('\n}')):
+            test.explanation = test.explanation[15:-2]
+        # print the result as  'assert <explanation>'
+        self.result = test.result
+        self.explanation = 'assert ' + test.explanation
+        if not frame.is_true(test.result):
+            try:
+                raise BuiltinAssertionError
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+class Assign(Interpretable):
+    __view__ = ast.Assign
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = '... = ' + expr.explanation
+        # fall-back-run the rest of the assignment
+        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
+        mod = ast.Module(None, ast.Stmt([ass]))
+        mod.filename = '<run>'
+        co = pycodegen.ModuleCodeGenerator(mod).getCode()
+        try:
+            frame.exec_(co, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+class Discard(Interpretable):
+    __view__ = ast.Discard
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = expr.explanation
+
+class Stmt(Interpretable):
+    __view__ = ast.Stmt
+
+    def run(self, frame):
+        for stmt in self.nodes:
+            stmt = Interpretable(stmt)
+            stmt.run(frame)
+
+
+def report_failure(e):
+    explanation = e.node.nice_explanation()
+    if explanation:
+        explanation = ", in: " + explanation
+    else:
+        explanation = ""
+    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
+
+def check(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    expr = parse(s, 'eval')
+    assert isinstance(expr, ast.Expression)
+    node = Interpretable(expr.node)
+    try:
+        node.eval(frame)
+    except passthroughex:
+        raise
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+    else:
+        if not frame.is_true(node.result):
+            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
+def interpret(source, frame, should_fail=False):
+    module = Interpretable(parse(source, 'exec').node)
+    #print "got module", module
+    if isinstance(frame, py.std.types.FrameType):
+        frame = py.code.Frame(frame)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        return getfailure(e)
+    except passthroughex:
+        raise
+    except:
+        import traceback
+        traceback.print_exc()
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --nomagic)")
+    else:
+        return None
+
+def getmsg(excinfo):
+    if isinstance(excinfo, tuple):
+        excinfo = py.code.ExceptionInfo(excinfo)
+    #frame, line = gettbline(tb)
+    #frame = py.code.Frame(frame)
+    #return interpret(line, frame)
+
+    tb = excinfo.traceback[-1]
+    source = str(tb.statement).strip()
+    x = interpret(source, tb.frame, should_fail=True)
+    if not isinstance(x, str):
+        raise TypeError("interpret returned non-string %r" % (x,))
+    return x
+
+def getfailure(e):
+    explanation = e.node.nice_explanation()
+    if str(e.value):
+        lines = explanation.split('\n')
+        lines[0] += "  << %s" % (e.value,)
+        explanation = '\n'.join(lines)
+    text = "%s: %s" % (e.exc.__name__, explanation)
+    if text.startswith('AssertionError: assert '):
+        text = text[16:]
+    return text
+
+def run(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    module = Interpretable(parse(s, 'exec').node)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+
+
+if __name__ == '__main__':
+    # example:
+    def f():
+        return 5
+    def g():
+        return 3
+    def h(x):
+        return 'never'
+    check("f() * g() == 5")
+    check("not f()")
+    check("not (f() and g() or 0)")
+    check("f() == g()")
+    i = 4
+    check("i == f()")
+    check("len(f()) == 0")
+    check("isinstance(2+3+4, float)")
+
+    run("x = i")
+    check("x == 5")
+
+    run("assert not f(), 'oops'")
+    run("a, b, c = 1, 2")
+    run("a, b, c = f()")
+
+    check("max([f(),g()]) == 4")
+    check("'hello'[g()] == 'h'")
+    run("'guk%d' % h(f())")
diff --git a/py/_code/assertion.py b/py/_code/assertion.py
new file mode 100644
--- /dev/null
+++ b/py/_code/assertion.py
@@ -0,0 +1,94 @@
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+_reprcompare = None # if set, will be called by assert reinterp for comparison ops
+
+def _format_explanation(explanation):
+    """This formats an explanation
+
+    Normally all embedded newlines are escaped; however, there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended to
+    cover nested explanations; see the function and attribute explanations
+    for examples (.visit_Call(), visit_Attribute()).  The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    raw_lines = (explanation or '').split('\n')
+    # escape newlines not followed by {, } and ~
+    lines = [raw_lines[0]]
+    for l in raw_lines[1:]:
+        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+            lines.append(l)
+        else:
+            lines[-1] += '\\n' + l
+
+    result = lines[:1]
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith('{'):
+            if stackcnt[-1]:
+                s = 'and   '
+            else:
+                s = 'where '
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
+        elif line.startswith('}'):
+            assert line.startswith('}')
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line.startswith('~')
+            result.append('  '*len(stack) + line[1:])
+    assert len(stack) == 1
+    return '\n'.join(result)
+
+
+class AssertionError(BuiltinAssertionError):
+    def __init__(self, *args):
+        BuiltinAssertionError.__init__(self, *args)
+        if args:
+            try:
+                self.msg = str(args[0])
+            except py.builtin._sysex:
+                raise
+            except:
+                self.msg = "<[broken __repr__] %s at %0xd>" %(
+                    args[0].__class__, id(args[0]))
+        else:
+            f = py.code.Frame(sys._getframe(1))
+            try:
+                source = f.code.fullsource
+                if source is not None:
+                    try:
+                        source = source.getstatement(f.lineno, assertion=True)
+                    except IndexError:
+                        source = None
+                    else:
+                        source = str(source.deindent()).strip()
+            except py.error.ENOENT:
+                source = None
+                # this can also occur during reinterpretation, when the
+                # co_filename is set to "<run>".
+            if source:
+                self.msg = reinterpret(source, f, should_fail=True)
+            else:
+                self.msg = "<could not determine information>"
+            if not self.args:
+                self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+    AssertionError.__module__ = "builtins"
+    reinterpret_old = "old reinterpretation not available for py3"
+else:
+    from py._code._assertionold import interpret as reinterpret_old
+if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
+    from py._code._assertionnew import interpret as reinterpret
+else:
+    reinterpret = reinterpret_old
+
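
For illustration, a minimal sketch of the explanation format handled by
_format_explanation above (the input string is made up, in the shape
produced by e.g. visit_Call in _assertionnew):

    from py._code.assertion import _format_explanation

    # "\n{" opens a nested explanation and "\n}" closes it; the nested part
    # is rendered on an indented "where" line
    explanation = "False\n{False = f()\n}"
    print(_format_explanation(explanation))
    # False
    #  +  where False = f()
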
diff --git a/py/_code/code.py b/py/_code/code.py
--- a/py/_code/code.py
+++ b/py/_code/code.py
@@ -145,6 +145,17 @@
         return self.frame.f_locals
     locals = property(getlocals, None, None, "locals of underlaying frame")
 
+    def reinterpret(self):
+        """Reinterpret the failing statement and returns a detailed information
+           about what operations are performed."""
+        if self.exprinfo is None:
+            source = str(self.statement).strip()
+            x = py.code._reinterpret(source, self.frame, should_fail=True)
+            if not isinstance(x, str):
+                raise TypeError("interpret returned non-string %r" % (x,))
+            self.exprinfo = x
+        return self.exprinfo
+
     def getfirstlinesource(self):
         # on Jython this firstlineno can be -1 apparently
         return max(self.frame.code.firstlineno, 0)
@@ -158,13 +169,12 @@
         end = self.lineno
         try:
             _, end = source.getstatementrange(end)
-        except IndexError:
+        except (IndexError, ValueError):
             end = self.lineno + 1
         # heuristic to stop displaying source on e.g.
         #   if something:  # assume this causes a NameError
         #      # _this_ lines and the one
                #        below we don't want from entry.getsource()
-        end = min(end, len(source))
         for i in range(self.lineno, end):
             if source[i].rstrip().endswith(':'):
                 end = i + 1
@@ -273,7 +283,11 @@
         """
         cache = {}
         for i, entry in enumerate(self):
-            key = entry.frame.code.path, entry.lineno
+            # id for the code.raw is needed to work around
+            # the strange metaprogramming in the decorator lib from pypi
+            # which generates code objects that have hash/value equality
+            #XXX needs a test
+            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
             #print "checking for recursion at", key
             l = cache.setdefault(key, [])
             if l:
@@ -308,7 +322,7 @@
                     self._striptext = 'AssertionError: '
         self._excinfo = tup
         self.type, self.value, tb = self._excinfo
-        self.typename = getattr(self.type, "__name__", "???")
+        self.typename = self.type.__name__
         self.traceback = py.code.Traceback(tb)
 
     def __repr__(self):
@@ -347,14 +361,16 @@
             showlocals: show locals per traceback entry
             style: long|short|no|native traceback style
             tbfilter: hide entries (where __tracebackhide__ is true)
+
+            in case of style==native, tbfilter and showlocals are ignored.
         """
         if style == 'native':
-            import traceback
-            return ''.join(traceback.format_exception(
-                self.type,
-                self.value,
-                self.traceback[0]._rawentry,
-                ))
+            return ReprExceptionInfo(ReprTracebackNative(
+                py.std.traceback.format_exception(
+                    self.type,
+                    self.value,
+                    self.traceback[0]._rawentry,
+                )), self._getreprcrash())
 
         fmt = FormattedExcinfo(showlocals=showlocals, style=style,
             abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
@@ -452,7 +468,7 @@
     def repr_locals(self, locals):
         if self.showlocals:
             lines = []
-            keys = list(locals)
+            keys = [loc for loc in locals if loc[0] != "@"]
             keys.sort()
             for name in keys:
                 value = locals[name]
@@ -506,7 +522,10 @@
 
     def _makepath(self, path):
         if not self.abspath:
-            np = py.path.local().bestrelpath(path)
+            try:
+                np = py.path.local().bestrelpath(path)
+            except OSError:
+                return path
             if len(np) < len(str(path)):
                 path = np
         return path
@@ -595,6 +614,19 @@
         if self.extraline:
             tw.line(self.extraline)
 
+class ReprTracebackNative(ReprTraceback):
+    def __init__(self, tblines):
+        self.style = "native"
+        self.reprentries = [ReprEntryNative(tblines)]
+        self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+    def __init__(self, tblines):
+        self.lines = tblines
+
+    def toterminal(self, tw):
+        tw.write("".join(self.lines))
+
 class ReprEntry(TerminalRepr):
     localssep = "_ "
 
@@ -680,19 +712,26 @@
 
 oldbuiltins = {}
 
-def patch_builtins(compile=True):
-    """ put compile builtins to Python's builtins. """
+def patch_builtins(assertion=True, compile=True):
+    """ put compile and AssertionError builtins to Python's builtins. """
+    if assertion:
+        from py._code import assertion
+        l = oldbuiltins.setdefault('AssertionError', [])
+        l.append(py.builtin.builtins.AssertionError)
+        py.builtin.builtins.AssertionError = assertion.AssertionError
     if compile:
         l = oldbuiltins.setdefault('compile', [])
         l.append(py.builtin.builtins.compile)
         py.builtin.builtins.compile = py.code.compile
 
-def unpatch_builtins(compile=True):
+def unpatch_builtins(assertion=True, compile=True):
     """ remove compile and AssertionError builtins from Python builtins. """
+    if assertion:
+        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
     if compile:
         py.builtin.builtins.compile = oldbuiltins['compile'].pop()
 
-def getrawcode(obj):
+def getrawcode(obj, trycall=True):
     """ return code object for given function. """
     try:
         return obj.__code__
@@ -701,5 +740,10 @@
         obj = getattr(obj, 'func_code', obj)
         obj = getattr(obj, 'f_code', obj)
         obj = getattr(obj, '__code__', obj)
+        if trycall and not hasattr(obj, 'co_firstlineno'):
+            if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+                x = getrawcode(obj.__call__, trycall=False)
+                if hasattr(x, 'co_firstlineno'):
+                    return x
         return obj
 
diff --git a/py/_code/source.py b/py/_code/source.py
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -108,6 +108,7 @@
     def getstatementrange(self, lineno, assertion=False):
         """ return (start, end) tuple which spans the minimal
             statement region which containing the given lineno.
+            raise an IndexError if no such statement range can be found.
         """
         # XXX there must be a better than these heuristic ways ...
         # XXX there may even be better heuristics :-)
@@ -116,6 +117,7 @@
 
         # 1. find the start of the statement
         from codeop import compile_command
+        end = None
         for start in range(lineno, -1, -1):
             if assertion:
                 line = self.lines[start]
@@ -139,7 +141,9 @@
                 trysource = self[start:end]
                 if trysource.isparseable():
                     return start, end
-        return start, len(self)
+        if end is None:
+            raise IndexError("no valid source range around line %d " % (lineno,))
+        return start, end
 
     def getblockend(self, lineno):
         # XXX
@@ -257,23 +261,29 @@
 
 
 def getfslineno(obj):
+    """ Return source location (path, lineno) for the given object.
+    If the source cannot be determined return ("", -1)
+    """
     try:
         code = py.code.Code(obj)
     except TypeError:
-        # fallback to
-        fn = (py.std.inspect.getsourcefile(obj) or
-              py.std.inspect.getfile(obj))
+        try:
+            fn = (py.std.inspect.getsourcefile(obj) or
+                  py.std.inspect.getfile(obj))
+        except TypeError:
+            return "", -1
+
         fspath = fn and py.path.local(fn) or None
+        lineno = -1
         if fspath:
             try:
                 _, lineno = findsource(obj)
             except IOError:
-                lineno = None
-        else:
-            lineno = None
+                pass
     else:
         fspath = code.path
         lineno = code.firstlineno
+    assert isinstance(lineno, int)
     return fspath, lineno
 
 #
@@ -286,7 +296,7 @@
     except py.builtin._sysex:
         raise
     except:
-        return None, None
+        return None, -1
     source = Source()
     source.lines = [line.rstrip() for line in sourcelines]
     return source, lineno
diff --git a/py/_error.py b/py/_error.py
--- a/py/_error.py
+++ b/py/_error.py
@@ -23,6 +23,7 @@
     2: errno.ENOENT,
     3: errno.ENOENT,
     17: errno.EEXIST,
+    13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
     22: errno.ENOTDIR,
     267: errno.ENOTDIR,
     5: errno.EACCES,  # anything better?
diff --git a/py/_iniconfig.py b/py/_iniconfig.py
--- a/py/_iniconfig.py
+++ b/py/_iniconfig.py
@@ -103,6 +103,7 @@
     def _parseline(self, line, lineno):
         # comments
         line = line.split('#')[0].rstrip()
+        line = line.split(';')[0].rstrip()
         # blank lines
         if not line:
             return None, None
diff --git a/py/_io/capture.py b/py/_io/capture.py
--- a/py/_io/capture.py
+++ b/py/_io/capture.py
@@ -12,7 +12,7 @@
     class TextIO(StringIO):
         def write(self, data):
             if not isinstance(data, unicode):
-                data = unicode(data, getattr(self, '_encoding', 'UTF-8'))
+                data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
             StringIO.write(self, data)
 else:
     TextIO = StringIO
@@ -258,6 +258,9 @@
                 f = getattr(self, name).tmpfile
                 f.seek(0)
                 res = f.read()
+                enc = getattr(f, 'encoding', None)
+                if enc:
+                    res = py.builtin._totext(res, enc, 'replace')
                 f.truncate(0)
                 f.seek(0)
             l.append(res)
diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py
--- a/py/_io/terminalwriter.py
+++ b/py/_io/terminalwriter.py
@@ -105,6 +105,8 @@
                      Blue=44, Purple=45, Cyan=46, White=47,
                      bold=1, light=2, blink=5, invert=7)
 
+    _newline = None   # the last line printed
+
     # XXX deprecate stringio argument
     def __init__(self, file=None, stringio=False, encoding=None):
         if file is None:
@@ -112,11 +114,9 @@
                 self.stringio = file = py.io.TextIO()
             else:
                 file = py.std.sys.stdout
-                if hasattr(file, 'encoding'):
-                    encoding = file.encoding
         elif hasattr(file, '__call__'):
             file = WriteFile(file, encoding=encoding)
-        self.encoding = encoding
+        self.encoding = encoding or getattr(file, 'encoding', "utf-8")
         self._file = file
         self.fullwidth = get_terminal_width()
         self.hasmarkup = should_do_markup(file)
@@ -182,8 +182,31 @@
         return s
 
     def line(self, s='', **kw):
+        if self._newline == False:
+            self.write("\n")
         self.write(s, **kw)
         self.write('\n')
+        self._newline = True
+
+    def reline(self, line, **opts):
+        if not self.hasmarkup:
+            raise ValueError("cannot use rewrite-line without terminal")
+        if not self._newline:
+            self.write("\r")
+        self.write(line, **opts)
+        # see if we need to fill up some spaces at the end
+        # xxx have a more exact lastlinelen working from self.write?
+        lenline = len(line)
+        try:
+            lastlen = self._lastlinelen
+        except AttributeError:
+            pass
+        else:
+            if lenline < lastlen:
+                self.write(" " * (lastlen - lenline + 1))
+        self._lastlinelen = lenline
+        self._newline = False
+
 
 class Win32ConsoleWriter(TerminalWriter):
     def write(self, s, **kw):
@@ -280,10 +303,10 @@
     SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
     SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
     SetConsoleTextAttribute.restype = wintypes.BOOL
-        
+
     _GetConsoleScreenBufferInfo = \
         ctypes.windll.kernel32.GetConsoleScreenBufferInfo
-    _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE, 
+    _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
                                 ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
     _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
     def GetConsoleInfo(handle):
diff --git a/py/_path/common.py b/py/_path/common.py
--- a/py/_path/common.py
+++ b/py/_path/common.py
@@ -64,7 +64,10 @@
                 else:
                     if bool(value) ^ bool(meth()) ^ invert:
                         return False
-            except (py.error.ENOENT, py.error.ENOTDIR):
+            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
+                # EBUSY is not entirely correct,
+                # but it's kind of necessary since ENOMEDIUM
+                # is not accessible in Python
                 for name in self._depend_on_existence:
                     if name in kw:
                         if kw.get(name):
@@ -368,6 +371,5 @@
         else:
             name = str(path) # path.strpath # XXX svn?
             pattern = '*' + path.sep + pattern
-        from fnmatch import fnmatch
-        return fnmatch(name, pattern)
+        return py.std.fnmatch.fnmatch(name, pattern)
 
diff --git a/py/_path/local.py b/py/_path/local.py
--- a/py/_path/local.py
+++ b/py/_path/local.py
@@ -157,14 +157,16 @@
         return str(self) < str(other)
 
     def samefile(self, other):
-        """ return True if 'other' references the same file as 'self'. """
-        if not iswin32:
-            return py.error.checked_call(
-                    os.path.samefile, str(self), str(other))
+        """ return True if 'other' references the same file as 'self'.
+        """
+        if not isinstance(other, py.path.local):
+            other = os.path.abspath(str(other))
         if self == other:
             return True
-        other = os.path.abspath(str(other))
-        return self == other
+        if iswin32:
+            return False # there is no samefile
+        return py.error.checked_call(
+                os.path.samefile, str(self), str(other))
 
     def remove(self, rec=1, ignore_errors=False):
         """ remove a file or directory (or a directory tree if rec=1).
@@ -539,7 +541,11 @@
                 if self.basename != "__init__.py":
                     modfile = modfile[:-12]
 
-            if not self.samefile(modfile):
+            try:
+                issame = self.samefile(modfile)
+            except py.error.ENOENT:
+                issame = False
+            if not issame:
                 raise self.ImportMismatchError(modname, modfile, self)
             return mod
         else:
diff --git a/py/_path/svnurl.py b/py/_path/svnurl.py
--- a/py/_path/svnurl.py
+++ b/py/_path/svnurl.py
@@ -233,6 +233,8 @@
                 e = sys.exc_info()[1]
                 if e.err.find('non-existent in that revision') != -1:
                     raise py.error.ENOENT(self, e.err)
+                elif e.err.find("E200009:") != -1:
+                    raise py.error.ENOENT(self, e.err)
                 elif e.err.find('File not found') != -1:
                     raise py.error.ENOENT(self, e.err)
                 elif e.err.find('not part of a repository')!=-1:
diff --git a/py/_path/svnwc.py b/py/_path/svnwc.py
--- a/py/_path/svnwc.py
+++ b/py/_path/svnwc.py
@@ -482,10 +482,13 @@
         except py.process.cmdexec.Error:
             e = sys.exc_info()[1]
             strerr = e.err.lower()
-            if strerr.find('file not found') != -1:
+            if strerr.find('not found') != -1:
+                raise py.error.ENOENT(self)
+            elif strerr.find("E200009:") != -1:
                 raise py.error.ENOENT(self)
             if (strerr.find('file exists') != -1 or
                 strerr.find('file already exists') != -1 or
+                strerr.find('w150002:') != -1 or
                 strerr.find("can't create directory") != -1):
                 raise py.error.EEXIST(self)
             raise
@@ -593,7 +596,7 @@
         out = self._authsvn('lock').strip()
         if not out:
             # warning or error, raise exception
-            raise Exception(out[4:])
+            raise ValueError("unknown error in svn lock command")
 
     def unlock(self):
         """ unset a previously set lock """
@@ -1066,6 +1069,8 @@
                 modrev = '?'
                 author = '?'
                 date = ''
+            elif itemstatus == "replaced":
+                pass
             else:
                 #print entryel.toxml()
                 commitel = entryel.getElementsByTagName('commit')[0]
@@ -1148,7 +1153,11 @@
             raise  ValueError("Not a versioned resource")
             #raise ValueError, "Not a versioned resource %r" % path
         self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
-        self.rev = int(d['revision'])
+        try:
+            self.rev = int(d['revision'])
+        except KeyError:
+            self.rev = None
+
         self.path = py.path.local(d['path'])
         self.size = self.path.size()
         if 'lastchangedrev' in d:
diff --git a/py/_xmlgen.py b/py/_xmlgen.py
--- a/py/_xmlgen.py
+++ b/py/_xmlgen.py
@@ -52,7 +52,7 @@
     def unicode(self, indent=2):
         l = []
         SimpleUnicodeVisitor(l.append, indent).visit(self)
-        return "".join(l)
+        return u("").join(l)
 
     def __repr__(self):
         name = self.__class__.__name__
@@ -122,11 +122,13 @@
                 if visitmethod is not None:
                     break
             else:
-                visitmethod = self.object
+                visitmethod = self.__object
             self.cache[cls] = visitmethod
         visitmethod(node)
 
-    def object(self, obj):
+    # the default fallback handler is marked private
+    # to avoid clashes with the tag name object
+    def __object(self, obj):
         #self.write(obj)
         self.write(escape(unicode(obj)))
 
@@ -136,7 +138,8 @@
     def list(self, obj):
         assert id(obj) not in self.visited
         self.visited[id(obj)] = 1
-        map(self.visit, obj)
+        for elem in obj:
+            self.visit(elem)
 
     def Tag(self, tag):
         assert id(tag) not in self.visited
@@ -181,7 +184,11 @@
             value = getattr(attrs, name)
             if name.endswith('_'):
                 name = name[:-1]
-            return ' %s="%s"' % (name, escape(unicode(value)))
+            if isinstance(value, raw):
+                insert = value.uniobj
+            else:
+                insert = escape(unicode(value))
+            return ' %s="%s"' % (name, insert)
 
     def getstyle(self, tag):
         """ return attribute list suitable for styling. """
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -483,7 +483,7 @@
                 return a_str.strip(' ')
             elif n == 1:
                 return a_str.rstrip(' ')
-            else: 
+            else:
                 return a_str.lstrip(' ')
         s = a.build_types(f, [int, annmodel.SomeString(no_nul=True)])
         assert s.no_nul
@@ -3737,6 +3737,25 @@
         s = a.build_types(f, [int])
         assert s.listdef.listitem.range_step == 0
 
+    def test_specialize_arg_memo(self):
+        @objectmodel.specialize.memo()
+        def g(n):
+            return n
+        @objectmodel.specialize.arg(0)
+        def f(i):
+            return g(i)
+        def main(i):
+            if i == 2:
+                return f(2)
+            elif i == 3:
+                return f(3)
+            else:
+                raise NotImplementedError
+
+        a = self.RPythonAnnotator()
+        s = a.build_types(main, [int])
+        assert isinstance(s, annmodel.SomeInteger)
+
 
 def g(n):
     return [0,1,2,n]
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -158,6 +158,12 @@
 .. __: http://morepypy.blogspot.com/2008/02/python-finalizers-semantics-part-1.html
 .. __: http://morepypy.blogspot.com/2008/02/python-finalizers-semantics-part-2.html
 
+Note that this difference might show up indirectly in some cases.  For
+example, a generator left pending in the middle is, again,
+garbage-collected later in PyPy than in CPython.  You can see the
+difference if the ``yield`` at which the generator is suspended is
+itself enclosed in a ``try:`` or a ``with:`` block.
+
 Using the default GC called ``minimark``, the built-in function ``id()``
 works like it does in CPython.  With other GCs it returns numbers that
 are not real addresses (because an object can move around several times)
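
A minimal sketch of the generator behaviour described in the paragraph added
above (illustrative only):

    def gen():
        try:
            yield 1
        finally:
            print("cleanup")    # runs when the generator is finalized

    g = gen()
    next(g)    # suspend the generator inside the try: block
    del g      # CPython: finalized right away, "cleanup" prints here
               # PyPy: "cleanup" only appears at a later GC collection
               #       (or when gc.collect() is called explicitly)
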
diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/discussion/win64_todo.txt
@@ -0,0 +1,9 @@
+2011-11-04
+ll_os.py has a problem with the file rwin32.py.
+Temporarily disabled for the win64_gborg branch. This needs to be
+investigated and re-enabled.
+Resolved, enabled.
+
+2011-11-05
+test_typed.py needs explicit tests to ensure that we
+handle word sizes right.
\ No newline at end of file
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -149,6 +149,22 @@
 exported.  This would give us a one-size-fits-all generic .so file to be
 imported by any application that wants to load .so files :-)
 
+Optimising cpyext (CPython C-API compatibility layer)
+-----------------------------------------------------
+
+A lot of work has gone into PyPy's implementation of CPython's C-API over
+the last few years to let it reach a practical level of compatibility, so that
+C extensions for CPython work on PyPy without major rewrites. However,
+there are still many edges and corner cases where it misbehaves, and it has
+not received any substantial optimisation so far.
+
+The objective of this project is to fix bugs in cpyext and to optimise
+several performance critical parts of it, such as the reference counting
+support and other heavily used C-API functions. The net result would be to
+have CPython extensions run much faster on PyPy than they currently do, or
+to make them work at all if they currently don't. A part of this work would
+be to get cpyext into a shape where it supports running Cython generated
+extensions.
 
 .. _`issue tracker`: http://bugs.pypy.org
 .. _`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev
diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst
--- a/pypy/doc/stackless.rst
+++ b/pypy/doc/stackless.rst
@@ -199,17 +199,11 @@
 The following features (present in some past Stackless version of PyPy)
 are for the time being not supported any more:
 
-* Tasklets and channels (currently ``stackless.py`` seems to import,
-  but you have tasklets on top of coroutines on top of greenlets on
-  top of continulets on top of stacklets, and it's probably not too
-  hard to cut two of these levels by adapting ``stackless.py`` to
-  use directly continulets)
-
 * Coroutines (could be rewritten at app-level)
 
-* Pickling and unpickling continulets (*)
-
-* Continuing execution of a continulet in a different thread (*)
+* Continuing execution of a continulet in a different thread
+  (but if it is "simple enough", you can pickle it and unpickle it
+  in the other thread).
 
 * Automatic unlimited stack (must be emulated__ so far)
 
@@ -217,15 +211,6 @@
 
 .. __: `recursion depth limit`_
 
-(*) Pickling, as well as changing threads, could be implemented by using
-a "soft" stack switching mode again.  We would get either "hard" or
-"soft" switches, similarly to Stackless Python 3rd version: you get a
-"hard" switch (like now) when the C stack contains non-trivial C frames
-to save, and a "soft" switch (like previously) when it contains only
-simple calls from Python to Python.  Soft-switched continulets would
-also consume a bit less RAM, and the switch might be a bit faster too
-(unsure about that; what is the Stackless Python experience?).
-
 
 Recursion depth limit
 +++++++++++++++++++++
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
new file mode 100644
--- /dev/null
+++ b/pypy/doc/tool/makecontributor.py
@@ -0,0 +1,133 @@
+import py
+import sys
+from collections import defaultdict
+import operator
+import re
+import mercurial.localrepo
+import mercurial.ui
+
+ROOT = py.path.local(__file__).join('..', '..', '..', '..')
+author_re = re.compile('(.*) <.*>')
+pair_programming_re = re.compile(r'^\((.*?)\)')
+excluded = set(["pypy", "convert-repo"])
+
+alias = {
+    'Anders Chrigstrom': ['arre'],
+    'Antonio Cuni': ['antocuni', 'anto'],
+    'Armin Rigo': ['arigo', 'arfigo', 'armin', 'arigato'],
+    'Maciej Fijalkowski': ['fijal'],
+    'Carl Friedrich Bolz': ['cfbolz', 'cf'],
+    'Samuele Pedroni': ['pedronis', 'samuele', 'samule'],
+    'Michael Hudson': ['mwh'],
+    'Holger Krekel': ['hpk', 'holger krekel', 'holger', 'hufpk'],
+    "Amaury Forgeot d'Arc": ['afa'],
+    'Alex Gaynor': ['alex', 'agaynor'],
+    'David Schneider': ['bivab', 'david'],
+    'Christian Tismer': ['chris', 'christian', 'tismer',
+                         'tismer at christia-wjtqxl.localdomain'],
+    'Benjamin Peterson': ['benjamin'],
+    'Hakan Ardo': ['hakan', 'hakanardo'],
+    'Niklaus Haldimann': ['nik'],
+    'Alexander Schremmer': ['xoraxax'],
+    'Anders Hammarquist': ['iko'],
+    'David Edelsohn': ['edelsoh', 'edelsohn'],
+    'Niko Matsakis': ['niko'],
+    'Jakub Gustak': ['jlg'],
+    'Guido Wesdorp': ['guido'],
+    'Michael Foord': ['mfoord'],
+    'Mark Pearse': ['mwp'],
+    'Toon Verwaest': ['tverwaes'],
+    'Eric van Riet Paap': ['ericvrp'],
+    'Jacob Hallen': ['jacob', 'jakob'],
+    'Anders Lehmann': ['ale', 'anders'],
+    'Bert Freudenberg': ['bert'],
+    'Boris Feigin': ['boris', 'boria'],
+    'Valentino Volonghi': ['valentino', 'dialtone'],
+    'Aurelien Campeas': ['aurelien', 'aureliene'],
+    'Adrien Di Mascio': ['adim'],
+    'Jacek Generowicz': ['Jacek', 'jacek'],
+    'Jim Hunziker': ['landtuna at gmail.com'],
+    'Kristjan Valur Jonsson': ['kristjan at kristjan-lp.ccp.ad.local'],
+    'Laura Creighton': ['lac'],
+    'Aaron Iles': ['aliles'],
+    'Ludovic Aubry': ['ludal', 'ludovic'],
+    'Lukas Diekmann': ['l.diekmann', 'ldiekmann'],
+    'Matti Picus': ['Matti Picus matti.picus at gmail.com',
+                    'matthp', 'mattip', 'mattip>'],
+    'Michael Cheng': ['mikefc'],
+    'Richard Emslie': ['rxe'],
+    'Roberto De Ioris': ['roberto at goyle', 'roberto at mrspurr'],
+    'Sven Hager': ['hager'],
+    'Tomo Cocoa': ['cocoatomo'],
+    }
+
+alias_map = {}
+for name, nicks in alias.iteritems():
+    for nick in nicks:
+        alias_map[nick] = name
+
+def get_canonical_author(name):
+    match = author_re.match(name)
+    if match:
+        name = match.group(1)
+    return alias_map.get(name, name)
+
+ignored_nicknames = defaultdict(int)
+
+def get_more_authors(log):
+    match = pair_programming_re.match(log)
+    if not match:
+        return set()
+    ignore_words = ['around', 'consulting', 'yesterday', 'for a bit', 'thanks',
+                    'in-progress', 'bits of', 'even a little', 'floating',]
+    sep_words = ['and', ';', '+', '/', 'with special  by']
+    nicknames = match.group(1)
+    for word in ignore_words:
+        nicknames = nicknames.replace(word, '')
+    for word in sep_words:
+        nicknames = nicknames.replace(word, ',')
+    nicknames = [nick.strip().lower() for nick in nicknames.split(',')]
+    authors = set()
+    for nickname in nicknames:
+        author = alias_map.get(nickname)
+        if not author:
+            ignored_nicknames[nickname] += 1
+        else:
+            authors.add(author)
+    return authors
+
+def main(show_numbers):
+    ui = mercurial.ui.ui()
+    repo = mercurial.localrepo.localrepository(ui, str(ROOT))
+    authors_count = defaultdict(int)
+    for i in repo:
+        ctx = repo[i]
+        authors = set()
+        authors.add(get_canonical_author(ctx.user()))
+        authors.update(get_more_authors(ctx.description()))
+        for author in authors:
+            if author not in excluded:
+                authors_count[author] += 1
+
+    # uncomment the next lines to get the list of nicknames which could not be
+    # parsed from commit logs
+    ## items = ignored_nicknames.items()
+    ## items.sort(key=operator.itemgetter(1), reverse=True)
+    ## for name, n in items:
+    ##     if show_numbers:
+    ##         print '%5d %s' % (n, name)
+    ##     else:
+    ##         print name
+                
+    items = authors_count.items()
+    items.sort(key=operator.itemgetter(1), reverse=True)
+    for name, n in items:
+        if show_numbers:
+            print '%5d %s' % (n, name)
+        else:
+            print name
+
+if __name__ == '__main__':
+    show_numbers = '-n' in sys.argv
+    main(show_numbers)
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -18,7 +18,8 @@
 Edition.  Other configurations may work as well.
 
 The translation scripts will set up the appropriate environment variables
-for the compiler.  They will attempt to locate the same compiler version that
+for the compiler, so you do not need to run vcvars before translation.  
+They will attempt to locate the same compiler version that
 was used to build the Python interpreter doing the
 translation.  Failing that, they will pick the most recent Visual Studio
 compiler they can find.  In addition, the target architecture
@@ -26,7 +27,7 @@
 using a 32 bit Python and vice versa.
 
 **Note:** PyPy is currently not supported for 64 bit Windows, and translation
-will be aborted in this case.
+will fail in this case.
 
 The compiler is all you need to build pypy-c, but it will miss some
 modules that relies on third-party libraries.  See below how to get
@@ -57,7 +58,8 @@
 install third-party libraries.  We chose to install them in the parent
 directory of the pypy checkout.  For example, if you installed pypy in
 ``d:\pypy\trunk\`` (This directory contains a README file), the base
-directory is ``d:\pypy``.
+directory is ``d:\pypy``. You may choose different values by setting the
+INCLUDE, LIB and PATH (for DLLs) environment variables.
 
 The Boehm garbage collector
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -126,18 +128,54 @@
 ------------------------
 
 You can compile pypy with the mingw compiler, using the --cc=mingw32 option;
-mingw.exe must be on the PATH.
+gcc.exe must be on the PATH. If the --cc flag does not begin with "ming", it should be
+the name of a valid gcc-derivative compiler, e.g. x86_64-w64-mingw32-gcc for the 64 bit
+compiler creating a 64 bit target.
 
-libffi for the mingw32 compiler
+libffi for the mingw compiler
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-To enable the _rawffi (and ctypes) module, you need to compile a mingw32
-version of libffi.  I downloaded the `libffi source files`_, and extracted
-them in the base directory.  Then run::
+To enable the _rawffi (and ctypes) module, you need to compile a mingw
+version of libffi.  Here is one way to do this, which should allow you to try
+to build for win64 or win32:
+
+#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw
+#. If you do not use cygwin, you will need msys to provide make, 
+   autoconf tools and other goodies.
+
+    #. Download and unzip a `msys for mingw`_, say into c:\msys
+    #. Edit the c:\msys\etc\fstab file to mount c:\mingw
+
+#. Download the `libffi source files`_ and extract
+   them in the base directory.
+#. Run c:\msys\msys.bat or a cygwin shell, which should make you
+   feel better since it is a shell prompt with shell tools.
+#. From inside the shell, cd to the libffi directory and do::
 
     sh ./configure
     make
     cp .libs/libffi-5.dll <somewhere on the PATH>
 
+If you can't find the dll, and libtool issued a warning about
+"undefined symbols not allowed", you will need to edit the libffi
+Makefile in the toplevel directory. Add the flag -no-undefined to
+the definition of libffi_la_LDFLAGS.
+
+If you wish to experiment with win64, you must run configure with flags::
+
+    sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32
+
+or such, depending on your mingw64 download.
+
+hacking on PyPy with the mingw compiler
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Since hacking on PyPy means running tests, you will need a way to specify
+the mingw compiler when hacking (as opposed to translating). As of
+March 2012, --cc is not a valid option for pytest.py. However, if you set
+the environment variable CC, it will be used to select the compiler.
+
+.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds
+.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds
+.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29   
 .. _`libffi source files`: http://sourceware.org/libffi/
 .. _`RPython translation toolchain`: translation.html
diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/you-want-to-help.rst
@@ -0,0 +1,86 @@
+
+You want to help with PyPy, now what?
+=====================================
+
+PyPy is a very large project that has a reputation for being hard to dive into.
+Some of this reputation is warranted, some of it is purely accidental. There are three
+important lessons that everyone willing to contribute should learn:
+
+* PyPy has layers. There are many pieces of architecture that are very well
+  separated from each other. More about this below, but often the manifestation
+  of this is that things are at a different layer than you would expect them
+  to be. For example, if you are looking for the JIT implementation, you will
+  not find it in the implementation of the Python programming language.
+
+* Because of the above, we are very serious about Test Driven Development.
+  It's not only what we believe in: PyPy's architecture works very well
+  with TDD and not so well without it. Often development means progressing
+  in an unrelated corner, one unittest at a time, and then flipping a giant
+  switch that brings it all together.
+  (It generally works out of the box.  If it doesn't, then we didn't
+  write enough unit tests.)  It's worth repeating: the PyPy
+  approach is great if you do TDD, not so great otherwise.
+
+* PyPy uses an entirely different set of tools, most of them included
+  in the PyPy repository. There is no Makefile, nor autoconf. More on this below.
+
+Architecture
+============
+
+PyPy has layers. The 100 mile view:
+
+* `RPython`_ is the language in which we write interpreters. Not all of the
+  PyPy project is written in RPython, only the parts that are compiled in
+  the translation process. The interesting point is that RPython has no parser;
+  it's compiled from the live Python objects, which makes it possible to do
+  all kinds of metaprogramming at import time. In short, Python is a
+  metaprogramming language for RPython (see the small sketch below).
+
+  The RPython standard library is to be found in the ``rlib`` subdirectory.
+
+.. _`RPython`: coding-guide.html#RPython
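
A hedged, toy illustration of the point above (the names here are made up;
compare ``make_array(mytype)`` in ``pypy/module/array/interp_array.py``, also
touched in this diff): plain Python runs at import time and manufactures one
specialized function per configuration, and only the generated functions have
to be RPython once translation starts.

    def make_adder(n):
        def adder(x):
            return x + n           # 'n' is a translation-time constant here
        adder.func_name = 'add_%d' % n
        return adder

    add_1 = make_adder(1)
    add_4 = make_adder(4)          # built while importing, before translation
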
+
+* The translation toolchain - this is the part that takes care of translating
+  RPython to flow graphs and then to C. There is more about it in the
+  `architecture`_ document.
+
+  It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``.
+
+.. _`architecture`: architecture.html 
+
+* Python Interpreter
+
+  xxx
+
+* Python modules
+
+  xxx
+
+* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the
+  interpreter written in RPython, rather than the user program that it
+  interprets.  As a result it applies to any interpreter, i.e. any
+  language.  But getting it to work correctly is not trivial: it
+  requires a small number of precise "hints" and possibly some small
+  refactorings of the interpreter (a toy sketch of such a hint appears
+  below).  The JIT itself also has several
+  almost-independent parts: the tracer itself in ``jit/metainterp``, the
+  optimizer in ``jit/metainterp/optimizer`` that optimizes a list of
+  residual operations, and the backend in ``jit/backend/<machine-name>``
+  that turns it into machine code.  Writing a new backend is a
+  traditional way to get into the project.
+
+.. _`we have a tracing JIT`: jit/index.html
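
A minimal, hedged sketch of the kind of "hint" meant above: a toy bytecode
loop, not PyPy's actual interpreter; only the JitDriver declaration and the
jit_merge_point() call are the real pypy.rlib.jit API, and running it assumes
a PyPy source checkout on the path.

    from pypy.rlib.jit import JitDriver

    jitdriver = JitDriver(greens=['pc', 'program'], reds=['acc'])

    def interpret(program):
        pc = 0
        acc = 0
        while pc < len(program):
            # one hint per interpreter iteration: the 'greens' identify the
            # position in the user program, the 'reds' are live runtime values
            jitdriver.jit_merge_point(pc=pc, program=program, acc=acc)
            if program[pc] == 'i':
                acc += 1
            pc += 1
        return acc
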
+
+* Garbage Collectors (GC): as you can notice if you are used to CPython's
+  C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code.
+  `Garbage collection in PyPy`_ is inserted
+  during translation.  Moreover, this is not reference counting; it is a real
+  GC written as more RPython code.  The best one we have so far is in
+  ``rpython/memory/gc/minimark.py``.
+
+.. _`Garbage collection in PyPy`: garbage_collection.html
+
+
+Toolset
+=======
+
+xxx
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -182,9 +182,11 @@
     def _combine_starstarargs_wrapped(self, w_starstararg):
         # unpack the ** arguments
         space = self.space
+        keywords, values_w = space.view_as_kwargs(w_starstararg)
+        if keywords is not None: # this path also taken for empty dicts
+            self._add_keywordargs_no_unwrapping(keywords, values_w)
+            return not jit.isconstant(len(self.keywords))
         if space.isinstance_w(w_starstararg, space.w_dict):
-            if not space.is_true(w_starstararg):
-                return False # don't call unpackiterable - it's jit-opaque
             keys_w = space.unpackiterable(w_starstararg)
         else:
             try:
@@ -199,11 +201,8 @@
                                    "a mapping, not %s" % (typename,)))
                 raise
             keys_w = space.unpackiterable(w_keys)
-        if keys_w:
-            self._do_combine_starstarargs_wrapped(keys_w, w_starstararg)
-            return True
-        else:
-            return False    # empty dict; don't disable the JIT
+        self._do_combine_starstarargs_wrapped(keys_w, w_starstararg)
+        return True
 
     def _do_combine_starstarargs_wrapped(self, keys_w, w_starstararg):
         space = self.space
@@ -240,6 +239,26 @@
             self.keywords_w = self.keywords_w + keywords_w
         self.keyword_names_w = keys_w
 
+    @jit.look_inside_iff(lambda self, keywords, keywords_w:
+            jit.isconstant(len(keywords)) and
+            jit.isconstant(self.keywords))
+    def _add_keywordargs_no_unwrapping(self, keywords, keywords_w):
+        if self.keywords is None:
+            self.keywords = keywords[:] # copy to make non-resizable
+            self.keywords_w = keywords_w[:]
+        else:
+            # looks quadratic, but the JIT should remove all of it nicely.
+            # Also, all the lists should be small
+            for key in keywords:
+                for otherkey in self.keywords:
+                    if otherkey == key:
+                        raise operationerrfmt(self.space.w_TypeError,
+                                              "got multiple values "
+                                              "for keyword argument "
+                                              "'%s'", key)
+            self.keywords = self.keywords + keywords
+            self.keywords_w = self.keywords_w + keywords_w
+
     def fixedunpack(self, argcount):
         """The simplest argument parsing: get the 'argcount' arguments,
         or raise a real ValueError if the length is wrong."""
@@ -417,7 +436,7 @@
 
         # collect extra keyword arguments into the **kwarg
         if has_kwarg:
-            w_kwds = self.space.newdict()
+            w_kwds = self.space.newdict(kwargs=True)
             if num_remainingkwds:
                 #
                 limit = len(keywords)
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -10,16 +10,6 @@
 from pypy.interpreter.astcompiler import ast, consts
 
 
-try:
-    all
-except NameError:
-    def all(iterable):
-        for x in iterable:
-            if not x:
-                return False
-        return True
-
-
 class TestAstBuilder:
 
     def setup_class(cls):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -297,6 +297,7 @@
         self.check_signal_action = None   # changed by the signal module
         self.user_del_action = UserDelAction(self)
         self.frame_trace_action = FrameTraceAction(self)
+        self._code_of_sys_exc_info = None
 
         from pypy.interpreter.pycode import cpython_magic, default_magic
         self.our_magic = default_magic
@@ -467,9 +468,9 @@
                 if name not in modules:
                     modules.append(name)
 
-        # a bit of custom logic: time2 or rctime take precedence over time
+        # a bit of custom logic: rctime takes precedence over time
         # XXX this could probably be done as a "requires" in the config
-        if ('time2' in modules or 'rctime' in modules) and 'time' in modules:
+        if 'rctime' in modules and 'time' in modules:
             modules.remove('time')
 
         if not self.config.objspace.nofaking:
@@ -919,6 +920,12 @@
         """
         return None
 
+    def view_as_kwargs(self, w_dict):
+        """ if w_dict is a kwargs-dict, return two lists, one of unwrapped
+        strings and one of wrapped values. Otherwise return (None, None)
+        """
+        return (None, None)
+
     def newlist_str(self, list_s):
         return self.newlist([self.wrap(s) for s in list_s])
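
A hedged sketch (hypothetical classes, not the real dict strategies) of the
view_as_kwargs() contract documented in the hunk above: a dict implementation
that already stores keyword arguments as two parallel lists can expose them
directly, while every other implementation answers (None, None) so callers
fall back to the generic unpacking path.

    class KwargsDict(object):
        def __init__(self, keys, values_w):
            self._keys = keys            # unwrapped strings
            self._values_w = values_w    # wrapped values

    def view_as_kwargs(w_dict):
        if isinstance(w_dict, KwargsDict):
            return w_dict._keys[:], w_dict._values_w[:]
        return (None, None)
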
 
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -154,6 +154,7 @@
         #operationerr.print_detailed_traceback(self.space)
 
     def _convert_exc(self, operr):
+        # Only for the flow object space
         return operr
 
     def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!!
@@ -166,6 +167,11 @@
             frame = self.getnextframe_nohidden(frame)
         return None
 
+    def set_sys_exc_info(self, operror):
+        frame = self.gettopframe_nohidden()
+        if frame:     # else, the exception goes nowhere and is lost
+            frame.last_exception = operror
+
     def settrace(self, w_func):
         """Set the global trace function."""
         if self.space.is_w(w_func, self.space.w_None):
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -115,6 +115,12 @@
         from pypy.interpreter.pycode import PyCode
 
         code = self.getcode() # hook for the jit
+        #
+        if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info
+                                and nargs == 0):
+            from pypy.module.sys.vm import exc_info_direct
+            return exc_info_direct(self.space, frame)
+        #
         fast_natural_arity = code.fast_natural_arity
         if nargs == fast_natural_arity:
             if nargs == 0:
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -880,6 +880,12 @@
             fn.add_to_table()
         if gateway.as_classmethod:
             fn = ClassMethod(space.wrap(fn))
+        #
+        from pypy.module.sys.vm import exc_info
+        if code._bltin is exc_info:
+            assert space._code_of_sys_exc_info is None
+            space._code_of_sys_exc_info = code
+        #
         return fn
 
 
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -3,29 +3,35 @@
 from pypy.rlib.rstring import StringBuilder
 
 def parsestr(space, encoding, s):
-    # compiler.transformer.Transformer.decode_literal depends on what 
-    # might seem like minor details of this function -- changes here 
-    # must be reflected there.
+    """Parses a string or unicode literal, and return a wrapped value.
+
+    If encoding=iso8859-1, the source string is also in this encoding.
+    If encoding=None, the source string is ascii only.
+    In other cases, the source string is in utf-8 encoding.
+
+    When a bytes string is returned, it will be encoded with the
+    original encoding.
+
+    Yes, it's very inefficient.
+    Yes, CPython has very similar code.
+    """
 
     # we use ps as "pointer to s"
     # q is the virtual last char index of the string
     ps = 0
     quote = s[ps]
     rawmode = False
-    unicode = True
+    unicode_literal = True
 
     # string decoration handling
-    o = ord(quote)
-    isalpha = (o>=97 and o<=122) or (o>=65 and o<=90)
-    if isalpha or quote == '_':
-        if quote == 'b' or quote == 'B':
-            ps += 1
-            quote = s[ps]
-            unicode = False
-        if quote == 'r' or quote == 'R':
-            ps += 1
-            quote = s[ps]
-            rawmode = True
+    if quote == 'b' or quote == 'B':
+        ps += 1
+        quote = s[ps]
+        unicode_literal = False
+    if quote == 'r' or quote == 'R':
+        ps += 1
+        quote = s[ps]
+        rawmode = True
     if quote != "'" and quote != '"':
         raise_app_valueerror(space,
                              'Internal error: parser passed unquoted literal')
@@ -42,21 +48,28 @@
                                         'unmatched triple quotes in literal')
         q -= 2
 
-    if unicode: # XXX Py_UnicodeFlag is ignored for now
+    if unicode_literal: # XXX Py_UnicodeFlag is ignored for now
         if encoding is None or encoding == "iso-8859-1":
+            # 'unicode_escape' expects latin-1 bytes, string is ready.
             buf = s
             bufp = ps
             bufq = q
             u = None
         else:
-            # "\XX" may become "\u005c\uHHLL" (12 bytes)
+            # String is utf8-encoded, but 'unicode_escape' expects
+            # latin-1; so multibyte sequences must be escaped.
             lis = [] # using a list to assemble the value
             end = q
+            # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes)
             while ps < end:
                 if s[ps] == '\\':
                     lis.append(s[ps])
                     ps += 1
                     if ord(s[ps]) & 0x80:
+                        # A multibyte sequence will follow, it will be
+                        # escaped like \u1234. To avoid confusion with
+                        # the backslash we just wrote, we emit "\u005c"
+                        # instead.
                         lis.append("u005c")
                 if ord(s[ps]) & 0x80: # XXX inefficient
                     w, ps = decode_utf8(space, s, ps, end, "utf-16-be")
@@ -82,13 +95,11 @@
 
     need_encoding = (encoding is not None and
                      encoding != "utf-8" and encoding != "iso-8859-1")
-    # XXX add strchr like interface to rtyper
     assert 0 <= ps <= q
     substr = s[ps : q]
     if rawmode or '\\' not in s[ps:]:
         if need_encoding:
             w_u = space.wrap(unicodehelper.PyUnicode_DecodeUTF8(space, substr))
-            #w_v = space.wrap(space.unwrap(w_u).encode(encoding)) this works
             w_v = unicodehelper.PyUnicode_AsEncodedString(space, w_u, space.wrap(encoding))
             return w_v
         else:
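
A small hedged illustration (plain Python 2, toy input) of why the comments
added above pre-escape multibyte utf-8 sequences: the 'unicode_escape' codec
interprets its input as latin-1, so raw utf-8 bytes come out as mojibake
unless they are turned into \uXXXX escapes first.

    s = '\\n \xc3\xa9'                             # escape sequence + utf-8 e-acute
    print s.decode('unicode_escape')               # u'\n \xc3\xa9'  (mojibake)
    print '\\n \\u00e9'.decode('unicode_escape')   # u'\n \xe9'      (correct)
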
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -76,7 +76,10 @@
     def unpackiterable(self, it):
         return list(it)
 
-    def newdict(self):
+    def view_as_kwargs(self, x):
+        return None, None
+
+    def newdict(self, kwargs=False):
         return {}
 
     def newlist(self, l=[]):
@@ -489,6 +492,57 @@
         assert len(l) == 1
         assert l[0] == space.wrap(5)
 
+    def test_starstarargs_special(self):
+        class kwargs(object):
+            def __init__(self, k, v):
+                self.k = k
+                self.v = v
+        class MyDummySpace(DummySpace):
+            def view_as_kwargs(self, kw):
+                if isinstance(kw, kwargs):
+                    return kw.k, kw.v
+                return None, None
+        space = MyDummySpace()
+        for i in range(3):
+            kwds = [("c", 3)]
+            kwds_w = dict(kwds[:i])
+            keywords = kwds_w.keys()
+            keywords_w = kwds_w.values()
+            rest = dict(kwds[i:])
+            w_kwds = kwargs(rest.keys(), rest.values())
+            if i == 2:
+                w_kwds = None
+            assert len(keywords) == len(keywords_w)
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None]
+            args._match_signature(None, l, Signature(["a", "b", "c"]), defaults_w=[4])
+            assert l == [1, 2, 3]
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            args._match_signature(None, l, Signature(["a", "b", "b1", "c"]), defaults_w=[4, 5])
+            assert l == [1, 2, 4, 3]
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            args._match_signature(None, l, Signature(["a", "b", "c", "d"]), defaults_w=[4, 5])
+            assert l == [1, 2, 3, 5]
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            py.test.raises(ArgErr, args._match_signature, None, l,
+                           Signature(["c", "b", "a", "d"]), defaults_w=[4, 5])
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            py.test.raises(ArgErr, args._match_signature, None, l,
+                           Signature(["a", "b", "c1", "d"]), defaults_w=[4, 5])
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None]
+            args._match_signature(None, l, Signature(["a", "b"], None, "**"))
+            assert l == [1, 2, {'c': 3}]
+        excinfo = py.test.raises(OperationError, Arguments, space, [], ["a"],
+                                 [1], w_starstararg=kwargs(["a"], [2]))
+        assert excinfo.value.w_type is TypeError
+
+
+
 class TestErrorHandling(object):
     def test_missing_args(self):
         # got_nargs, nkwds, expected_nargs, has_vararg, has_kwarg,
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -321,7 +321,7 @@
             
             def user_setup(self, space, w_subtype):
                 self.w__dict__ = space.newdict(
-                    instance=True, classofinstance=w_subtype)
+                    instance=True)
                 base_user_setup(self, space, w_subtype)
 
             def setclass(self, space, w_subtype):
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
--- a/pypy/jit/backend/llgraph/llimpl.py
+++ b/pypy/jit/backend/llgraph/llimpl.py
@@ -1797,6 +1797,7 @@
         if specialize_as_constant:
             def specialize_call(self, hop):
                 llvalue = func(hop.args_s[0].const)
+                hop.exception_cannot_occur()
                 return hop.inputconst(lltype.typeOf(llvalue), llvalue)
         else:
             # specialize as direct_call
@@ -1813,6 +1814,7 @@
                     sm = ootype._static_meth(FUNCTYPE, _name=func.__name__, _callable=func)
                     cfunc = hop.inputconst(FUNCTYPE, sm)
                 args_v = hop.inputargs(*hop.args_r)
+                hop.exception_is_here()
                 return hop.genop('direct_call', [cfunc] + args_v, hop.r_result)
 
 
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -27,6 +27,12 @@
 def constfloat(x):
     return ConstFloat(longlong.getfloatstorage(x))
 
+def boxlonglong(ll):
+    if longlong.is_64_bit:
+        return BoxInt(ll)
+    else:
+        return BoxFloat(ll)
+
 
 class Runner(object):
 
@@ -1623,6 +1629,11 @@
                                      [boxfloat(2.5)], t).value
         assert res == longlong2float.float2longlong(2.5)
 
+        bytes = longlong2float.float2longlong(2.5)
+        res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT,
+                                     [boxlonglong(res)], 'float').value
+        assert longlong.getrealfloat(res) == 2.5
+
     def test_ooops_non_gc(self):
         x = lltype.malloc(lltype.Struct('x'), flavor='raw')
         v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x))
diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py
--- a/pypy/jit/backend/test/test_random.py
+++ b/pypy/jit/backend/test/test_random.py
@@ -328,6 +328,15 @@
     def produce_into(self, builder, r):
         self.put(builder, [r.choice(builder.intvars)])
 
+class CastLongLongToFloatOperation(AbstractFloatOperation):
+    def produce_into(self, builder, r):
+        if longlong.is_64_bit:
+            self.put(builder, [r.choice(builder.intvars)])
+        else:
+            if not builder.floatvars:
+                raise CannotProduceOperation
+            self.put(builder, [r.choice(builder.floatvars)])
+
 class CastFloatToIntOperation(AbstractFloatOperation):
     def produce_into(self, builder, r):
         if not builder.floatvars:
@@ -450,6 +459,7 @@
 OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT))
 OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT))
 OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG))
+OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT))
 
 OperationBuilder.OPERATIONS = OPERATIONS
 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -1251,6 +1251,15 @@
         else:
             self.mov(loc0, resloc)
 
+    def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc):
+        loc0, = arglocs
+        if longlong.is_64_bit:
+            assert isinstance(resloc, RegLoc)
+            assert isinstance(loc0, RegLoc)
+            self.mc.MOVD(resloc, loc0)
+        else:
+            self.mov(loc0, resloc)
+
     def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc):
         guard_opnum = guard_op.getopnum()
         self.mc.CMP(arglocs[0], imm0)
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -773,10 +773,24 @@
             self.Perform(op, [loc0], loc1)
             self.xrm.possibly_free_var(op.getarg(0))
         else:
-            loc0 = self.xrm.loc(op.getarg(0))
+            arg0 = op.getarg(0)
+            loc0 = self.xrm.loc(arg0)
+            loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0])
+            self.Perform(op, [loc0], loc1)
+            self.xrm.possibly_free_var(arg0)
+
+    def consider_convert_longlong_bytes_to_float(self, op):
+        if longlong.is_64_bit:
+            loc0 = self.rm.make_sure_var_in_reg(op.getarg(0))
             loc1 = self.xrm.force_allocate_reg(op.result)
             self.Perform(op, [loc0], loc1)
-            self.xrm.possibly_free_var(op.getarg(0))
+            self.rm.possibly_free_var(op.getarg(0))
+        else:
+            arg0 = op.getarg(0)
+            loc0 = self.xrm.make_sure_var_in_reg(arg0)
+            loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0])
+            self.Perform(op, [loc0], loc1)
+            self.xrm.possibly_free_var(arg0)
 
     def _consider_llong_binop_xx(self, op):
         # must force both arguments into xmm registers, because we don't
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
--- a/pypy/jit/backend/x86/rx86.py
+++ b/pypy/jit/backend/x86/rx86.py
@@ -601,7 +601,9 @@
     CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A',
                           register(1, 8), stack_bp(2))
 
-    # These work on machine sized registers.
+    # These work on machine sized registers, so MOVD is actually MOVQ
+    # when running on 64 bits.  Note a bug in the Intel documentation:
+    # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html
     MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0')
     MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0')
     MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2))
diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
--- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
+++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
@@ -182,6 +182,12 @@
         filename  = str(testdir.join(FILENAME  % methname))
         g = open(inputname, 'w')
         g.write('\x09.string "%s"\n' % BEGIN_TAG)
+        #
+        if instrname == 'MOVD' and self.WORD == 8:
+            instrname = 'MOVQ'
+            if argmodes == 'xb':
+                py.test.skip('"as" uses an undocumented alternate encoding??')
+        #
         for args in args_lists:
             suffix = ""
     ##        all = instr.as_all_suffixes
@@ -229,9 +235,6 @@
                 # movq $xxx, %rax => movl $xxx, %eax
                 suffix = 'l'
                 ops[1] = reduce_to_32bit(ops[1])
-            if instrname.lower() == 'movd':
-                ops[0] = reduce_to_32bit(ops[0])
-                ops[1] = reduce_to_32bit(ops[1])
             #
             op = '\t%s%s %s%s' % (instrname.lower(), suffix,
                                   ', '.join(ops), following)
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py
--- a/pypy/jit/codewriter/jtransform.py
+++ b/pypy/jit/codewriter/jtransform.py
@@ -295,6 +295,7 @@
         return op
 
     rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite
+    rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite
 
     # ----------
     # Various kinds of calls
diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py
--- a/pypy/jit/codewriter/test/test_flatten.py
+++ b/pypy/jit/codewriter/test/test_flatten.py
@@ -968,20 +968,22 @@
             int_return %i2
         """, transform=True)
 
-    def test_convert_float_bytes_to_int(self):
-        from pypy.rlib.longlong2float import float2longlong
+    def test_convert_float_bytes(self):
+        from pypy.rlib.longlong2float import float2longlong, longlong2float
         def f(x):
-            return float2longlong(x)
+            ll = float2longlong(x)
+            return longlong2float(ll)
         if longlong.is_64_bit:
-            result_var = "%i0"
-            return_op = "int_return"
+            tmp_var = "%i0"
+            result_var = "%f1"
         else:
-            result_var = "%f1"
-            return_op = "float_return"
+            tmp_var = "%f1"
+            result_var = "%f2"
         self.encoding_test(f, [25.0], """
-            convert_float_bytes_to_longlong %%f0 -> %(result_var)s
-            %(return_op)s %(result_var)s
-        """ % {"result_var": result_var, "return_op": return_op})
+            convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s
+            convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s
+            float_return %(result_var)s
+        """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True)
 
 
 def check_force_cast(FROM, TO, operations, value):
diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py
--- a/pypy/jit/metainterp/blackhole.py
+++ b/pypy/jit/metainterp/blackhole.py
@@ -672,6 +672,11 @@
         a = longlong.getrealfloat(a)
         return longlong2float.float2longlong(a)
 
+    @arguments(LONGLONG_TYPECODE, returns="f")
+    def bhimpl_convert_longlong_bytes_to_float(a):
+        a = longlong2float.longlong2float(a)
+        return longlong.getfloatstorage(a)
+
     # ----------
     # control flow operations
 
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -224,6 +224,7 @@
                     'float_neg', 'float_abs',
                     'cast_ptr_to_int', 'cast_int_to_ptr',
                     'convert_float_bytes_to_longlong',
+                    'convert_longlong_bytes_to_float',
                     ]:
         exec py.code.Source('''
             @arguments("box")
diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py
--- a/pypy/jit/metainterp/resoperation.py
+++ b/pypy/jit/metainterp/resoperation.py
@@ -420,6 +420,7 @@
     'CAST_FLOAT_TO_SINGLEFLOAT/1',
     'CAST_SINGLEFLOAT_TO_FLOAT/1',
     'CONVERT_FLOAT_BYTES_TO_LONGLONG/1',
+    'CONVERT_LONGLONG_BYTES_TO_FLOAT/1',
     #
     'INT_LT/2b',
     'INT_LE/2b',
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -1,3 +1,4 @@
+import math
 import sys
 
 import py
@@ -15,7 +16,7 @@
     loop_invariant, elidable, promote, jit_debug, assert_green,
     AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff,
     isconstant, isvirtual, promote_string, set_param, record_known_class)
-from pypy.rlib.longlong2float import float2longlong
+from pypy.rlib.longlong2float import float2longlong, longlong2float
 from pypy.rlib.rarithmetic import ovfcheck, is_valid_int
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rpython.ootypesystem import ootype
@@ -3795,15 +3796,15 @@
         res = self.interp_operations(g, [1])
         assert res == 3
 
-    def test_float2longlong(self):
+    def test_float_bytes(self):
         def f(n):
-            return float2longlong(n)
+            ll = float2longlong(n)
+            return longlong2float(ll)
 
         for x in [2.5, float("nan"), -2.5, float("inf")]:
             # There are tests elsewhere to verify the correctness of this.
-            expected = float2longlong(x)
             res = self.interp_operations(f, [x])
-            assert longlong.getfloatstorage(res) == expected
+            assert res == x or math.isnan(x) and math.isnan(res)
 
 
 class TestLLtype(BaseLLtypeTests, LLJitMixin):
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -16,13 +16,15 @@
     appleveldefs = {}
     interpleveldefs = {}
     if sys.platform.startswith("linux"):
+        from pypy.module.__pypy__ import interp_time
         interpleveldefs["clock_gettime"] = "interp_time.clock_gettime"
         interpleveldefs["clock_getres"] = "interp_time.clock_getres"
         for name in [
             "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW",
             "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID"
         ]:
-            interpleveldefs[name] = "space.wrap(interp_time.%s)" % name
+            if getattr(interp_time, name) is not None:
+                interpleveldefs[name] = "space.wrap(interp_time.%s)" % name
 
 
 class Module(MixedModule):
diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py
--- a/pypy/module/__pypy__/interp_time.py
+++ b/pypy/module/__pypy__/interp_time.py
@@ -1,3 +1,4 @@
+from __future__ import with_statement
 import sys
 
 from pypy.interpreter.error import exception_from_errno
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -185,6 +185,7 @@
             if subentry is not None:
                 subentry._stop(tt, it)
 
+@jit.elidable_promote()
 def create_spec(space, w_arg):
     if isinstance(w_arg, Method):
         w_function = w_arg.w_function
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -615,7 +615,8 @@
                     raise _ssl_seterror(space, self, length)
                 try:
                     # this is actually an immutable bytes sequence
-                    return space.wrap(rffi.charp2str(buf_ptr[0]))
+                    return space.wrap(rffi.charpsize2str(buf_ptr[0],
+                                                         length))
                 finally:
                     libssl_OPENSSL_free(buf_ptr[0])
         else:
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -10,7 +10,7 @@
 from pypy.objspace.std.register_all import register_all
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rlib.unroll import unrolling_iterable
-from pypy.rlib.objectmodel import specialize
+from pypy.rlib.objectmodel import specialize, keepalive_until_here
 from pypy.rpython.lltypesystem import lltype, rffi
 
 
@@ -144,18 +144,24 @@
 unroll_typecodes = unrolling_iterable(types.keys())
 
 class ArrayBuffer(RWBuffer):
-    def __init__(self, data, bytes):
-        self.data = data
-        self.len = bytes
+    def __init__(self, array):
+        self.array = array
 
     def getlength(self):
-        return self.len
+        return self.array.len * self.array.itemsize
 
     def getitem(self, index):
-        return self.data[index]
+        array = self.array
+        data = array._charbuf_start()
+        char = data[index]
+        array._charbuf_stop()
+        return char
 
     def setitem(self, index, char):
-        self.data[index] = char
+        array = self.array
+        data = array._charbuf_start()
+        data[index] = char
+        array._charbuf_stop()
 
 
 def make_array(mytype):
@@ -277,9 +283,10 @@
             oldlen = self.len
             new = len(s) / mytype.bytes
             self.setlen(oldlen + new)
-            cbuf = self.charbuf()
+            cbuf = self._charbuf_start()
             for i in range(len(s)):
                 cbuf[oldlen * mytype.bytes + i] = s[i]
+            self._charbuf_stop()
 
         def fromlist(self, w_lst):
             s = self.len
@@ -309,8 +316,11 @@
             else:
                 self.fromsequence(w_iterable)
 
-        def charbuf(self):
-            return  rffi.cast(rffi.CCHARP, self.buffer)
+        def _charbuf_start(self):
+            return rffi.cast(rffi.CCHARP, self.buffer)
+
+        def _charbuf_stop(self):
+            keepalive_until_here(self)
 
         def w_getitem(self, space, idx):
             item = self.buffer[idx]
@@ -520,8 +530,10 @@
         self.fromstring(space.bytes_w(w_s))
 
     def array_tobytes__Array(space, self):
-        cbuf = self.charbuf()
-        return self.space.wrapbytes(rffi.charpsize2str(cbuf, self.len * mytype.bytes))
+        cbuf = self._charbuf_start()
+        s = rffi.charpsize2str(cbuf, self.len * mytype.bytes)
+        self._charbuf_stop()
+        return self.space.wrap(s)
 
     def array_tostring__Array(space, self):
         space.warn("tostring() is deprecated. Use tobytes() instead.",
@@ -602,8 +614,7 @@
     # Misc methods
 
     def buffer__Array(space, self):
-        b = ArrayBuffer(self.charbuf(), self.len * mytype.bytes)
-        return space.wrap(b)
+        return space.wrap(ArrayBuffer(self))
 
     def array_buffer_info__Array(space, self):
         w_ptr = space.wrap(rffi.cast(lltype.Unsigned, self.buffer))
@@ -638,7 +649,7 @@
             raise OperationError(space.w_RuntimeError, space.wrap(msg))
         if self.len == 0:
             return
-        bytes = self.charbuf()
+        bytes = self._charbuf_start()
         tmp = [bytes[0]] * mytype.bytes
         for start in range(0, self.len * mytype.bytes, mytype.bytes):
             stop = start + mytype.bytes - 1
@@ -646,6 +657,7 @@
                 tmp[i] = bytes[start + i]
             for i in range(mytype.bytes):
                 bytes[stop - i] = tmp[i]
+        self._charbuf_stop()
 
     def repr__Array(space, self):
         if self.len == 0:
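
A hedged sketch (simplified, but using the names from the hunk above) of the
_charbuf_start()/_charbuf_stop() pairing that replaces the old charbuf(): the
raw character pointer is only valid while the array object is provably alive,
so every use is bracketed and the stop call keeps the object alive.

    def read_char(array, index):
        data = array._charbuf_start()    # raw view of the array's storage
        char = data[index]
        array._charbuf_stop()            # calls keepalive_until_here(array)
        return char
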
diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py
--- a/pypy/module/array/test/test_array.py
+++ b/pypy/module/array/test/test_array.py
@@ -402,7 +402,25 @@
         a = self.array('h', b'Hi')
         buf = memoryview(a)
         assert buf[1] == b'i'
-        #raises(TypeError, buf.__setitem__, 1, 'o')
+
+    def test_buffer_write(self):
+        a = self.array('b', b'hello')
+        buf = memoryview(a)
+        print(repr(buf))
+        try:
+            buf[3] = b'L'
+        except TypeError:
+            skip("memoryview(array) returns a read-only buffer on CPython")
+        assert a.tostring() == 'helLo'
+
+    def test_buffer_keepalive(self):
+        buf = memoryview(self.array('b', b'text'))
+        assert buf[2] == b'x'
+        #
+        a = self.array('b', b'foobarbaz')
+        buf = memoryview(a)
+        a.fromstring(b'some extra text')
+        assert buf[:] == b'foobarbazsome extra text'
 
     def test_list_methods(self):
         assert repr(self.array('i')) == "array('i')"
diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py
--- a/pypy/module/cpyext/bufferobject.py
+++ b/pypy/module/cpyext/bufferobject.py
@@ -2,8 +2,10 @@
 from pypy.module.cpyext.api import (
     cpython_api, Py_ssize_t, cpython_struct, bootstrap_function,
     PyObjectFields, PyObject)
-from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef
+from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref
 from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer
+from pypy.interpreter.error import OperationError
+from pypy.module.array.interp_array import ArrayBuffer
 
 
 PyBufferObjectStruct = lltype.ForwardReference()
@@ -41,26 +43,38 @@
         py_buf.c_b_offset = w_obj.offset
         w_obj = w_obj.buffer
 
+    # If w_obj already allocated a fixed buffer, use it, and keep a
+    # reference to w_obj.
+    # Otherwise, b_base stays NULL, and we own the b_ptr.
+
     if isinstance(w_obj, StringBuffer):
-        py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value)
-        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str()))
+        py_buf.c_b_base = lltype.nullptr(PyObject.TO)
+        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value))
+        py_buf.c_b_size = w_obj.getlength()
+    elif isinstance(w_obj, ArrayBuffer):
+        w_base = w_obj.array
+        py_buf.c_b_base = make_ref(space, w_base)
+        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.array._charbuf_start())
         py_buf.c_b_size = w_obj.getlength()
     else:
-        raise Exception("Fail fail fail fail fail")
+        raise OperationError(space.w_NotImplementedError, space.wrap(
+            "buffer flavor not supported"))
 
 
 def buffer_realize(space, py_obj):
     """
     Creates the buffer in the PyPy interpreter from a cpyext representation.
     """
-    raise Exception("realize fail fail fail")
-
+    raise OperationError(space.w_NotImplementedError, space.wrap(
+        "Don't know how to realize a buffer"))
 
 
 @cpython_api([PyObject], lltype.Void, external=False)
 def buffer_dealloc(space, py_obj):
     py_buf = rffi.cast(PyBufferObject, py_obj)
-    Py_DecRef(space, py_buf.c_b_base)
-    rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr))
+    if py_buf.c_b_base:
+        Py_DecRef(space, py_buf.c_b_base)
+    else:
+        rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr))
     from pypy.module.cpyext.object import PyObject_dealloc
     PyObject_dealloc(space, py_obj)
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,7 +29,7 @@
 #define PY_VERSION		"2.7.2"
 
 /* PyPy version as a string */
-#define PYPY_VERSION "1.8.1"
+#define PYPY_VERSION "1.9.1"
 
 /* Subversion Revision number of this file (not of the repository).
  * Empty since Mercurial migration. */
diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py
--- a/pypy/module/cpyext/pyerrors.py
+++ b/pypy/module/cpyext/pyerrors.py
@@ -2,6 +2,7 @@
 
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.interpreter.error import OperationError
+from pypy.interpreter import pytraceback
 from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING
 from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning
 from pypy.module.cpyext.pyobject import (
@@ -314,3 +315,65 @@
     It may be called without holding the interpreter lock."""
     space.check_signal_action.set_interrupt()
 
+@cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void)
+def PyErr_GetExcInfo(space, ptype, pvalue, ptraceback):
+    """---Cython extension---
+
+    Retrieve the exception info, as known from ``sys.exc_info()``.  This
+    refers to an exception that was already caught, not to an exception
+    that was freshly raised.  Returns new references for the three
+    objects, any of which may be *NULL*.  Does not modify the exception
+    info state.
+
+    .. note::
+
+       This function is not normally used by code that wants to handle
+       exceptions.  Rather, it can be used when code needs to save and
+       restore the exception state temporarily.  Use
+       :c:func:`PyErr_SetExcInfo` to restore or clear the exception
+       state.
+    """
+    ec = space.getexecutioncontext()
+    operror = ec.sys_exc_info()
+    if operror:
+        ptype[0] = make_ref(space, operror.w_type)
+        pvalue[0] = make_ref(space, operror.get_w_value(space))
+        ptraceback[0] = make_ref(space, space.wrap(operror.get_traceback()))
+    else:
+        ptype[0] = lltype.nullptr(PyObject.TO)
+        pvalue[0] = lltype.nullptr(PyObject.TO)
+        ptraceback[0] = lltype.nullptr(PyObject.TO)
+
+@cpython_api([PyObject, PyObject, PyObject], lltype.Void)
+def PyErr_SetExcInfo(space, w_type, w_value, w_traceback):
+    """---Cython extension---
+
+    Set the exception info, as known from ``sys.exc_info()``.  This refers
+    to an exception that was already caught, not to an exception that was
+    freshly raised.  This function steals the references of the arguments.
+    To clear the exception state, pass *NULL* for all three arguments.
+    For general rules about the three arguments, see :c:func:`PyErr_Restore`.
+ 
+    .. note::
+ 
+       This function is not normally used by code that wants to handle
+       exceptions.  Rather, it can be used when code needs to save and
+       restore the exception state temporarily.  Use
+       :c:func:`PyErr_GetExcInfo` to read the exception state.
+    """
+    if w_value is None or space.is_w(w_value, space.w_None):
+        operror = None
+    else:
+        tb = None
+        if w_traceback is not None:
+            try:
+                tb = pytraceback.check_traceback(space, w_traceback, '?')
+            except OperationError:    # catch and ignore bogus objects
+                pass
+        operror = OperationError(w_type, w_value, tb)
+    #
+    ec = space.getexecutioncontext()
+    ec.set_sys_exc_info(operror)
+    Py_DecRef(space, w_type)
+    Py_DecRef(space, w_value)
+    Py_DecRef(space, w_traceback)
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -167,14 +167,16 @@
     if rffi.cast(lltype.Signed, res) == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
 
+# Warning, confusing function name (like CPython).  Used only for sq_contains.
 def wrap_objobjproc(space, w_self, w_args, func):
     func_target = rffi.cast(objobjproc, func)
     check_num_args(space, w_args, 1)
     w_value, = space.fixedview(w_args)
     res = generic_cpy_call(space, func_target, w_self, w_value)
-    if rffi.cast(lltype.Signed, res) == -1:
+    res = rffi.cast(lltype.Signed, res)
+    if res == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
-    return space.wrap(res)
+    return space.wrap(bool(res))
 
 def wrap_objobjargproc(space, w_self, w_args, func):
     func_target = rffi.cast(objobjargproc, func)
@@ -183,7 +185,7 @@
     res = generic_cpy_call(space, func_target, w_self, w_key, w_value)
     if rffi.cast(lltype.Signed, res) == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
-    return space.wrap(res)
+    return space.w_None
 
 def wrap_delitem(space, w_self, w_args, func):
     func_target = rffi.cast(objobjargproc, func)
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -2235,24 +2235,6 @@
     """Concat two strings giving a new Unicode string."""
     raise NotImplementedError
 
- at cpython_api([PyObject, PyObject, Py_ssize_t], PyObject)
-def PyUnicode_Split(space, s, sep, maxsplit):
-    """Split a string giving a list of Unicode strings.  If sep is NULL, splitting
-    will be done at all whitespace substrings.  Otherwise, splits occur at the given
-    separator.  At most maxsplit splits will be done.  If negative, no limit is
-    set.  Separators are not included in the resulting list.
-
-    This function used an int type for maxsplit. This might require
-    changes in your code for properly supporting 64-bit systems."""
-    raise NotImplementedError
-
- at cpython_api([PyObject, rffi.INT_real], PyObject)
-def PyUnicode_Splitlines(space, s, keepend):
-    """Split a Unicode string at line breaks, returning a list of Unicode strings.
-    CRLF is considered to be one line break.  If keepend is 0, the Line break
-    characters are not included in the resulting strings."""
-    raise NotImplementedError
-
 @cpython_api([PyObject, PyObject, rffi.CCHARP], PyObject)
 def PyUnicode_Translate(space, str, table, errors):
     """Translate a string by applying a character mapping table to it and return the
@@ -2269,29 +2251,6 @@
     use the default error handling."""
     raise NotImplementedError
 
- at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2)
-def PyUnicode_Find(space, str, substr, start, end, direction):
-    """Return the first position of substr in str*[*start:end] using the given
-    direction (direction == 1 means to do a forward search, direction == -1 a
-    backward search).  The return value is the index of the first match; a value of
-    -1 indicates that no match was found, and -2 indicates that an error
-    occurred and an exception has been set.
-
-    This function used an int type for start and end. This
-    might require changes in your code for properly supporting 64-bit
-    systems."""
-    raise NotImplementedError
-
- at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1)
-def PyUnicode_Count(space, str, substr, start, end):
-    """Return the number of non-overlapping occurrences of substr in
-    str[start:end].  Return -1 if an error occurred.
-
-    This function returned an int type and used an int
-    type for start and end. This might require changes in your code for
-    properly supporting 64-bit systems."""
-    raise NotImplementedError
-
 @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject)
 def PyUnicode_RichCompare(space, left, right, op):
     """Rich compare two unicode strings and return one of the following:
diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py
--- a/pypy/module/cpyext/test/conftest.py
+++ b/pypy/module/cpyext/test/conftest.py
@@ -10,7 +10,7 @@
     return False
 
 def pytest_funcarg__space(request):
-    return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+    return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
 
 def pytest_funcarg__api(request):
     return request.cls.api
diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c
--- a/pypy/module/cpyext/test/foo.c
+++ b/pypy/module/cpyext/test/foo.c
@@ -176,6 +176,8 @@
     {NULL}  /* Sentinel */
 };
 
+PyDoc_STRVAR(foo_doc, "foo is for testing.");
+
 static PyTypeObject footype = {
     PyVarObject_HEAD_INIT(NULL, 0)
     "foo.foo",               /*tp_name*/
@@ -198,7 +200,7 @@
     (setattrofunc)foo_setattro, /*tp_setattro*/
     0,                       /*tp_as_buffer*/
     Py_TPFLAGS_DEFAULT,      /*tp_flags*/
-    0,                       /*tp_doc*/
+    foo_doc,                 /*tp_doc*/
     0,                       /*tp_traverse*/
     0,                       /*tp_clear*/
     0,                       /*tp_richcompare*/
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -19,7 +19,8 @@
 
 class BaseApiTest(LeakCheckingTest):
     def setup_class(cls):
-        cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi',
+                                                        'array'])
 
         # warm up reference counts:
         # - the posix module allocates a HCRYPTPROV on Windows
diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py
--- a/pypy/module/cpyext/test/test_arraymodule.py
+++ b/pypy/module/cpyext/test/test_arraymodule.py
@@ -6,7 +6,6 @@
 
 class AppTestArrayModule(AppTestCpythonExtensionBase):
     enable_leak_checking = False
-    extra_modules = ['array']
 
     def test_basic(self):
         module = self.import_module(name='array')
diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py
--- a/pypy/module/cpyext/test/test_bufferobject.py
+++ b/pypy/module/cpyext/test/test_bufferobject.py
@@ -48,3 +48,17 @@
             ])
         b = module.buffer_new()
         raises(AttributeError, getattr, b, 'x')
+
+    def test_array_buffer(self):
+        module = self.import_extension('foo', [
+            ("roundtrip", "METH_O",
+             """
+                 PyBufferObject *buf = (PyBufferObject *)args;
+                 return PyString_FromStringAndSize(buf->b_ptr, buf->b_size);
+             """),
+            ])
+        import array
+        a = array.array('c', 'text')
+        b = buffer(a)
+        assert module.roundtrip(b) == 'text'
+        
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -35,7 +35,7 @@
 
 class AppTestApi:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
         from pypy.rlib.libffi import get_libc_name
         cls.w_libc = cls.space.wrap(get_libc_name())
 
@@ -165,11 +165,9 @@
         return leaking
 
 class AppTestCpythonExtensionBase(LeakCheckingTest):
-    extra_modules = []
     
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'] +
-                                               cls.extra_modules)
+        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
         cls.space.getbuiltinmodule("cpyext")
         from pypy.module.imp.importing import importhook
         importhook(cls.space, "os") # warm up reference counts
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -218,3 +218,51 @@
             assert e.filename == "blyf"
             assert e.errno == errno.EBADF
             assert e.strerror == os.strerror(errno.EBADF)
+
+    def test_GetSetExcInfo(self):
+        import sys
+        module = self.import_extension('foo', [
+            ("getset_exc_info", "METH_VARARGS",
+             r'''
+             PyObject *type, *val, *tb;
+             PyObject *new_type, *new_val, *new_tb;
+             PyObject *result;
+
+             if (!PyArg_ParseTuple(args, "OOO", &new_type, &new_val, &new_tb))
+                 return NULL;
+
+             PyErr_GetExcInfo(&type, &val, &tb);
+
+             Py_INCREF(new_type);
+             Py_INCREF(new_val);
+             Py_INCREF(new_tb);
+             PyErr_SetExcInfo(new_type, new_val, new_tb);
+
+             result = Py_BuildValue("OOO",
+                                    type ? type : Py_None,
+                                    val  ? val  : Py_None,
+                                    tb   ? tb   : Py_None);
+             Py_XDECREF(type);
+             Py_XDECREF(val);
+             Py_XDECREF(tb);
+             return result;
+             '''
+             ),
+            ])
+        try:
+            raise ValueError(5)
+        except ValueError, old_exc:
+            new_exc = TypeError("TEST")
+            orig_sys_exc_info = sys.exc_info()
+            orig_exc_info = module.getset_exc_info(new_exc.__class__,
+                                                   new_exc, None)
+            new_sys_exc_info = sys.exc_info()
+            new_exc_info = module.getset_exc_info(*orig_exc_info)
+            reset_sys_exc_info = sys.exc_info()
+
+            assert orig_exc_info[0] is old_exc.__class__
+            assert orig_exc_info[1] is old_exc
+            assert orig_exc_info == orig_sys_exc_info
+            assert orig_exc_info == reset_sys_exc_info
+            assert new_exc_info == (new_exc.__class__, new_exc, None)
+            assert new_exc_info == new_sys_exc_info
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -20,6 +20,7 @@
         assert type(obj) is module.fooType
         print "type of obj has type", type(type(obj))
         print "type of type of obj has type", type(type(type(obj)))
+        assert module.fooType.__doc__ == "foo is for testing."
 
     def test_typeobject_method_descriptor(self):
         module = self.import_module(name='foo')
@@ -414,8 +415,11 @@
             static int
             mp_ass_subscript(PyObject *self, PyObject *key, PyObject *value)
             {
-                PyErr_SetNone(PyExc_ZeroDivisionError);
-                return -1;
+                if (PyInt_Check(key)) {
+                    PyErr_SetNone(PyExc_ZeroDivisionError);
+                    return -1;
+                }
+                return 0;
             }
             PyMappingMethods tp_as_mapping;
             static PyTypeObject Foo_Type = {
@@ -425,6 +429,36 @@
             ''')
         obj = module.new_obj()
         raises(ZeroDivisionError, obj.__setitem__, 5, None)
+        res = obj.__setitem__('foo', None)
+        assert res is None
+
+    def test_sq_contains(self):
+        module = self.import_extension('foo', [
+           ("new_obj", "METH_NOARGS",
+            '''
+                PyObject *obj;
+                Foo_Type.tp_as_sequence = &tp_as_sequence;
+                tp_as_sequence.sq_contains = sq_contains;
+                if (PyType_Ready(&Foo_Type) < 0) return NULL;
+                obj = PyObject_New(PyObject, &Foo_Type);
+                return obj;
+            '''
+            )],
+            '''
+            static int
+            sq_contains(PyObject *self, PyObject *value)
+            {
+                return 42;
+            }
+            PySequenceMethods tp_as_sequence;
+            static PyTypeObject Foo_Type = {
+                PyVarObject_HEAD_INIT(NULL, 0)
+                "foo.foo",
+            };
+            ''')
+        obj = module.new_obj()
+        res = "foo" in obj
+        assert res is True
 
     def test_tp_iter(self):
         module = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -492,3 +492,31 @@
         assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 1, 5, -1) == 1
         self.raises(space, api, TypeError,
                     api.PyUnicode_Tailmatch, w_str, space.wrap(3), 2, 10, 1)
+
+    def test_count(self, space, api):
+        w_str = space.wrap(u"abcabdab")
+        assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), 0, -1) == 2
+        assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), 0, 2) == 1
+        assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), -5, 30) == 2
+
+    def test_find(self, space, api):
+        w_str = space.wrap(u"abcabcd")
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 7, 1) == 2
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 3, 7, 1) == 5
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 7, -1) == 5
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 3, 7, -1) == 5
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 4, -1) == 2
+        assert api.PyUnicode_Find(w_str, space.wrap(u"z"), 0, 4, -1) == -1
+
+    def test_split(self, space, api):
+        w_str = space.wrap(u"a\nb\nc\nd")
+        assert "[u'a', u'b', u'c', u'd']" == space.unwrap(space.repr(
+                api.PyUnicode_Split(w_str, space.wrap('\n'), -1)))
+        assert r"[u'a', u'b', u'c\nd']" == space.unwrap(space.repr(
+                api.PyUnicode_Split(w_str, space.wrap('\n'), 2)))
+        assert r"[u'a', u'b', u'c d']" == space.unwrap(space.repr(
+                api.PyUnicode_Split(space.wrap(u'a\nb  c d'), None, 2)))
+        assert "[u'a', u'b', u'c', u'd']" == space.unwrap(space.repr(
+                api.PyUnicode_Splitlines(w_str, 0)))
+        assert r"[u'a\n', u'b\n', u'c\n', u'd']" == space.unwrap(space.repr(
+                api.PyUnicode_Splitlines(w_str, 1)))
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -306,6 +306,8 @@
         if not space.is_true(space.issubtype(self, space.w_type)):
             self.flag_cpytype = True
         self.flag_heaptype = False
+        if pto.c_tp_doc:
+            self.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc))
 
 @bootstrap_function
 def init_typeobject(space):
@@ -620,7 +622,6 @@
     Creates an interpreter type from a PyTypeObject structure.
     """
     # missing:
-    # setting __doc__ if not defined and tp_doc defined
     # inheriting tp_as_* slots
     # unsupported:
     # tp_mro, tp_subclasses
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -602,3 +602,46 @@
     else:
         return stringtype.stringendswith(str, substr, start, end)
 
+ at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1)
+def PyUnicode_Count(space, w_str, w_substr, start, end):
+    """Return the number of non-overlapping occurrences of substr in
+    str[start:end].  Return -1 if an error occurred."""
+    w_count = space.call_method(w_str, "count", w_substr,
+                                space.wrap(start), space.wrap(end))
+    return space.int_w(w_count)
+
+ at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real],
+             Py_ssize_t, error=-2)
+def PyUnicode_Find(space, w_str, w_substr, start, end, direction):
+    """Return the first position of substr in str*[*start:end] using
+    the given direction (direction == 1 means to do a forward search,
+    direction == -1 a backward search).  The return value is the index
+    of the first match; a value of -1 indicates that no match was
+    found, and -2 indicates that an error occurred and an exception
+    has been set."""
+    if rffi.cast(lltype.Signed, direction) > 0:
+        w_pos = space.call_method(w_str, "find", w_substr,
+                                  space.wrap(start), space.wrap(end))
+    else:
+        w_pos = space.call_method(w_str, "rfind", w_substr,
+                                  space.wrap(start), space.wrap(end))
+    return space.int_w(w_pos)
+
+ at cpython_api([PyObject, PyObject, Py_ssize_t], PyObject)
+def PyUnicode_Split(space, w_str, w_sep, maxsplit):
+    """Split a string giving a list of Unicode strings.  If sep is
+    NULL, splitting will be done at all whitespace substrings.
+    Otherwise, splits occur at the given separator.  At most maxsplit
+    splits will be done.  If negative, no limit is set.  Separators
+    are not included in the resulting list."""
+    if w_sep is None:
+        w_sep = space.w_None
+    return space.call_method(w_str, "split", w_sep, space.wrap(maxsplit))
+
+ at cpython_api([PyObject, rffi.INT_real], PyObject)
+def PyUnicode_Splitlines(space, w_str, keepend):
+    """Split a Unicode string at line breaks, returning a list of
+    Unicode strings.  CRLF is considered to be one line break.  If
+    keepend is 0, the line break characters are not included in the
+    resulting strings."""
+    return space.call_method(w_str, "splitlines", space.wrap(keepend))
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -29,6 +29,7 @@
         'flatiter': 'interp_numarray.W_FlatIterator',
         'isna': 'interp_numarray.isna',
         'concatenate': 'interp_numarray.concatenate',
+        'repeat': 'interp_numarray.repeat',
 
         'set_string_function': 'appbridge.set_string_function',
 
@@ -99,9 +100,12 @@
         ("exp2", "exp2"),
         ("expm1", "expm1"),
         ("fabs", "fabs"),
+        ("fmax", "fmax"),
+        ("fmin", "fmin"),
         ("fmod", "fmod"),
         ("floor", "floor"),
         ("ceil", "ceil"),
+        ("trunc", "trunc"),
         ("greater", "greater"),
         ("greater_equal", "greater_equal"),
         ("less", "less"),
@@ -122,12 +126,16 @@
         ("sinh", "sinh"),
         ("subtract", "subtract"),
         ('sqrt', 'sqrt'),
+        ('square', 'square'),
         ("tan", "tan"),
         ("tanh", "tanh"),
         ('bitwise_and', 'bitwise_and'),
         ('bitwise_or', 'bitwise_or'),
         ('bitwise_xor', 'bitwise_xor'),
         ('bitwise_not', 'invert'),
+        ('left_shift', 'left_shift'),
+        ('right_shift', 'right_shift'),
+        ('invert', 'invert'),
         ('isnan', 'isnan'),
         ('isinf', 'isinf'),
         ('isneginf', 'isneginf'),
diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py
--- a/pypy/module/micronumpy/app_numpy.py
+++ b/pypy/module/micronumpy/app_numpy.py
@@ -16,7 +16,7 @@
         a[i][i] = 1
     return a
 
-def sum(a,axis=None):
+def sum(a,axis=None, out=None):
     '''sum(a, axis=None)
     Sum of array elements over a given axis.
 
@@ -43,17 +43,17 @@
     # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements.
     if not hasattr(a, "sum"):
         a = _numpypy.array(a)
-    return a.sum(axis)
+    return a.sum(axis=axis, out=out)
 
-def min(a, axis=None):
+def min(a, axis=None, out=None):
     if not hasattr(a, "min"):
         a = _numpypy.array(a)
-    return a.min(axis)
+    return a.min(axis=axis, out=out)
 
-def max(a, axis=None):
+def max(a, axis=None, out=None):
     if not hasattr(a, "max"):
         a = _numpypy.array(a)
-    return a.max(axis)
+    return a.max(axis=axis, out=out)
 
 def arange(start, stop=None, step=1, dtype=None):
     '''arange([start], stop[, step], dtype=None)
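
The intended app-level behaviour of the new out= argument matches standard
numpy, which _numpypy aims to follow; a short sketch using regular numpy for
illustration:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    out = np.zeros(3, dtype=a.dtype)
    np.sum(a, axis=0, out=out)      # result is written into the preallocated array
    assert (out == np.array([3, 5, 7])).all()
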
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py
--- a/pypy/module/micronumpy/interp_boxes.py
+++ b/pypy/module/micronumpy/interp_boxes.py
@@ -62,21 +62,24 @@
         return space.wrap(dtype.itemtype.bool(self))
 
     def _binop_impl(ufunc_name):
-        def impl(self, space, w_other):
+        def impl(self, space, w_other, w_out=None):
             from pypy.module.micronumpy import interp_ufuncs
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                            [self, w_other, w_out])
         return func_with_new_name(impl, "binop_%s_impl" % ufunc_name)
 
     def _binop_right_impl(ufunc_name):
-        def impl(self, space, w_other):
+        def impl(self, space, w_other, w_out=None):
             from pypy.module.micronumpy import interp_ufuncs
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, 
+                                                            [w_other, self, w_out])
         return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name)
 
     def _unaryop_impl(ufunc_name):
-        def impl(self, space):
+        def impl(self, space, w_out=None):
             from pypy.module.micronumpy import interp_ufuncs
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                                    [self, w_out])
         return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name)
 
     descr_add = _binop_impl("add")
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
--- a/pypy/module/micronumpy/interp_dtype.py
+++ b/pypy/module/micronumpy/interp_dtype.py
@@ -387,23 +387,6 @@
             alternate_constructors=[space.w_float],
             aliases=["float"],
         )
-        self.w_longlongdtype = W_Dtype(
-            types.Int64(),
-            num=9,
-            kind=SIGNEDLTR,
-            name='int64',
-            char='q',
-            w_box_type = space.gettypefor(interp_boxes.W_LongLongBox),
-            alternate_constructors=[space.w_int],
-        )
-        self.w_ulonglongdtype = W_Dtype(
-            types.UInt64(),
-            num=10,
-            kind=UNSIGNEDLTR,
-            name='uint64',
-            char='Q',
-            w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox),
-        )
         self.w_stringdtype = W_Dtype(
             types.StringType(1),
             num=18,
@@ -436,17 +419,19 @@
             self.w_booldtype, self.w_int8dtype, self.w_uint8dtype,
             self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype,
             self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype,
-            self.w_longlongdtype, self.w_ulonglongdtype,
+            self.w_int64dtype, self.w_uint64dtype,
             self.w_float32dtype,
             self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype,
             self.w_voiddtype,
         ]
-        self.dtypes_by_num_bytes = sorted(
+        self.float_dtypes_by_num_bytes = sorted(
             (dtype.itemtype.get_element_size(), dtype)
-            for dtype in self.builtin_dtypes
+            for dtype in [self.w_float32dtype, self.w_float64dtype]
         )
         self.dtypes_by_name = {}
-        for dtype in self.builtin_dtypes:
+        # we reverse, so the stuff with lower numbers override stuff with
+        # higher numbers
+        for dtype in reversed(self.builtin_dtypes):
             self.dtypes_by_name[dtype.name] = dtype
             can_name = dtype.kind + str(dtype.itemtype.get_element_size())
             self.dtypes_by_name[can_name] = dtype
@@ -474,7 +459,7 @@
             'LONG': self.w_longdtype,
             'UNICODE': self.w_unicodedtype,
             #'OBJECT',
-            'ULONGLONG': self.w_ulonglongdtype,
+            'ULONGLONG': self.w_uint64dtype,
             'STRING': self.w_stringdtype,
             #'CDOUBLE',
             #'DATETIME',
diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py
--- a/pypy/module/micronumpy/interp_iter.py
+++ b/pypy/module/micronumpy/interp_iter.py
@@ -269,7 +269,7 @@
 
     def apply_transformations(self, arr, transformations):
         v = BaseIterator.apply_transformations(self, arr, transformations)
-        if len(arr.shape) == 1:
+        if len(arr.shape) == 1 and len(v.res_shape) == 1:
             return OneDimIterator(self.offset, self.strides[0],
                                   self.res_shape[0])
         return v
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -83,8 +83,9 @@
         return space.wrap(W_NDimArray(shape[:], dtype=dtype))
 
     def _unaryop_impl(ufunc_name):
-        def impl(self, space):
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self])
+        def impl(self, space, w_out=None):
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                                [self, w_out])
         return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name)
 
     descr_pos = _unaryop_impl("positive")
@@ -93,8 +94,9 @@
     descr_invert = _unaryop_impl("invert")
 
     def _binop_impl(ufunc_name):
-        def impl(self, space, w_other):
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other])
+        def impl(self, space, w_other, w_out=None):
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                        [self, w_other, w_out])
         return func_with_new_name(impl, "binop_%s_impl" % ufunc_name)
 
     descr_add = _binop_impl("add")
@@ -124,12 +126,12 @@
         return space.newtuple([w_quotient, w_remainder])
 
     def _binop_right_impl(ufunc_name):
-        def impl(self, space, w_other):
+        def impl(self, space, w_other, w_out=None):
             w_other = scalar_w(space,
                 interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()),
                 w_other
             )
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out])
         return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name)
 
     descr_radd = _binop_right_impl("add")
@@ -152,13 +154,21 @@
         return space.newtuple([w_quotient, w_remainder])
 
     def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False):
-        def impl(self, space, w_axis=None):
+        def impl(self, space, w_axis=None, w_out=None):
             if space.is_w(w_axis, space.w_None):
                 axis = -1
             else:
                 axis = space.int_w(w_axis)
+            if space.is_w(w_out, space.w_None) or not w_out:
+                out = None
+            elif not isinstance(w_out, BaseArray):
+                raise OperationError(space.w_TypeError, space.wrap( 
+                        'output must be an array'))
+            else:
+                out = w_out
             return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space,
-                                        self, True, promote_to_largest, axis)
+                                        self, True, promote_to_largest, axis,
+                                                                   False, out)
         return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name)
 
     descr_sum = _reduce_ufunc_impl("add")
@@ -213,6 +223,7 @@
     def descr_dot(self, space, w_other):
         other = convert_to_array(space, w_other)
         if isinstance(other, Scalar):
+            #Note: w_out is not modified; this matches numpy behaviour.
             return self.descr_mul(space, other)
         elif len(self.shape) < 2 and len(other.shape) < 2:
             w_res = self.descr_mul(space, other)
@@ -502,7 +513,30 @@
             arr = concrete.copy(space)
             arr.setshape(space, new_shape)
         return arr
-
+       
+    @unwrap_spec(axis1=int, axis2=int)
+    def descr_swapaxes(self, space, axis1, axis2):
+        """a.swapaxes(axis1, axis2)
+    
+        Return a view of the array with `axis1` and `axis2` interchanged.
+    
+        Refer to `numpy.swapaxes` for full documentation.
+    
+        See Also
+        --------
+        numpy.swapaxes : equivalent function
+        """
+        concrete = self.get_concrete()
+        shape = concrete.shape[:]
+        strides = concrete.strides[:]
+        backstrides = concrete.backstrides[:]
+        shape[axis1], shape[axis2] = shape[axis2], shape[axis1]   
+        strides[axis1], strides[axis2] = strides[axis2], strides[axis1]
+        backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] 
+        arr = W_NDimSlice(concrete.start, strides, 
+                           backstrides, shape, concrete)
+        return space.wrap(arr)   
+                                      
     def descr_tolist(self, space):
         if len(self.shape) == 0:
             assert isinstance(self, Scalar)
@@ -514,14 +548,14 @@
             )
         return w_result
 
-    def descr_mean(self, space, w_axis=None):
+    def descr_mean(self, space, w_axis=None, w_out=None):
         if space.is_w(w_axis, space.w_None):
             w_axis = space.wrap(-1)
             w_denom = space.wrap(support.product(self.shape))
         else:
             dim = space.int_w(w_axis)
             w_denom = space.wrap(self.shape[dim])
-        return space.div(self.descr_sum_promote(space, w_axis), w_denom)
+        return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom)
 
     def descr_var(self, space, w_axis=None):
         return get_appbridge_cache(space).call_method(space, '_var', self,
@@ -662,6 +696,10 @@
     def compute_first_step(self, sig, frame):
         pass
 
+    @unwrap_spec(repeats=int)
+    def descr_repeat(self, space, repeats, w_axis=None):
+        return repeat(space, self, repeats, w_axis)
+
 def convert_to_array(space, w_obj):
     if isinstance(w_obj, BaseArray):
         return w_obj
@@ -714,11 +752,12 @@
     """
     Class for representing virtual arrays, such as binary ops or ufuncs
     """
-    def __init__(self, name, shape, res_dtype):
+    def __init__(self, name, shape, res_dtype, out_arg=None):
         BaseArray.__init__(self, shape)
         self.forced_result = None
         self.res_dtype = res_dtype
         self.name = name
+        self.res = out_arg
         self.size = support.product(self.shape) * res_dtype.get_size()
 
     def _del_sources(self):
@@ -727,13 +766,18 @@
         raise NotImplementedError
 
     def compute(self):
-        ra = ResultArray(self, self.shape, self.res_dtype)
+        ra = ResultArray(self, self.shape, self.res_dtype, self.res)
         loop.compute(ra)
+        if self.res:
+            broadcast_dims = len(self.res.shape) - len(self.shape)
+            chunks = [Chunk(0,0,0,0)] * broadcast_dims + \
+                     [Chunk(0, i, 1, i) for i in self.shape]
+            return Chunks(chunks).apply(self.res)
         return ra.left
 
     def force_if_needed(self):
         if self.forced_result is None:
-            self.forced_result = self.compute()
+            self.forced_result = self.compute().get_concrete()
             self._del_sources()
 
     def get_concrete(self):
@@ -773,8 +817,9 @@
 
 
 class Call1(VirtualArray):
-    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values):
-        VirtualArray.__init__(self, name, shape, res_dtype)
+    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values,
+                                                            out_arg=None):
+        VirtualArray.__init__(self, name, shape, res_dtype, out_arg)
         self.values = values
         self.size = values.size
         self.ufunc = ufunc
@@ -786,6 +831,12 @@
     def create_sig(self):
         if self.forced_result is not None:
             return self.forced_result.create_sig()
+        if self.shape != self.values.shape:
+            #This happens if the out arg is used
+            return signature.BroadcastUfunc(self.ufunc, self.name,
+                                            self.calc_dtype,
+                                            self.values.create_sig(),
+                                            self.res.create_sig())
         return signature.Call1(self.ufunc, self.name, self.calc_dtype,
                                self.values.create_sig())
 
@@ -793,8 +844,9 @@
     """
     Intermediate class for performing binary operations.
     """
-    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right):
-        VirtualArray.__init__(self, name, shape, res_dtype)
+    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right,
+            out_arg=None):
+        VirtualArray.__init__(self, name, shape, res_dtype, out_arg)
         self.ufunc = ufunc
         self.left = left
         self.right = right
@@ -832,8 +884,13 @@
         Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child)
 
     def create_sig(self):
-        return signature.ResultSignature(self.res_dtype, self.left.create_sig(),
-                                         self.right.create_sig())
+        if self.left.shape != self.right.shape:
+            sig = signature.BroadcastResultSignature(self.res_dtype,
+                        self.left.create_sig(), self.right.create_sig())
+        else:
+            sig = signature.ResultSignature(self.res_dtype, 
+                        self.left.create_sig(), self.right.create_sig())
+        return sig
 
 class ToStringArray(Call1):
     def __init__(self, child):
@@ -842,9 +899,9 @@
         self.s = StringBuilder(child.size * self.item_size)
         Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype,
                        child)
-        self.res = W_NDimArray([1], dtype, 'C')
-        self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char),
-                                    self.res.storage)
+        self.res_str = W_NDimArray([1], dtype, order='C')
+        self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char),
+                                    self.res_str.storage)
 
     def create_sig(self):
         return signature.ToStringSignature(self.calc_dtype,
@@ -950,7 +1007,7 @@
 
     def setitem(self, item, value):
         self.invalidated()
-        self.dtype.setitem(self, item, value)
+        self.dtype.setitem(self, item, value.convert_to(self.dtype))
 
     def calc_strides(self, shape):
         dtype = self.find_dtype()
@@ -1231,6 +1288,31 @@
         return convert_to_array(space, w_obj2).descr_dot(space, w_arr)
     return w_arr.descr_dot(space, w_obj2)
 
+ at unwrap_spec(repeats=int)
+def repeat(space, w_arr, repeats, w_axis=None):
+    arr = convert_to_array(space, w_arr)
+    if space.is_w(w_axis, space.w_None):
+        arr = arr.descr_flatten(space).get_concrete()
+        orig_size = arr.shape[0]
+        shape = [arr.shape[0] * repeats]
+        res = W_NDimArray(shape, arr.find_dtype())
+        for i in range(repeats):
+            Chunks([Chunk(i, shape[0] - repeats + i, repeats,
+                          orig_size)]).apply(res).setslice(space, arr)
+    else:
+        arr = arr.get_concrete()
+        axis = space.int_w(w_axis)
+        shape = arr.shape[:]
+        chunks = [Chunk(0, i, 1, i) for i in shape]
+        orig_size = shape[axis]
+        shape[axis] *= repeats
+        res = W_NDimArray(shape, arr.find_dtype())
+        for i in range(repeats):
+            chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats,
+                                 orig_size)
+            Chunks(chunks).apply(res).setslice(space, arr)
+    return res
+
 @unwrap_spec(axis=int)
 def concatenate(space, w_args, axis=0):
     args_w = space.listview(w_args)
@@ -1353,9 +1435,11 @@
     copy = interp2app(BaseArray.descr_copy),
     flatten = interp2app(BaseArray.descr_flatten),
     reshape = interp2app(BaseArray.descr_reshape),
+    swapaxes = interp2app(BaseArray.descr_swapaxes),
     tolist = interp2app(BaseArray.descr_tolist),
     take = interp2app(BaseArray.descr_take),
     compress = interp2app(BaseArray.descr_compress),
+    repeat = interp2app(BaseArray.descr_repeat),
 )
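
repeat() and swapaxes() follow the standard numpy semantics; a minimal sketch
using regular numpy for illustration (not _numpypy itself):

    import numpy as np

    a = np.array([[1, 2], [3, 4]])
    assert (np.repeat(a, 2) == np.array([1, 1, 2, 2, 3, 3, 4, 4])).all()
    assert (np.repeat(a, 2, axis=0) == np.array([[1, 2], [1, 2], [3, 4], [3, 4]])).all()
    assert (a.swapaxes(0, 1) == np.array([[1, 3], [2, 4]])).all()
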
 
 
diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
--- a/pypy/module/micronumpy/interp_ufuncs.py
+++ b/pypy/module/micronumpy/interp_ufuncs.py
@@ -3,9 +3,11 @@
 from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped
 from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
 from pypy.module.micronumpy import interp_boxes, interp_dtype, support, loop
+from pypy.rlib import jit
 from pypy.rlib.rarithmetic import LONG_BIT
 from pypy.tool.sourcetools import func_with_new_name
 
+
 class W_Ufunc(Wrappable):
     _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"]
     _immutable_fields_ = ["promote_to_float", "promote_bools", "name"]
@@ -28,26 +30,38 @@
         return self.identity
 
     def descr_call(self, space, __args__):
+        from interp_numarray import BaseArray
         args_w, kwds_w = __args__.unpack()
         # it occurs to me that we don't support any datatypes that
         # require casting, change it later when we do
         kwds_w.pop('casting', None)
         w_subok = kwds_w.pop('subok', None)
         w_out = kwds_w.pop('out', space.w_None)
-        if ((w_subok is not None and space.is_true(w_subok)) or
-            not space.is_w(w_out, space.w_None)):
+        # Setup a default value for out
+        if space.is_w(w_out, space.w_None):
+            out = None
+        else:
+            out = w_out
+        if (w_subok is not None and space.is_true(w_subok)):
             raise OperationError(space.w_NotImplementedError,
                                  space.wrap("parameters unsupported"))
         if kwds_w or len(args_w) < self.argcount:
             raise OperationError(space.w_ValueError,
                 space.wrap("invalid number of arguments")
             )
-        elif len(args_w) > self.argcount:
-            # The extra arguments should actually be the output array, but we
-            # don't support that yet.
+        elif (len(args_w) > self.argcount and out is not None) or \
+             (len(args_w) > self.argcount + 1):
             raise OperationError(space.w_TypeError,
                 space.wrap("invalid number of arguments")
             )
+        # Override the default out value if it was provided in args_w
+        if len(args_w) > self.argcount:
+            out = args_w[-1]
+        else:
+            args_w = args_w[:] + [out]
+        if out is not None and not isinstance(out, BaseArray):
+            raise OperationError(space.w_TypeError, space.wrap(
+                                            'output must be an array'))
         return self.call(space, args_w)
 
     @unwrap_spec(skipna=bool, keepdims=bool)
@@ -105,28 +119,33 @@
         array([[ 1,  5],
                [ 9, 13]])
         """
-        if not space.is_w(w_out, space.w_None):
-            raise OperationError(space.w_NotImplementedError, space.wrap(
-                "out not supported"))
+        from pypy.module.micronumpy.interp_numarray import BaseArray
         if w_axis is None:
             axis = 0
         elif space.is_w(w_axis, space.w_None):
             axis = -1
         else:
             axis = space.int_w(w_axis)
-        return self.reduce(space, w_obj, False, False, axis, keepdims)
+        if space.is_w(w_out, space.w_None):
+            out = None
+        elif not isinstance(w_out, BaseArray):
+            raise OperationError(space.w_TypeError, space.wrap(
+                                                'output must be an array'))
+        else:
+            out = w_out
+        return self.reduce(space, w_obj, False, False, axis, keepdims, out)
 
-    def reduce(self, space, w_obj, multidim, promote_to_largest, dim,
-               keepdims=False):
+    def reduce(self, space, w_obj, multidim, promote_to_largest, axis,
+               keepdims=False, out=None):
         from pypy.module.micronumpy.interp_numarray import convert_to_array, \
-                                                           Scalar, ReduceArray
+                                             Scalar, ReduceArray, W_NDimArray
         if self.argcount != 2:
             raise OperationError(space.w_ValueError, space.wrap("reduce only "
                 "supported for binary functions"))
         assert isinstance(self, W_Ufunc2)
         obj = convert_to_array(space, w_obj)
-        if dim >= len(obj.shape):
-            raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim))
+        if axis >= len(obj.shape):
+            raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis))
         if isinstance(obj, Scalar):
             raise OperationError(space.w_TypeError, space.wrap("cannot reduce "
                 "on a scalar"))
@@ -144,21 +163,55 @@
         if self.identity is None and size == 0:
             raise operationerrfmt(space.w_ValueError, "zero-size array to "
                     "%s.reduce without identity", self.name)
-        if shapelen > 1 and dim >= 0:
-            return self.do_axis_reduce(obj, dtype, dim, keepdims)
-        arr = ReduceArray(self.func, self.name, self.identity, obj, dtype)
-        return loop.compute(arr)
+        if shapelen > 1 and axis >= 0:
+            if keepdims:
+                shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:]
+            else:
+                shape = obj.shape[:axis] + obj.shape[axis + 1:]
+            if out:
+                #Test for shape agreement
+                if len(out.shape) > len(shape):
+                    raise operationerrfmt(space.w_ValueError,
+                        'output parameter for reduction operation %s' +
+                        ' has too many dimensions', self.name)
+                elif len(out.shape) < len(shape):
+                    raise operationerrfmt(space.w_ValueError,
+                        'output parameter for reduction operation %s' +
+                        ' does not have enough dimensions', self.name)
+                elif out.shape != shape:
+                    raise operationerrfmt(space.w_ValueError,
+                        'output parameter shape mismatch, expecting [%s]' +
+                        ' , got [%s]',
+                        ",".join([str(x) for x in shape]),
+                        ",".join([str(x) for x in out.shape]),
+                        )
+                #Test for dtype agreement, perhaps create an intermediate
+                #if out.dtype != dtype:
+                #    raise OperationError(space.w_TypeError, space.wrap(
+                #        "mismatched  dtypes"))
+                return self.do_axis_reduce(obj, out.find_dtype(), axis, out)
+            else:
+                result = W_NDimArray(shape, dtype)
+                return self.do_axis_reduce(obj, dtype, axis, result)
+        if out:
+            if len(out.shape)>0:
+                raise operationerrfmt(space.w_ValueError, "output parameter "
+                              "for reduction operation %s has too many"
+                              " dimensions",self.name)
+            arr = ReduceArray(self.func, self.name, self.identity, obj,
+                                                            out.find_dtype())
+            val = loop.compute(arr)
+            assert isinstance(out, Scalar)
+            out.value = val
+        else:
+            arr = ReduceArray(self.func, self.name, self.identity, obj, dtype)
+            val = loop.compute(arr)
+        return val
 
-    def do_axis_reduce(self, obj, dtype, dim, keepdims):
-        from pypy.module.micronumpy.interp_numarray import AxisReduce,\
-             W_NDimArray
-        if keepdims:
-            shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:]
-        else:
-            shape = obj.shape[:dim] + obj.shape[dim + 1:]
-        result = W_NDimArray(shape, dtype)
+    def do_axis_reduce(self, obj, dtype, axis, result):
+        from pypy.module.micronumpy.interp_numarray import AxisReduce
         arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype,
-                         result, obj, dim)
+                         result, obj, axis)
         loop.compute(arr)
         return arr.left
 
@@ -176,24 +229,55 @@
         self.bool_result = bool_result
 
     def call(self, space, args_w):
-        from pypy.module.micronumpy.interp_numarray import (Call1,
-            convert_to_array, Scalar)
-
-        [w_obj] = args_w
+        from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray,
+            convert_to_array, Scalar, shape_agreement)
+        if len(args_w)<2:
+            [w_obj] = args_w
+            out = None
+        else:
+            [w_obj, out] = args_w
+            if space.is_w(out, space.w_None):
+                out = None
         w_obj = convert_to_array(space, w_obj)
         calc_dtype = find_unaryop_result_dtype(space,
                                   w_obj.find_dtype(),
                                   promote_to_float=self.promote_to_float,
                                   promote_bools=self.promote_bools)
-        if self.bool_result:
+        if out:
+            if not isinstance(out, BaseArray):
+                raise OperationError(space.w_TypeError, space.wrap(
+                                                'output must be an array'))
+            res_dtype = out.find_dtype()
+        elif self.bool_result:
             res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
         else:
             res_dtype = calc_dtype
         if isinstance(w_obj, Scalar):
-            return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)))
-
-        w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype,
-                      w_obj)
+            arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))
+            if isinstance(out, Scalar):
+                out.value = arr
+            elif isinstance(out, BaseArray):
+                out.fill(space, arr)
+            else:
+                out = arr
+            return space.wrap(out)
+        if out:
+            assert isinstance(out, BaseArray) # For translation
+            broadcast_shape =  shape_agreement(space, w_obj.shape, out.shape)
+            if not broadcast_shape or broadcast_shape != out.shape:
+                raise operationerrfmt(space.w_ValueError,
+                    'output parameter shape mismatch, could not broadcast [%s]' +
+                    ' to [%s]',
+                    ",".join([str(x) for x in w_obj.shape]),
+                    ",".join([str(x) for x in out.shape]),
+                    )
+            w_res = Call1(self.func, self.name, out.shape, calc_dtype,
+                                         res_dtype, w_obj, out)
+            #Force it immediately
+            w_res.get_concrete()
+        else:
+            w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype,
+                                         res_dtype, w_obj)
         w_obj.add_invalidates(w_res)
         return w_res
 
@@ -210,34 +294,63 @@
         self.func = func
         self.comparison_func = comparison_func
 
+    @jit.unroll_safe
     def call(self, space, args_w):
         from pypy.module.micronumpy.interp_numarray import (Call2,
-            convert_to_array, Scalar, shape_agreement)
-
-        [w_lhs, w_rhs] = args_w
+            convert_to_array, Scalar, shape_agreement, BaseArray)
+        if len(args_w) > 2:
+            [w_lhs, w_rhs, w_out] = args_w
+        else:
+            [w_lhs, w_rhs] = args_w
+            w_out = None
         w_lhs = convert_to_array(space, w_lhs)
         w_rhs = convert_to_array(space, w_rhs)
-        calc_dtype = find_binop_result_dtype(space,
-            w_lhs.find_dtype(), w_rhs.find_dtype(),
-            int_only=self.int_only,
-            promote_to_float=self.promote_to_float,
-            promote_bools=self.promote_bools,
-        )
+        if space.is_w(w_out, space.w_None) or w_out is None:
+            out = None
+            calc_dtype = find_binop_result_dtype(space,
+                w_lhs.find_dtype(), w_rhs.find_dtype(),
+                int_only=self.int_only,
+                promote_to_float=self.promote_to_float,
+                promote_bools=self.promote_bools,
+            )
+        elif not isinstance(w_out, BaseArray):
+            raise OperationError(space.w_TypeError, space.wrap(
+                    'output must be an array'))
+        else:
+            out = w_out
+            calc_dtype = out.find_dtype()
         if self.comparison_func:
             res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
         else:
             res_dtype = calc_dtype
         if isinstance(w_lhs, Scalar) and isinstance(w_rhs, Scalar):
-            return space.wrap(self.func(calc_dtype,
+            arr = self.func(calc_dtype,
                 w_lhs.value.convert_to(calc_dtype),
                 w_rhs.value.convert_to(calc_dtype)
-            ))
+            )
+            if isinstance(out, Scalar):
+                out.value = arr
+            elif isinstance(out, BaseArray):
+                out.fill(space, arr)
+            else:
+                out = arr
+            return space.wrap(out)
         new_shape = shape_agreement(space, w_lhs.shape, w_rhs.shape)
+        # Test correctness of out.shape
+        if out and out.shape != shape_agreement(space, new_shape, out.shape):
+            raise operationerrfmt(space.w_ValueError,
+                'output parameter shape mismatch, could not broadcast [%s]' +
+                ' to [%s]',
+                ",".join([str(x) for x in new_shape]),
+                ",".join([str(x) for x in out.shape]),
+                )
         w_res = Call2(self.func, self.name,
                       new_shape, calc_dtype,
-                      res_dtype, w_lhs, w_rhs)
+                      res_dtype, w_lhs, w_rhs, out)
         w_lhs.add_invalidates(w_res)
         w_rhs.add_invalidates(w_res)
+        if out:
+            w_res.get_concrete()
         return w_res
 
 
@@ -314,7 +427,7 @@
             return dt
         if dt.num >= 5:
             return interp_dtype.get_dtype_cache(space).w_float64dtype
-        for bytes, dtype in interp_dtype.get_dtype_cache(space).dtypes_by_num_bytes:
+        for bytes, dtype in interp_dtype.get_dtype_cache(space).float_dtypes_by_num_bytes:
             if (dtype.kind == interp_dtype.FLOATINGLTR and
                 dtype.itemtype.get_element_size() > dt.itemtype.get_element_size()):
                 return dtype
@@ -423,14 +536,18 @@
             ("reciprocal", "reciprocal", 1),
 
             ("fabs", "fabs", 1, {"promote_to_float": True}),
+            ("fmax", "fmax", 2, {"promote_to_float": True}),
+            ("fmin", "fmin", 2, {"promote_to_float": True}),
             ("fmod", "fmod", 2, {"promote_to_float": True}),
             ("floor", "floor", 1, {"promote_to_float": True}),
             ("ceil", "ceil", 1, {"promote_to_float": True}),
+            ("trunc", "trunc", 1, {"promote_to_float": True}),
             ("exp", "exp", 1, {"promote_to_float": True}),
             ("exp2", "exp2", 1, {"promote_to_float": True}),
             ("expm1", "expm1", 1, {"promote_to_float": True}),
 
             ('sqrt', 'sqrt', 1, {'promote_to_float': True}),
+            ('square', 'square', 1, {'promote_to_float': True}),
 
             ("sin", "sin", 1, {"promote_to_float": True}),
             ("cos", "cos", 1, {"promote_to_float": True}),
diff --git a/pypy/module/micronumpy/signature.py b/pypy/module/micronumpy/signature.py
--- a/pypy/module/micronumpy/signature.py
+++ b/pypy/module/micronumpy/signature.py
@@ -107,6 +107,10 @@
         arr.compute_first_step(self, f)
         return f
 
+    def debug_repr(self):
+        # should be overridden, but in case it isn't, provide a default
+        return str(self)
+
 class ConcreteSignature(Signature):
     _immutable_fields_ = ['dtype']
 
@@ -207,7 +211,7 @@
     def _create_iter(self, iterlist, arraylist, arr, transforms):
         from pypy.module.micronumpy.interp_numarray import VirtualSlice
         assert isinstance(arr, VirtualSlice)
-        transforms = transforms + [ViewTransform(arr.chunks)]
+        transforms = [ViewTransform(arr.chunks)] + transforms 
         self.child._create_iter(iterlist, arraylist, arr.child, transforms)
 
     def eval(self, frame, arr):
@@ -215,14 +219,18 @@
         assert isinstance(arr, VirtualSlice)
         return self.child.eval(frame, arr.child)
 
+    def debug_repr(self):
+        return 'VirtualSlice(%s)' % self.child.debug_repr()
+
 class Call1(Signature):
-    _immutable_fields_ = ['unfunc', 'name', 'child', 'dtype']
+    _immutable_fields_ = ['unfunc', 'name', 'child', 'res', 'dtype']
 
-    def __init__(self, func, name, dtype, child):
+    def __init__(self, func, name, dtype, child, res=None):
         self.unfunc = func
         self.child = child
         self.name = name
         self.dtype = dtype
+        self.res  = res
 
     def hash(self):
         return compute_hash(self.name) ^ intmask(self.child.hash() << 1)
@@ -256,6 +264,29 @@
         v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype)
         return self.unfunc(arr.calc_dtype, v)
 
+
+class BroadcastUfunc(Call1):
+    def _invent_numbering(self, cache, allnumbers):
+        self.res._invent_numbering(cache, allnumbers)
+        self.child._invent_numbering(new_cache(), allnumbers)
+
+    def debug_repr(self):
+        return 'BroadcastUfunc(%s, %s)' % (self.name, self.child.debug_repr())
+
+    def _create_iter(self, iterlist, arraylist, arr, transforms):
+        from pypy.module.micronumpy.interp_numarray import Call1
+
+        assert isinstance(arr, Call1)
+        vtransforms = [BroadcastTransform(arr.values.shape)] + transforms
+        self.child._create_iter(iterlist, arraylist, arr.values, vtransforms)
+        self.res._create_iter(iterlist, arraylist, arr.res, transforms)
+
+    def eval(self, frame, arr):
+        from pypy.module.micronumpy.interp_numarray import Call1
+        assert isinstance(arr, Call1)
+        v = self.child.eval(frame, arr.values).convert_to(arr.calc_dtype)
+        return self.unfunc(arr.calc_dtype, v)
+
 class Call2(Signature):
     _immutable_fields_ = ['binfunc', 'name', 'calc_dtype', 'left', 'right']
 
@@ -316,7 +347,17 @@
 
         assert isinstance(arr, ResultArray)
         offset = frame.get_final_iter().offset
-        arr.left.setitem(offset, self.right.eval(frame, arr.right))
+        val = self.right.eval(frame, arr.right)
+        arr.left.setitem(offset, val)
+
+class BroadcastResultSignature(ResultSignature):
+    def _create_iter(self, iterlist, arraylist, arr, transforms):
+        from pypy.module.micronumpy.interp_numarray import ResultArray
+
+        assert isinstance(arr, ResultArray)
+        rtransforms = [BroadcastTransform(arr.left.shape)] + transforms
+        self.left._create_iter(iterlist, arraylist, arr.left, transforms)
+        self.right._create_iter(iterlist, arraylist, arr.right, rtransforms)
 
 class ToStringSignature(Call1):
     def __init__(self, dtype, child):
@@ -327,10 +368,10 @@
         from pypy.module.micronumpy.interp_numarray import ToStringArray
 
         assert isinstance(arr, ToStringArray)
-        arr.res.setitem(0, self.child.eval(frame, arr.values).convert_to(
+        arr.res_str.setitem(0, self.child.eval(frame, arr.values).convert_to(
             self.dtype))
         for i in range(arr.item_size):
-            arr.s.append(arr.res_casted[i])
+            arr.s.append(arr.res_str_casted[i])
 
 class BroadcastLeft(Call2):
     def _invent_numbering(self, cache, allnumbers):
@@ -341,7 +382,7 @@
         from pypy.module.micronumpy.interp_numarray import Call2
 
         assert isinstance(arr, Call2)
-        ltransforms = transforms + [BroadcastTransform(arr.shape)]
+        ltransforms = [BroadcastTransform(arr.shape)] + transforms
         self.left._create_iter(iterlist, arraylist, arr.left, ltransforms)
         self.right._create_iter(iterlist, arraylist, arr.right, transforms)
 
@@ -354,7 +395,7 @@
         from pypy.module.micronumpy.interp_numarray import Call2
 
         assert isinstance(arr, Call2)
-        rtransforms = transforms + [BroadcastTransform(arr.shape)]
+        rtransforms = [BroadcastTransform(arr.shape)] + transforms
         self.left._create_iter(iterlist, arraylist, arr.left, transforms)
         self.right._create_iter(iterlist, arraylist, arr.right, rtransforms)
 
@@ -367,8 +408,8 @@
         from pypy.module.micronumpy.interp_numarray import Call2
 
         assert isinstance(arr, Call2)
-        rtransforms = transforms + [BroadcastTransform(arr.shape)]
-        ltransforms = transforms + [BroadcastTransform(arr.shape)]
+        rtransforms = [BroadcastTransform(arr.shape)] + transforms
+        ltransforms = [BroadcastTransform(arr.shape)] + transforms
         self.left._create_iter(iterlist, arraylist, arr.left, ltransforms)
         self.right._create_iter(iterlist, arraylist, arr.right, rtransforms)
 
@@ -390,7 +431,7 @@
         frame.cur_value = self.binfunc(self.calc_dtype, frame.cur_value, rval)
 
     def debug_repr(self):
-        return 'ReduceSig(%s)' % (self.name, self.right.debug_repr())
+        return 'ReduceSig(%s, %s)' % (self.name, self.right.debug_repr())
 
 class SliceloopSignature(Call2):
     def eval(self, frame, arr):
@@ -414,7 +455,7 @@
         from pypy.module.micronumpy.interp_numarray import SliceArray
 
         assert isinstance(arr, SliceArray)
-        rtransforms = transforms + [BroadcastTransform(arr.shape)]
+        rtransforms = [BroadcastTransform(arr.shape)] + transforms
         self.left._create_iter(iterlist, arraylist, arr.left, transforms)
         self.right._create_iter(iterlist, arraylist, arr.right, rtransforms)
 
@@ -455,6 +496,5 @@
             cur = arr.left.getitem(iterator.offset)
             value = self.binfunc(self.calc_dtype, cur, v)
         arr.left.setitem(iterator.offset, value)
-    
     def debug_repr(self):
         return 'AxisReduceSig(%s, %s)' % (self.name, self.right.debug_repr())
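The signature.py hunks above all make the same change: the BroadcastTransform is now prepended to the transform list instead of appended. A minimal sketch of why the ordering matters, assuming transforms are applied in list order; the helper names below are illustrative stand-ins, not PyPy APIs:

    def apply_transforms(shape, transforms):
        # Illustrative only: apply stand-in transform callables in list order.
        for t in transforms:
            shape = t(shape)
        return shape

    broadcast = lambda shape: (3, 2)     # stand-in for BroadcastTransform((3, 2))
    inherited = lambda shape: shape[1:]  # stand-in for a transform passed in by the caller

    assert apply_transforms((2,), [broadcast] + [inherited]) == (2,)
    assert apply_transforms((2,), [inherited] + [broadcast]) == (3, 2)

Prepending means the child is broadcast to the target shape before any transforms inherited from the caller are applied; appending would compose them the other way around.
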
diff --git a/pypy/module/micronumpy/strides.py b/pypy/module/micronumpy/strides.py
--- a/pypy/module/micronumpy/strides.py
+++ b/pypy/module/micronumpy/strides.py
@@ -1,6 +1,7 @@
 from pypy.rlib import jit
 from pypy.interpreter.error import OperationError
 
+ at jit.look_inside_iff(lambda chunks: jit.isconstant(len(chunks)))
 def enumerate_chunks(chunks):
     result = []
     i = -1
@@ -85,9 +86,9 @@
         space.isinstance_w(w_item_or_slice, space.w_slice)):
         raise OperationError(space.w_IndexError,
                              space.wrap('unsupported iterator index'))
-            
+
     start, stop, step, lngth = space.decode_index4(w_item_or_slice, size)
-        
+
     coords = [0] * len(shape)
     i = start
     if order == 'C':
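The functional change to strides.py is the jit.look_inside_iff hint on enumerate_chunks(). For reference, a hedged sketch of the same RPython idiom (the function below is illustrative, not part of the diff): the predicate receives the same arguments as the decorated function, and the JIT only traces into the body when the predicate holds at trace time.

    from pypy.rlib import jit

    @jit.look_inside_iff(lambda seq: jit.isconstant(len(seq)))
    def sum_items(seq):
        # Traced (and thus unrollable) only when len(seq) is a trace-time
        # constant; otherwise the call stays a residual, non-inlined call.
        total = 0
        for item in seq:
            total += item
        return total
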
diff --git a/pypy/module/micronumpy/support.py b/pypy/module/micronumpy/support.py
--- a/pypy/module/micronumpy/support.py
+++ b/pypy/module/micronumpy/support.py
@@ -1,5 +1,9 @@
+from pypy.rlib import jit
+
+
+ at jit.unroll_safe
 def product(s):
     i = 1
     for x in s:
         i *= x
-    return i
\ No newline at end of file
+    return i
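support.product() multiplies a shape's extents to get an element count; marking it @jit.unroll_safe lets the JIT unroll the loop, since shapes are short lists. A plain-Python restatement for reference:

    def product(s):
        # Number of elements in an array of shape s.
        i = 1
        for x in s:
            i *= x
        return i

    assert product([5, 3]) == 15   # a 5x3 array holds 15 elements
    assert product([]) == 1        # a 0-d array holds a single element
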
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -301,6 +301,7 @@
         else:
             raises(OverflowError, numpy.int32, 2147483648)
             raises(OverflowError, numpy.int32, '2147483648')
+        assert numpy.dtype('int32') is numpy.dtype(numpy.int32)
 
     def test_uint32(self):
         import sys
@@ -332,15 +333,11 @@
         assert numpy.dtype(numpy.int64).type is numpy.int64
         assert numpy.int64(3) == 3
 
-        if sys.maxsize >= 2 ** 63 - 1:
-            assert numpy.int64(9223372036854775807) == 9223372036854775807
-            assert numpy.int64('9223372036854775807') == 9223372036854775807
-        else:
-            raises(OverflowError, numpy.int64, 9223372036854775807)
-            raises(OverflowError, numpy.int64, '9223372036854775807')
+        assert numpy.int64(9223372036854775807) == 9223372036854775807
+        assert numpy.int64(9223372036854775807) == 9223372036854775807
 
         raises(OverflowError, numpy.int64, 9223372036854775808)
-        raises(OverflowError, numpy.int64, '9223372036854775808')
+        raises(OverflowError, numpy.int64, 9223372036854775808L)
 
     def test_uint64(self):
         import sys
diff --git a/pypy/module/micronumpy/test/test_numarray.py b/pypy/module/micronumpy/test/test_numarray.py
--- a/pypy/module/micronumpy/test/test_numarray.py
+++ b/pypy/module/micronumpy/test/test_numarray.py
@@ -395,11 +395,19 @@
         assert a[3] == 0.
 
     def test_newaxis(self):
-        from _numpypy import array
+        import math
+        from _numpypy import array, cos, zeros
         from numpypy.core.numeric import newaxis
         a = array(range(5))
         b = array([range(5)])
         assert (a[newaxis] == b).all()
+        a = array(range(3))
+        b = array([1, 3])
+        expected = zeros((3, 2))
+        for x in range(3):
+            for y in range(2):
+                expected[x, y] = math.cos(a[x]) * math.cos(b[y])
+        assert ((cos(a)[:,newaxis] * cos(b).T) == expected).all()
 
     def test_newaxis_slice(self):
         from _numpypy import array
@@ -995,6 +1003,10 @@
         assert a.sum() == 5
 
         raises(TypeError, 'a.sum(2, 3)')
+        d = array(0.)
+        b = a.sum(out=d)
+        assert b == d
+        assert isinstance(b, float)
 
     def test_reduce_nd(self):
         from numpypy import arange, array, multiply
@@ -1334,6 +1346,10 @@
         dims_disagree = raises(ValueError, concatenate, (a1, b1), axis=0)
         assert str(dims_disagree.value) == \
             "array dimensions must agree except for axis being concatenated"
+        a = array([1, 2, 3, 4, 5, 6])
+        a = (a + a)[::2]
+        b = concatenate((a[:3], a[-3:]))
+        assert (b == [2, 6, 10, 2, 6, 10]).all()
 
     def test_std(self):
         from _numpypy import array
@@ -1383,7 +1399,46 @@
         assert (ones(1) + ones(1)).nbytes == 8
         assert array(3.0).nbytes == 8
 
+    def test_repeat(self):
+        from _numpypy import repeat, array
+        assert (repeat([[1, 2], [3, 4]], 3) == [1, 1, 1, 2, 2, 2,
+                                                3, 3, 3, 4, 4, 4]).all()
+        assert (repeat([[1, 2], [3, 4]], 2, axis=0) == [[1, 2], [1, 2], [3, 4],
+                                                        [3, 4]]).all()
+        assert (repeat([[1, 2], [3, 4]], 2, axis=1) == [[1, 1, 2, 2], [3, 3,
+                                                        4, 4]]).all()
+        assert (array([1, 2]).repeat(2) == array([1, 1, 2, 2])).all()
 
+
+    def test_swapaxes(self):
+        from _numpypy import array
+        # testcases from numpy docstring
+        x = array([[1, 2, 3]])
+        assert (x.swapaxes(0, 1) == array([[1], [2], [3]])).all() 
+        x = array([[[0,1],[2,3]],[[4,5],[6,7]]]) # shape = (2, 2, 2)
+        assert (x.swapaxes(0, 2) == array([[[0, 4], [2, 6]], 
+                                           [[1, 5], [3, 7]]])).all() 
+        assert (x.swapaxes(0, 1) == array([[[0, 1], [4, 5]], 
+                                           [[2, 3], [6, 7]]])).all()
+        assert (x.swapaxes(1, 2) == array([[[0, 2], [1, 3]], 
+                                           [[4, 6],[5, 7]]])).all()
+
+        # more complex shape i.e. (2, 2, 3)
+        x = array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) 
+        assert (x.swapaxes(0, 1) == array([[[1, 2, 3], [7, 8, 9]], 
+                                           [[4, 5, 6], [10, 11, 12]]])).all() 
+        assert (x.swapaxes(0, 2) == array([[[1, 7], [4, 10]], [[2, 8], [5, 11]], 
+                                           [[3, 9], [6, 12]]])).all() 
+        assert (x.swapaxes(1, 2) == array([[[1, 4], [2, 5], [3, 6]], 
+                                           [[7, 10], [8, 11],[9, 12]]])).all() 
+        
+        # test slice
+        assert (x[0:1,0:2].swapaxes(0,2) == array([[[1], [4]], [[2], [5]], 
+                                                   [[3], [6]]])).all()
+        # test virtual
+        assert ((x + x).swapaxes(0,1) == array([[[ 2,  4,  6], [14, 16, 18]], 
+                                         [[ 8, 10, 12], [20, 22, 24]]])).all()
+                        
 class AppTestMultiDim(BaseNumpyAppTest):
     def test_init(self):
         import _numpypy
@@ -1495,8 +1550,6 @@
         a = array([[1, 2], [3, 4], [5, 6], [7, 8],
                    [9, 10], [11, 12], [13, 14]])
         b = a[::2]
-        print a
-        print b
         assert (b == [[1, 2], [5, 6], [9, 10], [13, 14]]).all()
         c = b + b
         assert c[1][1] == 12
diff --git a/pypy/module/micronumpy/test/test_outarg.py b/pypy/module/micronumpy/test/test_outarg.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/micronumpy/test/test_outarg.py
@@ -0,0 +1,126 @@
+import py
+from pypy.module.micronumpy.test.test_base import BaseNumpyAppTest
+
+class AppTestOutArg(BaseNumpyAppTest):
+    def test_reduce_out(self):
+        from numpypy import arange, zeros, array
+        a = arange(15).reshape(5, 3)
+        b = arange(12).reshape(4,3)
+        c = a.sum(0, out=b[1])
+        assert (c == [30, 35, 40]).all()
+        assert (c == b[1]).all()
+        raises(ValueError, 'a.prod(0, out=arange(10))')
+        a=arange(12).reshape(3,2,2)
+        raises(ValueError, 'a.sum(0, out=arange(12).reshape(3,2,2))')
+        raises(ValueError, 'a.sum(0, out=arange(3))')
+        c = array([-1, 0, 1]).sum(out=zeros([], dtype=bool))
+        # You could argue that this should produce False, but
+        # that would require an intermediate result. CPython numpy
+        # gives True.
+        assert c == True
+        a = array([[-1, 0, 1], [1, 0, -1]])
+        c = a.sum(0, out=zeros((3,), dtype=bool))
+        assert (c == [True, False, True]).all()
+        c = a.sum(1, out=zeros((2,), dtype=bool))
+        assert (c == [True, True]).all()
+
+    def test_reduce_intermediary(self):
+        from numpypy import arange, array
+        a = arange(15).reshape(5, 3)
+        b = array(range(3), dtype=bool)
+        c = a.prod(0, out=b)
+        assert(b == [False,  True,  True]).all()
+
+    def test_ufunc_out(self):
+        from _numpypy import array, negative, zeros, sin
+        from math import sin as msin
+        a = array([[1, 2], [3, 4]])
+        c = zeros((2,2,2))
+        b = negative(a + a, out=c[1])
+        #test for view, and also test that forcing out also forces b
+        assert (c[:, :, 1] == [[0, 0], [-4, -8]]).all()
+        assert (b == [[-2, -4], [-6, -8]]).all()
+        #Test broadcast, type promotion
+        b = negative(3, out=a)
+        assert (a == -3).all()
+        c = zeros((2, 2), dtype=float)
+        b = negative(3, out=c)
+        assert b.dtype.kind == c.dtype.kind
+        assert b.shape == c.shape
+        a = array([1, 2])
+        b = sin(a, out=c)
+        assert(c == [[msin(1), msin(2)]] * 2).all()
+        b = sin(a, out=c+c)
+        assert (c == b).all()
+
+        #Test shape agreement
+        a = zeros((3,4))
+        b = zeros((3,5))
+        raises(ValueError, 'negative(a, out=b)')
+        b = zeros((1,4))
+        raises(ValueError, 'negative(a, out=b)')
+
+    def test_binfunc_out(self):
+        from _numpypy import array, add
+        a = array([[1, 2], [3, 4]])
+        out = array([[1, 2], [3, 4]])
+        c = add(a, a, out=out)
+        assert (c == out).all()
+        assert c.shape == a.shape
+        assert c.dtype is a.dtype
+        c[0,0] = 100
+        assert out[0, 0] == 100
+        out[:] = 100
+        raises(ValueError, 'c = add(a, a, out=out[1])')
+        c = add(a[0], a[1], out=out[1])
+        assert (c == out[1]).all()
+        assert (c == [4, 6]).all()
+        assert (out[0] == 100).all()
+        c = add(a[0], a[1], out=out)
+        assert (c == out[1]).all()
+        assert (c == out[0]).all()
+        out = array(16, dtype=int)
+        b = add(10, 10, out=out)
+        assert b==out
+        assert b.dtype == out.dtype
+        
+    def test_applevel(self):
+        from _numpypy import array, sum, max, min
+        a = array([[1, 2], [3, 4]])
+        out = array([[0, 0], [0, 0]])
+        c = sum(a, axis=0, out=out[0])
+        assert (c == [4, 6]).all()
+        assert (c == out[0]).all()
+        assert (c != out[1]).all()
+        c = max(a, axis=1, out=out[0])
+        assert (c == [2, 4]).all()
+        assert (c == out[0]).all()
+        assert (c != out[1]).all()
+        
+    def test_ufunc_cast(self):
+        from _numpypy import array, negative, add, sum
+        a = array(16, dtype = int)
+        c = array(0, dtype = float)
+        b = negative(a, out=c)
+        assert b == c
+        b = add(a, a, out=c)
+        assert b == c
+        d = array([16, 16], dtype=int)
+        b = sum(d, out=c)
+        assert b == c
+        try:
+            from _numpypy import version
+            v = version.version.split('.')
+        except:
+            v = ['1', '6', '0'] # numpypy is API compatible with what version?
+        if v[0]<'2':
+            b = negative(c, out=a)
+            assert b == a
+            b = add(c, c, out=a)
+            assert b == a
+            b = sum(array([16, 16], dtype=float), out=a)
+            assert b == a
+        else:
+            cast_error = raises(TypeError, negative, c, a)
+            assert str(cast_error.value) == \
+            "Cannot cast ufunc negative output from dtype('float64') to dtype('int64') with casting rule 'same_kind'"
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -135,6 +135,38 @@
         assert fabs(float('-inf')) == float('inf')
         assert isnan(fabs(float('nan')))
 
+    def test_fmax(self):
+        from _numpypy import fmax
+        import math
+
+        nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf')
+
+        a = [ninf, -5, 0, 5, inf]
+        assert (fmax(a, [ninf]*5) == a).all()
+        assert (fmax(a, [inf]*5) == [inf]*5).all()
+        assert (fmax(a, [1]*5) == [1, 1, 1, 5, inf]).all()
+        assert math.isnan(fmax(nan, 0))
+        assert math.isnan(fmax(0, nan))
+        assert math.isnan(fmax(nan, nan))
+        # The numpy docs specify that the FIRST NaN should be used if both are NaN
+        assert math.copysign(1.0, fmax(nnan, nan)) == -1.0
+
+    def test_fmin(self):
+        from _numpypy import fmin
+        import math
+
+        nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf')
+
+        a = [ninf, -5, 0, 5, inf]
+        assert (fmin(a, [ninf]*5) == [ninf]*5).all()
+        assert (fmin(a, [inf]*5) == a).all()
+        assert (fmin(a, [1]*5) == [ninf, -5, 0, 1, 1]).all()
+        assert math.isnan(fmin(nan, 0))
+        assert math.isnan(fmin(0, nan))
+        assert math.isnan(fmin(nan, nan))
+        # The numpy docs specify that the FIRST NaN should be used if both are NaN
+        assert math.copysign(1.0, fmin(nnan, nan)) == -1.0
+
     def test_fmod(self):
         from _numpypy import fmod
         import math
@@ -221,24 +253,17 @@
         for i in range(3):
             assert c[i] == a[i] - b[i]
 
-    def test_floorceil(self):
-        from _numpypy import array, floor, ceil
+    def test_floorceiltrunc(self):
+        from _numpypy import array, floor, ceil, trunc
         import math
-        reference = [-2.0, -2.0, -1.0, 0.0, 1.0, 1.0, 0]
-        a = array([-1.4, -1.5, -1.0, 0.0, 1.0, 1.4, 0.5])
-        b = floor(a)
-        for i in range(5):
-            assert b[i] == reference[i]
-        reference = [-1.0, -1.0, -1.0, 0.0, 1.0, 2.0, 1.0]
-        a = array([-1.4, -1.5, -1.0, 0.0, 1.0, 1.4, 0.5])
-        b = ceil(a)
-        assert (reference == b).all()
-        inf = float("inf")
-        data = [1.5, 2.9999, -1.999, inf]
-        results = [math.floor(x) for x in data]
-        assert (floor(data) == results).all()
-        results = [math.ceil(x) for x in data]
-        assert (ceil(data) == results).all()
+        ninf, inf = float("-inf"), float("inf")
+        a = array([ninf, -1.4, -1.5, -1.0, 0.0, 1.0, 1.4, 0.5, inf])
+        assert ([ninf, -2.0, -2.0, -1.0, 0.0, 1.0, 1.0, 0.0, inf] == floor(a)).all()
+        assert ([ninf, -1.0, -1.0, -1.0, 0.0, 1.0, 2.0, 1.0, inf] == ceil(a)).all()
+        assert ([ninf, -1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 0.0, inf] == trunc(a)).all()
+        assert all([math.isnan(f(float("nan"))) for f in floor, ceil, trunc])
+        assert all([math.copysign(1, f(float("nan"))) == 1 for f in floor, ceil, trunc])
+        assert all([math.copysign(1, f(float("-nan"))) == -1 for f in floor, ceil, trunc])
 
     def test_copysign(self):
         from _numpypy import array, copysign
@@ -455,6 +480,19 @@
         assert math.isnan(sqrt(-1))
         assert math.isnan(sqrt(nan))
 
+    def test_square(self):
+        import math
+        from _numpypy import square
+
+        nan, inf, ninf = float("nan"), float("inf"), float("-inf")
+
+        assert math.isnan(square(nan))
+        assert math.isinf(square(inf))
+        assert math.isinf(square(ninf))
+        assert square(ninf) > 0
+        assert [square(x) for x in range(-5, 5)] == [x*x for x in range(-5, 5)]
+        assert math.isinf(square(1e300))
+
     def test_radians(self):
         import math
         from _numpypy import radians, array
@@ -546,10 +584,17 @@
         raises(TypeError, 'array([1.0]) & 1')
 
     def test_unary_bitops(self):
-        from _numpypy import bitwise_not, array
+        from _numpypy import bitwise_not, invert, array
         a = array([1, 2, 3, 4])
         assert (~a == [-2, -3, -4, -5]).all()
         assert (bitwise_not(a) == ~a).all()
+        assert (invert(a) == ~a).all()
+
+    def test_shift(self):
+        from _numpypy import left_shift, right_shift
+
+        assert (left_shift([5, 1], [2, 13]) == [20, 2**13]).all()
+        assert (right_shift(10, range(5)) == [10, 5, 2, 1, 0]).all()
 
     def test_comparisons(self):
         import operator
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -131,7 +131,7 @@
         #            bogus. We need to improve the situation somehow.
         self.check_simple_loop({'getinteriorfield_raw': 2,
                                 'setinteriorfield_raw': 1,
-                                'arraylen_gc': 1,
+                                'arraylen_gc': 2,
                                 'guard_true': 1,
                                 'int_lt': 1,
                                 'jump': 1,
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -500,6 +500,19 @@
     BoxType = interp_boxes.W_ULongBox
     format_code = "L"
 
+def _int64_coerce(self, space, w_item):
+    try:
+        return self._base_coerce(space, w_item)
+    except OperationError, e:
+        if not e.match(space, space.w_OverflowError):
+            raise
+    bigint = space.bigint_w(w_item)
+    try:
+        value = bigint.tolonglong()
+    except OverflowError:
+        raise OperationError(space.w_OverflowError, space.w_None)
+    return self.box(value)
+
 class Int64(BaseType, Integer):
     _attrs_ = ()
 
@@ -507,6 +520,8 @@
     BoxType = interp_boxes.W_Int64Box
     format_code = "q"
 
+    _coerce = func_with_new_name(_int64_coerce, '_coerce')
+
 class NonNativeInt64(BaseType, NonNativeInteger):
     _attrs_ = ()
 
@@ -514,6 +529,8 @@
     BoxType = interp_boxes.W_Int64Box
     format_code = "q"    
 
+    _coerce = func_with_new_name(_int64_coerce, '_coerce')
+
 def _uint64_coerce(self, space, w_item):
     try:
         return self._base_coerce(space, w_item)
@@ -614,6 +631,22 @@
         return math.fabs(v)
 
     @simple_binary_op
+    def fmax(self, v1, v2):
+        if math.isnan(v1):
+            return v1
+        elif math.isnan(v2):
+            return v2
+        return max(v1, v2)
+
+    @simple_binary_op
+    def fmin(self, v1, v2):
+        if math.isnan(v1):
+            return v1
+        elif math.isnan(v2):
+            return v2
+        return min(v1, v2)
+
+    @simple_binary_op
     def fmod(self, v1, v2):
         try:
             return math.fmod(v1, v2)
@@ -635,6 +668,13 @@
         return math.ceil(v)
 
     @simple_unary_op
+    def trunc(self, v):
+        if v < 0:
+            return math.ceil(v)
+        else:
+            return math.floor(v)
+
+    @simple_unary_op
     def exp(self, v):
         try:
             return math.exp(v)
@@ -724,6 +764,10 @@
         except ValueError:
             return rfloat.NAN
 
+    @simple_unary_op
+    def square(self, v):
+        return v*v
+
     @raw_unary_op
     def isnan(self, v):
         return rfloat.isnan(v)
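The types.py additions match the tests above: fmax/fmin propagate a NaN from either operand, and when both operands are NaN the first one is returned; trunc rounds toward zero and square is plain multiplication. A plain-Python restatement of the fmax logic for reference:

    import math

    def fmax(v1, v2):
        # First NaN wins; otherwise the ordinary maximum.
        if math.isnan(v1):
            return v1
        elif math.isnan(v2):
            return v2
        return max(v1, v2)

    assert fmax(1.0, 5.0) == 5.0
    assert math.isnan(fmax(float("nan"), 0.0))
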
diff --git a/pypy/module/pyexpat/test/__init__.py b/pypy/module/pyexpat/test/__init__.py
new file mode 100644
diff --git a/pypy/module/pypyjit/test_pypy_c/test_call.py b/pypy/module/pypyjit/test_pypy_c/test_call.py
--- a/pypy/module/pypyjit/test_pypy_c/test_call.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_call.py
@@ -244,6 +244,7 @@
         print guards
         assert len(guards) <= 20
 
+
     def test_stararg_virtual(self):
         def main(x):
             def g(*args):
@@ -486,3 +487,38 @@
             --TICK--
             jump(..., descr=...)
         """)
+
+    def test_kwargs_virtual2(self):
+        log = self.run("""
+        def f(*args, **kwargs):
+            kwargs['a'] = kwargs['z'] * 0
+            return g(1, *args, **kwargs)
+
+        def g(x, y, z=2, a=1):
+            return x - y + z + a
+
+        def main(stop):
+            res = 0
+            i = 0
+            while i < stop:
+                res = f(res, z=i) # ID: call
+                i += 1
+            return res""", [1000])
+        assert log.result == 500
+        loop, = log.loops_by_id('call')
+        print loop.ops_by_id('call')
+        assert loop.match("""
+            i65 = int_lt(i58, i29)
+            guard_true(i65, descr=...)
+            guard_not_invalidated(..., descr=...)
+            i66 = force_token()
+            i67 = force_token()
+            i69 = int_sub_ovf(1, i56)
+            guard_no_overflow(..., descr=...)
+            i70 = int_add_ovf(i69, i58)
+            guard_no_overflow(..., descr=...)
+            i71 = int_add(i58, 1)
+            --TICK--
+            jump(..., descr=...)
+        """)
+
diff --git a/pypy/module/pypyjit/test_pypy_c/test_containers.py b/pypy/module/pypyjit/test_pypy_c/test_containers.py
--- a/pypy/module/pypyjit/test_pypy_c/test_containers.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_containers.py
@@ -128,3 +128,82 @@
         loop, = log.loops_by_filename(self.filepath)
         ops = loop.ops_by_id('look')
         assert 'call' not in log.opnames(ops)
+
+    #XXX the following tests only work with strategies enabled
+
+    def test_should_not_create_intobject_with_sets(self):
+        def main(n):
+            i = 0
+            s = set()
+            while i < n:
+                s.add(i)
+                i += 1
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loop, = log.loops_by_filename(self.filepath)
+        opnames = log.opnames(loop.allops())
+        assert opnames.count('new_with_vtable') == 0
+
+    def test_should_not_create_stringobject_with_sets(self):
+        def main(n):
+            i = 0
+            s = set()
+            while i < n:
+                s.add(str(i))
+                i += 1
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loop, = log.loops_by_filename(self.filepath)
+        opnames = log.opnames(loop.allops())
+        assert opnames.count('new_with_vtable') == 0
+
+    def test_should_not_create_intobject_with_lists(self):
+        def main(n):
+            i = 0
+            l = []
+            while i < n:
+                l.append(i)
+                i += 1
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loop, = log.loops_by_filename(self.filepath)
+        opnames = log.opnames(loop.allops())
+        assert opnames.count('new_with_vtable') == 0
+
+    def test_should_not_create_stringobject_with_lists(self):
+        def main(n):
+            i = 0
+            l = []
+            while i < n:
+                l.append(str(i))
+                i += 1
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loop, = log.loops_by_filename(self.filepath)
+        opnames = log.opnames(loop.allops())
+        assert opnames.count('new_with_vtable') == 0
+
+    def test_optimized_create_list_from_string(self):
+        def main(n):
+            i = 0
+            l = []
+            while i < n:
+                l = list("abc" * i)
+                i += 1
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loop, = log.loops_by_filename(self.filepath)
+        opnames = log.opnames(loop.allops())
+        assert opnames.count('new_with_vtable') == 0
+
+    def test_optimized_create_set_from_list(self):
+        def main(n):
+            i = 0
+            while i < n:
+                s = set([1,2,3])
+                i += 1
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loop, = log.loops_by_filename(self.filepath)
+        opnames = log.opnames(loop.allops())
+        assert opnames.count('new_with_vtable') == 0
diff --git a/pypy/module/pypyjit/test_pypy_c/test_misc.py b/pypy/module/pypyjit/test_pypy_c/test_misc.py
--- a/pypy/module/pypyjit/test_pypy_c/test_misc.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_misc.py
@@ -212,7 +212,7 @@
             i19 = int_add(i12, 1)
             setfield_gc(p9, i19, descr=<FieldS .*W_AbstractSeqIterObject.inst_index .*>)
             guard_nonnull_class(p17, 146982464, descr=...)
-            i21 = getfield_gc(p17, descr=<FieldS .*W_ArrayTypei.inst_len .*>)
+            i21 = getfield_gc(p17, descr=<FieldS .*W_Array.*.inst_len .*>)
             i23 = int_lt(0, i21)
             guard_true(i23, descr=...)
             i24 = getfield_gc(p17, descr=<FieldU .*W_ArrayTypei.inst_buffer .*>)
@@ -351,3 +351,23 @@
         # the following assertion fails if the loop was cancelled due
         # to "abort: vable escape"
         assert len(log.loops_by_id("eval")) == 1
+
+    def test_sys_exc_info(self):
+        def main():
+            i = 1
+            lst = [i]
+            while i < 1000:
+                try:
+                    return lst[i]
+                except:
+                    e = sys.exc_info()[1]    # ID: exc_info
+                    if not isinstance(e, IndexError):
+                        raise
+                i += 1
+            return 42
+
+        log = self.run(main)
+        assert log.result == 42
+        # the following assertion fails if the loop was cancelled due
+        # to "abort: vable escape"
+        assert len(log.loops_by_id("exc_info")) == 1
diff --git a/pypy/module/pypyjit/test_pypy_c/test_string.py b/pypy/module/pypyjit/test_pypy_c/test_string.py
--- a/pypy/module/pypyjit/test_pypy_c/test_string.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_string.py
@@ -198,3 +198,37 @@
             i49 = call(ConstClass(_ll_2_str_eq_nonnull__rpy_stringPtr_rpy_stringPtr), p35, ConstPtr(ptr48), descr=<Calli [48] rr EF=0 OS=28>)
             guard_value(i49, 1, descr=...)
             ''')
+
+    def test_remove_duplicate_method_calls(self):
+        def main(n):
+            lst = []
+            for i in range(n):
+                s = 'Hello %d' % i
+                t = s.lower()   # ID: callone
+                u = s.lower()   # ID: calltwo
+                lst.append(t)
+                lst.append(u)
+            return len(','.join(lst))
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loops = log.loops_by_filename(self.filepath)
+        loop, = loops
+        loop.match_by_id('callone', '''
+            p114 = call(ConstClass(ll_lower__rpy_stringPtr), p113, descr=<Callr . r EF=3>)
+            guard_no_exception(descr=...)
+            ''')
+        loop.match_by_id('calltwo', '')    # nothing
+
+    def test_move_method_call_out_of_loop(self):
+        def main(n):
+            lst = []
+            s = 'Hello %d' % n
+            for i in range(n):
+                t = s.lower()   # ID: callone
+                lst.append(t)
+            return len(','.join(lst))
+        log = self.run(main, [1000])
+        assert log.result == main(1000)
+        loops = log.loops_by_filename(self.filepath)
+        loop, = loops
+        loop.match_by_id('callone', '')    # nothing
diff --git a/pypy/module/sys/test/test_sysmodule.py b/pypy/module/sys/test/test_sysmodule.py
--- a/pypy/module/sys/test/test_sysmodule.py
+++ b/pypy/module/sys/test/test_sysmodule.py
@@ -538,3 +538,124 @@
         s4 = s3.swapcase()
         assert intern(s4) is s2
 
+
+class AppTestSysExcInfoDirect:
+
+    def setup_method(self, meth):
+        self.checking = not option.runappdirect
+        if self.checking:
+            self.seen = []
+            from pypy.module.sys import vm
+            def exc_info_with_tb(*args):
+                self.seen.append("n")     # not optimized
+                return self.old[0](*args)
+            def exc_info_without_tb(*args):
+                self.seen.append("y")     # optimized
+                return self.old[1](*args)
+            self.old = [vm.exc_info_with_tb, vm.exc_info_without_tb]
+            vm.exc_info_with_tb = exc_info_with_tb
+            vm.exc_info_without_tb = exc_info_without_tb
+            #
+            from pypy.rlib import jit
+            self.old2 = [jit.we_are_jitted]
+            jit.we_are_jitted = lambda: True
+
+    def teardown_method(self, meth):
+        if self.checking:
+            from pypy.module.sys import vm
+            from pypy.rlib import jit
+            vm.exc_info_with_tb = self.old[0]
+            vm.exc_info_without_tb = self.old[1]
+            jit.we_are_jitted = self.old2[0]
+            #
+            assert ''.join(self.seen) == meth.expected
+
+    def test_returns_none(self):
+        import sys
+        assert sys.exc_info() == (None, None, None)
+        assert sys.exc_info()[0] is None
+        assert sys.exc_info()[1] is None
+        assert sys.exc_info()[2] is None
+        assert sys.exc_info()[:2] == (None, None)
+        assert sys.exc_info()[:3] == (None, None, None)
+        assert sys.exc_info()[0:2] == (None, None)
+        assert sys.exc_info()[2:4] == (None,)
+    test_returns_none.expected = 'nnnnnnnn'
+
+    def test_returns_subscr(self):
+        import sys
+        e = KeyError("boom")
+        try:
+            raise e
+        except:
+            assert sys.exc_info()[0] is KeyError  # y
+            assert sys.exc_info()[1] is e         # y
+            assert sys.exc_info()[2] is not None  # n
+            assert sys.exc_info()[-3] is KeyError # y
+            assert sys.exc_info()[-2] is e        # y
+            assert sys.exc_info()[-1] is not None # n
+    test_returns_subscr.expected = 'yynyyn'
+
+    def test_returns_slice_2(self):
+        import sys
+        e = KeyError("boom")
+        try:
+            raise e
+        except:
+            foo = sys.exc_info()                  # n
+            assert sys.exc_info()[:0] == ()       # y
+            assert sys.exc_info()[:1] == foo[:1]  # y
+            assert sys.exc_info()[:2] == foo[:2]  # y
+            assert sys.exc_info()[:3] == foo      # n
+            assert sys.exc_info()[:4] == foo      # n
+            assert sys.exc_info()[:-1] == foo[:2] # y
+            assert sys.exc_info()[:-2] == foo[:1] # y
+            assert sys.exc_info()[:-3] == ()      # y
+    test_returns_slice_2.expected = 'nyyynnyyy'
+
+    def test_returns_slice_3(self):
+        import sys
+        e = KeyError("boom")
+        try:
+            raise e
+        except:
+            foo = sys.exc_info()                   # n
+            assert sys.exc_info()[2:2] == ()       # y
+            assert sys.exc_info()[0:1] == foo[:1]  # y
+            assert sys.exc_info()[1:2] == foo[1:2] # y
+            assert sys.exc_info()[0:3] == foo      # n
+            assert sys.exc_info()[2:4] == foo[2:]  # n
+            assert sys.exc_info()[0:-1] == foo[:2] # y
+            assert sys.exc_info()[0:-2] == foo[:1] # y
+            assert sys.exc_info()[5:-3] == ()      # y
+    test_returns_slice_3.expected = 'nyyynnyyy'
+
+    def test_strange_invocation(self):
+        import sys
+        e = KeyError("boom")
+        try:
+            raise e
+        except:
+            a = []; k = {}
+            assert sys.exc_info(*a)[:0] == ()
+            assert sys.exc_info(**k)[:0] == ()
+    test_strange_invocation.expected = 'nn'
+
+    def test_call_in_subfunction(self):
+        import sys
+        def g():
+            # this case is not optimized, because we need to search the
+            # frame chain.  it's probably not worth the complications
+            return sys.exc_info()[1]
+        e = KeyError("boom")
+        try:
+            raise e
+        except:
+            assert g() is e
+    test_call_in_subfunction.expected = 'n'
+
+
+class AppTestSysExcInfoDirectCallMethod(AppTestSysExcInfoDirect):
+    def setup_class(cls):
+        cls.space = gettestobjspace(**{"objspace.opcodes.CALL_METHOD": True})
diff --git a/pypy/module/sys/version.py b/pypy/module/sys/version.py
--- a/pypy/module/sys/version.py
+++ b/pypy/module/sys/version.py
@@ -10,7 +10,7 @@
 CPYTHON_VERSION            = (3, 2, 2, "final", 0)   #XXX # sync patchlevel.h
 CPYTHON_API_VERSION        = 1013   #XXX # sync with include/modsupport.h
 
-PYPY_VERSION               = (1, 8, 1, "dev", 0)    #XXX # sync patchlevel.h
+PYPY_VERSION               = (1, 9, 1, "dev", 0)    #XXX # sync patchlevel.h
 
 if platform.name == 'msvc':
     COMPILER_INFO = 'MSC v.%d 32 bit' % (platform.version * 10 + 600)
diff --git a/pypy/module/sys/vm.py b/pypy/module/sys/vm.py
--- a/pypy/module/sys/vm.py
+++ b/pypy/module/sys/vm.py
@@ -89,6 +89,9 @@
     """Return the (type, value, traceback) of the most recent exception
 caught by an except clause in the current stack frame or in an older stack
 frame."""
+    return exc_info_with_tb(space)    # indirection for the tests
+
+def exc_info_with_tb(space):
     operror = space.getexecutioncontext().sys_exc_info()
     if operror is None:
         return space.newtuple([space.w_None,space.w_None,space.w_None])
@@ -96,6 +99,68 @@
         return space.newtuple([operror.w_type, operror.get_w_value(space),
                                space.wrap(operror.get_traceback())])
 
+def exc_info_without_tb(space, frame):
+    operror = frame.last_exception
+    return space.newtuple([operror.w_type, operror.get_w_value(space),
+                           space.w_None])
+
+def exc_info_direct(space, frame):
+    from pypy.tool import stdlib_opcode
+    # In order to make the JIT happy, we try to return (exc, val, None)
+    # instead of (exc, val, tb).  We can do that only if we recognize
+    # the following pattern in the bytecode:
+    #       CALL_FUNCTION/CALL_METHOD         <-- invoking me
+    #       LOAD_CONST 0, 1, -2 or -3
+    #       BINARY_SUBSCR
+    # or:
+    #       CALL_FUNCTION/CALL_METHOD
+    #       LOAD_CONST <=2
+    #       SLICE_2
+    # or:
+    #       CALL_FUNCTION/CALL_METHOD
+    #       LOAD_CONST any integer
+    #       LOAD_CONST <=2
+    #       SLICE_3
+    need_all_three_args = True
+    co = frame.getcode().co_code
+    p = frame.last_instr
+    if (ord(co[p]) == stdlib_opcode.CALL_FUNCTION or
+        ord(co[p]) == stdlib_opcode.CALL_METHOD):
+        if ord(co[p+3]) == stdlib_opcode.LOAD_CONST:
+            lo = ord(co[p+4])
+            hi = ord(co[p+5])
+            w_constant = frame.getconstant_w((hi * 256) | lo)
+            if space.isinstance_w(w_constant, space.w_int):
+                constant = space.int_w(w_constant)
+                if ord(co[p+6]) == stdlib_opcode.BINARY_SUBSCR:
+                    if -3 <= constant <= 1 and constant != -1:
+                        need_all_three_args = False
+                elif ord(co[p+6]) == stdlib_opcode.SLICE+2:
+                    if constant <= 2:
+                        need_all_three_args = False
+                elif (ord(co[p+6]) == stdlib_opcode.LOAD_CONST and
+                      ord(co[p+9]) == stdlib_opcode.SLICE+3):
+                    lo = ord(co[p+7])
+                    hi = ord(co[p+8])
+                    w_constant = frame.getconstant_w((hi * 256) | lo)
+                    if space.isinstance_w(w_constant, space.w_int):
+                        if space.int_w(w_constant) <= 2:
+                            need_all_three_args = False
+    #
+    if need_all_three_args or frame.last_exception is None or frame.hide():
+        return exc_info_with_tb(space)
+    else:
+        return exc_info_without_tb(space, frame)
+
+def exc_clear(space):
+    """Clear global information on the current exception.  Subsequent calls
+to exc_info() will return (None,None,None) until another exception is
+raised and caught in the current thread or the execution stack returns to a
+frame where another exception is being handled."""
+    operror = space.getexecutioncontext().sys_exc_info()
+    if operror is not None:
+        operror.clear(space)
+
 def settrace(space, w_func):
     """Set the global debug tracing function.  It will be called on each
 function call.  See the debugger chapter in the library manual."""
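exc_info_direct() inspects the bytecode that follows the sys.exc_info() call; when the caller provably never reaches index 2 (the traceback), it returns the cheaper (type, value, None) tuple from exc_info_without_tb() and no traceback has to be built. For reference, the source-level patterns the recognizer is aimed at (CPython 2.x bytecode; illustrative, not part of the diff):

    import sys

    try:
        raise KeyError("boom")
    except KeyError:
        exc_type = sys.exc_info()[0]   # CALL_FUNCTION; LOAD_CONST 0; BINARY_SUBSCR
        exc_val  = sys.exc_info()[1]   # CALL_FUNCTION; LOAD_CONST 1; BINARY_SUBSCR
        head     = sys.exc_info()[:2]  # CALL_FUNCTION; LOAD_CONST 2; SLICE+2
        # Anything that could read element 2 falls back to exc_info_with_tb().
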
diff --git a/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py b/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py
--- a/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py
+++ b/pypy/module/test_lib_pypy/numpypy/core/test_fromnumeric.py
@@ -141,4 +141,11 @@
         raises(NotImplementedError, "transpose(x, axes=(1, 0, 2))")
         # x = ones((1, 2, 3))
         # assert transpose(x, (1, 0, 2)).shape == (2, 1, 3)
-        
+    
+    def test_fromnumeric(self):
+        from numpypy import array, swapaxes
+        x = array([[1,2,3]])
+        assert (swapaxes(x,0,1) == array([[1], [2], [3]])).all()
+        x = array([[[0,1],[2,3]],[[4,5],[6,7]]])
+        assert (swapaxes(x,0,2) == array([[[0, 4], [2, 6]], 
+                                          [[1, 5], [3, 7]]])).all()
diff --git a/pypy/module/thread/test/test_ll_thread.py b/pypy/module/thread/test/test_ll_thread.py
--- a/pypy/module/thread/test/test_ll_thread.py
+++ b/pypy/module/thread/test/test_ll_thread.py
@@ -66,7 +66,6 @@
     def test_gc_locking(self):
         import time
         from pypy.rlib.objectmodel import invoke_around_extcall
-        from pypy.rlib.objectmodel import we_are_translated
         from pypy.rlib.debug import ll_assert
 
         class State:
@@ -129,8 +128,6 @@
             state.finished = 0
             # the next line installs before_extcall() and after_extcall()
             # to be called automatically around external function calls.
-            # When not translated it does not work around time.sleep(),
-            # so we have to call them manually for this test.
             invoke_around_extcall(before_extcall, after_extcall)
 
             g(10, 1)
@@ -142,13 +139,9 @@
                 willing_to_wait_more -= 1
                 done = len(state.answers) == expected
 
-                if not we_are_translated(): before_extcall()
                 time.sleep(0.01)
-                if not we_are_translated(): after_extcall()
 
-            if not we_are_translated(): before_extcall()
             time.sleep(0.1)
-            if not we_are_translated(): after_extcall()
 
             return len(state.answers)
 
@@ -160,12 +153,11 @@
         answers = fn()
         assert answers == expected
 
-class TestRunDirectly(AbstractThreadTests):
-    def getcompiled(self, f, argtypes):
-        return f
-
-    def test_start_new_thread(self):
-        py.test.skip("deadlocks occasionally -- why???")
+#class TestRunDirectly(AbstractThreadTests):
+#    def getcompiled(self, f, argtypes):
+#        return f
+# These are disabled because they crash occasionally for bad reasons
+# related to the fact that ll2ctypes is not at all thread-safe
 
 class TestUsingBoehm(AbstractThreadTests):
     gcpolicy = 'boehm'
diff --git a/pypy/objspace/descroperation.py b/pypy/objspace/descroperation.py
--- a/pypy/objspace/descroperation.py
+++ b/pypy/objspace/descroperation.py
@@ -42,6 +42,13 @@
     return w_eq
 type_eq._annspecialcase_ = 'specialize:memo'
 
+def list_iter(space):
+    "Utility that returns the app-level descriptor list.__iter__."
+    w_src, w_iter = space.lookup_in_type_where(space.w_list,
+                                               '__iter__')
+    return w_iter
+list_iter._annspecialcase_ = 'specialize:memo'
+
 def raiseattrerror(space, w_obj, name, w_descr=None):
     w_type = space.type(w_obj)
     typename = w_type.getname(space)
diff --git a/pypy/objspace/fake/objspace.py b/pypy/objspace/fake/objspace.py
--- a/pypy/objspace/fake/objspace.py
+++ b/pypy/objspace/fake/objspace.py
@@ -86,6 +86,7 @@
         return s_None
 
     def specialize_call(self, hop):
+        hop.exception_cannot_occur()
         return hop.inputconst(lltype.Void, None)
 
 # ____________________________________________________________
@@ -109,7 +110,7 @@
         "NOT_RPYTHON"
         raise NotImplementedError
 
-    def newdict(self, module=False, instance=False, classofinstance=None,
+    def newdict(self, module=False, instance=False, kwargs=False,
                 strdict=False):
         return w_some_obj()
 
diff --git a/pypy/objspace/flow/flowcontext.py b/pypy/objspace/flow/flowcontext.py
--- a/pypy/objspace/flow/flowcontext.py
+++ b/pypy/objspace/flow/flowcontext.py
@@ -434,6 +434,13 @@
         self.lastblock = block
         self.pushvalue(w_result)
 
+    def BUILD_LIST_FROM_ARG(self, _, next_instr):
+        # This opcode was added with pypy-1.8.  Here is a simpler
+        # version, enough for annotation.
+        last_val = self.popvalue()
+        self.pushvalue(self.space.newlist([]))
+        self.pushvalue(last_val)
+
     # XXX Unimplemented 2.7 opcodes ----------------
 
     # Set literals, set comprehensions
diff --git a/pypy/objspace/flow/model.py b/pypy/objspace/flow/model.py
--- a/pypy/objspace/flow/model.py
+++ b/pypy/objspace/flow/model.py
@@ -7,8 +7,7 @@
 from pypy.tool.uid import uid, Hashable
 from pypy.tool.descriptor import roproperty
 from pypy.tool.sourcetools import PY_IDENTIFIER, nice_repr_for_func
-from pypy.tool.identity_dict import identity_dict
-from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong
+from pypy.rlib.rarithmetic import is_valid_int, r_longlong, r_ulonglong, r_uint
 
 
 """
@@ -546,7 +545,7 @@
                     for n in cases[:len(cases)-has_default]:
                         if is_valid_int(n):
                             continue
-                        if isinstance(n, (r_longlong, r_ulonglong)):
+                        if isinstance(n, (r_longlong, r_ulonglong, r_uint)):
                             continue
                         if isinstance(n, (str, unicode)) and len(n) == 1:
                             continue
diff --git a/pypy/objspace/flow/test/test_objspace.py b/pypy/objspace/flow/test/test_objspace.py
--- a/pypy/objspace/flow/test/test_objspace.py
+++ b/pypy/objspace/flow/test/test_objspace.py
@@ -1,6 +1,6 @@
 from __future__ import with_statement
 import new
-import py
+import py, sys
 from pypy.objspace.flow.model import Constant, Block, Link, Variable
 from pypy.objspace.flow.model import mkentrymap, c_last_exception
 from pypy.interpreter.argument import Arguments
@@ -893,6 +893,8 @@
         """ Tests code generated by pypy-c compiled with BUILD_LIST_FROM_ARG
         bytecode
         """
+        if sys.version_info < (2, 7):
+            py.test.skip("2.7 only test")
         self.patch_opcodes('BUILD_LIST_FROM_ARG')
         try:
             def f():
diff --git a/pypy/objspace/std/bytearrayobject.py b/pypy/objspace/std/bytearrayobject.py
--- a/pypy/objspace/std/bytearrayobject.py
+++ b/pypy/objspace/std/bytearrayobject.py
@@ -66,9 +66,15 @@
     length = len(data)
     start, stop, step, slicelength = w_slice.indices4(space, length)
     assert slicelength >= 0
-    newdata = [data[start + i*step] for i in range(slicelength)]
+    if step == 1 and 0 <= start <= stop:
+        newdata = data[start:stop]
+    else:
+        newdata = _getitem_slice_multistep(data, start, step, slicelength)
     return W_BytearrayObject(newdata)
 
+def _getitem_slice_multistep(data, start, step, slicelength):
+    return [data[start + i*step] for i in range(slicelength)]
+
 def contains__Bytearray_Int(space, w_bytearray, w_char):
     char = space.int_w(w_char)
     if not 0 <= char < 256:
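The bytearray slicing hunk adds a fast path: a contiguous slice (step == 1 over a non-negative range) slices the backing list directly, and only the general case goes through the per-element loop. A small behaviour sketch showing the two paths agree:

    data = [1, 2, 3, 4, 5]
    start, stop, step, slicelength = 1, 4, 1, 3

    fast = data[start:stop]                                      # new fast path
    slow = [data[start + i * step] for i in range(slicelength)]  # generic path
    assert fast == slow == [2, 3, 4]
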
diff --git a/pypy/objspace/std/celldict.py b/pypy/objspace/std/celldict.py
--- a/pypy/objspace/std/celldict.py
+++ b/pypy/objspace/std/celldict.py
@@ -127,10 +127,10 @@
     def iter(self, w_dict):
         return ModuleDictIteratorImplementation(self.space, self, w_dict)
 
-    def keys(self, w_dict):
+    def w_keys(self, w_dict):
         space = self.space
-        iterator = self.unerase(w_dict.dstorage).iteritems
-        return [space.wrap(key) for key, cell in iterator()]
+        l = self.unerase(w_dict.dstorage).keys()
+        return space.newlist_str(l)
 
     def values(self, w_dict):
         iterator = self.unerase(w_dict.dstorage).itervalues
@@ -163,7 +163,8 @@
 
 class ModuleDictIteratorImplementation(IteratorImplementation):
     def __init__(self, space, strategy, dictimplementation):
-        IteratorImplementation.__init__(self, space, dictimplementation)
+        IteratorImplementation.__init__(
+            self, space, strategy, dictimplementation)
         dict_w = strategy.unerase(dictimplementation.dstorage)
         self.iterator = dict_w.iteritems()
 
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -33,8 +33,7 @@
 
     @staticmethod
     def allocate_and_init_instance(space, w_type=None, module=False,
-                                   instance=False, classofinstance=None,
-                                   strdict=False):
+                                   instance=False, strdict=False, kwargs=False):
 
         if space.config.objspace.std.withcelldict and module:
             from pypy.objspace.std.celldict import ModuleDictStrategy
@@ -47,11 +46,15 @@
             assert w_type is None
             strategy = space.fromcache(StringDictStrategy)
 
+        elif kwargs:
+            assert w_type is None
+            from pypy.objspace.std.kwargsdict import KwargsDictStrategy
+            strategy = space.fromcache(KwargsDictStrategy)
         else:
             strategy = space.fromcache(EmptyDictStrategy)
-
         if w_type is None:
             w_type = space.w_dict
+
         storage = strategy.get_empty_storage()
         w_self = space.allocate_instance(W_DictMultiObject, w_type)
         W_DictMultiObject.__init__(w_self, space, strategy, storage)
@@ -90,9 +93,10 @@
 def _add_indirections():
     dict_methods = "setitem setitem_str getitem \
                     getitem_str delitem length \
-                    clear keys values \
+                    clear w_keys values \
                     items iter setdefault \
-                    popitem".split()
+                    popitem listview_str listview_int \
+                    view_as_kwargs".split()
 
     def make_method(method):
         def f(self, *args):
@@ -113,7 +117,7 @@
     def get_empty_storage(self):
         raise NotImplementedError
 
-    def keys(self, w_dict):
+    def w_keys(self, w_dict):
         iterator = self.iter(w_dict)
         result = []
         while 1:
@@ -121,7 +125,7 @@
             if w_key is not None:
                 result.append(w_key)
             else:
-                return result
+                return self.space.newlist(result)
 
     def values(self, w_dict):
         iterator = self.iter(w_dict)
@@ -160,6 +164,14 @@
         w_dict.strategy = strategy
         w_dict.dstorage = storage
 
+    def listview_str(self, w_dict):
+        return None
+
+    def listview_int(self, w_dict):
+        return None
+
+    def view_as_kwargs(self, w_dict):
+        return (None, None)
 
 class EmptyDictStrategy(DictStrategy):
 
@@ -244,7 +256,7 @@
         return 0
 
     def iter(self, w_dict):
-        return EmptyIteratorImplementation(self.space, w_dict)
+        return EmptyIteratorImplementation(self.space, self, w_dict)
 
     def clear(self, w_dict):
         return
@@ -252,6 +264,9 @@
     def popitem(self, w_dict):
         raise KeyError
 
+    def view_as_kwargs(self, w_dict):
+        return ([], [])
+
 registerimplementation(W_DictMultiObject)
 
 # DictImplementation lattice
@@ -260,8 +275,9 @@
 # Iterator Implementation base classes
 
 class IteratorImplementation(object):
-    def __init__(self, space, implementation):
+    def __init__(self, space, strategy, implementation):
         self.space = space
+        self.strategy = strategy
         self.dictimplementation = implementation
         self.len = implementation.length()
         self.pos = 0
@@ -277,7 +293,20 @@
         if self.pos < self.len:
             result = self.next_entry()
             self.pos += 1
-            return result
+            if self.strategy is self.dictimplementation.strategy:
+                return result      # common case
+            else:
+                # waaa, obscure case: the strategy changed, but not the
+                # length of the dict.  The (key, value) pair in 'result'
+                # might be out-of-date.  We try to explicitly look up
+                # the key in the dict.
+                w_key = result[0]
+                w_value = self.dictimplementation.getitem(w_key)
+                if w_value is None:
+                    self.len = -1   # Make this error state sticky
+                    raise OperationError(self.space.w_RuntimeError,
+                        self.space.wrap("dictionary changed during iteration"))
+                return (w_key, w_value)
         # no more entries
         self.dictimplementation = None
         return None, None
@@ -373,8 +402,9 @@
             self.switch_to_object_strategy(w_dict)
             return w_dict.getitem(w_key)
 
-    def keys(self, w_dict):
-        return [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()]
+    def w_keys(self, w_dict):
+        l = [self.wrap(key) for key in self.unerase(w_dict.dstorage).iterkeys()]
+        return self.space.newlist(l)
 
     def values(self, w_dict):
         return self.unerase(w_dict.dstorage).values()
@@ -427,8 +457,8 @@
     def iter(self, w_dict):
         return ObjectIteratorImplementation(self.space, self, w_dict)
 
-    def keys(self, w_dict):
-        return self.unerase(w_dict.dstorage).keys()
+    def w_keys(self, w_dict):
+        return self.space.newlist(self.unerase(w_dict.dstorage).keys())
 
 
 class StringDictStrategy(AbstractTypedStrategy, DictStrategy):
@@ -471,15 +501,21 @@
         assert key is not None
         return self.unerase(w_dict.dstorage).get(key, None)
 
+    def listview_str(self, w_dict):
+        return self.unerase(w_dict.dstorage).keys()
+
     def iter(self, w_dict):
         return StrIteratorImplementation(self.space, self, w_dict)
 
+    def w_keys(self, w_dict):
+        return self.space.newlist_str(self.listview_str(w_dict))
+
 
 class _WrappedIteratorMixin(object):
     _mixin_ = True
 
     def __init__(self, space, strategy, dictimplementation):
-        IteratorImplementation.__init__(self, space, dictimplementation)
+        IteratorImplementation.__init__(self, space, strategy, dictimplementation)
         self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
 
     def next_entry(self):
@@ -493,7 +529,7 @@
     _mixin_ = True
 
     def __init__(self, space, strategy, dictimplementation):
-        IteratorImplementation.__init__(self, space, dictimplementation)
+        IteratorImplementation.__init__(self, space, strategy, dictimplementation)
         self.iterator = strategy.unerase(dictimplementation.dstorage).iteritems()
 
     def next_entry(self):
@@ -536,6 +572,11 @@
     def iter(self, w_dict):
         return IntIteratorImplementation(self.space, self, w_dict)
 
+    def listview_int(self, w_dict):
+        return self.unerase(w_dict.dstorage).keys()
+
+    # XXX there is no space.newlist_int yet to implement w_keys more efficiently
+
 class IntIteratorImplementation(_WrappedIteratorMixin, IteratorImplementation):
     pass
 
@@ -690,6 +731,9 @@
 
 def dict_keys__DictMulti(space, w_self):
     return W_DictViewKeysObject(space, w_self)
+    # XXX on default this is a fast path when the keys are strings; do we
+    # want to port it to py3k?
+    # return w_self.w_keys()
 
 def dict_values__DictMulti(space, w_self):
     return W_DictViewValuesObject(space, w_self)
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -76,7 +76,7 @@
 
     def keys(self, w_dict):
         space = self.space
-        return [space.wrap(key) for key in self.unerase(w_dict.dstorage).dict_w.iterkeys()]
+        return space.newlist_str(self.unerase(w_dict.dstorage).dict_w.keys())
 
     def values(self, w_dict):
         return [unwrap_cell(self.space, w_value) for w_value in self.unerase(w_dict.dstorage).dict_w.itervalues()]
@@ -98,7 +98,8 @@
 
 class DictProxyIteratorImplementation(IteratorImplementation):
     def __init__(self, space, strategy, dictimplementation):
-        IteratorImplementation.__init__(self, space, dictimplementation)
+        IteratorImplementation.__init__(
+            self, space, strategy, dictimplementation)
         w_type = strategy.unerase(dictimplementation.dstorage)
         self.iterator = w_type.dict_w.iteritems()
 
diff --git a/pypy/objspace/std/dicttype.py b/pypy/objspace/std/dicttype.py
--- a/pypy/objspace/std/dicttype.py
+++ b/pypy/objspace/std/dicttype.py
@@ -53,8 +53,14 @@
         w_fill = space.w_None
     if space.is_w(w_type, space.w_dict):
         w_dict = W_DictMultiObject.allocate_and_init_instance(space, w_type)
-        for w_key in space.listview(w_keys):
-            w_dict.setitem(w_key, w_fill)
+
+        strlist = space.listview_str(w_keys)
+        if strlist is not None:
+            for key in strlist:
+                w_dict.setitem_str(key, w_fill)
+        else:
+            for w_key in space.listview(w_keys):
+                w_dict.setitem(w_key, w_fill)
     else:
         w_dict = space.call_function(w_type)
         for w_key in space.listview(w_keys):
diff --git a/pypy/objspace/std/frozensettype.py b/pypy/objspace/std/frozensettype.py
--- a/pypy/objspace/std/frozensettype.py
+++ b/pypy/objspace/std/frozensettype.py
@@ -39,13 +39,11 @@
 def descr__frozenset__new__(space, w_frozensettype,
                             w_iterable=gateway.NoneNotWrapped):
     from pypy.objspace.std.setobject import W_FrozensetObject
-    from pypy.objspace.std.setobject import make_setdata_from_w_iterable
     if (space.is_w(w_frozensettype, space.w_frozenset) and
         w_iterable is not None and type(w_iterable) is W_FrozensetObject):
         return w_iterable
     w_obj = space.allocate_instance(W_FrozensetObject, w_frozensettype)
-    data = make_setdata_from_w_iterable(space, w_iterable)
-    W_FrozensetObject.__init__(w_obj, space, data)
+    W_FrozensetObject.__init__(w_obj, space, w_iterable)
     return w_obj
 
 frozenset_typedef = StdTypeDef("frozenset",
diff --git a/pypy/objspace/std/identitydict.py b/pypy/objspace/std/identitydict.py
--- a/pypy/objspace/std/identitydict.py
+++ b/pypy/objspace/std/identitydict.py
@@ -1,5 +1,5 @@
 ## ----------------------------------------------------------------------------
-## dict strategy (see dict_multiobject.py)
+## dict strategy (see dictmultiobject.py)
 
 from pypy.rlib import rerased
 from pypy.rlib.debug import mark_dict_non_null
@@ -80,8 +80,8 @@
     def iter(self, w_dict):
         return IdentityDictIteratorImplementation(self.space, self, w_dict)
 
-    def keys(self, w_dict):
-        return self.unerase(w_dict.dstorage).keys()
+    def w_keys(self, w_dict):
+        return self.space.newlist(self.unerase(w_dict.dstorage).keys())
 
 
 class IdentityDictIteratorImplementation(_UnwrappedIteratorMixin, IteratorImplementation):
diff --git a/pypy/objspace/std/iterobject.py b/pypy/objspace/std/iterobject.py
--- a/pypy/objspace/std/iterobject.py
+++ b/pypy/objspace/std/iterobject.py
@@ -29,9 +29,8 @@
 class W_SeqIterObject(W_AbstractSeqIterObject):
     """Sequence iterator implementation for general sequences."""
 
-class W_FastListIterObject(W_AbstractSeqIterObject):
-    """Sequence iterator specialized for lists, accessing directly their
-    RPython-level list of wrapped objects.
+class W_FastListIterObject(W_AbstractSeqIterObject): # XXX still needed
+    """Sequence iterator specialized for lists.
     """
 
 class W_FastTupleIterObject(W_AbstractSeqIterObject):
diff --git a/pypy/objspace/std/kwargsdict.py b/pypy/objspace/std/kwargsdict.py
new file mode 100644
--- /dev/null
+++ b/pypy/objspace/std/kwargsdict.py
@@ -0,0 +1,165 @@
+## ----------------------------------------------------------------------------
+## dict strategy (see dictmultiobject.py)
+
+from pypy.rlib import rerased, jit
+from pypy.objspace.std.dictmultiobject import (DictStrategy,
+                                               IteratorImplementation,
+                                               ObjectDictStrategy,
+                                               StringDictStrategy)
+
+
+class KwargsDictStrategy(DictStrategy):
+    erase, unerase = rerased.new_erasing_pair("kwargsdict")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
+
+    def wrap(self, key):
+        return self.space.wrap(key)
+
+    def unwrap(self, wrapped):
+        return self.space.str_w(wrapped)
+
+    def get_empty_storage(self):
+        d = ([], [])
+        return self.erase(d)
+
+    def is_correct_type(self, w_obj):
+        space = self.space
+        return space.is_w(space.type(w_obj), space.w_str)
+
+    def _never_equal_to(self, w_lookup_type):
+        return False
+
+    def iter(self, w_dict):
+        return KwargsDictIterator(self.space, self, w_dict)
+
+    def setitem(self, w_dict, w_key, w_value):
+        space = self.space
+        if self.is_correct_type(w_key):
+            self.setitem_str(w_dict, self.unwrap(w_key), w_value)
+            return
+        else:
+            self.switch_to_object_strategy(w_dict)
+            w_dict.setitem(w_key, w_value)
+
+    def setitem_str(self, w_dict, key, w_value):
+        self._setitem_str_indirection(w_dict, key, w_value)
+
+    @jit.look_inside_iff(lambda self, w_dict, key, w_value:
+            jit.isconstant(self.length(w_dict)) and jit.isconstant(key))
+    def _setitem_str_indirection(self, w_dict, key, w_value):
+        keys, values_w = self.unerase(w_dict.dstorage)
+        result = []
+        for i in range(len(keys)):
+            if keys[i] == key:
+                values_w[i] = w_value
+                break
+        else:
+            # limit the size so that the linear searches don't become too long
+            if len(keys) >= 16:
+                self.switch_to_string_strategy(w_dict)
+                w_dict.setitem_str(key, w_value)
+            else:
+                keys.append(key)
+                values_w.append(w_value)
+
+    def setdefault(self, w_dict, w_key, w_default):
+        # XXX could do better, but is it worth it?
+        self.switch_to_object_strategy(w_dict)
+        return w_dict.setdefault(w_key, w_default)
+
+    def delitem(self, w_dict, w_key):
+        # XXX could do better, but is it worth it?
+        self.switch_to_object_strategy(w_dict)
+        return w_dict.delitem(w_key)
+
+    def length(self, w_dict):
+        return len(self.unerase(w_dict.dstorage)[0])
+
+    def getitem_str(self, w_dict, key):
+        return self._getitem_str_indirection(w_dict, key)
+
+    @jit.look_inside_iff(lambda self, w_dict, key: jit.isconstant(self.length(w_dict)) and jit.isconstant(key))
+    def _getitem_str_indirection(self, w_dict, key):
+        keys, values_w = self.unerase(w_dict.dstorage)
+        result = []
+        for i in range(len(keys)):
+            if keys[i] == key:
+                return values_w[i]
+        return None
+
+    def getitem(self, w_dict, w_key):
+        space = self.space
+        if self.is_correct_type(w_key):
+            return self.getitem_str(w_dict, self.unwrap(w_key))
+        elif self._never_equal_to(space.type(w_key)):
+            return None
+        else:
+            self.switch_to_object_strategy(w_dict)
+            return w_dict.getitem(w_key)
+
+    def w_keys(self, w_dict):
+        l = self.unerase(w_dict.dstorage)[0]
+        return self.space.newlist_str(l[:])
+
+    def values(self, w_dict):
+        return self.unerase(w_dict.dstorage)[1][:] # to make non-resizable
+
+    def items(self, w_dict):
+        space = self.space
+        keys, values_w = self.unerase(w_dict.dstorage)
+        result = []
+        for i in range(len(keys)):
+            result.append(space.newtuple([self.wrap(keys[i]), values_w[i]]))
+        return result
+
+    def popitem(self, w_dict):
+        keys, values_w = self.unerase(w_dict.dstorage)
+        key = keys.pop()
+        w_value = values_w.pop()
+        return (self.wrap(key), w_value)
+
+    def clear(self, w_dict):
+        w_dict.dstorage = self.get_empty_storage()
+
+    def switch_to_object_strategy(self, w_dict):
+        strategy = self.space.fromcache(ObjectDictStrategy)
+        keys, values_w = self.unerase(w_dict.dstorage)
+        d_new = strategy.unerase(strategy.get_empty_storage())
+        for i in range(len(keys)):
+            d_new[self.wrap(keys[i])] = values_w[i]
+        w_dict.strategy = strategy
+        w_dict.dstorage = strategy.erase(d_new)
+
+    def switch_to_string_strategy(self, w_dict):
+        strategy = self.space.fromcache(StringDictStrategy)
+        keys, values_w = self.unerase(w_dict.dstorage)
+        storage = strategy.get_empty_storage()
+        d_new = strategy.unerase(storage)
+        for i in range(len(keys)):
+            d_new[keys[i]] = values_w[i]
+        w_dict.strategy = strategy
+        w_dict.dstorage = storage
+
+    def view_as_kwargs(self, w_dict):
+        return self.unerase(w_dict.dstorage)
+
+
+class KwargsDictIterator(IteratorImplementation):
+    def __init__(self, space, strategy, dictimplementation):
+        IteratorImplementation.__init__(self, space, strategy, dictimplementation)
+        keys, values_w = strategy.unerase(self.dictimplementation.dstorage)
+        self.iterator = iter(range(len(keys)))
+        # XXX this potentially leaks
+        self.keys = keys
+        self.values_w = values_w
+
+    def next_entry(self):
+        # note that this 'for' loop only runs once, at most
+        for i in self.iterator:
+            return self.space.wrap(self.keys[i]), self.values_w[i]
+        else:
+            return None, None
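
The KwargsDictStrategy added above keeps keyword arguments in two parallel RPython lists (keys and values), does a linear search on lookup and update, and switches to the string strategy rather than let the lists grow past 16 entries. A minimal pure-Python sketch of the same idea follows; SmallKwargsDict and its attributes are illustrative names, not PyPy APIs:

class SmallKwargsDict:
    # Toy model of the kwargs strategy: parallel key/value lists with
    # linear search, promoting to a plain dict past 16 entries.
    LIMIT = 16

    def __init__(self):
        self.keys = []        # unwrapped string keys
        self.values = []      # values, kept parallel to self.keys
        self.promoted = None  # plain dict once the lists grow too long

    def __setitem__(self, key, value):
        if self.promoted is not None:
            self.promoted[key] = value
            return
        for i, k in enumerate(self.keys):
            if k == key:                  # overwrite an existing key
                self.values[i] = value
                return
        if len(self.keys) >= self.LIMIT:  # keep the linear searches short
            self.promoted = dict(zip(self.keys, self.values))
            self.promoted[key] = value
        else:
            self.keys.append(key)
            self.values.append(value)

    def __getitem__(self, key):
        if self.promoted is not None:
            return self.promoted[key]
        for i, k in enumerate(self.keys):
            if k == key:
                return self.values[i]
        raise KeyError(key)

The 16-entry cap bounds the cost of the linear searches, mirroring the comment in _setitem_str_indirection above.
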
diff --git a/pypy/objspace/std/listobject.py b/pypy/objspace/std/listobject.py
--- a/pypy/objspace/std/listobject.py
+++ b/pypy/objspace/std/listobject.py
@@ -139,6 +139,16 @@
         new erased object as storage"""
         self.strategy.init_from_list_w(self, list_w)
 
+    def clear(self, space):
+        """Initializes (or overrides) the listobject as empty."""
+        self.space = space
+        if space.config.objspace.std.withliststrategies:
+            strategy = space.fromcache(EmptyListStrategy)
+        else:
+            strategy = space.fromcache(ObjectListStrategy)
+        self.strategy = strategy
+        strategy.clear(self)
+
     def clone(self):
         """Returns a clone by creating a new listobject
         with the same strategy and a copy of the storage"""
@@ -200,6 +210,11 @@
         """ Return the items in the list as unwrapped strings. If the list does
         not use the list strategy, return None. """
         return self.strategy.getitems_str(self)
+
+    def getitems_int(self):
+        """ Return the items in the list as unwrapped ints. If the list does
+        not use the list strategy, return None. """
+        return self.strategy.getitems_int(self)
     # ___________________________________________________
 
 
@@ -300,6 +315,9 @@
     def getitems_str(self, w_list):
         return None
 
+    def getitems_int(self, w_list):
+        return None
+
     def getstorage_copy(self, w_list):
         raise NotImplementedError
 
@@ -358,6 +376,9 @@
         assert len(list_w) == 0
         w_list.lstorage = self.erase(None)
 
+    def clear(self, w_list):
+        w_list.lstorage = self.erase(None)
+
     erase, unerase = rerased.new_erasing_pair("empty")
     erase = staticmethod(erase)
     unerase = staticmethod(unerase)
@@ -516,6 +537,9 @@
             raise IndexError
         return start + i * step
 
+    def getitems_int(self, w_list):
+        return self._getitems_range(w_list, False)
+
     def getitem(self, w_list, i):
         return self.wrap(self._getitem_unwrapped(w_list, i))
 
@@ -696,6 +720,7 @@
             for i in l:
                 if i == obj:
                     return True
+            return False
         return ListStrategy.contains(self, w_list, w_obj)
 
     def length(self, w_list):
@@ -937,6 +962,9 @@
     def init_from_list_w(self, w_list, list_w):
         w_list.lstorage = self.erase(list_w)
 
+    def clear(self, w_list):
+        w_list.lstorage = self.erase([])
+
     def contains(self, w_list, w_obj):
         return ListStrategy.contains(self, w_list, w_obj)
 
@@ -970,6 +998,9 @@
         if reverse:
             l.reverse()
 
+    def getitems_int(self, w_list):
+        return self.unerase(w_list.lstorage)
+
 class FloatListStrategy(AbstractUnwrappedStrategy, ListStrategy):
     _none_value = 0.0
     _applevel_repr = "float"
@@ -1027,37 +1058,49 @@
     def getitems_str(self, w_list):
         return self.unerase(w_list.lstorage)
 
-
 # _______________________________________________________
 
 init_signature = Signature(['sequence'], None, None)
 init_defaults = [None]
 
 def init__List(space, w_list, __args__):
-    from pypy.objspace.std.tupleobject import W_TupleObject
+    from pypy.objspace.std.tupleobject import W_AbstractTupleObject
     # this is on the silly side
     w_iterable, = __args__.parse_obj(
             None, 'list', init_signature, init_defaults)
-    w_list.__init__(space, [])
+    w_list.clear(space)
     if w_iterable is not None:
-        # unfortunately this is duplicating space.unpackiterable to avoid
-        # assigning a new RPython list to 'wrappeditems', which defeats the
-        # W_FastListIterObject optimization.
-        if isinstance(w_iterable, W_ListObject):
-            w_list.extend(w_iterable)
-        elif isinstance(w_iterable, W_TupleObject):
-            w_list.extend(W_ListObject(space, w_iterable.wrappeditems[:]))
-        else:
-            _init_from_iterable(space, w_list, w_iterable)
+        if type(w_iterable) is W_ListObject:
+            w_iterable.copy_into(w_list)
+            return
+        elif isinstance(w_iterable, W_AbstractTupleObject):
+            w_list.__init__(space, w_iterable.getitems_copy())
+            return
+
+        intlist = space.listview_int(w_iterable)
+        if intlist is not None:
+            w_list.strategy = strategy = space.fromcache(IntegerListStrategy)
+             # need to copy because intlist can share with w_iterable
+            w_list.lstorage = strategy.erase(intlist[:])
+            return
+
+        strlist = space.listview_str(w_iterable)
+        if strlist is not None:
+            w_list.strategy = strategy = space.fromcache(StringListStrategy)
+             # need to copy because strlist can share with w_iterable
+            w_list.lstorage = strategy.erase(strlist[:])
+            return
+
+        # xxx special hack for speed
+        from pypy.interpreter.generator import GeneratorIterator
+        if isinstance(w_iterable, GeneratorIterator):
+            w_iterable.unpack_into_w(w_list)
+            return
+        # /xxx
+        _init_from_iterable(space, w_list, w_iterable)
 
 def _init_from_iterable(space, w_list, w_iterable):
     # in its own function to make the JIT look into init__List
-    # xxx special hack for speed
-    from pypy.interpreter.generator import GeneratorIterator
-    if isinstance(w_iterable, GeneratorIterator):
-        w_iterable.unpack_into_w(w_list)
-        return
-    # /xxx
     w_iterator = space.iter(w_iterable)
     while True:
         try:
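
The new init__List fast paths above ask the space for an already-unwrapped view of the iterable (space.listview_int / space.listview_str) and install the matching typed strategy directly, copying the view because it may share storage with the source object. A rough pure-Python sketch of that control flow; as_int_list, as_str_list and make_list are hypothetical stand-ins, not PyPy functions:

def as_int_list(obj):
    # Stand-in for space.listview_int(): an unwrapped list of ints,
    # or None if the object cannot provide one cheaply.
    if type(obj) is list and all(isinstance(x, int) for x in obj):
        return obj
    return None

def as_str_list(obj):
    # Stand-in for space.listview_str().
    if type(obj) is list and all(isinstance(x, str) for x in obj):
        return obj
    return None

def make_list(iterable):
    intview = as_int_list(iterable)
    if intview is not None:
        return ("int-strategy", intview[:])      # copy: the view may be shared
    strview = as_str_list(iterable)
    if strview is not None:
        return ("str-strategy", strview[:])      # copy for the same reason
    return ("object-strategy", list(iterable))   # generic fallback

For example, make_list([1, 2, 3]) picks the int strategy with a fresh copy of the items, while a mixed list falls through to the generic path.
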
diff --git a/pypy/objspace/std/listtype.py b/pypy/objspace/std/listtype.py
--- a/pypy/objspace/std/listtype.py
+++ b/pypy/objspace/std/listtype.py
@@ -43,7 +43,7 @@
 def descr__new__(space, w_listtype, __args__):
     from pypy.objspace.std.listobject import W_ListObject
     w_obj = space.allocate_instance(W_ListObject, w_listtype)
-    W_ListObject.__init__(w_obj, space, [])
+    w_obj.clear(space)
     return w_obj
 
 # ____________________________________________________________
diff --git a/pypy/objspace/std/mapdict.py b/pypy/objspace/std/mapdict.py
--- a/pypy/objspace/std/mapdict.py
+++ b/pypy/objspace/std/mapdict.py
@@ -692,6 +692,8 @@
         self.delitem(w_dict, w_key)
         return (w_key, w_value)
 
+    # XXX could implement a more efficient w_keys based on space.newlist_str
+
 def materialize_r_dict(space, obj, dict_w):
     map = obj._get_mapdict_map()
     new_obj = map.materialize_r_dict(space, obj, dict_w)
@@ -699,7 +701,8 @@
 
 class MapDictIteratorImplementation(IteratorImplementation):
     def __init__(self, space, strategy, dictimplementation):
-        IteratorImplementation.__init__(self, space, dictimplementation)
+        IteratorImplementation.__init__(
+            self, space, strategy, dictimplementation)
         w_obj = strategy.unerase(dictimplementation.dstorage)
         self.w_obj = w_obj
         self.orig_map = self.curr_map = w_obj._get_mapdict_map()
diff --git a/pypy/objspace/std/objspace.py b/pypy/objspace/std/objspace.py
--- a/pypy/objspace/std/objspace.py
+++ b/pypy/objspace/std/objspace.py
@@ -234,10 +234,7 @@
             return W_ComplexObject(x.real, x.imag)
 
         if isinstance(x, set):
-            rdict_w = r_dict(self.eq_w, self.hash_w)
-            for item in x:
-                rdict_w[self.wrap(item)] = None
-            res = W_SetObject(self, rdict_w)
+            res = W_SetObject(self, self.newlist([self.wrap(item) for item in x]))
             return res
 
         if isinstance(x, frozenset):
@@ -323,16 +320,15 @@
     def newlist_str(self, list_s):
         return W_ListObject.newlist_str(self, list_s)
 
-    def newdict(self, module=False, instance=False, classofinstance=None,
+    def newdict(self, module=False, instance=False, kwargs=False,
                 strdict=False):
         return W_DictMultiObject.allocate_and_init_instance(
                 self, module=module, instance=instance,
-                classofinstance=classofinstance,
-                strdict=strdict)
+                strdict=strdict, kwargs=kwargs)
 
     def newset(self):
         from pypy.objspace.std.setobject import newset
-        return W_SetObject(self, newset(self))
+        return W_SetObject(self, None)
 
     def newslice(self, w_start, w_end, w_step):
         return W_SliceObject(w_start, w_end, w_step)
@@ -410,7 +406,7 @@
     def unpackiterable(self, w_obj, expected_length=-1):
         if isinstance(w_obj, W_AbstractTupleObject):
             t = w_obj.getitems_copy()
-        elif isinstance(w_obj, W_ListObject):
+        elif type(w_obj) is W_ListObject:
             t = w_obj.getitems_copy()
         else:
             return ObjSpace.unpackiterable(self, w_obj, expected_length)
@@ -424,7 +420,7 @@
         """
         if isinstance(w_obj, W_AbstractTupleObject):
             t = w_obj.tolist()
-        elif isinstance(w_obj, W_ListObject):
+        elif type(w_obj) is W_ListObject:
             if unroll:
                 t = w_obj.getitems_unroll()
             else:
@@ -445,10 +441,12 @@
         return self.fixedview(w_obj, expected_length, unroll=True)
 
     def listview(self, w_obj, expected_length=-1):
-        if isinstance(w_obj, W_ListObject):
+        if type(w_obj) is W_ListObject:
             t = w_obj.getitems()
         elif isinstance(w_obj, W_AbstractTupleObject):
             t = w_obj.getitems_copy()
+        elif isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj):
+            t = w_obj.getitems()
         else:
             return ObjSpace.unpackiterable(self, w_obj, expected_length)
         if expected_length != -1 and len(t) != expected_length:
@@ -456,10 +454,40 @@
         return t
 
     def listview_str(self, w_obj):
-        if isinstance(w_obj, W_ListObject):
+        # note: uses exact type checking for objects with strategies,
+        # and isinstance() for others.  See test_listobject.test_uses_custom...
+        if type(w_obj) is W_ListObject:
+            return w_obj.getitems_str()
+        if type(w_obj) is W_DictMultiObject:
+            return w_obj.listview_str()
+        if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject:
+            return w_obj.listview_str()
+        if isinstance(w_obj, W_StringObject):
+            return w_obj.listview_str()
+        if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj):
             return w_obj.getitems_str()
         return None
 
+    def listview_int(self, w_obj):
+        if type(w_obj) is W_ListObject:
+            return w_obj.getitems_int()
+        if type(w_obj) is W_DictMultiObject:
+            return w_obj.listview_int()
+        if type(w_obj) is W_SetObject or type(w_obj) is W_FrozensetObject:
+            return w_obj.listview_int()
+        if isinstance(w_obj, W_ListObject) and self._uses_list_iter(w_obj):
+            return w_obj.getitems_int()
+        return None
+
+    def view_as_kwargs(self, w_dict):
+        if type(w_dict) is W_DictMultiObject:
+            return w_dict.view_as_kwargs()
+        return (None, None)
+
+    def _uses_list_iter(self, w_obj):
+        from pypy.objspace.descroperation import list_iter
+        return self.lookup(w_obj, '__iter__') is list_iter(self)
+
     def sliceindices(self, w_slice, w_length):
         if isinstance(w_slice, W_SliceObject):
             a, b, c = w_slice.indices3(self, self.int_w(w_length))
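
listview_str and listview_int above deliberately mix exact type() checks (for the strategy-backed built-in types) with isinstance() plus _uses_list_iter (which checks that __iter__ was not overridden), so a user subclass with a custom __iter__ is never short-circuited. A toy illustration of that dispatch rule in plain Python; the names are illustrative only:

def toy_listview_int(obj):
    # Exact type: we own the storage, so looking inside is always safe.
    if type(obj) is list:
        items = obj
    # Subclass: only trust it if it still uses the default iteration.
    elif isinstance(obj, list) and type(obj).__iter__ is list.__iter__:
        items = obj
    else:
        return None
    if all(isinstance(x, int) for x in items):
        return list(items)        # return a copy; the original may be mutated
    return None

class NoisyList(list):
    def __iter__(self):           # overridden iteration disables the fast path
        print("iterating")
        return super(NoisyList, self).__iter__()

# toy_listview_int([1, 2, 3])             -> [1, 2, 3]
# toy_listview_int(NoisyList([1, 2, 3]))  -> None
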
diff --git a/pypy/objspace/std/setobject.py b/pypy/objspace/std/setobject.py
--- a/pypy/objspace/std/setobject.py
+++ b/pypy/objspace/std/setobject.py
@@ -7,6 +7,12 @@
 from pypy.interpreter.argument import Signature
 from pypy.objspace.std.settype import set_typedef as settypedef
 from pypy.objspace.std.frozensettype import frozenset_typedef as frozensettypedef
+from pypy.rlib import rerased
+from pypy.rlib.objectmodel import instantiate
+from pypy.interpreter.generator import GeneratorIterator
+from pypy.objspace.std.listobject import W_ListObject
+from pypy.objspace.std.intobject import W_IntObject
+from pypy.objspace.std.stringobject import W_StringObject
 
 class W_BaseSetObject(W_Object):
     typedef = None
@@ -20,78 +26,860 @@
             return True
         return False
 
-
-    def __init__(w_self, space, setdata):
+    def __init__(w_self, space, w_iterable=None):
         """Initialize the set by taking ownership of 'setdata'."""
-        assert setdata is not None
-        w_self.setdata = setdata
+        w_self.space = space
+        set_strategy_and_setdata(space, w_self, w_iterable)
 
     def __repr__(w_self):
         """representation for debugging purposes"""
-        reprlist = [repr(w_item) for w_item in w_self.setdata.keys()]
+        reprlist = [repr(w_item) for w_item in w_self.getkeys()]
         return "<%s(%s)>" % (w_self.__class__.__name__, ', '.join(reprlist))
 
+    def from_storage_and_strategy(w_self, storage, strategy):
+        obj = w_self._newobj(w_self.space, None)
+        assert isinstance(obj, W_BaseSetObject)
+        obj.strategy = strategy
+        obj.sstorage = storage
+        return obj
+
     _lifeline_ = None
     def getweakref(self):
         return self._lifeline_
+
     def setweakref(self, space, weakreflifeline):
         self._lifeline_ = weakreflifeline
     def delweakref(self):
         self._lifeline_ = None
 
+    def switch_to_object_strategy(self, space):
+        d = self.strategy.getdict_w(self)
+        self.strategy = strategy = space.fromcache(ObjectSetStrategy)
+        self.sstorage = strategy.erase(d)
+
+    def switch_to_empty_strategy(self):
+        self.strategy = strategy = self.space.fromcache(EmptySetStrategy)
+        self.sstorage = strategy.get_empty_storage()
+
+    # _____________ strategy methods ________________
+
+
+    def clear(self):
+        """ Removes all elements from the set. """
+        self.strategy.clear(self)
+
+    def copy_real(self):
+        """ Returns a clone of the set. Frozensets storages are also copied."""
+        return self.strategy.copy_real(self)
+
+    def length(self):
+        """ Returns the number of items inside the set. """
+        return self.strategy.length(self)
+
+    def add(self, w_key):
+        """ Adds an element to the set. The element must be wrapped. """
+        self.strategy.add(self, w_key)
+
+    def remove(self, w_item):
+        """ Removes the given element from the set. Element must be wrapped. """
+        return self.strategy.remove(self, w_item)
+
+    def getdict_w(self):
+        """ Returns a dict with all elements of the set. Needed only for switching to ObjectSetStrategy. """
+        return self.strategy.getdict_w(self)
+
+    def listview_str(self):
+        """ If this is a string set return its contents as a list of uwnrapped strings. Otherwise return None. """
+        return self.strategy.listview_str(self)
+
+    def listview_int(self):
+        """ If this is an int set return its contents as a list of uwnrapped ints. Otherwise return None. """
+        return self.strategy.listview_int(self)
+
+    def get_storage_copy(self):
+        """ Returns a copy of the storage. Needed when we want to clone all elements from one set and
+        put them into another. """
+        return self.strategy.get_storage_copy(self)
+
+    def getkeys(self):
+        """ Returns a list of all elements inside the set. Only used in __repr__. Use as less as possible."""
+        return self.strategy.getkeys(self)
+
+    def difference(self, w_other):
+        """ Returns a set with all items that are in this set, but not in w_other. W_other must be a set."""
+        return self.strategy.difference(self, w_other)
+
+    def difference_update(self, w_other):
+        """ As difference but overwrites the sets content with the result. W_other must be a set."""
+        self.strategy.difference_update(self, w_other)
+
+    def symmetric_difference(self, w_other):
+        """ Returns a set with all items that are either in this set or in w_other, but not in both. W_other must be a set. """
+        return self.strategy.symmetric_difference(self, w_other)
+
+    def symmetric_difference_update(self, w_other):
+        """ As symmetric_difference but overwrites the content of the set with the result. W_other must be a set."""
+        self.strategy.symmetric_difference_update(self, w_other)
+
+    def intersect(self, w_other):
+        """ Returns a set with all items that exists in both sets, this set and in w_other. W_other must be a set. """
+        return self.strategy.intersect(self, w_other)
+
+    def intersect_update(self, w_other):
+        """ Keeps only those elements found in both sets, removing all other elements. W_other must be a set."""
+        self.strategy.intersect_update(self, w_other)
+
+    def issubset(self, w_other):
+        """ Checks wether this set is a subset of w_other. W_other must be a set. """
+        return self.strategy.issubset(self, w_other)
+
+    def isdisjoint(self, w_other):
+        """ Checks wether this set and the w_other are completly different, i.e. have no equal elements. W_other must be a set."""
+        return self.strategy.isdisjoint(self, w_other)
+
+    def update(self, w_other):
+        """ Appends all elements from the given set to this set. W_other must be a set."""
+        self.strategy.update(self, w_other)
+
+    def has_key(self, w_key):
+        """ Checks wether this set contains the given wrapped key."""
+        return self.strategy.has_key(self, w_key)
+
+    def equals(self, w_other):
+        """ Checks wether this set and the given set are equal, i.e. contain the same elements. W_other must be a set."""
+        return self.strategy.equals(self, w_other)
+
+    def iter(self):
+        """ Returns an iterator of the elements from this set. """
+        return self.strategy.iter(self)
+
+    def popitem(self):
+        """ Removes an arbitrary element from the set. May raise KeyError if set is empty."""
+        return self.strategy.popitem(self)
+
 class W_SetObject(W_BaseSetObject):
     from pypy.objspace.std.settype import set_typedef as typedef
 
-    def _newobj(w_self, space, rdict_w):
-        """Make a new set by taking ownership of 'rdict_w'."""
-        return W_SetObject(space, rdict_w)
+    def _newobj(w_self, space, w_iterable):
+        """Make a new set by taking ownership of 'w_iterable'."""
+        return W_SetObject(space, w_iterable)
 
 class W_FrozensetObject(W_BaseSetObject):
     from pypy.objspace.std.frozensettype import frozenset_typedef as typedef
     hash = 0
 
-    def _newobj(w_self, space, rdict_w):
-        """Make a new frozenset by taking ownership of 'rdict_w'."""
-        return W_FrozensetObject(space, rdict_w)
+    def _newobj(w_self, space, w_iterable):
+        """Make a new frozenset by taking ownership of 'w_iterable'.""" 
+        return W_FrozensetObject(space, w_iterable)
 
 registerimplementation(W_BaseSetObject)
 registerimplementation(W_SetObject)
 registerimplementation(W_FrozensetObject)
 
-class W_SetIterObject(W_Object):
-    from pypy.objspace.std.settype import setiter_typedef as typedef
+class SetStrategy(object):
+    def __init__(self, space):
+        self.space = space
 
-    def __init__(w_self, setdata):
-        w_self.content = content = setdata
-        w_self.len = len(content)
-        w_self.pos = 0
-        w_self.iterator = w_self.content.iterkeys()
+    def get_empty_dict(self):
+        """ Returns an empty dictionary depending on the strategy. Used to initalize a new storage. """
+        raise NotImplementedError
 
-    def next_entry(w_self):
-        for w_key in w_self.iterator:
+    def get_empty_storage(self):
+        """ Returns an empty storage (erased) object. Used to initialize an empty set."""
+        raise NotImplementedError
+
+    def listview_str(self, w_set):
+        return None
+
+    def listview_int(self, w_set):
+        return None
+
+    #def erase(self, storage):
+    #    raise NotImplementedError
+
+    #def unerase(self, storage):
+    #    raise NotImplementedError
+
+    # __________________ methods called on W_SetObject _________________
+
+    def clear(self, w_set):
+        raise NotImplementedError
+
+    def copy_real(self, w_set):
+        raise NotImplementedError
+
+    def length(self, w_set):
+        raise NotImplementedError
+
+    def add(self, w_set, w_key):
+        raise NotImplementedError
+
+    def remove(self, w_set, w_item):
+        raise NotImplementedError
+
+    def getdict_w(self, w_set):
+        raise NotImplementedError
+
+    def get_storage_copy(self, w_set):
+        raise NotImplementedError
+
+    def getkeys(self, w_set):
+        raise NotImplementedError
+
+    def difference(self, w_set, w_other):
+        raise NotImplementedError
+
+    def difference_update(self, w_set, w_other):
+        raise NotImplementedError
+
+    def symmetric_difference(self, w_set, w_other):
+        raise NotImplementedError
+
+    def symmetric_difference_update(self, w_set, w_other):
+        raise NotImplementedError
+
+    def intersect(self, w_set, w_other):
+        raise NotImplementedError
+
+    def intersect_update(self, w_set, w_other):
+        raise NotImplementedError
+
+    def issubset(self, w_set, w_other):
+        raise NotImplementedError
+
+    def isdisjoint(self, w_set, w_other):
+        raise NotImplementedError
+
+    def update(self, w_set, w_other):
+        raise NotImplementedError
+
+    def has_key(self, w_set, w_key):
+        raise NotImplementedError
+
+    def equals(self, w_set, w_other):
+        raise NotImplementedError
+
+    def iter(self, w_set):
+        raise NotImplementedError
+
+    def popitem(self, w_set):
+        raise NotImplementedError
+
+class EmptySetStrategy(SetStrategy):
+
+    erase, unerase = rerased.new_erasing_pair("empty")
+    erase = staticmethod(erase)
+    unerase = staticmethod(unerase)
+
+    def get_empty_storage(self):
+        return self.erase(None)
+
+    def is_correct_type(self, w_key):
+        return False
+
+    def length(self, w_set):
+        return 0
+
+    def clear(self, w_set):
+        pass
+
+    def copy_real(self, w_set):
+        storage = self.erase(None)
+        clone = w_set.from_storage_and_strategy(storage, self)
+        return clone
+
+    def add(self, w_set, w_key):
+        if type(w_key) is W_IntObject:
+            strategy = self.space.fromcache(IntegerSetStrategy)
+        elif type(w_key) is W_StringObject:
+            strategy = self.space.fromcache(StringSetStrategy)
+        else:
+            strategy = self.space.fromcache(ObjectSetStrategy)
+        w_set.strategy = strategy
+        w_set.sstorage = strategy.get_empty_storage()
+        w_set.add(w_key)
+
+    def remove(self, w_set, w_item):
+        return False
+
+    def getdict_w(self, w_set):
+        return newset(self.space)
+
+    def get_storage_copy(self, w_set):
+        return w_set.sstorage
+
+    def getkeys(self, w_set):
+        return []
+
+    def has_key(self, w_set, w_key):
+        return False
+
+    def equals(self, w_set, w_other):
+        if w_other.strategy is self or w_other.length() == 0:
+            return True
+        return False
+
+    def difference(self, w_set, w_other):
+        return w_set.copy_real()
+
+    def difference_update(self, w_set, w_other):
+        pass
+
+    def intersect(self, w_set, w_other):
+        return w_set.copy_real()
+
+    def intersect_update(self, w_set, w_other):
+        pass
+
+    def isdisjoint(self, w_set, w_other):
+        return True
+
+    def issubset(self, w_set, w_other):
+        return True
+
+    def symmetric_difference(self, w_set, w_other):
+        return w_other.copy_real()
+
+    def symmetric_difference_update(self, w_set, w_other):
+        w_set.strategy = w_other.strategy
+        w_set.sstorage = w_other.get_storage_copy()
+
+    def update(self, w_set, w_other):
+        w_set.strategy = w_other.strategy
+        w_set.sstorage = w_other.get_storage_copy()
+
+    def iter(self, w_set):
+        return EmptyIteratorImplementation(self.space, self, w_set)
+
+    def popitem(self, w_set):
+        raise OperationError(self.space.w_KeyError,
+                                self.space.wrap('pop from an empty set'))
+
+class AbstractUnwrappedSetStrategy(object):
+    _mixin_ = True
+
+    def is_correct_type(self, w_key):
+        """ Checks wether the given wrapped key fits this strategy."""
+        raise NotImplementedError
+
+    def unwrap(self, w_item):
+        """ Returns the unwrapped value of the given wrapped item."""
+        raise NotImplementedError
+
+    def wrap(self, item):
+        """ Returns a wrapped version of the given unwrapped item. """
+        raise NotImplementedError
+
+    def get_storage_from_list(self, list_w):
+        setdata = self.get_empty_dict()
+        for w_item in list_w:
+            setdata[self.unwrap(w_item)] = None
+        return self.erase(setdata)
+
+    def get_storage_from_unwrapped_list(self, items):
+        setdata = self.get_empty_dict()
+        for item in items:
+            setdata[item] = None
+        return self.erase(setdata)
+
+    def length(self, w_set):
+        return len(self.unerase(w_set.sstorage))
+
+    def clear(self, w_set):
+        w_set.switch_to_empty_strategy()
+
+    def copy_real(self, w_set):
+        # may be used internally on frozen sets, although frozenset().copy()
+        # returns self in frozenset_copy__Frozenset.
+        strategy = w_set.strategy
+        d = self.unerase(w_set.sstorage)
+        storage = self.erase(d.copy())
+        clone = w_set.from_storage_and_strategy(storage, strategy)
+        return clone
+
+    def add(self, w_set, w_key):
+        if self.is_correct_type(w_key):
+            d = self.unerase(w_set.sstorage)
+            d[self.unwrap(w_key)] = None
+        else:
+            w_set.switch_to_object_strategy(self.space)
+            w_set.add(w_key)
+
+    def remove(self, w_set, w_item):
+        from pypy.objspace.std.dictmultiobject import _never_equal_to_string
+        d = self.unerase(w_set.sstorage)
+        if not self.is_correct_type(w_item):
+            #XXX check type of w_item and immediately return False in some cases
+            w_set.switch_to_object_strategy(self.space)
+            return w_set.remove(w_item)
+
+        key = self.unwrap(w_item)
+        try:
+            del d[key]
+            return True
+        except KeyError:
+            return False
+
+    def getdict_w(self, w_set):
+        result = newset(self.space)
+        keys = self.unerase(w_set.sstorage).keys()
+        for key in keys:
+            result[self.wrap(key)] = None
+        return result
+
+    def get_storage_copy(self, w_set):
+        d = self.unerase(w_set.sstorage)
+        copy = self.erase(d.copy())
+        return copy
+
+    def getkeys(self, w_set):
+        keys = self.unerase(w_set.sstorage).keys()
+        keys_w = [self.wrap(key) for key in keys]
+        return keys_w
+
+    def has_key(self, w_set, w_key):
+        from pypy.objspace.std.dictmultiobject import _never_equal_to_string
+        if not self.is_correct_type(w_key):
+            #XXX check type of w_item and immediately return False in some cases
+            w_set.switch_to_object_strategy(self.space)
+            return w_set.has_key(w_key)
+        d = self.unerase(w_set.sstorage)
+        return self.unwrap(w_key) in d
+
+    def equals(self, w_set, w_other):
+        if w_set.length() != w_other.length():
+            return False
+        items = self.unerase(w_set.sstorage).keys()
+        for key in items:
+            if not w_other.has_key(self.wrap(key)):
+                return False
+        return True
+
+    def _difference_wrapped(self, w_set, w_other):
+        strategy = self.space.fromcache(ObjectSetStrategy)
+
+        d_new = strategy.get_empty_dict()
+        for obj in self.unerase(w_set.sstorage):
+            w_item = self.wrap(obj)
+            if not w_other.has_key(w_item):
+                d_new[w_item] = None
+
+        return strategy.erase(d_new)
+
+    def _difference_unwrapped(self, w_set, w_other):
+        iterator = self.unerase(w_set.sstorage).iterkeys()
+        other_dict = self.unerase(w_other.sstorage)
+        result_dict = self.get_empty_dict()
+        for key in iterator:
+            if key not in other_dict:
+                result_dict[key] = None
+        return self.erase(result_dict)
+
+    def _difference_base(self, w_set, w_other):
+        if self is w_other.strategy:
+            strategy = w_set.strategy
+            storage = self._difference_unwrapped(w_set, w_other)
+        elif not w_set.strategy.may_contain_equal_elements(w_other.strategy):
+            strategy = w_set.strategy
+            storage = w_set.sstorage
+        else:
+            strategy = self.space.fromcache(ObjectSetStrategy)
+            storage = self._difference_wrapped(w_set, w_other)
+        return storage, strategy
+
+    def difference(self, w_set, w_other):
+        storage, strategy = self._difference_base(w_set, w_other)
+        w_newset = w_set.from_storage_and_strategy(storage, strategy)
+        return w_newset
+
+    def difference_update(self, w_set, w_other):
+        storage, strategy = self._difference_base(w_set, w_other)
+        w_set.strategy = strategy
+        w_set.sstorage = storage
+
+    def _symmetric_difference_unwrapped(self, w_set, w_other):
+        d_new = self.get_empty_dict()
+        d_this = self.unerase(w_set.sstorage)
+        d_other = self.unerase(w_other.sstorage)
+        for key in d_other.keys():
+            if not key in d_this:
+                d_new[key] = None
+        for key in d_this.keys():
+            if not key in d_other:
+                d_new[key] = None
+
+        storage = self.erase(d_new)
+        return storage
+
+    def _symmetric_difference_wrapped(self, w_set, w_other):
+        newsetdata = newset(self.space)
+        for obj in self.unerase(w_set.sstorage):
+            w_item = self.wrap(obj)
+            if not w_other.has_key(w_item):
+                newsetdata[w_item] = None
+
+        w_iterator = w_other.iter()
+        while True:
+            w_item = w_iterator.next_entry()
+            if w_item is None:
+                break
+            if not w_set.has_key(w_item):
+                newsetdata[w_item] = None
+
+        strategy = self.space.fromcache(ObjectSetStrategy)
+        return strategy.erase(newsetdata)
+
+    def _symmetric_difference_base(self, w_set, w_other):
+        if self is w_other.strategy:
+            strategy = w_set.strategy
+            storage = self._symmetric_difference_unwrapped(w_set, w_other)
+        else:
+            strategy = self.space.fromcache(ObjectSetStrategy)
+            storage = self._symmetric_difference_wrapped(w_set, w_other)
+        return storage, strategy
+
+    def symmetric_difference(self, w_set, w_other):
+        storage, strategy = self._symmetric_difference_base(w_set, w_other)
+        return w_set.from_storage_and_strategy(storage, strategy)
+
+    def symmetric_difference_update(self, w_set, w_other):
+        storage, strategy = self._symmetric_difference_base(w_set, w_other)
+        w_set.strategy = strategy
+        w_set.sstorage = storage
+
+    def _intersect_base(self, w_set, w_other):
+        if self is w_other.strategy:
+            strategy = w_set.strategy
+            storage = strategy._intersect_unwrapped(w_set, w_other)
+        elif not w_set.strategy.may_contain_equal_elements(w_other.strategy):
+            strategy = self.space.fromcache(EmptySetStrategy)
+            storage = strategy.get_empty_storage()
+        else:
+            strategy = self.space.fromcache(ObjectSetStrategy)
+            storage = self._intersect_wrapped(w_set, w_other)
+        return storage, strategy
+
+    def _intersect_wrapped(self, w_set, w_other):
+        result = newset(self.space)
+        for key in self.unerase(w_set.sstorage):
+            w_key = self.wrap(key)
+            if w_other.has_key(w_key):
+                result[w_key] = None
+
+        strategy = self.space.fromcache(ObjectSetStrategy)
+        return strategy.erase(result)
+
+    def _intersect_unwrapped(self, w_set, w_other):
+        result = self.get_empty_dict()
+        d_this = self.unerase(w_set.sstorage)
+        d_other = self.unerase(w_other.sstorage)
+        for key in d_this:
+            if key in d_other:
+                result[key] = None
+        return self.erase(result)
+
+    def intersect(self, w_set, w_other):
+        if w_set.length() > w_other.length():
+            return w_other.intersect(w_set)
+
+        storage, strategy = self._intersect_base(w_set, w_other)
+        return w_set.from_storage_and_strategy(storage, strategy)
+
+    def intersect_update(self, w_set, w_other):
+        if w_set.length() > w_other.length():
+            w_intersection = w_other.intersect(w_set)
+            strategy = w_intersection.strategy
+            storage = w_intersection.sstorage
+        else:
+            storage, strategy = self._intersect_base(w_set, w_other)
+        w_set.strategy = strategy
+        w_set.sstorage = storage
+
+    def _issubset_unwrapped(self, w_set, w_other):
+        d_other = self.unerase(w_other.sstorage)
+        for item in self.unerase(w_set.sstorage):
+            if not item in d_other:
+                return False


More information about the pypy-commit mailing list