[pypy-commit] pypy stm-gc: hg merge default

arigo noreply at buildbot.pypy.org
Tue Apr 17 11:20:56 CEST 2012


Author: Armin Rigo <arigo at tunes.org>
Branch: stm-gc
Changeset: r54454:d121be30ed67
Date: 2012-04-17 11:01 +0200
http://bitbucket.org/pypy/pypy/changeset/d121be30ed67/

Log:	hg merge default

diff too long, truncating to 10000 out of 17619 lines

diff --git a/_pytest/__init__.py b/_pytest/__init__.py
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
 #
-__version__ = '2.1.0.dev4'
+__version__ = '2.2.4.dev2'
diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py
--- a/_pytest/assertion/__init__.py
+++ b/_pytest/assertion/__init__.py
@@ -2,35 +2,25 @@
 support for presenting detailed information in failing assertions.
 """
 import py
-import imp
-import marshal
-import struct
 import sys
 import pytest
 from _pytest.monkeypatch import monkeypatch
-from _pytest.assertion import reinterpret, util
-
-try:
-    from _pytest.assertion.rewrite import rewrite_asserts
-except ImportError:
-    rewrite_asserts = None
-else:
-    import ast
+from _pytest.assertion import util
 
 def pytest_addoption(parser):
     group = parser.getgroup("debugconfig")
-    group.addoption('--assertmode', action="store", dest="assertmode",
-                    choices=("on", "old", "off", "default"), default="default",
-                    metavar="on|old|off",
+    group.addoption('--assert', action="store", dest="assertmode",
+                    choices=("rewrite", "reinterp", "plain",),
+                    default="rewrite", metavar="MODE",
                     help="""control assertion debugging tools.
-'off' performs no assertion debugging.
-'old' reinterprets the expressions in asserts to glean information.
-'on' (the default) rewrites the assert statements in test modules to provide
-sub-expression results.""")
+'plain' performs no assertion debugging.
+'reinterp' reinterprets assert statements after they failed to provide assertion expression information.
+'rewrite' (the default) rewrites assert statements in test modules on import
+to provide assert expression information. """)
     group.addoption('--no-assert', action="store_true", default=False,
-        dest="noassert", help="DEPRECATED equivalent to --assertmode=off")
+        dest="noassert", help="DEPRECATED equivalent to --assert=plain")
     group.addoption('--nomagic', action="store_true", default=False,
-        dest="nomagic", help="DEPRECATED equivalent to --assertmode=off")
+        dest="nomagic", help="DEPRECATED equivalent to --assert=plain")
 
 class AssertionState:
     """State for the assertion plugin."""
@@ -40,89 +30,90 @@
         self.trace = config.trace.root.get("assertion")
 
 def pytest_configure(config):
-    warn_about_missing_assertion()
     mode = config.getvalue("assertmode")
     if config.getvalue("noassert") or config.getvalue("nomagic"):
-        if mode not in ("off", "default"):
-            raise pytest.UsageError("assertion options conflict")
-        mode = "off"
-    elif mode == "default":
-        mode = "on"
-    if mode != "off":
-        def callbinrepr(op, left, right):
-            hook_result = config.hook.pytest_assertrepr_compare(
-                config=config, op=op, left=left, right=right)
-            for new_expl in hook_result:
-                if new_expl:
-                    return '\n~'.join(new_expl)
+        mode = "plain"
+    if mode == "rewrite":
+        try:
+            import ast
+        except ImportError:
+            mode = "reinterp"
+        else:
+            if sys.platform.startswith('java'):
+                mode = "reinterp"
+    if mode != "plain":
+        _load_modules(mode)
         m = monkeypatch()
         config._cleanup.append(m.undo)
         m.setattr(py.builtin.builtins, 'AssertionError',
                   reinterpret.AssertionError)
-        m.setattr(util, '_reprcompare', callbinrepr)
-    if mode == "on" and rewrite_asserts is None:
-        mode = "old"
+    hook = None
+    if mode == "rewrite":
+        hook = rewrite.AssertionRewritingHook()
+        sys.meta_path.append(hook)
+    warn_about_missing_assertion(mode)
     config._assertstate = AssertionState(config, mode)
+    config._assertstate.hook = hook
     config._assertstate.trace("configured with mode set to %r" % (mode,))
 
-def _write_pyc(co, source_path):
-    if hasattr(imp, "cache_from_source"):
-        # Handle PEP 3147 pycs.
-        pyc = py.path.local(imp.cache_from_source(str(source_path)))
-        pyc.ensure()
-    else:
-        pyc = source_path + "c"
-    mtime = int(source_path.mtime())
-    fp = pyc.open("wb")
-    try:
-        fp.write(imp.get_magic())
-        fp.write(struct.pack("<l", mtime))
-        marshal.dump(co, fp)
-    finally:
-        fp.close()
-    return pyc
+def pytest_unconfigure(config):
+    hook = config._assertstate.hook
+    if hook is not None:
+        sys.meta_path.remove(hook)
 
-def before_module_import(mod):
-    if mod.config._assertstate.mode != "on":
-        return
-    # Some deep magic: load the source, rewrite the asserts, and write a
-    # fake pyc, so that it'll be loaded when the module is imported.
-    source = mod.fspath.read()
-    try:
-        tree = ast.parse(source)
-    except SyntaxError:
-        # Let this pop up again in the real import.
-        mod.config._assertstate.trace("failed to parse: %r" % (mod.fspath,))
-        return
-    rewrite_asserts(tree)
-    try:
-        co = compile(tree, str(mod.fspath), "exec")
-    except SyntaxError:
-        # It's possible that this error is from some bug in the assertion
-        # rewriting, but I don't know of a fast way to tell.
-        mod.config._assertstate.trace("failed to compile: %r" % (mod.fspath,))
-        return
-    mod._pyc = _write_pyc(co, mod.fspath)
-    mod.config._assertstate.trace("wrote pyc: %r" % (mod._pyc,))
+def pytest_collection(session):
+    # this hook is only called when test modules are collected
+    # so for example not in the master process of pytest-xdist
+    # (which does not collect test modules)
+    hook = session.config._assertstate.hook
+    if hook is not None:
+        hook.set_session(session)
 
-def after_module_import(mod):
-    if not hasattr(mod, "_pyc"):
-        return
-    state = mod.config._assertstate
-    try:
-        mod._pyc.remove()
-    except py.error.ENOENT:
-        state.trace("couldn't find pyc: %r" % (mod._pyc,))
-    else:
-        state.trace("removed pyc: %r" % (mod._pyc,))
+def pytest_runtest_setup(item):
+    def callbinrepr(op, left, right):
+        hook_result = item.ihook.pytest_assertrepr_compare(
+            config=item.config, op=op, left=left, right=right)
+        for new_expl in hook_result:
+            if new_expl:
+                res = '\n~'.join(new_expl)
+                if item.config.getvalue("assertmode") == "rewrite":
+                    # The result will be fed back to a python % formatting
+                    # operation, which will fail if there are extraneous
+                    # '%'s in the string. Escape them here.
+                    res = res.replace("%", "%%")
+                return res
+    util._reprcompare = callbinrepr
 
-def warn_about_missing_assertion():
+def pytest_runtest_teardown(item):
+    util._reprcompare = None
+
+def pytest_sessionfinish(session):
+    hook = session.config._assertstate.hook
+    if hook is not None:
+        hook.session = None
+
+def _load_modules(mode):
+    """Lazily import assertion related code."""
+    global rewrite, reinterpret
+    from _pytest.assertion import reinterpret
+    if mode == "rewrite":
+        from _pytest.assertion import rewrite
+
+def warn_about_missing_assertion(mode):
     try:
         assert False
     except AssertionError:
         pass
     else:
-        sys.stderr.write("WARNING: failing tests may report as passing because "
-        "assertions are turned off!  (are you using python -O?)\n")
+        if mode == "rewrite":
+            specifically = ("assertions which are not in test modules "
+                            "will be ignored")
+        else:
+            specifically = "failing tests may report as passing"
+
+        sys.stderr.write("WARNING: " + specifically +
+                        " because assert statements are not executed "
+                        "by the underlying Python interpreter "
+                        "(are you using python -O?)\n")
 
 pytest_assertrepr_compare = util.assertrepr_compare
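
The hunk above replaces the old write-a-fake-pyc dance with a PEP 302 meta path hook: pytest_configure appends an AssertionRewritingHook instance to sys.meta_path and pytest_unconfigure removes it again. A minimal sketch of that register/unregister pattern, written against the same imp-based API the diff uses (NoopImportHook and the json import are only for illustration; this is not the rewriting hook itself):

    import imp
    import sys

    class NoopImportHook(object):
        """Finder/loader pair that loads modules unchanged (illustration only)."""

        def find_module(self, name, path=None):
            try:
                self._found = imp.find_module(name.rsplit(".", 1)[-1], path)
            except ImportError:
                return None           # let the next finder on sys.meta_path try
            return self               # we also act as the loader

        def load_module(self, name):
            fd, fn, desc = self._found
            try:
                return imp.load_module(name, fd, fn, desc)
            finally:
                if fd is not None:
                    fd.close()

    hook = NoopImportHook()
    sys.meta_path.append(hook)        # what pytest_configure does with its hook
    try:
        import json                   # import traffic now flows through the hook
    finally:
        sys.meta_path.remove(hook)    # what pytest_unconfigure does
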
diff --git a/_pytest/assertion/newinterpret.py b/_pytest/assertion/newinterpret.py
--- a/_pytest/assertion/newinterpret.py
+++ b/_pytest/assertion/newinterpret.py
@@ -53,7 +53,7 @@
     if should_fail:
         return ("(assertion failed, but when it was re-run for "
                 "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --no-assert)")
+                "compute assert expression before the assert or use --assert=plain)")
 
 def run(offending_line, frame=None):
     if frame is None:
diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py
--- a/_pytest/assertion/oldinterpret.py
+++ b/_pytest/assertion/oldinterpret.py
@@ -1,8 +1,7 @@
 import py
 import sys, inspect
 from compiler import parse, ast, pycodegen
-from _pytest.assertion.util import format_explanation
-from _pytest.assertion.reinterpret import BuiltinAssertionError
+from _pytest.assertion.util import format_explanation, BuiltinAssertionError
 
 passthroughex = py.builtin._sysex
 
@@ -482,7 +481,7 @@
     if should_fail:
         return ("(assertion failed, but when it was re-run for "
                 "printing intermediate values, it did not fail.  Suggestions: "
-                "compute assert expression before the assert or use --nomagic)")
+                "compute assert expression before the assert or use --assert=plain)")
     else:
         return None
 
diff --git a/_pytest/assertion/reinterpret.py b/_pytest/assertion/reinterpret.py
--- a/_pytest/assertion/reinterpret.py
+++ b/_pytest/assertion/reinterpret.py
@@ -1,7 +1,6 @@
 import sys
 import py
-
-BuiltinAssertionError = py.builtin.builtins.AssertionError
+from _pytest.assertion.util import BuiltinAssertionError
 
 class AssertionError(BuiltinAssertionError):
     def __init__(self, *args):
diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py
--- a/_pytest/assertion/rewrite.py
+++ b/_pytest/assertion/rewrite.py
@@ -1,14 +1,258 @@
 """Rewrite assertion AST to produce nice error messages"""
 
 import ast
-import collections
+import errno
 import itertools
+import imp
+import marshal
+import os
+import struct
 import sys
+import types
 
 import py
 from _pytest.assertion import util
 
 
+# Windows gives ENOENT in places *nix gives ENOTDIR.
+if sys.platform.startswith("win"):
+    PATH_COMPONENT_NOT_DIR = errno.ENOENT
+else:
+    PATH_COMPONENT_NOT_DIR = errno.ENOTDIR
+
+# py.test caches rewritten pycs in __pycache__.
+if hasattr(imp, "get_tag"):
+    PYTEST_TAG = imp.get_tag() + "-PYTEST"
+else:
+    if hasattr(sys, "pypy_version_info"):
+        impl = "pypy"
+    elif sys.platform == "java":
+        impl = "jython"
+    else:
+        impl = "cpython"
+    ver = sys.version_info
+    PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
+    del ver, impl
+
+PYC_EXT = ".py" + ("c" if __debug__ else "o")
+PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
+
+REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+
+class AssertionRewritingHook(object):
+    """Import hook which rewrites asserts."""
+
+    def __init__(self):
+        self.session = None
+        self.modules = {}
+
+    def set_session(self, session):
+        self.fnpats = session.config.getini("python_files")
+        self.session = session
+
+    def find_module(self, name, path=None):
+        if self.session is None:
+            return None
+        sess = self.session
+        state = sess.config._assertstate
+        state.trace("find_module called for: %s" % name)
+        names = name.rsplit(".", 1)
+        lastname = names[-1]
+        pth = None
+        if path is not None and len(path) == 1:
+            pth = path[0]
+        if pth is None:
+            try:
+                fd, fn, desc = imp.find_module(lastname, path)
+            except ImportError:
+                return None
+            if fd is not None:
+                fd.close()
+            tp = desc[2]
+            if tp == imp.PY_COMPILED:
+                if hasattr(imp, "source_from_cache"):
+                    fn = imp.source_from_cache(fn)
+                else:
+                    fn = fn[:-1]
+            elif tp != imp.PY_SOURCE:
+                # Don't know what this is.
+                return None
+        else:
+            fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
+        fn_pypath = py.path.local(fn)
+        # Is this a test file?
+        if not sess.isinitpath(fn):
+            # We have to be very careful here because imports in this code can
+            # trigger a cycle.
+            self.session = None
+            try:
+                for pat in self.fnpats:
+                    if fn_pypath.fnmatch(pat):
+                        state.trace("matched test file %r" % (fn,))
+                        break
+                else:
+                    return None
+            finally:
+                self.session = sess
+        else:
+            state.trace("matched test file (was specified on cmdline): %r" % (fn,))
+        # The requested module looks like a test file, so rewrite it. This is
+        # the most magical part of the process: load the source, rewrite the
+        # asserts, and load the rewritten source. We also cache the rewritten
+        # module code in a special pyc. We must be aware of the possibility of
+        # concurrent py.test processes rewriting and loading pycs. To avoid
+        # tricky race conditions, we maintain the following invariant: The
+        # cached pyc is always a complete, valid pyc. Operations on it must be
+        # atomic. POSIX's atomic rename comes in handy.
+        write = not sys.dont_write_bytecode
+        cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
+        if write:
+            try:
+                os.mkdir(cache_dir)
+            except OSError:
+                e = sys.exc_info()[1].errno
+                if e == errno.EEXIST:
+                    # Either the __pycache__ directory already exists (the
+                    # common case) or it's blocked by a non-dir node. In the
+                    # latter case, we'll ignore it in _write_pyc.
+                    pass
+                elif e == PATH_COMPONENT_NOT_DIR:
+                    # One of the path components was not a directory, likely
+                    # because we're in a zip file.
+                    write = False
+                elif e == errno.EACCES:
+                    state.trace("read only directory: %r" % (fn_pypath.dirname,))
+                    write = False
+                else:
+                    raise
+        cache_name = fn_pypath.basename[:-3] + PYC_TAIL
+        pyc = os.path.join(cache_dir, cache_name)
+        # Notice that even if we're in a read-only directory, I'm going to check
+        # for a cached pyc. This may not be optimal...
+        co = _read_pyc(fn_pypath, pyc)
+        if co is None:
+            state.trace("rewriting %r" % (fn,))
+            co = _rewrite_test(state, fn_pypath)
+            if co is None:
+                # Probably a SyntaxError in the test.
+                return None
+            if write:
+                _make_rewritten_pyc(state, fn_pypath, pyc, co)
+        else:
+            state.trace("found cached rewritten pyc for %r" % (fn,))
+        self.modules[name] = co, pyc
+        return self
+
+    def load_module(self, name):
+        co, pyc = self.modules.pop(name)
+        # I wish I could just call imp.load_compiled here, but __file__ has to
+        # be set properly. In Python 3.2+, this all would be handled correctly
+        # by load_compiled.
+        mod = sys.modules[name] = imp.new_module(name)
+        try:
+            mod.__file__ = co.co_filename
+            # Normally, this attribute is 3.2+.
+            mod.__cached__ = pyc
+            py.builtin.exec_(co, mod.__dict__)
+        except:
+            del sys.modules[name]
+            raise
+        return sys.modules[name]
+
+def _write_pyc(co, source_path, pyc):
+    # Technically, we don't have to have the same pyc format as (C)Python, since
+    # these "pycs" should never be seen by builtin import. However, there's
+    # little reason to deviate, and I hope sometime to be able to use
+    # imp.load_compiled to load them. (See the comment in load_module above.)
+    mtime = int(source_path.mtime())
+    try:
+        fp = open(pyc, "wb")
+    except IOError:
+        err = sys.exc_info()[1].errno
+        if err == PATH_COMPONENT_NOT_DIR:
+            # This happens when we get an EEXIST in find_module creating the
+            # __pycache__ directory and __pycache__ is blocked by some non-dir node.
+            return False
+        raise
+    try:
+        fp.write(imp.get_magic())
+        fp.write(struct.pack("<l", mtime))
+        marshal.dump(co, fp)
+    finally:
+        fp.close()
+    return True
+
+RN = "\r\n".encode("utf-8")
+N = "\n".encode("utf-8")
+
+def _rewrite_test(state, fn):
+    """Try to read and rewrite *fn* and return the code object."""
+    try:
+        source = fn.read("rb")
+    except EnvironmentError:
+        return None
+    # On Python versions which are not 2.7 and less than or equal to 3.1, the
+    # parser expects *nix newlines.
+    if REWRITE_NEWLINES:
+        source = source.replace(RN, N) + N
+    try:
+        tree = ast.parse(source)
+    except SyntaxError:
+        # Let this pop up again in the real import.
+        state.trace("failed to parse: %r" % (fn,))
+        return None
+    rewrite_asserts(tree)
+    try:
+        co = compile(tree, fn.strpath, "exec")
+    except SyntaxError:
+        # It's possible that this error is from some bug in the
+        # assertion rewriting, but I don't know of a fast way to tell.
+        state.trace("failed to compile: %r" % (fn,))
+        return None
+    return co
+
+def _make_rewritten_pyc(state, fn, pyc, co):
+    """Try to dump rewritten code to *pyc*."""
+    if sys.platform.startswith("win"):
+        # Windows grants exclusive access to open files and doesn't have atomic
+        # rename, so just write into the final file.
+        _write_pyc(co, fn, pyc)
+    else:
+        # When not on windows, assume rename is atomic. Dump the code object
+        # into a file specific to this process and atomically replace it.
+        proc_pyc = pyc + "." + str(os.getpid())
+        if _write_pyc(co, fn, proc_pyc):
+            os.rename(proc_pyc, pyc)
+
+def _read_pyc(source, pyc):
+    """Possibly read a py.test pyc containing rewritten code.
+
+    Return rewritten code if successful or None if not.
+    """
+    try:
+        fp = open(pyc, "rb")
+    except IOError:
+        return None
+    try:
+        try:
+            mtime = int(source.mtime())
+            data = fp.read(8)
+        except EnvironmentError:
+            return None
+        # Check for invalid or out of date pyc file.
+        if (len(data) != 8 or
+            data[:4] != imp.get_magic() or
+            struct.unpack("<l", data[4:])[0] != mtime):
+            return None
+        co = marshal.load(fp)
+        if not isinstance(co, types.CodeType):
+            # That's interesting....
+            return None
+        return co
+    finally:
+        fp.close()
+
+
 def rewrite_asserts(mod):
     """Rewrite the assert statements in mod."""
     AssertionRewriter().run(mod)
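
The long comment in find_module above spells out the caching invariant: the pyc in __pycache__ must always be complete and valid, so _make_rewritten_pyc writes into a pid-suffixed temporary file and publishes it with an atomic os.rename, while _read_pyc rejects anything whose 8-byte header (imp.get_magic() plus a little-endian source mtime) does not match. A stripped-down sketch of that write/validate cycle, with illustrative function names rather than pytest's:

    import imp, marshal, os, struct

    def write_cached_code(co, source_mtime, pyc):
        tmp = pyc + "." + str(os.getpid())      # process-private temporary file
        with open(tmp, "wb") as fp:
            fp.write(imp.get_magic())                       # 4-byte magic
            fp.write(struct.pack("<l", int(source_mtime)))  # 4-byte mtime
            marshal.dump(co, fp)                            # marshalled code object
        os.rename(tmp, pyc)                     # atomic publication on POSIX

    def read_cached_code(source_mtime, pyc):
        try:
            fp = open(pyc, "rb")
        except IOError:
            return None
        with fp:
            header = fp.read(8)
            if (len(header) != 8 or header[:4] != imp.get_magic()
                    or struct.unpack("<l", header[4:])[0] != int(source_mtime)):
                return None                     # stale, truncated or foreign pyc
            return marshal.load(fp)
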
@@ -17,13 +261,8 @@
 _saferepr = py.io.saferepr
 from _pytest.assertion.util import format_explanation as _format_explanation
 
-def _format_boolop(operands, explanations, is_or):
-    show_explanations = []
-    for operand, expl in zip(operands, explanations):
-        show_explanations.append(expl)
-        if operand == is_or:
-            break
-    return "(" + (is_or and " or " or " and ").join(show_explanations) + ")"
+def _format_boolop(explanations, is_or):
+    return "(" + (is_or and " or " or " and ").join(explanations) + ")"
 
 def _call_reprcompare(ops, results, expls, each_obj):
     for i, res, expl in zip(range(len(ops)), results, expls):
@@ -109,8 +348,8 @@
                     return
                 lineno += len(doc) - 1
                 expect_docstring = False
-            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 and
-                  item.identifier != "__future__"):
+            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
+                  item.module != "__future__"):
                 lineno = item.lineno
                 break
             pos += 1
@@ -118,9 +357,9 @@
                    for alias in aliases]
         mod.body[pos:pos] = imports
         # Collect asserts.
-        nodes = collections.deque([mod])
+        nodes = [mod]
         while nodes:
-            node = nodes.popleft()
+            node = nodes.pop()
             for name, field in ast.iter_fields(node):
                 if isinstance(field, list):
                     new = []
@@ -143,7 +382,7 @@
         """Get a new variable."""
         # Use a character invalid in python identifiers to avoid clashing.
         name = "@py_assert" + str(next(self.variable_counter))
-        self.variables.add(name)
+        self.variables.append(name)
         return name
 
     def assign(self, expr):
@@ -198,7 +437,8 @@
             # There's already a message. Don't mess with it.
             return [assert_]
         self.statements = []
-        self.variables = set()
+        self.cond_chain = ()
+        self.variables = []
         self.variable_counter = itertools.count()
         self.stack = []
         self.on_failure = []
@@ -220,11 +460,11 @@
         else:
             raise_ = ast.Raise(exc, None, None)
         body.append(raise_)
-        # Delete temporary variables.
-        names = [ast.Name(name, ast.Del()) for name in self.variables]
-        if names:
-            delete = ast.Delete(names)
-            self.statements.append(delete)
+        # Clear temporary variables by setting them to None.
+        if self.variables:
+            variables = [ast.Name(name, ast.Store()) for name in self.variables]
+            clear = ast.Assign(variables, ast.Name("None", ast.Load()))
+            self.statements.append(clear)
         # Fix line numbers.
         for stmt in self.statements:
             set_location(stmt, assert_.lineno, assert_.col_offset)
@@ -240,21 +480,38 @@
         return name, self.explanation_param(expr)
 
     def visit_BoolOp(self, boolop):
-        operands = []
-        explanations = []
+        res_var = self.variable()
+        expl_list = self.assign(ast.List([], ast.Load()))
+        app = ast.Attribute(expl_list, "append", ast.Load())
+        is_or = int(isinstance(boolop.op, ast.Or))
+        body = save = self.statements
+        fail_save = self.on_failure
+        levels = len(boolop.values) - 1
         self.push_format_context()
-        for operand in boolop.values:
-            res, explanation = self.visit(operand)
-            operands.append(res)
-            explanations.append(explanation)
-        expls = ast.Tuple([ast.Str(expl) for expl in explanations], ast.Load())
-        is_or = ast.Num(isinstance(boolop.op, ast.Or))
-        expl_template = self.helper("format_boolop",
-                                    ast.Tuple(operands, ast.Load()), expls,
-                                    is_or)
+        # Process each operand, short-circuiting if needed.
+        for i, v in enumerate(boolop.values):
+            if i:
+                fail_inner = []
+                self.on_failure.append(ast.If(cond, fail_inner, []))
+                self.on_failure = fail_inner
+            self.push_format_context()
+            res, expl = self.visit(v)
+            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
+            expl_format = self.pop_format_context(ast.Str(expl))
+            call = ast.Call(app, [expl_format], [], None, None)
+            self.on_failure.append(ast.Expr(call))
+            if i < levels:
+                cond = res
+                if is_or:
+                    cond = ast.UnaryOp(ast.Not(), cond)
+                inner = []
+                self.statements.append(ast.If(cond, inner, []))
+                self.statements = body = inner
+        self.statements = save
+        self.on_failure = fail_save
+        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
         expl = self.pop_format_context(expl_template)
-        res = self.assign(ast.BoolOp(boolop.op, operands))
-        return res, self.explanation_param(expl)
+        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
 
     def visit_UnaryOp(self, unary):
         pattern = unary_map[unary.op.__class__]
@@ -288,7 +545,7 @@
             new_star, expl = self.visit(call.starargs)
             arg_expls.append("*" + expl)
         if call.kwargs:
-            new_kwarg, expl = self.visit(call.kwarg)
+            new_kwarg, expl = self.visit(call.kwargs)
             arg_expls.append("**" + expl)
         expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
         new_call = ast.Call(new_func, new_args, new_kwargs, new_star, new_kwarg)
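
One detail of the rewriter worth calling out: the temporaries it introduces are named "@py_assert0", "@py_assert1", ... (see variable() above), and the failure path now clears them by assigning None instead of deleting them. The "@" cannot occur in a user-written identifier, so the names can never clash, yet compile() happily accepts an AST that contains them. A tiny standalone demonstration of that trick (not pytest code):

    import ast

    tree = ast.parse("tmp = 1 + 2\nresult = tmp * 2")
    for node in ast.walk(tree):
        # rename every reference to 'tmp' to a name no source code could spell
        if isinstance(node, ast.Name) and node.id == "tmp":
            node.id = "@py_tmp"
    ast.fix_missing_locations(tree)

    ns = {}
    exec(compile(tree, "<rewritten>", "exec"), ns)
    print(ns["result"])      # 6
    print("@py_tmp" in ns)   # True: the odd name lives in the namespace
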
diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py
--- a/_pytest/assertion/util.py
+++ b/_pytest/assertion/util.py
@@ -2,6 +2,7 @@
 
 import py
 
+BuiltinAssertionError = py.builtin.builtins.AssertionError
 
 # The _reprcompare attribute on the util module is used by the new assertion
 # interpretation code and assertion rewriter to detect this plugin was
diff --git a/_pytest/capture.py b/_pytest/capture.py
--- a/_pytest/capture.py
+++ b/_pytest/capture.py
@@ -11,22 +11,22 @@
     group._addoption('-s', action="store_const", const="no", dest="capture",
         help="shortcut for --capture=no.")
 
+ at pytest.mark.tryfirst
+def pytest_cmdline_parse(pluginmanager, args):
+    # we want to perform capturing already for plugin/conftest loading
+    if '-s' in args or "--capture=no" in args:
+        method = "no"
+    elif hasattr(os, 'dup') and '--capture=sys' not in args:
+        method = "fd"
+    else:
+        method = "sys"
+    capman = CaptureManager(method)
+    pluginmanager.register(capman, "capturemanager")
+
 def addouterr(rep, outerr):
-    repr = getattr(rep, 'longrepr', None)
-    if not hasattr(repr, 'addsection'):
-        return
     for secname, content in zip(["out", "err"], outerr):
         if content:
-            repr.addsection("Captured std%s" % secname, content.rstrip())
-
-def pytest_unconfigure(config):
-    # registered in config.py during early conftest.py loading
-    capman = config.pluginmanager.getplugin('capturemanager')
-    while capman._method2capture:
-        name, cap = capman._method2capture.popitem()
-        # XXX logging module may wants to close it itself on process exit
-        # otherwise we could do finalization here and call "reset()".
-        cap.suspend()
+            rep.sections.append(("Captured std%s" % secname, content))
 
 class NoCapture:
     def startall(self):
@@ -39,8 +39,9 @@
         return "", ""
 
 class CaptureManager:
-    def __init__(self):
+    def __init__(self, defaultmethod=None):
         self._method2capture = {}
+        self._defaultmethod = defaultmethod
 
     def _maketempfile(self):
         f = py.std.tempfile.TemporaryFile()
@@ -65,14 +66,6 @@
         else:
             raise ValueError("unknown capturing method: %r" % method)
 
-    def _getmethod_preoptionparse(self, args):
-        if '-s' in args or "--capture=no" in args:
-            return "no"
-        elif hasattr(os, 'dup') and '--capture=sys' not in args:
-            return "fd"
-        else:
-            return "sys"
-
     def _getmethod(self, config, fspath):
         if config.option.capture:
             method = config.option.capture
@@ -85,16 +78,22 @@
             method = "sys"
         return method
 
+    def reset_capturings(self):
+        for name, cap in self._method2capture.items():
+            cap.reset()
+
     def resumecapture_item(self, item):
         method = self._getmethod(item.config, item.fspath)
         if not hasattr(item, 'outerr'):
             item.outerr = ('', '') # we accumulate outerr on the item
         return self.resumecapture(method)
 
-    def resumecapture(self, method):
+    def resumecapture(self, method=None):
         if hasattr(self, '_capturing'):
             raise ValueError("cannot resume, already capturing with %r" %
                 (self._capturing,))
+        if method is None:
+            method = self._defaultmethod
         cap = self._method2capture.get(method)
         self._capturing = method
         if cap is None:
@@ -164,17 +163,6 @@
     def pytest_runtest_teardown(self, item):
         self.resumecapture_item(item)
 
-    def pytest__teardown_final(self, __multicall__, session):
-        method = self._getmethod(session.config, None)
-        self.resumecapture(method)
-        try:
-            rep = __multicall__.execute()
-        finally:
-            outerr = self.suspendcapture()
-        if rep:
-            addouterr(rep, outerr)
-        return rep
-
     def pytest_keyboard_interrupt(self, excinfo):
         if hasattr(self, '_capturing'):
             self.suspendcapture()
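
Capture management now starts at pytest_cmdline_parse time: a CaptureManager with a default method ("fd", "sys" or "no") is registered before conftest files are imported. "fd" capturing means redirecting the real file descriptors, not just sys.stdout. A minimal, self-contained sketch of that idea (an assumption-level illustration, not the py.io machinery pytest actually uses):

    import os, sys, tempfile

    def capture_fd_stdout(func):
        """Run func() with file descriptor 1 redirected into a temp file."""
        saved = os.dup(1)                  # keep the real stdout fd alive
        tmp = tempfile.TemporaryFile()
        try:
            os.dup2(tmp.fileno(), 1)       # writes to fd 1 now land in tmp
            func()
            sys.stdout.flush()
        finally:
            os.dup2(saved, 1)              # restore the original stdout
            os.close(saved)
        tmp.seek(0)
        return tmp.read()

    out = capture_fd_stdout(lambda: os.write(1, b"hello from fd 1\n"))
    print(repr(out))
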
diff --git a/_pytest/config.py b/_pytest/config.py
--- a/_pytest/config.py
+++ b/_pytest/config.py
@@ -8,13 +8,15 @@
 def pytest_cmdline_parse(pluginmanager, args):
     config = Config(pluginmanager)
     config.parse(args)
-    if config.option.debug:
-        config.trace.root.setwriter(sys.stderr.write)
     return config
 
 def pytest_unconfigure(config):
-    for func in config._cleanup:
-        func()
+    while 1:
+        try:
+            fin = config._cleanup.pop()
+        except IndexError:
+            break
+        fin()
 
 class Parser:
     """ Parser for command line arguments. """
@@ -81,6 +83,7 @@
         self._inidict[name] = (help, type, default)
         self._ininames.append(name)
 
+
 class OptionGroup:
     def __init__(self, name, description="", parser=None):
         self.name = name
@@ -256,11 +259,14 @@
         self.hook = self.pluginmanager.hook
         self._inicache = {}
         self._cleanup = []
-    
+
     @classmethod
     def fromdictargs(cls, option_dict, args):
         """ constructor useable for subprocesses. """
         config = cls()
+        # XXX slightly crude way to initialize capturing
+        import _pytest.capture
+        _pytest.capture.pytest_cmdline_parse(config.pluginmanager, args)
         config._preparse(args, addopts=False)
         config.option.__dict__.update(option_dict)
         for x in config.option.plugins:
@@ -285,11 +291,10 @@
 
     def _setinitialconftest(self, args):
         # capture output during conftest init (#issue93)
-        from _pytest.capture import CaptureManager
-        capman = CaptureManager()
-        self.pluginmanager.register(capman, 'capturemanager')
-        # will be unregistered in capture.py's unconfigure()
-        capman.resumecapture(capman._getmethod_preoptionparse(args))
+        # XXX introduce load_conftest hook to avoid needing to know
+        # about capturing plugin here
+        capman = self.pluginmanager.getplugin("capturemanager")
+        capman.resumecapture()
         try:
             try:
                 self._conftest.setinitial(args)
@@ -334,6 +339,7 @@
         # Note that this can only be called once per testing process.
         assert not hasattr(self, 'args'), (
                 "can only parse cmdline args at most once per Config object")
+        self._origargs = args
         self._preparse(args)
         self._parser.hints.extend(self.pluginmanager._hints)
         args = self._parser.parse_setoption(args, self.option)
@@ -341,6 +347,14 @@
             args.append(py.std.os.getcwd())
         self.args = args
 
+    def addinivalue_line(self, name, line):
+        """ add a line to an ini-file option. The option must have been
+        declared but might not yet be set in which case the line becomes the
+        the first line in its value. """
+        x = self.getini(name)
+        assert isinstance(x, list)
+        x.append(line) # modifies the cached list inline
+
     def getini(self, name):
         """ return configuration value from an ini file. If the
         specified name hasn't been registered through a prior ``parse.addini``
@@ -422,7 +436,7 @@
 
 
 def getcfg(args, inibasenames):
-    args = [x for x in args if str(x)[0] != "-"]
+    args = [x for x in args if not str(x).startswith("-")]
     if not args:
         args = [py.path.local()]
     for arg in args:
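
The new Config.addinivalue_line() (see the hunk above) appends a line to an ini option declared as a 'linelist'; the core plugin below uses it to document the tryfirst/trylast markers. A hedged usage example from a project conftest.py, registering a custom marker the same way:

    # conftest.py -- assumes the "markers" ini option declared by the mark plugin
    def pytest_configure(config):
        config.addinivalue_line(
            "markers",
            "slow: marks tests that hit the network or sleep a lot")
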
diff --git a/_pytest/core.py b/_pytest/core.py
--- a/_pytest/core.py
+++ b/_pytest/core.py
@@ -16,11 +16,10 @@
  "junitxml resultlog doctest").split()
 
 class TagTracer:
-    def __init__(self, prefix="[pytest] "):
+    def __init__(self):
         self._tag2proc = {}
         self.writer = None
         self.indent = 0
-        self.prefix = prefix
 
     def get(self, name):
         return TagTracerSub(self, (name,))
@@ -30,7 +29,7 @@
             if args:
                 indent = "  " * self.indent
                 content = " ".join(map(str, args))
-                self.writer("%s%s%s\n" %(self.prefix, indent, content))
+                self.writer("%s%s [%s]\n" %(indent, content, ":".join(tags)))
         try:
             self._tag2proc[tags](tags, args)
         except KeyError:
@@ -212,6 +211,14 @@
             self.register(mod, modname)
             self.consider_module(mod)
 
+    def pytest_configure(self, config):
+        config.addinivalue_line("markers",
+            "tryfirst: mark a hook implementation function such that the "
+            "plugin machinery will try to call it first/as early as possible.")
+        config.addinivalue_line("markers",
+            "trylast: mark a hook implementation function such that the "
+            "plugin machinery will try to call it last/as late as possible.")
+
     def pytest_plugin_registered(self, plugin):
         import pytest
         dic = self.call_plugin(plugin, "pytest_namespace", {}) or {}
@@ -432,10 +439,7 @@
 def _preloadplugins():
     _preinit.append(PluginManager(load=True))
 
-def main(args=None, plugins=None):
-    """ returned exit code integer, after an in-process testing run
-    with the given command line arguments, preloading an optional list
-    of passed in plugin objects. """
+def _prepareconfig(args=None, plugins=None):
     if args is None:
         args = sys.argv[1:]
     elif isinstance(args, py.path.local):
@@ -449,13 +453,19 @@
     else: # subsequent calls to main will create a fresh instance
         _pluginmanager = PluginManager(load=True)
     hook = _pluginmanager.hook
+    if plugins:
+        for plugin in plugins:
+            _pluginmanager.register(plugin)
+    return hook.pytest_cmdline_parse(
+            pluginmanager=_pluginmanager, args=args)
+
+def main(args=None, plugins=None):
+    """ returned exit code integer, after an in-process testing run
+    with the given command line arguments, preloading an optional list
+    of passed in plugin objects. """
     try:
-        if plugins:
-            for plugin in plugins:
-                _pluginmanager.register(plugin)
-        config = hook.pytest_cmdline_parse(
-                pluginmanager=_pluginmanager, args=args)
-        exitstatus = hook.pytest_cmdline_main(config=config)
+        config = _prepareconfig(args, plugins)
+        exitstatus = config.hook.pytest_cmdline_main(config=config)
     except UsageError:
         e = sys.exc_info()[1]
         sys.stderr.write("ERROR: %s\n" %(e.args[0],))
diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py
--- a/_pytest/helpconfig.py
+++ b/_pytest/helpconfig.py
@@ -1,7 +1,7 @@
 """ version info, help messages, tracing configuration.  """
 import py
 import pytest
-import inspect, sys
+import os, inspect, sys
 from _pytest.core import varnames
 
 def pytest_addoption(parser):
@@ -18,7 +18,29 @@
                help="trace considerations of conftest.py files."),
     group.addoption('--debug',
                action="store_true", dest="debug", default=False,
-               help="generate and show internal debugging information.")
+               help="store internal tracing debug information in 'pytestdebug.log'.")
+
+
+def pytest_cmdline_parse(__multicall__):
+    config = __multicall__.execute()
+    if config.option.debug:
+        path = os.path.abspath("pytestdebug.log")
+        f = open(path, 'w')
+        config._debugfile = f
+        f.write("versions pytest-%s, py-%s, python-%s\ncwd=%s\nargs=%s\n\n" %(
+            pytest.__version__, py.__version__, ".".join(map(str, sys.version_info)),
+            os.getcwd(), config._origargs))
+        config.trace.root.setwriter(f.write)
+        sys.stderr.write("writing pytestdebug information to %s\n" % path)
+    return config
+
+ at pytest.mark.trylast
+def pytest_unconfigure(config):
+    if hasattr(config, '_debugfile'):
+        config._debugfile.close()
+        sys.stderr.write("wrote pytestdebug information to %s\n" %
+            config._debugfile.name)
+        config.trace.root.setwriter(None)
 
 
 def pytest_cmdline_main(config):
@@ -34,6 +56,7 @@
     elif config.option.help:
         config.pluginmanager.do_configure(config)
         showhelp(config)
+        config.pluginmanager.do_unconfigure(config)
         return 0
 
 def showhelp(config):
@@ -91,7 +114,7 @@
         verinfo = getpluginversioninfo(config)
         if verinfo:
             lines.extend(verinfo)
-            
+
     if config.option.traceconfig:
         lines.append("active plugins:")
         plugins = []
diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py
--- a/_pytest/hookspec.py
+++ b/_pytest/hookspec.py
@@ -121,16 +121,23 @@
 def pytest_itemstart(item, node=None):
     """ (deprecated, use pytest_runtest_logstart). """
 
-def pytest_runtest_protocol(item):
-    """ implements the standard runtest_setup/call/teardown protocol including
-    capturing exceptions and calling reporting hooks on the results accordingly.
+def pytest_runtest_protocol(item, nextitem):
+    """ implements the runtest_setup/call/teardown protocol for
+    the given test item, including capturing exceptions and calling
+    reporting hooks.
+
+    :arg item: test item for which the runtest protocol is performed.
+
+    :arg nextitem: the scheduled-to-be-next test item (or None if this
+                  is the end my friend).  This argument is passed on to
+                  :py:func:`pytest_runtest_teardown`.
 
     :return boolean: True if no further hook implementations should be invoked.
     """
 pytest_runtest_protocol.firstresult = True
 
 def pytest_runtest_logstart(nodeid, location):
-    """ signal the start of a test run. """
+    """ signal the start of running a single test item. """
 
 def pytest_runtest_setup(item):
     """ called before ``pytest_runtest_call(item)``. """
@@ -138,8 +145,14 @@
 def pytest_runtest_call(item):
     """ called to execute the test ``item``. """
 
-def pytest_runtest_teardown(item):
-    """ called after ``pytest_runtest_call``. """
+def pytest_runtest_teardown(item, nextitem):
+    """ called after ``pytest_runtest_call``.
+
+    :arg nextitem: the scheduled-to-be-next test item (None if no further
+                  test item is scheduled).  This argument can be used to
+                  perform exact teardowns, i.e. calling just enough finalizers
+                  so that nextitem only needs to call setup-functions.
+    """
 
 def pytest_runtest_makereport(item, call):
     """ return a :py:class:`_pytest.runner.TestReport` object
@@ -149,15 +162,8 @@
 pytest_runtest_makereport.firstresult = True
 
 def pytest_runtest_logreport(report):
-    """ process item test report. """
-
-# special handling for final teardown - somewhat internal for now
-def pytest__teardown_final(session):
-    """ called before test session finishes. """
-pytest__teardown_final.firstresult = True
-
-def pytest__teardown_final_logerror(report, session):
-    """ called if runtest_teardown_final failed. """
+    """ process a test setup/call/teardown report relating to
+    the respective phase of executing a test. """
 
 # -------------------------------------------------------------------------
 # test session related hooks
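
The runtest hooks now carry a nextitem argument so that teardown can be "exact": only the finalizers the upcoming test cannot reuse have to run, and None marks the end of the session. A conftest-level sketch that merely observes the new signature (purely illustrative):

    # conftest.py -- uses the extended hook signature documented above
    def pytest_runtest_teardown(item, nextitem):
        if nextitem is None:
            print("tearing down %s; it was the last item" % item.nodeid)
        else:
            print("tearing down %s; %s runs next" % (item.nodeid, nextitem.nodeid))
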
diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py
--- a/_pytest/junitxml.py
+++ b/_pytest/junitxml.py
@@ -25,21 +25,39 @@
     long = int
 
 
+class Junit(py.xml.Namespace):
+    pass
+
+
 # We need to get the subset of the invalid unicode ranges according to
 # XML 1.0 which are valid in this python build.  Hence we calculate
 # this dynamically instead of hardcoding it.  The spec range of valid
 # chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
 #                    | [#x10000-#x10FFFF]
-_illegal_unichrs = [(0x00, 0x08), (0x0B, 0x0C), (0x0E, 0x19),
-                   (0xD800, 0xDFFF), (0xFDD0, 0xFFFF)]
-_illegal_ranges = [unicode("%s-%s") % (unichr(low), unichr(high))
-                  for (low, high) in _illegal_unichrs
+_legal_chars = (0x09, 0x0A, 0x0d)
+_legal_ranges = (
+    (0x20, 0xD7FF),
+    (0xE000, 0xFFFD),
+    (0x10000, 0x10FFFF),
+)
+_legal_xml_re = [unicode("%s-%s") % (unichr(low), unichr(high))
+                  for (low, high) in _legal_ranges
                   if low < sys.maxunicode]
-illegal_xml_re = re.compile(unicode('[%s]') %
-                            unicode('').join(_illegal_ranges))
-del _illegal_unichrs
-del _illegal_ranges
+_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
+illegal_xml_re = re.compile(unicode('[^%s]') %
+                            unicode('').join(_legal_xml_re))
+del _legal_chars
+del _legal_ranges
+del _legal_xml_re
 
+def bin_xml_escape(arg):
+    def repl(matchobj):
+        i = ord(matchobj.group())
+        if i <= 0xFF:
+            return unicode('#x%02X') % i
+        else:
+            return unicode('#x%04X') % i
+    return illegal_xml_re.sub(repl, py.xml.escape(arg))
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting")
@@ -68,117 +86,97 @@
         logfile = os.path.expanduser(os.path.expandvars(logfile))
         self.logfile = os.path.normpath(logfile)
         self.prefix = prefix
-        self.test_logs = []
+        self.tests = []
         self.passed = self.skipped = 0
         self.failed = self.errors = 0
-        self._durations = {}
 
     def _opentestcase(self, report):
         names = report.nodeid.split("::")
         names[0] = names[0].replace("/", '.')
-        names = tuple(names)
-        d = {'time': self._durations.pop(report.nodeid, "0")}
         names = [x.replace(".py", "") for x in names if x != "()"]
         classnames = names[:-1]
         if self.prefix:
             classnames.insert(0, self.prefix)
-        d['classname'] = ".".join(classnames)
-        d['name'] = py.xml.escape(names[-1])
-        attrs = ['%s="%s"' % item for item in sorted(d.items())]
-        self.test_logs.append("\n<testcase %s>" % " ".join(attrs))
+        self.tests.append(Junit.testcase(
+            classname=".".join(classnames),
+            name=names[-1],
+            time=getattr(report, 'duration', 0)
+        ))
 
-    def _closetestcase(self):
-        self.test_logs.append("</testcase>")
-
-    def appendlog(self, fmt, *args):
-        def repl(matchobj):
-            i = ord(matchobj.group())
-            if i <= 0xFF:
-                return unicode('#x%02X') % i
-            else:
-                return unicode('#x%04X') % i
-        args = tuple([illegal_xml_re.sub(repl, py.xml.escape(arg))
-                      for arg in args])
-        self.test_logs.append(fmt % args)
+    def append(self, obj):
+        self.tests[-1].append(obj)
 
     def append_pass(self, report):
         self.passed += 1
-        self._opentestcase(report)
-        self._closetestcase()
 
     def append_failure(self, report):
-        self._opentestcase(report)
         #msg = str(report.longrepr.reprtraceback.extraline)
         if "xfail" in report.keywords:
-            self.appendlog(
-                '<skipped message="xfail-marked test passes unexpectedly"/>')
+            self.append(
+                Junit.skipped(message="xfail-marked test passes unexpectedly"))
             self.skipped += 1
         else:
-            self.appendlog('<failure message="test failure">%s</failure>',
-                report.longrepr)
+            sec = dict(report.sections)
+            fail = Junit.failure(message="test failure")
+            fail.append(str(report.longrepr))
+            self.append(fail)
+            for name in ('out', 'err'):
+                content = sec.get("Captured std%s" % name)
+                if content:
+                    tag = getattr(Junit, 'system-'+name)
+                    self.append(tag(bin_xml_escape(content)))
             self.failed += 1
-        self._closetestcase()
 
     def append_collect_failure(self, report):
-        self._opentestcase(report)
         #msg = str(report.longrepr.reprtraceback.extraline)
-        self.appendlog('<failure message="collection failure">%s</failure>',
-            report.longrepr)
-        self._closetestcase()
+        self.append(Junit.failure(str(report.longrepr),
+                                  message="collection failure"))
         self.errors += 1
 
     def append_collect_skipped(self, report):
-        self._opentestcase(report)
         #msg = str(report.longrepr.reprtraceback.extraline)
-        self.appendlog('<skipped message="collection skipped">%s</skipped>',
-            report.longrepr)
-        self._closetestcase()
+        self.append(Junit.skipped(str(report.longrepr),
+                                  message="collection skipped"))
         self.skipped += 1
 
     def append_error(self, report):
-        self._opentestcase(report)
-        self.appendlog('<error message="test setup failure">%s</error>',
-            report.longrepr)
-        self._closetestcase()
+        self.append(Junit.error(str(report.longrepr),
+                                message="test setup failure"))
         self.errors += 1
 
     def append_skipped(self, report):
-        self._opentestcase(report)
         if "xfail" in report.keywords:
-            self.appendlog(
-                '<skipped message="expected test failure">%s</skipped>',
-                report.keywords['xfail'])
+            self.append(Junit.skipped(str(report.keywords['xfail']),
+                                      message="expected test failure"))
         else:
             filename, lineno, skipreason = report.longrepr
             if skipreason.startswith("Skipped: "):
                 skipreason = skipreason[9:]
-            self.appendlog('<skipped type="pytest.skip" '
-                           'message="%s">%s</skipped>',
-                skipreason, "%s:%s: %s" % report.longrepr,
-                )
-        self._closetestcase()
+            self.append(
+                Junit.skipped("%s:%s: %s" % report.longrepr,
+                              type="pytest.skip",
+                              message=skipreason
+                ))
         self.skipped += 1
 
     def pytest_runtest_logreport(self, report):
         if report.passed:
-            self.append_pass(report)
+            if report.when == "call": # ignore setup/teardown
+                self._opentestcase(report)
+                self.append_pass(report)
         elif report.failed:
+            self._opentestcase(report)
             if report.when != "call":
                 self.append_error(report)
             else:
                 self.append_failure(report)
         elif report.skipped:
+            self._opentestcase(report)
             self.append_skipped(report)
 
-    def pytest_runtest_call(self, item, __multicall__):
-        start = time.time()
-        try:
-            return __multicall__.execute()
-        finally:
-            self._durations[item.nodeid] = time.time() - start
-
     def pytest_collectreport(self, report):
         if not report.passed:
+            self._opentestcase(report)
             if report.failed:
                 self.append_collect_failure(report)
             else:
@@ -187,10 +185,11 @@
     def pytest_internalerror(self, excrepr):
         self.errors += 1
         data = py.xml.escape(excrepr)
-        self.test_logs.append(
-            '\n<testcase classname="pytest" name="internal">'
-            '    <error message="internal error">'
-            '%s</error></testcase>' % data)
+        self.tests.append(
+            Junit.testcase(
+                    Junit.error(data, message="internal error"),
+                    classname="pytest",
+                    name="internal"))
 
     def pytest_sessionstart(self, session):
         self.suite_start_time = time.time()
@@ -204,17 +203,17 @@
         suite_stop_time = time.time()
         suite_time_delta = suite_stop_time - self.suite_start_time
         numtests = self.passed + self.failed
+
         logfile.write('<?xml version="1.0" encoding="utf-8"?>')
-        logfile.write('<testsuite ')
-        logfile.write('name="" ')
-        logfile.write('errors="%i" ' % self.errors)
-        logfile.write('failures="%i" ' % self.failed)
-        logfile.write('skips="%i" ' % self.skipped)
-        logfile.write('tests="%i" ' % numtests)
-        logfile.write('time="%.3f"' % suite_time_delta)
-        logfile.write(' >')
-        logfile.writelines(self.test_logs)
-        logfile.write('</testsuite>')
+        logfile.write(Junit.testsuite(
+            self.tests,
+            name="",
+            errors=self.errors,
+            failures=self.failed,
+            skips=self.skipped,
+            tests=numtests,
+            time="%.3f" % suite_time_delta,
+        ).unicode(indent=0))
         logfile.close()
 
     def pytest_terminal_summary(self, terminalreporter):
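
The junitxml changes replace the old blacklist of illegal unicode ranges with a whitelist of the characters XML 1.0 allows, escaping everything else as #xNN via bin_xml_escape(). The core of that filter in isolation, spelled in the same Python-2 idiom as the diff (unichr/unicode), with the py.xml.escape step left out:

    import re
    import sys

    _legal = [(0x09, 0x09), (0x0A, 0x0A), (0x0D, 0x0D),
              (0x20, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF)]
    _illegal_re = re.compile(
        unicode("[^%s]") % unicode("").join(
            unicode("%s-%s") % (unichr(lo), unichr(hi))
            for lo, hi in _legal if lo < sys.maxunicode))

    def bin_xml_escape(text):
        def repl(match):
            i = ord(match.group())
            if i <= 0xFF:
                return unicode("#x%02X") % i
            return unicode("#x%04X") % i
        return _illegal_re.sub(repl, text)

    print(bin_xml_escape(unicode("ok\x01bad")))   # -> ok#x01bad
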
diff --git a/_pytest/main.py b/_pytest/main.py
--- a/_pytest/main.py
+++ b/_pytest/main.py
@@ -2,7 +2,7 @@
 
 import py
 import pytest, _pytest
-import os, sys
+import os, sys, imp
 tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
 
 # exitcodes for the command line
@@ -11,6 +11,8 @@
 EXIT_INTERRUPTED = 2
 EXIT_INTERNALERROR = 3
 
+name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
+
 def pytest_addoption(parser):
     parser.addini("norecursedirs", "directory patterns to avoid for recursion",
         type="args", default=('.*', 'CVS', '_darcs', '{arch}'))
@@ -27,6 +29,9 @@
                action="store", type="int", dest="maxfail", default=0,
                help="exit after first num failures or errors.")
 
+    group._addoption('--strict', action="store_true",
+               help="run pytest in strict mode, warnings become errors.")
+
     group = parser.getgroup("collect", "collection")
     group.addoption('--collectonly',
         action="store_true", dest="collectonly",
@@ -48,7 +53,7 @@
 def pytest_namespace():
     collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
     return dict(collect=collect)
-        
+
 def pytest_configure(config):
     py.test.config = config # compatibility
     if config.option.exitfirst:
@@ -77,11 +82,11 @@
         session.exitstatus = EXIT_INTERNALERROR
         if excinfo.errisinstance(SystemExit):
             sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
+    if initstate >= 2:
+        config.hook.pytest_sessionfinish(session=session,
+            exitstatus=session.exitstatus or (session._testsfailed and 1))
     if not session.exitstatus and session._testsfailed:
         session.exitstatus = EXIT_TESTSFAILED
-    if initstate >= 2:
-        config.hook.pytest_sessionfinish(session=session,
-            exitstatus=session.exitstatus)
     if initstate >= 1:
         config.pluginmanager.do_unconfigure(config)
     return session.exitstatus
@@ -101,8 +106,12 @@
 def pytest_runtestloop(session):
     if session.config.option.collectonly:
         return True
-    for item in session.session.items:
-        item.config.hook.pytest_runtest_protocol(item=item)
+    for i, item in enumerate(session.items):
+        try:
+            nextitem = session.items[i+1]
+        except IndexError:
+            nextitem = None
+        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
         if session.shouldstop:
             raise session.Interrupted(session.shouldstop)
     return True
@@ -132,7 +141,7 @@
         return getattr(pytest, name)
     return property(fget, None, None,
         "deprecated attribute %r, use pytest.%s" % (name,name))
-    
+
 class Node(object):
     """ base class for all Nodes in the collection tree.
     Collector subclasses have children, Items are terminal nodes."""
@@ -143,13 +152,13 @@
 
         #: the parent collector node.
         self.parent = parent
-        
+
         #: the test config object
         self.config = config or parent.config
 
         #: the collection this node is part of
         self.session = session or parent.session
-        
+
         #: filesystem path where this node was collected from
         self.fspath = getattr(parent, 'fspath', None)
         self.ihook = self.session.gethookproxy(self.fspath)
@@ -224,13 +233,13 @@
     def listchain(self):
         """ return list of all parent collectors up to self,
             starting from root of collection tree. """
-        l = [self]
-        while 1:
-            x = l[0]
-            if x.parent is not None: # and x.parent.parent is not None:
-                l.insert(0, x.parent)
-            else:
-                return l
+        chain = []
+        item = self
+        while item is not None:
+            chain.append(item)
+            item = item.parent
+        chain.reverse()
+        return chain
 
     def listnames(self):
         return [x.name for x in self.listchain()]
@@ -325,6 +334,8 @@
     """ a basic test invocation item. Note that for a single function
     there might be multiple test invocation items.
     """
+    nextitem = None
+
     def reportinfo(self):
         return self.fspath, None, ""
 
@@ -399,6 +410,7 @@
         self._notfound = []
         self._initialpaths = set()
         self._initialparts = []
+        self.items = items = []
         for arg in args:
             parts = self._parsearg(arg)
             self._initialparts.append(parts)
@@ -414,7 +426,6 @@
         if not genitems:
             return rep.result
         else:
-            self.items = items = []
             if rep.passed:
                 for node in rep.result:
                     self.items.extend(self.genitems(node))
@@ -469,16 +480,29 @@
         return True
 
     def _tryconvertpyarg(self, x):
-        try:
-            mod = __import__(x, None, None, ['__doc__'])
-        except (ValueError, ImportError):
-            return x
-        p = py.path.local(mod.__file__)
-        if p.purebasename == "__init__":
-            p = p.dirpath()
-        else:
-            p = p.new(basename=p.purebasename+".py")
-        return str(p)
+        mod = None
+        path = [os.path.abspath('.')] + sys.path
+        for name in x.split('.'):
+            # ignore anything that's not a proper name here
+            # else something like --pyargs will mess up '.'
+            # since imp.find_module will actually sometimes work for it
+            # but it's supposed to be considered a filesystem path
+            # not a package
+            if name_re.match(name) is None:
+                return x
+            try:
+                fd, mod, type_ = imp.find_module(name, path)
+            except ImportError:
+                return x
+            else:
+                if fd is not None:
+                    fd.close()
+
+            if type_[2] != imp.PKG_DIRECTORY:
+                path = [os.path.dirname(mod)]
+            else:
+                path = [mod]
+        return mod
 
     def _parsearg(self, arg):
         """ return (fspath, names) tuple after checking the file exists. """
@@ -496,7 +520,7 @@
             raise pytest.UsageError(msg + arg)
         parts[0] = path
         return parts
-   
+
     def matchnodes(self, matching, names):
         self.trace("matchnodes", matching, names)
         self.trace.root.indent += 1
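
_tryconvertpyarg() above now resolves a dotted --pyargs name one component at a time with imp.find_module, narrowing the search path to the package directory after each hit instead of importing the module. The same walk in isolation (Python-2-era imp API, error handling omitted; the function name is illustrative):

    import imp
    import os
    import sys

    def dotted_name_to_path(dotted):
        """Best-effort translation of 'pkg.sub.mod' into a filesystem path."""
        path = [os.path.abspath(".")] + sys.path
        location = None
        for name in dotted.split("."):
            fd, location, desc = imp.find_module(name, path)
            if fd is not None:
                fd.close()
            if desc[2] == imp.PKG_DIRECTORY:
                path = [location]                 # descend into the package dir
            else:
                path = [os.path.dirname(location)]
        return location

    print(dotted_name_to_path("logging.handlers"))
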
diff --git a/_pytest/mark.py b/_pytest/mark.py
--- a/_pytest/mark.py
+++ b/_pytest/mark.py
@@ -14,12 +14,37 @@
              "Terminate expression with ':' to make the first match match "
              "all subsequent tests (usually file-order). ")
 
+    group._addoption("-m",
+        action="store", dest="markexpr", default="", metavar="MARKEXPR",
+        help="only run tests matching given mark expression.  "
+             "example: -m 'mark1 and not mark2'."
+             )
+
+    group.addoption("--markers", action="store_true", help=
+        "show markers (builtin, plugin and per-project ones).")
+
+    parser.addini("markers", "markers for test functions", 'linelist')
+
+def pytest_cmdline_main(config):
+    if config.option.markers:
+        config.pluginmanager.do_configure(config)
+        tw = py.io.TerminalWriter()
+        for line in config.getini("markers"):
+            name, rest = line.split(":", 1)
+            tw.write("@pytest.mark.%s:" %  name, bold=True)
+            tw.line(rest)
+            tw.line()
+        config.pluginmanager.do_unconfigure(config)
+        return 0
+pytest_cmdline_main.tryfirst = True
+
 def pytest_collection_modifyitems(items, config):
     keywordexpr = config.option.keyword
-    if not keywordexpr:
+    matchexpr = config.option.markexpr
+    if not keywordexpr and not matchexpr:
         return
     selectuntil = False
-    if keywordexpr[-1] == ":":
+    if keywordexpr[-1:] == ":":
         selectuntil = True
         keywordexpr = keywordexpr[:-1]
 
@@ -29,21 +54,38 @@
         if keywordexpr and skipbykeyword(colitem, keywordexpr):
             deselected.append(colitem)
         else:
-            remaining.append(colitem)
             if selectuntil:
                 keywordexpr = None
+            if matchexpr:
+                if not matchmark(colitem, matchexpr):
+                    deselected.append(colitem)
+                    continue
+            remaining.append(colitem)
 
     if deselected:
         config.hook.pytest_deselected(items=deselected)
         items[:] = remaining
 
+class BoolDict:
+    def __init__(self, mydict):
+        self._mydict = mydict
+    def __getitem__(self, name):
+        return name in self._mydict
+
+def matchmark(colitem, matchexpr):
+    return eval(matchexpr, {}, BoolDict(colitem.obj.__dict__))
+
+def pytest_configure(config):
+    if config.option.strict:
+        pytest.mark._config = config
+
 def skipbykeyword(colitem, keywordexpr):
     """ return True if they given keyword expression means to
         skip this collector/item.
     """
     if not keywordexpr:
         return
-    
+
     itemkeywords = getkeywords(colitem)
     for key in filter(None, keywordexpr.split()):
         eor = key[:1] == '-'
@@ -77,15 +119,31 @@
          @py.test.mark.slowtest
          def test_function():
             pass
-  
+
     will set a 'slowtest' :class:`MarkInfo` object
     on the ``test_function`` object. """
 
     def __getattr__(self, name):
         if name[0] == "_":
             raise AttributeError(name)
+        if hasattr(self, '_config'):
+            self._check(name)
         return MarkDecorator(name)
 
+    def _check(self, name):
+        try:
+            if name in self._markers:
+                return
+        except AttributeError:
+            pass
+        self._markers = l = set()
+        for line in self._config.getini("markers"):
+            beginning = line.split(":", 1)
+            x = beginning[0].split("(", 1)[0]
+            l.add(x)
+        if name not in self._markers:
+            raise AttributeError("%r not a registered marker" % (name,))
+
 class MarkDecorator:
     """ A decorator for test functions and test classes.  When applied
     it will create :class:`MarkInfo` objects which may be
@@ -133,8 +191,7 @@
                         holder = MarkInfo(self.markname, self.args, self.kwargs)
                         setattr(func, self.markname, holder)
                     else:
-                        holder.kwargs.update(self.kwargs)
-                        holder.args += self.args
+                        holder.add(self.args, self.kwargs)
                 return func
         kw = self.kwargs.copy()
         kw.update(kwargs)
@@ -150,27 +207,20 @@
         self.args = args
         #: keyword argument dictionary, empty if nothing specified
         self.kwargs = kwargs
+        self._arglist = [(args, kwargs.copy())]
 
     def __repr__(self):
         return "<MarkInfo %r args=%r kwargs=%r>" % (
                 self.name, self.args, self.kwargs)
 
-def pytest_itemcollected(item):
-    if not isinstance(item, pytest.Function):
-        return
-    try:
-        func = item.obj.__func__
-    except AttributeError:
-        func = getattr(item.obj, 'im_func', item.obj)
-    pyclasses = (pytest.Class, pytest.Module)
-    for node in item.listchain():
-        if isinstance(node, pyclasses):
-            marker = getattr(node.obj, 'pytestmark', None)
-            if marker is not None:
-                if isinstance(marker, list):
-                    for mark in marker:
-                        mark(func)
-                else:
-                    marker(func)
-        node = node.parent
-    item.keywords.update(py.builtin._getfuncdict(func))
+    def add(self, args, kwargs):
+        """ add a MarkInfo with the given args and kwargs. """
+        self._arglist.append((args, kwargs))
+        self.args += args
+        self.kwargs.update(kwargs)
+
+    def __iter__(self):
+        """ yield MarkInfo objects each relating to a marking-call. """
+        for args, kwargs in self._arglist:
+            yield MarkInfo(self.name, args, kwargs)
+
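
A minimal usage sketch of the new -m mark selection and the "markers" ini
registration added above; matchmark() simply evaluates the -m expression
against the attribute names that the mark decorators set on the test function,
so boolean expressions over mark names work.  File, mark names and ini
contents below are illustrative only:

    # test_marks.py
    import pytest

    @pytest.mark.webtest
    def test_send_http():
        pass

    def test_something_quick():
        pass

    # pytest.ini -- registers the marker so it shows up under --markers
    # [pytest]
    # markers =
    #     webtest: mark a test as talking to a web service.

    # example invocations:
    #   py.test -m webtest          # runs only test_send_http
    #   py.test -m 'not webtest'    # deselects it; the summary reports the -m expression
    #   py.test --markers           # lists the registered markers
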
diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py
--- a/_pytest/monkeypatch.py
+++ b/_pytest/monkeypatch.py
@@ -13,6 +13,7 @@
         monkeypatch.setenv(name, value, prepend=False)
         monkeypatch.delenv(name, value, raising=True)
         monkeypatch.syspath_prepend(path)
+        monkeypatch.chdir(path)
 
     All modifications will be undone after the requesting
     test function has finished. The ``raising``
@@ -30,6 +31,7 @@
     def __init__(self):
         self._setattr = []
         self._setitem = []
+        self._cwd = None
 
     def setattr(self, obj, name, value, raising=True):
         """ set attribute ``name`` on ``obj`` to ``value``, by default
@@ -83,6 +85,17 @@
             self._savesyspath = sys.path[:]
         sys.path.insert(0, str(path))
 
+    def chdir(self, path):
+        """ change the current working directory to the specified path
+        path can be a string or a py.path.local object
+        """
+        if self._cwd is None:
+            self._cwd = os.getcwd()
+        if hasattr(path, "chdir"):
+            path.chdir()
+        else:
+            os.chdir(path)
+
     def undo(self):
         """ undo previous changes.  This call consumes the
         undo stack.  Calling it a second time has no effect unless
@@ -95,9 +108,17 @@
         self._setattr[:] = []
         for dictionary, name, value in self._setitem:
             if value is notset:
-                del dictionary[name]
+                try:
+                    del dictionary[name]
+                except KeyError:
+                    pass # was already deleted, so we have the desired state
             else:
                 dictionary[name] = value
         self._setitem[:] = []
         if hasattr(self, '_savesyspath'):
             sys.path[:] = self._savesyspath
+            del self._savesyspath
+
+        if self._cwd is not None:
+            os.chdir(self._cwd)
+            self._cwd = None
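
A short sketch of the new monkeypatch.chdir() helper in use; the automatic
undo() after the test restores the previous working directory.  The test name
is illustrative:

    # test_chdir.py
    import os

    def test_runs_inside_tmpdir(tmpdir, monkeypatch):
        monkeypatch.chdir(tmpdir)        # accepts a string or a py.path.local
        assert os.getcwd() == str(tmpdir.realpath())
        # the original cwd comes back via the automatic undo() after the test
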
diff --git a/_pytest/nose.py b/_pytest/nose.py
--- a/_pytest/nose.py
+++ b/_pytest/nose.py
@@ -13,6 +13,7 @@
             call.excinfo = call2.excinfo
 
 
+@pytest.mark.trylast
 def pytest_runtest_setup(item):
     if isinstance(item, (pytest.Function)):
         if isinstance(item.parent, pytest.Generator):
diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py
--- a/_pytest/pastebin.py
+++ b/_pytest/pastebin.py
@@ -38,7 +38,11 @@
         del tr._tw.__dict__['write']
 
 def getproxy():
-    return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
+    if sys.version_info < (3, 0):
+        from xmlrpclib import ServerProxy
+    else:
+        from xmlrpc.client import ServerProxy
+    return ServerProxy(url.xmlrpc).pastes
 
 def pytest_terminal_summary(terminalreporter):
     if terminalreporter.config.option.pastebin != "failed":
diff --git a/_pytest/pdb.py b/_pytest/pdb.py
--- a/_pytest/pdb.py
+++ b/_pytest/pdb.py
@@ -19,11 +19,13 @@
 class pytestPDB:
     """ Pseudo PDB that defers to the real pdb. """
     item = None
+    collector = None
 
     def set_trace(self):
         """ invoke PDB set_trace debugging, dropping any IO capturing. """
         frame = sys._getframe().f_back
-        item = getattr(self, 'item', None)
+        item = self.item or self.collector
+
         if item is not None:
             capman = item.config.pluginmanager.getplugin("capturemanager")
             out, err = capman.suspendcapture()
@@ -38,6 +40,14 @@
     pytestPDB.item = item
 pytest_runtest_setup = pytest_runtest_call = pytest_runtest_teardown = pdbitem
 
+@pytest.mark.tryfirst
+def pytest_make_collect_report(__multicall__, collector):
+    try:
+        pytestPDB.collector = collector
+        return __multicall__.execute()
+    finally:
+        pytestPDB.collector = None
+
 def pytest_runtest_makereport():
     pytestPDB.item = None
     
@@ -60,7 +70,13 @@
         tw.sep(">", "traceback")
         rep.toterminal(tw)
         tw.sep(">", "entering PDB")
-        post_mortem(call.excinfo._excinfo[2])
+        # A doctest.UnexpectedException is not useful for post_mortem.
+        # Use the underlying exception instead:
+        if isinstance(call.excinfo.value, py.std.doctest.UnexpectedException):
+            tb = call.excinfo.value.exc_info[2]
+        else:
+            tb = call.excinfo._excinfo[2]
+        post_mortem(tb)
         rep._pdbshown = True
         return rep
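
The hunk above special-cases doctest failures: doctest wraps the real error in
doctest.UnexpectedException, whose exc_info attribute carries the traceback of
the failing example.  A tiny sketch of the same unwrapping, with an
illustrative helper name:

    import doctest

    def usable_traceback(excvalue, default_tb):
        # prefer the traceback of the example that actually blew up
        if isinstance(excvalue, doctest.UnexpectedException):
            return excvalue.exc_info[2]
        return default_tb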
 
diff --git a/_pytest/pytester.py b/_pytest/pytester.py
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -25,6 +25,7 @@
         _pytest_fullpath
     except NameError:
         _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
 
 def pytest_funcarg___pytest(request):
     return PytestArg(request)
@@ -313,16 +314,6 @@
             result.extend(session.genitems(colitem))
         return result
 
-    def inline_genitems(self, *args):
-        #config = self.parseconfig(*args)
-        config = self.parseconfigure(*args)
-        rec = self.getreportrecorder(config)
-        session = Session(config)
-        config.hook.pytest_sessionstart(session=session)
-        session.perform_collect()
-        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
-        return session.items, rec
-
     def runitem(self, source):
         # used from runner functional tests
         item = self.getitem(source)
@@ -343,64 +334,57 @@
         l = list(args) + [p]
         reprec = self.inline_run(*l)
         reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 1, reports
-        return reports[0]
+        assert len(reports) == 3, reports # setup/call/teardown
+        return reports[1]
+
+    def inline_genitems(self, *args):
+        return self.inprocess_run(list(args) + ['--collectonly'])
 
     def inline_run(self, *args):
-        args = ("-s", ) + args # otherwise FD leakage
-        config = self.parseconfig(*args)
-        reprec = self.getreportrecorder(config)
-        #config.pluginmanager.do_configure(config)
-        config.hook.pytest_cmdline_main(config=config)
-        #config.pluginmanager.do_unconfigure(config)
-        return reprec
+        items, rec = self.inprocess_run(args)
+        return rec
 
-    def config_preparse(self):
-        config = self.Config()
-        for plugin in self.plugins:
-            if isinstance(plugin, str):
-                config.pluginmanager.import_plugin(plugin)
-            else:
-                if isinstance(plugin, dict):
-                    plugin = PseudoPlugin(plugin)
-                if not config.pluginmanager.isregistered(plugin):
-                    config.pluginmanager.register(plugin)
-        return config
+    def inprocess_run(self, args, plugins=None):
+        rec = []
+        items = []
+        class Collect:
+            def pytest_configure(x, config):
+                rec.append(self.getreportrecorder(config))
+            def pytest_itemcollected(self, item):
+                items.append(item)
+        if not plugins:
+            plugins = []
+        plugins.append(Collect())
+        ret = self.pytestmain(list(args), plugins=plugins)
+        reprec = rec[0]
+        reprec.ret = ret
+        assert len(rec) == 1
+        return items, reprec
 
     def parseconfig(self, *args):
-        if not args:
-            args = (self.tmpdir,)
-        config = self.config_preparse()
-        args = list(args)
+        args = [str(x) for x in args]
         for x in args:
             if str(x).startswith('--basetemp'):
                 break
         else:
             args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
-        config.parse(args)
+        import _pytest.core
+        config = _pytest.core._prepareconfig(args, self.plugins)
+        # the in-process pytest invocation needs to avoid leaking FDs
+        # so we register a "reset_capturings" callmon the capturing manager
+        # and make sure it gets called
+        config._cleanup.append(
+            config.pluginmanager.getplugin("capturemanager").reset_capturings)
+        import _pytest.config
+        self.request.addfinalizer(
+            lambda: _pytest.config.pytest_unconfigure(config))
         return config
 
-    def reparseconfig(self, args=None):
-        """ this is used from tests that want to re-invoke parse(). """
-        if not args:
-            args = [self.tmpdir]
-        oldconfig = getattr(py.test, 'config', None)
-        try:
-            c = py.test.config = self.Config()
-            c.basetemp = py.path.local.make_numbered_dir(prefix="reparse",
-                keep=0, rootdir=self.tmpdir, lock_timeout=None)
-            c.parse(args)
-            c.pluginmanager.do_configure(c)
-            self.request.addfinalizer(lambda: c.pluginmanager.do_unconfigure(c))
-            return c
-        finally:
-            py.test.config = oldconfig
-
     def parseconfigure(self, *args):
         config = self.parseconfig(*args)
         config.pluginmanager.do_configure(config)
         self.request.addfinalizer(lambda:
-            config.pluginmanager.do_unconfigure(config))
+        config.pluginmanager.do_unconfigure(config))
         return config
 
     def getitem(self,  source, funcname="test_func"):
@@ -420,7 +404,6 @@
             self.makepyfile(__init__ = "#")
         self.config = config = self.parseconfigure(path, *configargs)
         node = self.getnode(config, path)
-        #config.pluginmanager.do_unconfigure(config)
         return node
 
     def collect_by_name(self, modcol, name):
@@ -437,9 +420,16 @@
         return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
 
     def pytestmain(self, *args, **kwargs):
-        ret = pytest.main(*args, **kwargs)
-        if ret == 2:
-            raise KeyboardInterrupt()
+        class ResetCapturing:
+            @pytest.mark.trylast
+            def pytest_unconfigure(self, config):
+                capman = config.pluginmanager.getplugin("capturemanager")
+                capman.reset_capturings()
+        plugins = kwargs.setdefault("plugins", [])
+        rc = ResetCapturing()
+        plugins.append(rc)
+        return pytest.main(*args, **kwargs)
+
     def run(self, *cmdargs):
         return self._run(*cmdargs)
 
@@ -528,6 +518,8 @@
         pexpect = py.test.importorskip("pexpect", "2.4")
         if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
             pytest.skip("pypy-64 bit not supported")
+        if sys.platform == "darwin":
+            pytest.xfail("pexpect does not work reliably on darwin?!")
         logfile = self.tmpdir.join("spawn.out")
         child = pexpect.spawn(cmd, logfile=logfile.open("w"))
         child.timeout = expect_timeout
@@ -540,10 +532,6 @@
             return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
                     py.io.saferepr(out),)
 
-class PseudoPlugin:
-    def __init__(self, vars):
-        self.__dict__.update(vars)
-
 class ReportRecorder(object):
     def __init__(self, hook):
         self.hook = hook
@@ -565,10 +553,17 @@
     def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
         return [x.report for x in self.getcalls(names)]
 
-    def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None):
+    def matchreport(self, inamepart="",
+        names="pytest_runtest_logreport pytest_collectreport", when=None):
         """ return a testreport whose dotted import path matches """
         l = []
         for rep in self.getreports(names=names):
+            try:
+                if not when and rep.when != "call" and rep.passed:
+                    # setup/teardown passing reports - let's ignore those
+                    continue
+            except AttributeError:
+                pass
             if when and getattr(rep, 'when', None) != when:
                 continue
             if not inamepart or inamepart in rep.nodeid.split("::"):
diff --git a/_pytest/python.py b/_pytest/python.py
--- a/_pytest/python.py
+++ b/_pytest/python.py
@@ -4,6 +4,7 @@
 import sys
 import pytest
 from py._code.code import TerminalRepr
+from _pytest.monkeypatch import monkeypatch
 
 import _pytest
 cutdir = py.path.local(_pytest.__file__).dirpath()
@@ -26,6 +27,24 @@
         showfuncargs(config)
         return 0
 
+
+def pytest_generate_tests(metafunc):
+    try:
+        param = metafunc.function.parametrize
+    except AttributeError:
+        return
+    for p in param:
+        metafunc.parametrize(*p.args, **p.kwargs)
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "parametrize(argnames, argvalues): call a test function multiple "
+        "times passing in multiple different argument value sets. Example: "
+        "@parametrize('arg1', [1,2]) would lead to two calls of the decorated "
+        "test function, one with arg1=1 and another with arg1=2."
+    )
+
+
 @pytest.mark.trylast
 def pytest_namespace():
     raises.Exception = pytest.fail.Exception
@@ -138,6 +157,7 @@
             obj = obj.place_as
 
         self._fslineno = py.code.getfslineno(obj)
+        assert isinstance(self._fslineno[1], int), obj
         return self._fslineno
 
     def reportinfo(self):
@@ -155,6 +175,7 @@
         else:
             fspath, lineno = self._getfslineno()
             modpath = self.getmodpath()
+        assert isinstance(lineno, int)
         return fspath, lineno, modpath
 
 class PyCollectorMixin(PyobjMixin, pytest.Collector):
@@ -200,6 +221,7 @@
         module = self.getparent(Module).obj
         clscol = self.getparent(Class)
         cls = clscol and clscol.obj or None
+        transfer_markers(funcobj, cls, module)
         metafunc = Metafunc(funcobj, config=self.config,
             cls=cls, module=module)
         gentesthook = self.config.hook.pytest_generate_tests
@@ -219,6 +241,19 @@
             l.append(function)
         return l
 
+def transfer_markers(funcobj, cls, mod):
+    # XXX this should rather be code in the mark plugin or the mark
+    # plugin should merge with the python plugin.
+    for holder in (cls, mod):
+        try:
+            pytestmark = holder.pytestmark
+        except AttributeError:
+            continue
+        if isinstance(pytestmark, list):
+            for mark in pytestmark:
+                mark(funcobj)
+        else:
+            pytestmark(funcobj)
 
 class Module(pytest.File, PyCollectorMixin):
     def _getobj(self):
@@ -226,13 +261,8 @@
 
     def _importtestmodule(self):
         # we assume we are only called once per module
-        from _pytest import assertion
-        assertion.before_module_import(self)
         try:
-            try:
-                mod = self.fspath.pyimport(ensuresyspath=True)
-            finally:
-                assertion.after_module_import(self)
+            mod = self.fspath.pyimport(ensuresyspath=True)
         except SyntaxError:
             excinfo = py.code.ExceptionInfo()
             raise self.CollectError(excinfo.getrepr(style="short"))
@@ -244,7 +274,8 @@
                 "  %s\n"
                 "which is not the same as the test file we want to collect:\n"
                 "  %s\n"
-                "HINT: use a unique basename for your test file modules"
+                "HINT: remove __pycache__ / .pyc files and/or use a "
+                "unique basename for your test file modules"
                  % e.args
             )
         #print "imported test module", mod
@@ -374,6 +405,7 @@
         tw.line()
         tw.line("%s:%d" % (self.filename, self.firstlineno+1))
 
+
 class Generator(FunctionMixin, PyCollectorMixin, pytest.Collector):
     def collect(self):
         # test generators are seen as collectors but they also
@@ -430,6 +462,7 @@
                 "yielded functions (deprecated) cannot have funcargs")
         else:
             if callspec is not None:
+                self.callspec = callspec
                 self.funcargs = callspec.funcargs or {}
                 self._genid = callspec.id
                 if hasattr(callspec, "param"):
@@ -506,15 +539,59 @@
     request._fillfuncargs()
 
 _notexists = object()
-class CallSpec:
-    def __init__(self, funcargs, id, param):
-        self.funcargs = funcargs
-        self.id = id
+
+class CallSpec2(object):
+    def __init__(self, metafunc):
+        self.metafunc = metafunc
+        self.funcargs = {}
+        self._idlist = []
+        self.params = {}
+        self._globalid = _notexists
+        self._globalid_args = set()
+        self._globalparam = _notexists
+
+    def copy(self, metafunc):
+        cs = CallSpec2(self.metafunc)
+        cs.funcargs.update(self.funcargs)
+        cs.params.update(self.params)
+        cs._idlist = list(self._idlist)
+        cs._globalid = self._globalid
+        cs._globalid_args = self._globalid_args
+        cs._globalparam = self._globalparam
+        return cs
+
+    def _checkargnotcontained(self, arg):
+        if arg in self.params or arg in self.funcargs:
+            raise ValueError("duplicate %r" %(arg,))
+
+    def getparam(self, name):
+        try:
+            return self.params[name]
+        except KeyError:
+            if self._globalparam is _notexists:
+                raise ValueError(name)
+            return self._globalparam
+
+    @property
+    def id(self):
+        return "-".join(map(str, filter(None, self._idlist)))
+
+    def setmulti(self, valtype, argnames, valset, id):
+        for arg,val in zip(argnames, valset):
+            self._checkargnotcontained(arg)
+            getattr(self, valtype)[arg] = val
+        self._idlist.append(id)
+
+    def setall(self, funcargs, id, param):
+        for x in funcargs:
+            self._checkargnotcontained(x)
+        self.funcargs.update(funcargs)
+        if id is not _notexists:
+            self._idlist.append(id)
         if param is not _notexists:
-            self.param = param
-    def __repr__(self):
-        return "<CallSpec id=%r param=%r funcargs=%r>" %(
-            self.id, getattr(self, 'param', '?'), self.funcargs)
+            assert self._globalparam is _notexists
+            self._globalparam = param
+
 
 class Metafunc:
     def __init__(self, function, config=None, cls=None, module=None):
@@ -528,31 +605,71 @@
         self._calls = []
         self._ids = py.builtin.set()
 
+    def parametrize(self, argnames, argvalues, indirect=False, ids=None):
+        """ Add new invocations to the underlying test function using the list
+        of argvalues for the given argnames.  Parametrization is performed
+        during the collection phase.  If you need to set up expensive resources
+        you may pass indirect=True and implement a funcarg factory which can
+        perform the expensive setup just before a test is actually run.
+
+        :arg argnames: an argument name or a list of argument names
+
+        :arg argvalues: a list of values for the argname or a list of tuples of
+            values for the list of argument names.
+
+        :arg indirect: if True each argvalue corresponding to an argument will
+            be passed as request.param to its respective funcarg factory so
+            that it can perform more expensive setups during the setup phase of
+            a test rather than at collection time.
+
+        :arg ids: list of string ids each corresponding to the argvalues so
+            that they are part of the test id. If no ids are provided they will
+            be generated automatically from the argvalues.
+        """
+        if not isinstance(argnames, (tuple, list)):
+            argnames = (argnames,)
+            argvalues = [(val,) for val in argvalues]
+        if not indirect:
+            #XXX should we also check for the opposite case?
+            for arg in argnames:
+                if arg not in self.funcargnames:
+                    raise ValueError("%r has no argument %r" %(self.function, arg))
+        valtype = indirect and "params" or "funcargs"
+        if not ids:
+            idmaker = IDMaker()
+            ids = list(map(idmaker, argvalues))
+        newcalls = []
+        for callspec in self._calls or [CallSpec2(self)]:
+            for i, valset in enumerate(argvalues):
+                assert len(valset) == len(argnames)
+                newcallspec = callspec.copy(self)
+                newcallspec.setmulti(valtype, argnames, valset, ids[i])
+                newcalls.append(newcallspec)
+        self._calls = newcalls
+
     def addcall(self, funcargs=None, id=_notexists, param=_notexists):
-        """ add a new call to the underlying test function during the
-        collection phase of a test run.  Note that request.addcall() is
-        called during the test collection phase prior and independently
-        to actual test execution.  Therefore you should perform setup
-        of resources in a funcarg factory which can be instrumented
-        with the ``param``.
+        """ (deprecated, use parametrize) Add a new call to the underlying
+        test function during the collection phase of a test run.  Note that
+        request.addcall() is called during the test collection phase prior to
+        and independently of actual test execution.  You should only use addcall()
+        if you need to specify multiple arguments of a test function.
 
         :arg funcargs: argument keyword dictionary used when invoking
             the test function.
 
         :arg id: used for reporting and identification purposes.  If you
-            don't supply an `id` the length of the currently
-            list of calls to the test function will be used.
+            don't supply an `id` an automatic unique id will be generated.
 
-        :arg param: will be exposed to a later funcarg factory invocation
-            through the ``request.param`` attribute.  It allows to
-            defer test fixture setup activities to when an actual
-            test is run.
+        :arg param: a parameter which will be exposed to a later funcarg factory
+            invocation through the ``request.param`` attribute.
         """
         assert funcargs is None or isinstance(funcargs, dict)
         if funcargs is not None:
             for name in funcargs:
                 if name not in self.funcargnames:
                     pytest.fail("funcarg %r not used in this function." % name)
+        else:
+            funcargs = {}
         if id is None:
             raise ValueError("id=None not allowed")
         if id is _notexists:
@@ -561,11 +678,26 @@
         if id in self._ids:
             raise ValueError("duplicate id %r" % id)
         self._ids.add(id)
-        self._calls.append(CallSpec(funcargs, id, param))
+
+        cs = CallSpec2(self)
+        cs.setall(funcargs, id, param)
+        self._calls.append(cs)
+
+class IDMaker:
+    def __init__(self):
+        self.counter = 0
+    def __call__(self, valset):
+        l = []
+        for val in valset:
+            if not isinstance(val, (int, str)):
+                val = "."+str(self.counter)
+            self.counter += 1
+            l.append(str(val))
+        return "-".join(l)
 
 class FuncargRequest:
     """ A request for function arguments from a test function.
-        
+
         Note that there is an optional ``param`` attribute in case
         there was an invocation to metafunc.addcall(param=...).
         If no such call was done in a ``pytest_generate_tests``
@@ -637,7 +769,7 @@
 
 
     def applymarker(self, marker):
-        """ apply a marker to a single test function invocation.
+        """ Apply a marker to a single test function invocation.
         This method is useful if you don't want to have a keyword/marker
         on all function invocations.
 
@@ -649,7 +781,7 @@
         self._pyfuncitem.keywords[marker.markname] = marker
 
     def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
-        """ return a testing resource managed by ``setup`` &
+        """ Return a testing resource managed by ``setup`` &
         ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
         ``teardown`` function will be called so that subsequent calls to
         ``setup`` would recreate the resource.
@@ -698,11 +830,18 @@
             self._raiselookupfailed(argname)
         funcargfactory = self._name2factory[argname].pop()
         oldarg = self._currentarg
-        self._currentarg = argname
+        mp = monkeypatch()
+        mp.setattr(self, '_currentarg', argname)
+        try:
+            param = self._pyfuncitem.callspec.getparam(argname)
+        except (AttributeError, ValueError):
+            pass
+        else:
+            mp.setattr(self, 'param', param, raising=False)
         try:
             self._funcargs[argname] = res = funcargfactory(request=self)
         finally:
-            self._currentarg = oldarg
+            mp.undo()
         return res
 
     def _getscopeitem(self, scope):
@@ -817,8 +956,7 @@
         >>> raises(ZeroDivisionError, f, x=0)
         <ExceptionInfo ...>
 
-    A third possibility is to use a string which which will
-    be executed::
+    A third possibility is to use a string to be executed::
 
         >>> raises(ZeroDivisionError, "f(0)")
         <ExceptionInfo ...>
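
For context, a minimal sketch of the parametrize API documented above.  In
this version argnames is either a single name or an explicit tuple/list of
names (a single string is not comma-split), and indirect=True routes each
value to the funcarg factory as request.param.  The test names, the "db"
funcarg and its dict stand-in for a real connection are illustrative:

    # test_parametrize.py
    import pytest

    @pytest.mark.parametrize(("base", "exponent", "expected"),
                             [(2, 3, 8), (10, 0, 1)])
    def test_pow(base, exponent, expected):
        assert base ** exponent == expected

    def test_backend_configured(db):
        assert db["backend"] in ("sqlite", "postgres")

    # conftest.py -- indirect parametrization via a funcarg factory
    def pytest_generate_tests(metafunc):
        if "db" in metafunc.funcargnames:
            metafunc.parametrize("db", ["sqlite", "postgres"], indirect=True)

    def pytest_funcarg__db(request):
        # request.param is "sqlite" or "postgres"; a real factory would
        # open a connection here instead of returning a plain dict
        return {"backend": request.param}
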
diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py
--- a/_pytest/resultlog.py
+++ b/_pytest/resultlog.py
@@ -63,6 +63,8 @@
         self.write_log_entry(testpath, lettercode, longrepr)
 
     def pytest_runtest_logreport(self, report):
+        if report.when != "call" and report.passed:
+            return
         res = self.config.hook.pytest_report_teststatus(report=report)
         code = res[1]
         if code == 'x':
@@ -89,5 +91,8 @@
             self.log_outcome(report, code, longrepr)
 
     def pytest_internalerror(self, excrepr):
-        path = excrepr.reprcrash.path
+        reprcrash = getattr(excrepr, 'reprcrash', None)
+        path = getattr(reprcrash, "path", None)
+        if path is None:
+            path = "cwd:%s" % py.path.local()
         self.write_log_entry(path, '!', str(excrepr))
diff --git a/_pytest/runner.py b/_pytest/runner.py
--- a/_pytest/runner.py
+++ b/_pytest/runner.py
@@ -1,6 +1,6 @@
 """ basic collect and runtest protocol implementations """
 
-import py, sys
+import py, sys, time
 from py._code.code import TerminalRepr
 
 def pytest_namespace():
@@ -14,33 +14,60 @@
 #
 # pytest plugin hooks
 
+def pytest_addoption(parser):
+    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group.addoption('--durations',
+         action="store", type="int", default=None, metavar="N",
+         help="show N slowest setup/test durations (N=0 for all)."),
+
+def pytest_terminal_summary(terminalreporter):
+    durations = terminalreporter.config.option.durations
+    if durations is None:
+        return
+    tr = terminalreporter
+    dlist = []
+    for replist in tr.stats.values():
+        for rep in replist:
+            if hasattr(rep, 'duration'):
+                dlist.append(rep)
+    if not dlist:
+        return
+    dlist.sort(key=lambda x: x.duration)
+    dlist.reverse()
+    if not durations:
+        tr.write_sep("=", "slowest test durations")
+    else:
+        tr.write_sep("=", "slowest %s test durations" % durations)
+        dlist = dlist[:durations]
+
+    for rep in dlist:
+        nodeid = rep.nodeid.replace("::()::", "::")
+        tr.write_line("%02.2fs %-8s %s" %
+            (rep.duration, rep.when, nodeid))
+
 def pytest_sessionstart(session):
     session._setupstate = SetupState()
-
-def pytest_sessionfinish(session, exitstatus):
-    hook = session.config.hook
-    rep = hook.pytest__teardown_final(session=session)
-    if rep:
-        hook.pytest__teardown_final_logerror(session=session, report=rep)
-        session.exitstatus = 1
+def pytest_sessionfinish(session):
+    session._setupstate.teardown_all()
 
 class NodeInfo:
     def __init__(self, location):
         self.location = location
 
-def pytest_runtest_protocol(item):
+def pytest_runtest_protocol(item, nextitem):
     item.ihook.pytest_runtest_logstart(
         nodeid=item.nodeid, location=item.location,
     )
-    runtestprotocol(item)
+    runtestprotocol(item, nextitem=nextitem)
     return True
 
-def runtestprotocol(item, log=True):
+def runtestprotocol(item, log=True, nextitem=None):
     rep = call_and_report(item, "setup", log)
     reports = [rep]
     if rep.passed:
         reports.append(call_and_report(item, "call", log))
-    reports.append(call_and_report(item, "teardown", log))
+    reports.append(call_and_report(item, "teardown", log,
+        nextitem=nextitem))
     return reports
 
 def pytest_runtest_setup(item):
@@ -49,16 +76,8 @@
 def pytest_runtest_call(item):
     item.runtest()
 
-def pytest_runtest_teardown(item):
-    item.session._setupstate.teardown_exact(item)
-
-def pytest__teardown_final(session):
-    call = CallInfo(session._setupstate.teardown_all, when="teardown")
-    if call.excinfo:
-        ntraceback = call.excinfo.traceback .cut(excludepath=py._pydir)
-        call.excinfo.traceback = ntraceback.filter()
-        longrepr = call.excinfo.getrepr(funcargs=True)
-        return TeardownErrorReport(longrepr)
+def pytest_runtest_teardown(item, nextitem):
+    item.session._setupstate.teardown_exact(item, nextitem)
 
 def pytest_report_teststatus(report):
     if report.when in ("setup", "teardown"):
@@ -74,18 +93,18 @@
 #
 # Implementation
 
-def call_and_report(item, when, log=True):
-    call = call_runtest_hook(item, when)
+def call_and_report(item, when, log=True, **kwds):
+    call = call_runtest_hook(item, when, **kwds)
     hook = item.ihook
     report = hook.pytest_runtest_makereport(item=item, call=call)
-    if log and (when == "call" or not report.passed):
+    if log:
         hook.pytest_runtest_logreport(report=report)
     return report
 
-def call_runtest_hook(item, when):
+def call_runtest_hook(item, when, **kwds):
     hookname = "pytest_runtest_" + when
     ihook = getattr(item.ihook, hookname)
-    return CallInfo(lambda: ihook(item=item), when=when)
+    return CallInfo(lambda: ihook(item=item, **kwds), when=when)
 
 class CallInfo:
     """ Result/Exception info a function invocation. """
@@ -95,12 +114,16 @@
         #: context of invocation: one of "setup", "call",
         #: "teardown", "memocollect"
         self.when = when
+        self.start = time.time()
         try:
-            self.result = func()
-        except KeyboardInterrupt:
-            raise
-        except:
-            self.excinfo = py.code.ExceptionInfo()
+            try:
+                self.result = func()
+            except KeyboardInterrupt:
+                raise
+            except:
+                self.excinfo = py.code.ExceptionInfo()
+        finally:
+            self.stop = time.time()
 
     def __repr__(self):
         if self.excinfo:
@@ -120,6 +143,10 @@
         return s
 
 class BaseReport(object):
+
+    def __init__(self, **kw):
+        self.__dict__.update(kw)
+
     def toterminal(self, out):
         longrepr = self.longrepr
         if hasattr(self, 'node'):
@@ -139,6 +166,7 @@
 
 def pytest_runtest_makereport(item, call):
     when = call.when
+    duration = call.stop-call.start
     keywords = dict([(x,1) for x in item.keywords])
     excinfo = call.excinfo
     if not call.excinfo:
@@ -160,14 +188,15 @@
             else: # exception in setup or teardown
                 longrepr = item._repr_failure_py(excinfo)
     return TestReport(item.nodeid, item.location,
-        keywords, outcome, longrepr, when)
+                      keywords, outcome, longrepr, when,
+                      duration=duration)
 
 class TestReport(BaseReport):
     """ Basic test report object (also used for setup and teardown calls if
     they fail).
     """
     def __init__(self, nodeid, location,
-            keywords, outcome, longrepr, when):
+            keywords, outcome, longrepr, when, sections=(), duration=0, **extra):
         #: normalized collection node id
         self.nodeid = nodeid
 
@@ -179,16 +208,25 @@
         #: a name -> value dictionary containing all keywords and
         #: markers associated with a test invocation.
         self.keywords = keywords
-        
+
         #: test outcome, always one of "passed", "failed", "skipped".
         self.outcome = outcome
 
         #: None or a failure representation.
         self.longrepr = longrepr
-        
+
         #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
         self.when = when
 
+        #: list of (secname, data) extra information which needs to
+        #: be marshallable
+        self.sections = list(sections)
+
+        #: time it took to run just the test
+        self.duration = duration
+
+        self.__dict__.update(extra)
+
     def __repr__(self):
         return "<TestReport %r when=%r outcome=%r>" % (
             self.nodeid, self.when, self.outcome)
@@ -196,8 +234,10 @@
 class TeardownErrorReport(BaseReport):
     outcome = "failed"
     when = "teardown"
-    def __init__(self, longrepr):
+    def __init__(self, longrepr, **extra):
         self.longrepr = longrepr
+        self.sections = []
+        self.__dict__.update(extra)
 
 def pytest_make_collect_report(collector):
     call = CallInfo(collector._memocollect, "memocollect")
@@ -219,11 +259,13 @@
         getattr(call, 'result', None))
 
 class CollectReport(BaseReport):
-    def __init__(self, nodeid, outcome, longrepr, result):
+    def __init__(self, nodeid, outcome, longrepr, result, sections=(), **extra):
         self.nodeid = nodeid
         self.outcome = outcome
         self.longrepr = longrepr
         self.result = result or []
+        self.sections = list(sections)
+        self.__dict__.update(extra)
 
     @property
     def location(self):
@@ -277,20 +319,22 @@
         self._teardown_with_finalization(None)
         assert not self._finalizers
 
-    def teardown_exact(self, item):
-        if self.stack and item == self.stack[-1]:
+    def teardown_exact(self, item, nextitem):
+        needed_collectors = nextitem and nextitem.listchain() or []
+        self._teardown_towards(needed_collectors)
+
+    def _teardown_towards(self, needed_collectors):
+        while self.stack:
+            if self.stack == needed_collectors[:len(self.stack)]:
+                break
             self._pop_and_teardown()
-        else:
-            self._callfinalizers(item)
 
     def prepare(self, colitem):
         """ setup objects along the collector chain to the test-method
             and teardown previously setup objects."""
         needed_collectors = colitem.listchain()
-        while self.stack:
-            if self.stack == needed_collectors[:len(self.stack)]:
-                break
-            self._pop_and_teardown()
+        self._teardown_towards(needed_collectors)
+
         # check if the last collection node has raised an error
         for col in self.stack:
             if hasattr(col, '_prepare_exc'):
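
Two of the changes above are behavioural: reports now carry a duration
(surfaced via "py.test --durations=N"), and teardown is driven by the chain of
collectors that the next item still needs.  A small standalone sketch of that
"tear down towards the next item" rule, with plain lists standing in for the
collector stack (names are illustrative):

    def teardown_towards(stack, needed, teardown):
        # pop and tear down everything that is not part of the prefix
        # shared with the next item's collector chain
        while stack:
            if stack == needed[:len(stack)]:
                break
            teardown(stack.pop())

    stack = ["session", "test_mod.py", "TestClass"]
    needed = ["session", "test_mod.py"]      # listchain() of the next item
    teardown_towards(stack, needed, lambda frame: None)
    assert stack == ["session", "test_mod.py"]
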
diff --git a/_pytest/skipping.py b/_pytest/skipping.py
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -9,6 +9,21 @@
            action="store_true", dest="runxfail", default=False,
            help="run tests even if they are marked xfail")
 
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "skipif(*conditions): skip the given test function if evaluation "
+        "of all conditions has a True value.  Evaluation happens within the "
+        "module global context. Example: skipif('sys.platform == \"win32\"') "
+        "skips the test if we are on the win32 platform. "
+    )
+    config.addinivalue_line("markers",
+        "xfail(*conditions, reason=None, run=True): mark the the test function "
+        "as an expected failure. Optionally specify a reason and run=False "
+        "if you don't even want to execute the test function. Any positional "
+        "condition strings will be evaluated (like with skipif) and if one is "
+        "False the marker will not be applied."
+    )
+
 def pytest_namespace():
     return dict(xfail=xfail)
 
@@ -117,6 +132,14 @@
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, pytest.Function):
         return
+    # unittest special case, see setting of _unexpectedsuccess
+    if hasattr(item, '_unexpectedsuccess'):
+        rep = __multicall__.execute()
+        if rep.when == "call":
+            # we need to translate into how py.test encodes xpass
+            rep.keywords['xfail'] = "reason: " + item._unexpectedsuccess
+            rep.outcome = "failed"
+        return rep
     if not (call.excinfo and
         call.excinfo.errisinstance(py.test.xfail.Exception)):
         evalxfail = getattr(item, '_evalxfail', None)
@@ -169,21 +192,23 @@
         elif char == "X":
             show_xpassed(terminalreporter, lines)
         elif char in "fF":
-            show_failed(terminalreporter, lines)
+            show_simple(terminalreporter, lines, 'failed', "FAIL %s")
         elif char in "sS":
             show_skipped(terminalreporter, lines)
+        elif char == "E":
+            show_simple(terminalreporter, lines, 'error', "ERROR %s")
     if lines:
         tr._tw.sep("=", "short test summary info")
         for line in lines:
             tr._tw.line(line)
 
-def show_failed(terminalreporter, lines):
+def show_simple(terminalreporter, lines, stat, format):
     tw = terminalreporter._tw
-    failed = terminalreporter.stats.get("failed")
+    failed = terminalreporter.stats.get(stat)
     if failed:
         for rep in failed:
             pos = rep.nodeid
-            lines.append("FAIL %s" %(pos, ))
+            lines.append(format %(pos, ))
 
 def show_xfailed(terminalreporter, lines):
     xfailed = terminalreporter.stats.get("xfailed")
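
A brief sketch of the two markers registered above: skipif evaluates its
string conditions in the module's global context, and xfail(run=False) records
an expected failure without executing the test.  Test names are illustrative:

    # test_skipping.py
    import sys
    import pytest

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_posix_only():
        pass

    @pytest.mark.xfail(reason="known bug in the parser", run=False)
    def test_not_executed():
        raise RuntimeError("never runs because run=False")
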
diff --git a/_pytest/terminal.py b/_pytest/terminal.py
--- a/_pytest/terminal.py
+++ b/_pytest/terminal.py
@@ -15,7 +15,7 @@
     group._addoption('-r',
          action="store", dest="reportchars", default=None, metavar="chars",
          help="show extra test summary info as specified by chars (f)ailed, "
-              "(s)skipped, (x)failed, (X)passed.")
+              "(E)error, (s)skipped, (x)failed, (X)passed.")
     group._addoption('-l', '--showlocals',
          action="store_true", dest="showlocals", default=False,
          help="show locals in tracebacks (disabled by default).")
@@ -43,7 +43,8 @@
             pass
         else:
             stdout = os.fdopen(newfd, stdout.mode, 1)
-            config._toclose = stdout
+            config._cleanup.append(lambda: stdout.close())
+
     reporter = TerminalReporter(config, stdout)
     config.pluginmanager.register(reporter, 'terminalreporter')
     if config.option.debug or config.option.traceconfig:
@@ -52,11 +53,6 @@
             reporter.write_line("[traceconfig] " + msg)
         config.trace.root.setprocessor("pytest:config", mywriter)
 
-def pytest_unconfigure(config):
-    if hasattr(config, '_toclose'):
-        #print "closing", config._toclose, config._toclose.fileno()
-        config._toclose.close()
-
 def getreportopt(config):
     reportopts = ""
     optvalue = config.option.report
@@ -165,9 +161,6 @@
     def pytest_deselected(self, items):
         self.stats.setdefault('deselected', []).extend(items)
 
-    def pytest__teardown_final_logerror(self, report):
-        self.stats.setdefault("error", []).append(report)
-
     def pytest_runtest_logstart(self, nodeid, location):
         # ensure that the path is printed before the
         # 1st test of a module starts running
@@ -259,7 +252,7 @@
         msg = "platform %s -- Python %s" % (sys.platform, verinfo)
         if hasattr(sys, 'pypy_version_info'):
             verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
-            msg += "[pypy-%s]" % verinfo
+            msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
         msg += " -- pytest-%s" % (py.test.__version__)
         if self.verbosity > 0 or self.config.option.debug or \
            getattr(self.config.option, 'pastebin', None):
@@ -289,10 +282,18 @@
         # we take care to leave out Instances aka ()
         # because later versions are going to get rid of them anyway
         if self.config.option.verbose < 0:
-            for item in items:
-                nodeid = item.nodeid
-                nodeid = nodeid.replace("::()::", "::")
-                self._tw.line(nodeid)
+            if self.config.option.verbose < -1:
+                counts = {}
+                for item in items:
+                    name = item.nodeid.split('::', 1)[0]
+                    counts[name] = counts.get(name, 0) + 1
+                for name, count in sorted(counts.items()):
+                    self._tw.line("%s: %d" % (name, count))
+            else:
+                for item in items:
+                    nodeid = item.nodeid
+                    nodeid = nodeid.replace("::()::", "::")
+                    self._tw.line(nodeid)
             return
         stack = []
         indent = ""
@@ -318,12 +319,17 @@
             self.config.hook.pytest_terminal_summary(terminalreporter=self)
         if exitstatus == 2:
             self._report_keyboardinterrupt()
+            del self._keyboardinterrupt_memo
         self.summary_deselected()
         self.summary_stats()
 
     def pytest_keyboard_interrupt(self, excinfo):
         self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
 
+    def pytest_unconfigure(self):
+        if hasattr(self, '_keyboardinterrupt_memo'):
+            self._report_keyboardinterrupt()
+
     def _report_keyboardinterrupt(self):
         excrepr = self._keyboardinterrupt_memo
         msg = excrepr.reprcrash.message
@@ -388,7 +394,7 @@
                 else:
                     msg = self._getfailureheadline(rep)
                     self.write_sep("_", msg)
-                    rep.toterminal(self._tw)
+                    self._outrep_summary(rep)
 
     def summary_errors(self):
         if self.config.option.tbstyle != "no":
@@ -406,7 +412,15 @@
                 elif rep.when == "teardown":
                     msg = "ERROR at teardown of " + msg
                 self.write_sep("_", msg)
-                rep.toterminal(self._tw)
+                self._outrep_summary(rep)
+
+    def _outrep_summary(self, rep):
+        rep.toterminal(self._tw)
+        for secname, content in rep.sections:
+            self._tw.sep("-", secname)
+            if content[-1:] == "\n":
+                content = content[:-1]
+            self._tw.line(content)
 
     def summary_stats(self):
         session_duration = py.std.time.time() - self._sessionstarttime
@@ -417,9 +431,10 @@
                 keys.append(key)
         parts = []
         for key in keys:
-            val = self.stats.get(key, None)
-            if val:
-                parts.append("%d %s" %(len(val), key))
+            if key: # setup/teardown reports have an empty key, ignore them
+                val = self.stats.get(key, None)
+                if val:
+                    parts.append("%d %s" %(len(val), key))
         line = ", ".join(parts)
         # XXX coloring
         msg = "%s in %.2f seconds" %(line, session_duration)
@@ -430,8 +445,15 @@
 
     def summary_deselected(self):
         if 'deselected' in self.stats:
+            l = []
+            k = self.config.option.keyword
+            if k:
+                l.append("-k%s" % k)
+            m = self.config.option.markexpr
+            if m:
+                l.append("-m %r" % m)
             self.write_sep("=", "%d tests deselected by %r" %(
-                len(self.stats['deselected']), self.config.option.keyword), bold=True)
+                len(self.stats['deselected']), " ".join(l)), bold=True)
 
 def repr_pythonversion(v=None):
     if v is None:
diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py
--- a/_pytest/tmpdir.py
+++ b/_pytest/tmpdir.py
@@ -46,7 +46,7 @@
 
     def finish(self):
         self.trace("finish")
-        
+
 def pytest_configure(config):
     mp = monkeypatch()
     t = TempdirHandler(config)
@@ -64,5 +64,5 @@
     name = request._pyfuncitem.name
     name = py.std.re.sub("[\W]", "_", name)
     x = request.config._tmpdirhandler.mktemp(name, numbered=True)
-    return x.realpath()
+    return x
 
diff --git a/_pytest/unittest.py b/_pytest/unittest.py
--- a/_pytest/unittest.py
+++ b/_pytest/unittest.py
@@ -2,6 +2,9 @@
 import pytest, py
 import sys, pdb
 
+# for transferring markers
+from _pytest.python import transfer_markers
+
 def pytest_pycollect_makeitem(collector, name, obj):
     unittest = sys.modules.get('unittest')
     if unittest is None:
@@ -19,7 +22,14 @@
 class UnitTestCase(pytest.Class):
     def collect(self):
         loader = py.std.unittest.TestLoader()
+        module = self.getparent(pytest.Module).obj
+        cls = self.obj
         for name in loader.getTestCaseNames(self.obj):
+            x = getattr(self.obj, name)
+            funcobj = getattr(x, 'im_func', x)
+            transfer_markers(funcobj, cls, module)
+            if hasattr(funcobj, 'todo'):
+                pytest.mark.xfail(reason=str(funcobj.todo))(funcobj)
             yield TestCaseFunction(name, parent=self)
 
     def setup(self):
@@ -37,15 +47,13 @@
 class TestCaseFunction(pytest.Function):
     _excinfo = None
 
-    def __init__(self, name, parent):
-        super(TestCaseFunction, self).__init__(name, parent)
-        if hasattr(self._obj, 'todo'):
-            getattr(self._obj, 'im_func', self._obj).xfail = \
-                pytest.mark.xfail(reason=str(self._obj.todo))
-
     def setup(self):
         self._testcase = self.parent.obj(self.name)
         self._obj = getattr(self._testcase, self.name)
+        if hasattr(self._testcase, 'skip'):
+            pytest.skip(self._testcase.skip)
+        if hasattr(self._obj, 'skip'):
+            pytest.skip(self._obj.skip)
         if hasattr(self._testcase, 'setup_method'):
             self._testcase.setup_method(self._obj)
 
@@ -83,28 +91,37 @@
         self._addexcinfo(rawexcinfo)
     def addFailure(self, testcase, rawexcinfo):
         self._addexcinfo(rawexcinfo)
+
     def addSkip(self, testcase, reason):
         try:
             pytest.skip(reason)
         except pytest.skip.Exception:
             self._addexcinfo(sys.exc_info())
-    def addExpectedFailure(self, testcase, rawexcinfo, reason):
+
+    def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
         try:
             pytest.xfail(str(reason))
         except pytest.xfail.Exception:
             self._addexcinfo(sys.exc_info())
-    def addUnexpectedSuccess(self, testcase, reason):
-        pass
+
+    def addUnexpectedSuccess(self, testcase, reason=""):
+        self._unexpectedsuccess = reason
+
     def addSuccess(self, testcase):
         pass
+
     def stopTest(self, testcase):
         pass
+
     def runtest(self):
         self._testcase(result=self)
 
     def _prunetraceback(self, excinfo):
         pytest.Function._prunetraceback(self, excinfo)
-        excinfo.traceback = excinfo.traceback.filter(lambda x:not x.frame.f_globals.get('__unittest'))
+        traceback = excinfo.traceback.filter(
+            lambda x:not x.frame.f_globals.get('__unittest'))
+        if traceback:
+            excinfo.traceback = traceback
 
 @pytest.mark.tryfirst
 def pytest_runtest_makereport(item, call):
@@ -120,14 +137,19 @@
             ut = sys.modules['twisted.python.failure']
             Failure__init__ = ut.Failure.__init__.im_func
             check_testcase_implements_trial_reporter()
-            def excstore(self, exc_value=None, exc_type=None, exc_tb=None):
+            def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
+                captureVars=None):
                 if exc_value is None:
                     self._rawexcinfo = sys.exc_info()
                 else:
                     if exc_type is None:
                         exc_type = type(exc_value)
                     self._rawexcinfo = (exc_type, exc_value, exc_tb)
-                Failure__init__(self, exc_value, exc_type, exc_tb)
+                try:
+                    Failure__init__(self, exc_value, exc_type, exc_tb,
+                        captureVars=captureVars)
+                except TypeError:
+                    Failure__init__(self, exc_value, exc_type, exc_tb)
             ut.Failure.__init__ = excstore
             try:
                 return __multicall__.execute()
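
A small sketch of the unittest integration behaviour added above: a
Twisted-style "todo" attribute on a test method is collected as an expected
failure (xfail), and a "skip" attribute on the TestCase class or the bound
method triggers a skip during setup.  Class and test names are illustrative:

    # test_unittest_compat.py
    import unittest

    class TestLegacy(unittest.TestCase):
        skip = "not ported yet"              # skipped at setup time

        def test_anything(self):
            self.fail("never reached")

    class TestTwistedStyle(unittest.TestCase):
        def test_flaky(self):
            raise AssertionError("boom")
        test_flaky.todo = "known flaky"      # reported as xfail
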
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -93,6 +93,7 @@
     return result
 
 def parse_plain(graph_id, plaincontent, links={}, fixedfont=False):
+    plaincontent = plaincontent.replace('\r\n', '\n')    # fix Windows EOL
     lines = plaincontent.splitlines(True)
     for i in range(len(lines)-2, -1, -1):
         if lines[i].endswith('\\\n'):   # line ending in '\'
diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py
--- a/lib-python/modified-2.7/test/test_set.py
+++ b/lib-python/modified-2.7/test/test_set.py
@@ -1568,7 +1568,7 @@
             for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
                 for g in (G, I, Ig, L, R):
                     expected = meth(data)
-                    actual = meth(G(data))
+                    actual = meth(g(data))
                     if isinstance(expected, bool):
                         self.assertEqual(actual, expected)
                     else:
diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py
deleted file mode 100644
--- a/lib_pypy/_locale.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# ctypes implementation of _locale module by Victor Stinner, 2008-03-27
-
-# ------------------------------------------------------------
-#  Note that we also have our own interp-level implementation
-# ------------------------------------------------------------
-
-"""
-Support for POSIX locales.
-"""
-
-from ctypes import (Structure, POINTER, create_string_buffer,
-    c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t)
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-
-# load the platform-specific cache made by running locale.ctc.py
-from ctypes_config_cache._locale_cache import *
-
-try: from __pypy__ import builtinify
-except ImportError: builtinify = lambda f: f
-
-
-# Ubuntu Gusty i386 structure
-class lconv(Structure):
-    _fields_ = (
-        # Numeric (non-monetary) information.
-        ("decimal_point", c_char_p),    # Decimal point character.
-        ("thousands_sep", c_char_p),    # Thousands separator.
-
-        # Each element is the number of digits in each group;
-        # elements with higher indices are farther left.
-        # An element with value CHAR_MAX means that no further grouping is done.
-        # An element with value 0 means that the previous element is used
-        # for all groups farther left.  */
-        ("grouping", c_char_p),
-
-        # Monetary information.
-
-        # First three chars are a currency symbol from ISO 4217.
-        # Fourth char is the separator.  Fifth char is '\0'.
-        ("int_curr_symbol", c_char_p),
-        ("currency_symbol", c_char_p),   # Local currency symbol.
-        ("mon_decimal_point", c_char_p), # Decimal point character.
-        ("mon_thousands_sep", c_char_p), # Thousands separator.
-        ("mon_grouping", c_char_p),      # Like `grouping' element (above).
-        ("positive_sign", c_char_p),     # Sign for positive values.
-        ("negative_sign", c_char_p),     # Sign for negative values.
-        ("int_frac_digits", c_ubyte),    # Int'l fractional digits.
-        ("frac_digits", c_ubyte),        # Local fractional digits.
-        # 1 if currency_symbol precedes a positive value, 0 if succeeds.
-        ("p_cs_precedes", c_ubyte),
-        # 1 iff a space separates currency_symbol from a positive value.
-        ("p_sep_by_space", c_ubyte),
-        # 1 if currency_symbol precedes a negative value, 0 if succeeds.
-        ("n_cs_precedes", c_ubyte),
-        # 1 iff a space separates currency_symbol from a negative value.
-        ("n_sep_by_space", c_ubyte),
-
-        # Positive and negative sign positions:
-        # 0 Parentheses surround the quantity and currency_symbol.
-        # 1 The sign string precedes the quantity and currency_symbol.
-        # 2 The sign string follows the quantity and currency_symbol.
-        # 3 The sign string immediately precedes the currency_symbol.
-        # 4 The sign string immediately follows the currency_symbol.
-        ("p_sign_posn", c_ubyte),
-        ("n_sign_posn", c_ubyte),
-        # 1 if int_curr_symbol precedes a positive value, 0 if succeeds.
-        ("int_p_cs_precedes", c_ubyte),
-        # 1 iff a space separates int_curr_symbol from a positive value.
-        ("int_p_sep_by_space", c_ubyte),
-        # 1 if int_curr_symbol precedes a negative value, 0 if succeeds.
-        ("int_n_cs_precedes", c_ubyte),
-        # 1 iff a space separates int_curr_symbol from a negative value.
-        ("int_n_sep_by_space", c_ubyte),
-         # Positive and negative sign positions:
-         # 0 Parentheses surround the quantity and int_curr_symbol.
-         # 1 The sign string precedes the quantity and int_curr_symbol.
-         # 2 The sign string follows the quantity and int_curr_symbol.
-         # 3 The sign string immediately precedes the int_curr_symbol.
-         # 4 The sign string immediately follows the int_curr_symbol.
-        ("int_p_sign_posn", c_ubyte),
-        ("int_n_sign_posn", c_ubyte),
-    )
-
-_setlocale = libc.setlocale
-_setlocale.argtypes = (c_int, c_char_p)
-_setlocale.restype = c_char_p
-
-_localeconv = libc.localeconv
-_localeconv.argtypes = None
-_localeconv.restype = POINTER(lconv)
-
-_strcoll = libc.strcoll
-_strcoll.argtypes = (c_char_p, c_char_p)
-_strcoll.restype = c_int
-
-_wcscoll = libc.wcscoll
-_wcscoll.argtypes = (c_wchar_p, c_wchar_p)
-_wcscoll.restype = c_int
-
-_strxfrm = libc.strxfrm
-_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t)
-_strxfrm.restype = c_size_t
-
-HAS_LIBINTL = hasattr(libc, 'gettext')
-if HAS_LIBINTL:
-    _gettext = libc.gettext
-    _gettext.argtypes = (c_char_p,)
-    _gettext.restype = c_char_p
-
-    _dgettext = libc.dgettext
-    _dgettext.argtypes = (c_char_p, c_char_p)
-    _dgettext.restype = c_char_p
-
-    _dcgettext = libc.dcgettext
-    _dcgettext.argtypes = (c_char_p, c_char_p, c_int)
-    _dcgettext.restype = c_char_p
-
-    _textdomain = libc.textdomain
-    _textdomain.argtypes = (c_char_p,)
-    _textdomain.restype = c_char_p
-
-    _bindtextdomain = libc.bindtextdomain
-    _bindtextdomain.argtypes = (c_char_p, c_char_p)
-    _bindtextdomain.restype = c_char_p
-
-    HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset')
-    if HAS_BIND_TEXTDOMAIN_CODESET:
-        _bind_textdomain_codeset = libc.bindtextdomain_codeset
-        _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p)
-        _bind_textdomain_codeset.restype = c_char_p
-
-class Error(Exception):
-    pass
-
-def fixup_ulcase():
-    import string
-    #import strop
-
-    # create uppercase map string
-    ul = []
-    for c in xrange(256):
-        c = chr(c)
-        if c.isupper():
-            ul.append(c)
-    ul = ''.join(ul)
-    string.uppercase = ul
-    #strop.uppercase = ul
-
-    # create lowercase string
-    ul = []
-    for c in xrange(256):
-        c = chr(c)
-        if c.islower():
-            ul.append(c)
-    ul = ''.join(ul)
-    string.lowercase = ul
-    #strop.lowercase = ul
-
-    # create letters string
-    ul = []
-    for c in xrange(256):
-        c = chr(c)
-        if c.isalpha():
-            ul.append(c)
-    ul = ''.join(ul)
-    string.letters = ul
-
-@builtinify
-def setlocale(category, locale=None):
-    "(integer,string=None) -> string. Activates/queries locale processing."
-    if locale:
-        # set locale
-        result = _setlocale(category, locale)
-        if not result:
-            raise Error("unsupported locale setting")
-
-        # record changes to LC_CTYPE
-        if category in (LC_CTYPE, LC_ALL):
-            fixup_ulcase()
-    else:
-        # get locale
-        result = _setlocale(category, None)
-        if not result:
-            raise Error("locale query failed")
-    return result
-
-def _copy_grouping(text):
-    groups = [ ord(group) for group in text ]
-    if groups:
-        groups.append(0)
-    return groups
-
-@builtinify
-def localeconv():
-    "() -> dict. Returns numeric and monetary locale-specific parameters."
-
-    # if LC_NUMERIC is different in the C library, use saved value
-    lp = _localeconv()
-    l = lp.contents
-
-    # hopefully, the localeconv result survives the C library calls
-    # involved herein
-
-    # Numeric information
-    result = {
-        "decimal_point": l.decimal_point,
-        "thousands_sep": l.thousands_sep,
-        "grouping": _copy_grouping(l.grouping),
-        "int_curr_symbol": l.int_curr_symbol,
-        "currency_symbol": l.currency_symbol,
-        "mon_decimal_point": l.mon_decimal_point,
-        "mon_thousands_sep": l.mon_thousands_sep,
-        "mon_grouping": _copy_grouping(l.mon_grouping),
-        "positive_sign": l.positive_sign,
-        "negative_sign": l.negative_sign,
-        "int_frac_digits": l.int_frac_digits,
-        "frac_digits": l.frac_digits,
-        "p_cs_precedes": l.p_cs_precedes,
-        "p_sep_by_space": l.p_sep_by_space,
-        "n_cs_precedes": l.n_cs_precedes,
-        "n_sep_by_space": l.n_sep_by_space,
-        "p_sign_posn": l.p_sign_posn,
-        "n_sign_posn": l.n_sign_posn,
-    }
-    return result
-
-@builtinify
-def strcoll(s1, s2):
-    "string,string -> int. Compares two strings according to the locale."
-
-    # If both arguments are byte strings, use strcoll.
-    if isinstance(s1, str) and isinstance(s2, str):
-        return _strcoll(s1, s2)
-
-    # If neither argument is unicode, it's an error.
-    if not isinstance(s1, unicode) and not isinstance(s2, unicode):
-        raise ValueError("strcoll arguments must be strings")
-
-    # Convert the non-unicode argument to unicode.
-    s1 = unicode(s1)
-    s2 = unicode(s2)
-
-    # Collate the strings.
-    return _wcscoll(s1, s2)
-
-@builtinify
-def strxfrm(s):
-    "string -> string. Returns a string that behaves for cmp locale-aware."
-
-    # assume no change in size, first
-    n1 = len(s) + 1
-    buf = create_string_buffer(n1)
-    n2 = _strxfrm(buf, s, n1) + 1
-    if n2 > n1:
-        # more space needed
-        buf = create_string_buffer(n2)
-        _strxfrm(buf, s, n2)
-    return buf.value
-
-@builtinify
-def getdefaultlocale():
-    # TODO: Port code from CPython for Windows and Mac OS
-    raise NotImplementedError()
-
-if HAS_LANGINFO:
-    _nl_langinfo = libc.nl_langinfo
-    _nl_langinfo.argtypes = (nl_item,)
-    _nl_langinfo.restype = c_char_p
-
-    def nl_langinfo(key):
-        """nl_langinfo(key) -> string
-        Return the value for the locale information associated with key."""
-        # Check whether this is a supported constant. GNU libc sometimes
-        # returns numeric values in the char* return value, which would
-        # crash PyString_FromString.
-        result = _nl_langinfo(key)
-        if result is not None:
-            return result
-        raise ValueError("unsupported langinfo constant")
-
-if HAS_LIBINTL:
-    @builtinify
-    def gettext(msg):
-        """gettext(msg) -> string
-        Return translation of msg."""
-        return _gettext(msg)
-
-    @builtinify
-    def dgettext(domain, msg):
-        """dgettext(domain, msg) -> string
-        Return translation of msg in domain."""
-        return _dgettext(domain, msg)
-
-    @builtinify
-    def dcgettext(domain, msg, category):
-        """dcgettext(domain, msg, category) -> string
-        Return translation of msg in domain and category."""
-        return _dcgettext(domain, msg, category)
-
-    @builtinify
-    def textdomain(domain):
-        """textdomain(domain) -> string
-        Set the C library's textdomain to domain, returning the new domain."""
-        return _textdomain(domain)
-
-    @builtinify
-    def bindtextdomain(domain, dir):
-        """bindtextdomain(domain, dir) -> string
-        Bind the C library's domain to dir."""
-        dirname = _bindtextdomain(domain, dir)
-        if not dirname:
-            errno = get_errno()
-            raise OSError(errno)
-        return dirname
-
-    if HAS_BIND_TEXTDOMAIN_CODESET:
-        @builtinify
-        def bind_textdomain_codeset(domain, codeset):
-            """bind_textdomain_codeset(domain, codeset) -> string
-            Bind the C library's domain to codeset."""
-            codeset = _bind_textdomain_codeset(domain, codeset)
-            if codeset:
-                return codeset
-            return None
-
-__all__ = (
-    'Error',
-    'setlocale', 'localeconv', 'strxfrm', 'strcoll',
-) + ALL_CONSTANTS
-if HAS_LIBINTL:
-    __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain',
-                'bindtextdomain')
-    if HAS_BIND_TEXTDOMAIN_CODESET:
-        __all__ += ('bind_textdomain_codeset',)
-if HAS_LANGINFO:
-    __all__ += ('nl_langinfo',)
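
The file removed above was a ctypes port of _locale: it loads libc, declares
argtypes/restype for each C function, and wraps the raw call in a Python-level
error check; PyPy relies on its interp-level _locale module instead.  A
minimal sketch of that ctypes pattern, assuming a POSIX libc reachable through
ctypes.util (the LC_ALL value below is the glibc one and is only illustrative):

    import ctypes, ctypes.util

    libc = ctypes.CDLL(ctypes.util.find_library("c"))

    _setlocale = libc.setlocale
    _setlocale.argtypes = (ctypes.c_int, ctypes.c_char_p)
    _setlocale.restype = ctypes.c_char_p

    LC_ALL = 6    # glibc value; other platforms differ

    class Error(Exception):
        pass

    def query_locale(category):
        # passing NULL queries the current locale instead of changing it
        result = _setlocale(category, None)
        if not result:
            raise Error("locale query failed")
        return result

    print(query_locale(LC_ALL))    # e.g. 'C'
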
diff --git a/lib_pypy/array.py b/lib_pypy/array.py
deleted file mode 100644
--- a/lib_pypy/array.py
+++ /dev/null
@@ -1,531 +0,0 @@
-"""This module defines an object type which can efficiently represent
-an array of basic values: characters, integers, floating point
-numbers.  Arrays are sequence types and behave very much like lists,
-except that the type of objects stored in them is constrained.  The
-type is specified at object creation time by using a type code, which
-is a single character.  The following type codes are defined:
-
-    Type code   C Type             Minimum size in bytes 
-    'c'         character          1 
-    'b'         signed integer     1 
-    'B'         unsigned integer   1 
-    'u'         Unicode character  2 
-    'h'         signed integer     2 
-    'H'         unsigned integer   2 
-    'i'         signed integer     2 
-    'I'         unsigned integer   2 
-    'l'         signed integer     4 
-    'L'         unsigned integer   4 
-    'f'         floating point     4 
-    'd'         floating point     8 
-
-The constructor is:
-
-array(typecode [, initializer]) -- create a new array
-"""
-
-from struct import calcsize, pack, pack_into, unpack_from
-import operator
-
-# the buffer-like object to use internally: trying from
-# various places in order...
-try:
-    import _rawffi                    # a reasonable implementation based
-    _RAWARRAY = _rawffi.Array('c')    # on raw_malloc, and providing a
-    def bytebuffer(size):             # real address
-        return _RAWARRAY(size, autofree=True)
-    def getbufaddress(buf):
-        return buf.buffer
-except ImportError:
-    try:
-        from __pypy__ import bytebuffer     # a reasonable implementation
-        def getbufaddress(buf):             # compatible with oo backends,
-            return 0                        # but no address
-    except ImportError:
-        # not running on PyPy.  Fall back to ctypes...
-        import ctypes
-        bytebuffer = ctypes.create_string_buffer
-        def getbufaddress(buf):
-            voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p)
-            return voidp.value
-
-# ____________________________________________________________
-
-TYPECODES = "cbBuhHiIlLfd"
-
-class array(object):
-    """array(typecode [, initializer]) -> array
-    
-    Return a new array whose items are restricted by typecode, and
-    initialized from the optional initializer value, which must be a list,
-    string, or iterable over elements of the appropriate type.
-    
-    Arrays represent basic values and behave very much like lists, except
-    the type of objects stored in them is constrained.
-    
-    Methods:
-    
-    append() -- append a new item to the end of the array
-    buffer_info() -- return information giving the current memory info
-    byteswap() -- byteswap all the items of the array
-    count() -- return number of occurrences of an object
-    extend() -- extend array by appending multiple elements from an iterable
-    fromfile() -- read items from a file object
-    fromlist() -- append items from the list
-    fromstring() -- append items from the string
-    index() -- return index of first occurrence of an object
-    insert() -- insert a new item into the array at a provided position
-    pop() -- remove and return item (default last)
-    read() -- DEPRECATED, use fromfile()
-    remove() -- remove first occurrence of an object
-    reverse() -- reverse the order of the items in the array
-    tofile() -- write all items to a file object
-    tolist() -- return the array converted to an ordinary list
-    tostring() -- return the array converted to a string
-    write() -- DEPRECATED, use tofile()
-    
-    Attributes:
-    
-    typecode -- the typecode character used to create the array
-    itemsize -- the length in bytes of one array item
-    """
-    __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"]
-
-    def __new__(cls, typecode, initializer=[], **extrakwds):
-        self = object.__new__(cls)
-        if cls is array and extrakwds:
-            raise TypeError("array() does not take keyword arguments")
-        if not isinstance(typecode, str) or len(typecode) != 1:
-            raise TypeError(
-                     "array() argument 1 must be char, not %s" % type(typecode))
-        if typecode not in TYPECODES:
-            raise ValueError(
-                  "bad typecode (must be one of %s)" % ', '.join(TYPECODES))
-        self._data = bytebuffer(0)
-        self.typecode = typecode
-        self.itemsize = calcsize(typecode)
-        if isinstance(initializer, list):
-            self.fromlist(initializer)
-        elif isinstance(initializer, str):
-            self.fromstring(initializer)
-        elif isinstance(initializer, unicode) and self.typecode == "u":
-            self.fromunicode(initializer)
-        else:
-            self.extend(initializer)
-        return self
-
-    def _clear(self):
-        self._data = bytebuffer(0)
-
-    ##### array-specific operations
-
-    def fromfile(self, f, n):
-        """Read n objects from the file object f and append them to the end of
-        the array. Also called as read."""
-        if not isinstance(f, file):
-            raise TypeError("arg1 must be open file")
-        size = self.itemsize * n
-        item = f.read(size)
-        if len(item) < size:
-            raise EOFError("not enough items in file")
-        self.fromstring(item)
-
-    def fromlist(self, l):
-        """Append items to array from list."""
-        if not isinstance(l, list):
-            raise TypeError("arg must be list")
-        self._fromiterable(l)
-        
-    def fromstring(self, s):
-        """Appends items from the string, interpreting it as an array of machine
-        values, as if it had been read from a file using the fromfile()
-        method."""
-        if isinstance(s, unicode):
-            s = str(s)
-        self._frombuffer(s)
-
-    def _frombuffer(self, s):
-        length = len(s)
-        if length % self.itemsize != 0:
-            raise ValueError("string length not a multiple of item size")
-        boundary = len(self._data)
-        newdata = bytebuffer(boundary + length)
-        newdata[:boundary] = self._data
-        newdata[boundary:] = s
-        self._data = newdata
-
-    def fromunicode(self, ustr):
-        """Extends this array with data from the unicode string ustr. The array
-        must be a type 'u' array; otherwise a ValueError is raised. Use
-        array.fromstring(ustr.encode(...)) to append Unicode data to an array of
-        some other type."""
-        if not self.typecode == "u":
-            raise ValueError(
-                          "fromunicode() may only be called on type 'u' arrays")
-        # XXX the following probable bug is not emulated:
-        # CPython accepts a non-unicode string or a buffer, and then
-        # behaves just like fromstring(), except that it strangely truncates
-        # string arguments at multiples of the unicode byte size.
-        # Let's only accept unicode arguments for now.
-        if not isinstance(ustr, unicode):
-            raise TypeError("fromunicode() argument should probably be "
-                            "a unicode string")
-        # _frombuffer() does the correct thing using
-        # the buffer behavior of unicode objects
-        self._frombuffer(buffer(ustr))
-
-    def tofile(self, f):
-        """Write all items (as machine values) to the file object f.  Also
-        called as write."""
-        if not isinstance(f, file):
-            raise TypeError("arg must be open file")
-        f.write(self.tostring())
-        
-    def tolist(self):
-        """Convert array to an ordinary list with the same items."""
-        count = len(self._data) // self.itemsize
-        return list(unpack_from('%d%s' % (count, self.typecode), self._data))
-
-    def tostring(self):
-        return self._data[:]
-
-    def __buffer__(self):
-        return buffer(self._data)
-
-    def tounicode(self):
-        """Convert the array to a unicode string. The array must be a type 'u'
-        array; otherwise a ValueError is raised. Use array.tostring().decode()
-        to obtain a unicode string from an array of some other type."""
-        if self.typecode != "u":
-            raise ValueError("tounicode() may only be called on type 'u' arrays")
-        # XXX performance is not too good
-        return u"".join(self.tolist())
-
-    def byteswap(self):
-        """Byteswap all items of the array.  If the items in the array are not
-        1, 2, 4, or 8 bytes in size, RuntimeError is raised."""
-        if self.itemsize not in [1, 2, 4, 8]:
-            raise RuntimeError("byteswap not supported for this array")
-        # XXX slowish
-        itemsize = self.itemsize
-        bytes = self._data
-        for start in range(0, len(bytes), itemsize):
-            stop = start + itemsize
-            bytes[start:stop] = bytes[start:stop][::-1]
-
-    def buffer_info(self):
-        """Return a tuple (address, length) giving the current memory address
-        and the length in items of the buffer used to hold array's contents. The
-        length should be multiplied by the itemsize attribute to calculate the
-        buffer length in bytes. On PyPy the address might be meaningless
-        (returned as 0), depending on the available modules."""
-        return (getbufaddress(self._data), len(self))
-    
-    read = fromfile
-
-    write = tofile
-
-    ##### general object protocol
-    
-    def __repr__(self):
-        if len(self._data) == 0:
-            return "array('%s')" % self.typecode
-        elif self.typecode == "c":
-            return "array('%s', %s)" % (self.typecode, repr(self.tostring()))
-        elif self.typecode == "u":
-            return "array('%s', %s)" % (self.typecode, repr(self.tounicode()))
-        else:
-            return "array('%s', %s)" % (self.typecode, repr(self.tolist()))
-
-    def __copy__(self):
-        a = array(self.typecode)
-        a._data = bytebuffer(len(self._data))
-        a._data[:] = self._data
-        return a
-
-    def __eq__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) == buffer(other._data)
-        else:
-            return self.tolist() == other.tolist()
-
-    def __ne__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) != buffer(other._data)
-        else:
-            return self.tolist() != other.tolist()
-
-    def __lt__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) < buffer(other._data)
-        else:
-            return self.tolist() < other.tolist()
-
-    def __gt__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) > buffer(other._data)
-        else:
-            return self.tolist() > other.tolist()
-
-    def __le__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) <= buffer(other._data)
-        else:
-            return self.tolist() <= other.tolist()
-
-    def __ge__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) >= buffer(other._data)
-        else:
-            return self.tolist() >= other.tolist()
-
-    def __reduce__(self):
-        dict = getattr(self, '__dict__', None)
-        data = self.tostring()
-        if data:
-            initargs = (self.typecode, data)
-        else:
-            initargs = (self.typecode,)
-        return (type(self), initargs, dict)
-
-    ##### list methods
-    
-    def append(self, x):
-        """Append new value x to the end of the array."""
-        self._frombuffer(pack(self.typecode, x))
-
-    def count(self, x):
-        """Return number of occurences of x in the array."""
-        return operator.countOf(self, x)
-
-    def extend(self, iterable):
-        """Append items to the end of the array."""
-        if isinstance(iterable, array) \
-                                    and not self.typecode == iterable.typecode:
-            raise TypeError("can only extend with array of same kind")
-        self._fromiterable(iterable)
-
-    def index(self, x):
-        """Return index of first occurence of x in the array."""
-        return operator.indexOf(self, x)
-    
-    def insert(self, i, x):
-        """Insert a new item x into the array before position i."""
-        seqlength = len(self)
-        if i < 0:
-            i += seqlength
-            if i < 0:
-                i = 0
-        elif i > seqlength:
-            i = seqlength
-        boundary = i * self.itemsize
-        data = pack(self.typecode, x)
-        newdata = bytebuffer(len(self._data) + len(data))
-        newdata[:boundary] = self._data[:boundary]
-        newdata[boundary:boundary+self.itemsize] = data
-        newdata[boundary+self.itemsize:] = self._data[boundary:]
-        self._data = newdata
-        
-    def pop(self, i=-1):
-        """Return the i-th element and delete it from the array. i defaults to
-        -1."""
-        seqlength = len(self)
-        if i < 0:
-            i += seqlength
-        if not (0 <= i < seqlength):
-            raise IndexError(i)
-        boundary = i * self.itemsize
-        result = unpack_from(self.typecode, self._data, boundary)[0]
-        newdata = bytebuffer(len(self._data) - self.itemsize)
-        newdata[:boundary] = self._data[:boundary]
-        newdata[boundary:] = self._data[boundary+self.itemsize:]
-        self._data = newdata
-        return result
-        
-    def remove(self, x):
-        """Remove the first occurence of x in the array."""
-        self.pop(self.index(x))
-        
-    def reverse(self):
-        """Reverse the order of the items in the array."""
-        lst = self.tolist()
-        lst.reverse()
-        self._clear()
-        self.fromlist(lst)
-
-    ##### list protocol
-    
-    def __len__(self):
-        return len(self._data) // self.itemsize
-    
-    def __add__(self, other):
-        if not isinstance(other, array):
-            raise TypeError("can only append array to array")
-        if self.typecode != other.typecode:
-            raise TypeError("bad argument type for built-in operation")
-        return array(self.typecode, buffer(self._data) + buffer(other._data))
-
-    def __mul__(self, repeat):
-        return array(self.typecode, buffer(self._data) * repeat)
-
-    __rmul__ = __mul__
-
-    def __getitem__(self, i):
-        seqlength = len(self)
-        if isinstance(i, slice):
-            start, stop, step = i.indices(seqlength)
-            if step != 1:
-                sublist = self.tolist()[i]    # fall-back
-                return array(self.typecode, sublist)
-            if start < 0:
-                start = 0
-            if stop < start:
-                stop = start
-            assert stop <= seqlength
-            return array(self.typecode, self._data[start * self.itemsize :
-                                                   stop * self.itemsize])
-        else:
-            if i < 0:
-                i += seqlength
-            if self.typecode == 'c':  # speed trick
-                return self._data[i]
-            if not (0 <= i < seqlength):
-                raise IndexError(i)
-            boundary = i * self.itemsize
-            return unpack_from(self.typecode, self._data, boundary)[0]
-
-    def __getslice__(self, i, j):
-        return self.__getitem__(slice(i, j))
-
-    def __setitem__(self, i, x):
-        if isinstance(i, slice):
-            if (not isinstance(x, array)
-                or self.typecode != x.typecode):
-                raise TypeError("can only assign array of same kind"
-                                " to array slice")
-            seqlength = len(self)
-            start, stop, step = i.indices(seqlength)
-            if step != 1:
-                sublist = self.tolist()    # fall-back
-                sublist[i] = x.tolist()
-                self._clear()
-                self.fromlist(sublist)
-                return
-            if start < 0:
-                start = 0
-            if stop < start:
-                stop = start
-            assert stop <= seqlength
-            boundary1 = start * self.itemsize
-            boundary2 = stop * self.itemsize
-            boundary2new = boundary1 + len(x._data)
-            if boundary2 == boundary2new:
-                self._data[boundary1:boundary2] = x._data
-            else:
-                newdata = bytebuffer(len(self._data) + boundary2new-boundary2)
-                newdata[:boundary1] = self._data[:boundary1]
-                newdata[boundary1:boundary2new] = x._data
-                newdata[boundary2new:] = self._data[boundary2:]
-                self._data = newdata
-        else:
-            seqlength = len(self)
-            if i < 0:
-                i += seqlength
-            if self.typecode == 'c':  # speed trick
-                self._data[i] = x
-                return
-            if not (0 <= i < seqlength):
-                raise IndexError(i)
-            boundary = i * self.itemsize
-            pack_into(self.typecode, self._data, boundary, x)
-
-    def __setslice__(self, i, j, x):
-        self.__setitem__(slice(i, j), x)
-
-    def __delitem__(self, i):
-        if isinstance(i, slice):
-            seqlength = len(self)
-            start, stop, step = i.indices(seqlength)
-            if start < 0:
-                start = 0
-            if stop < start:
-                stop = start
-            assert stop <= seqlength
-            if step != 1:
-                sublist = self.tolist()    # fall-back
-                del sublist[i]
-                self._clear()
-                self.fromlist(sublist)
-                return
-            dellength = stop - start
-            boundary1 = start * self.itemsize
-            boundary2 = stop * self.itemsize
-            newdata = bytebuffer(len(self._data) - (boundary2-boundary1))
-            newdata[:boundary1] = self._data[:boundary1]
-            newdata[boundary1:] = self._data[boundary2:]
-            self._data = newdata
-        else:            
-            seqlength = len(self)
-            if i < 0:
-                i += seqlength
-            if not (0 <= i < seqlength):
-                raise IndexError(i)
-            boundary = i * self.itemsize
-            newdata = bytebuffer(len(self._data) - self.itemsize)
-            newdata[:boundary] = self._data[:boundary]
-            newdata[boundary:] = self._data[boundary+self.itemsize:]
-            self._data = newdata
-
-    def __delslice__(self, i, j):
-        self.__delitem__(slice(i, j))
-
-    def __contains__(self, item):
-        for x in self:
-            if x == item:
-                return True
-        return False
-
-    def __iadd__(self, other):
-        if not isinstance(other, array):
-            raise TypeError("can only extend array with array")
-        self.extend(other)
-        return self
-
-    def __imul__(self, repeat):
-        newdata = buffer(self._data) * repeat
-        self._data = bytebuffer(len(newdata))
-        self._data[:] = newdata
-        return self
-
-    def __iter__(self):
-        p = 0
-        typecode = self.typecode
-        itemsize = self.itemsize
-        while p < len(self._data):
-            yield unpack_from(typecode, self._data, p)[0]
-            p += itemsize
-
-    ##### internal methods
-
-    def _fromiterable(self, iterable):
-        iterable = tuple(iterable)
-        n = len(iterable)
-        boundary = len(self._data)
-        newdata = bytebuffer(boundary + n * self.itemsize)
-        newdata[:boundary] = self._data
-        pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable)
-        self._data = newdata
-
-ArrayType = array
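
The removed array.py kept every array in a single flat byte buffer and used
struct.calcsize/pack/unpack_from to convert items on the fly; PyPy now
implements the array module at interp-level.  A condensed sketch of that
storage technique (a toy class, not the full sequence protocol of the
deleted module):

    from struct import calcsize, pack, unpack_from

    class FlatArray(object):
        """Toy typed array: items live in one contiguous byte string."""

        def __init__(self, typecode, items=()):
            self.typecode = typecode
            self.itemsize = calcsize(typecode)
            self._data = b''
            for x in items:
                self.append(x)

        def append(self, x):
            # pack() turns the Python value into its fixed-size machine form
            self._data += pack(self.typecode, x)

        def __len__(self):
            return len(self._data) // self.itemsize

        def __getitem__(self, i):
            # unpack_from() reads one item straight out of the byte buffer
            return unpack_from(self.typecode, self._data, i * self.itemsize)[0]

    a = FlatArray('i', [10, 20, 30])
    print(len(a))     # 3
    print(a[1])       # 20
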
diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py
deleted file mode 100644
--- a/lib_pypy/binascii.py
+++ /dev/null
@@ -1,720 +0,0 @@
-"""A pure Python implementation of binascii.
-
-Rather slow and buggy in corner cases.
-PyPy provides an RPython version too.
-"""
-
-class Error(Exception):
-    pass
-
-class Done(Exception):
-    pass
-
-class Incomplete(Exception):
-    pass
-
-def a2b_uu(s):
-    if not s:
-        return ''
-    
-    length = (ord(s[0]) - 0x20) % 64
-
-    def quadruplets_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
-            except IndexError:
-                s += '   '
-                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
-                return
-            s = s[4:]
-
-    try:
-        result = [''.join(
-            [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
-            chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
-            chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
-            ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
-    except ValueError:
-        raise Error('Illegal char')
-    result = ''.join(result)
-    trailingdata = result[length:]
-    if trailingdata.strip('\x00'):
-        raise Error('Trailing garbage')
-    result = result[:length]
-    if len(result) < length:
-        result += ((length - len(result)) * '\x00')
-    return result
-
-                               
-def b2a_uu(s):
-    length = len(s)
-    if length > 45:
-        raise Error('At most 45 bytes at once')
-
-    def triples_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-            except IndexError:
-                s += '\0\0'
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-                return
-            s = s[3:]
-
-    result = [''.join(
-        [chr(0x20 + (( A >> 2                    ) & 0x3F)),
-         chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
-         chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
-         chr(0x20 + (( C                         ) & 0x3F))])
-              for A, B, C in triples_gen(s)]
-    return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n'
-
-
-table_a2b_base64 = {
-    'A': 0,
-    'B': 1,
-    'C': 2,
-    'D': 3,
-    'E': 4,
-    'F': 5,
-    'G': 6,
-    'H': 7,
-    'I': 8,
-    'J': 9,
-    'K': 10,
-    'L': 11,
-    'M': 12,
-    'N': 13,
-    'O': 14,
-    'P': 15,
-    'Q': 16,
-    'R': 17,
-    'S': 18,
-    'T': 19,
-    'U': 20,
-    'V': 21,
-    'W': 22,
-    'X': 23,
-    'Y': 24,
-    'Z': 25,
-    'a': 26,
-    'b': 27,
-    'c': 28,
-    'd': 29,
-    'e': 30,
-    'f': 31,
-    'g': 32,
-    'h': 33,
-    'i': 34,
-    'j': 35,
-    'k': 36,
-    'l': 37,
-    'm': 38,
-    'n': 39,
-    'o': 40,
-    'p': 41,
-    'q': 42,
-    'r': 43,
-    's': 44,
-    't': 45,
-    'u': 46,
-    'v': 47,
-    'w': 48,
-    'x': 49,
-    'y': 50,
-    'z': 51,
-    '0': 52,
-    '1': 53,
-    '2': 54,
-    '3': 55,
-    '4': 56,
-    '5': 57,
-    '6': 58,
-    '7': 59,
-    '8': 60,
-    '9': 61,
-    '+': 62,
-    '/': 63,
-    '=': 0,
-}
-
-
-def a2b_base64(s):
-    if not isinstance(s, (str, unicode)):
-        raise TypeError("expected string or unicode, got %r" % (s,))
-    s = s.rstrip()
-    # clean out all invalid characters, this also strips the final '=' padding
-    # check for correct padding
-
-    def next_valid_char(s, pos):
-        for i in range(pos + 1, len(s)):
-            c = s[i]
-            if c < '\x7f':
-                try:
-                    table_a2b_base64[c]
-                    return c
-                except KeyError:
-                    pass
-        return None
-    
-    quad_pos = 0
-    leftbits = 0
-    leftchar = 0
-    res = []
-    for i, c in enumerate(s):
-        if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
-            continue
-        if c == '=':
-            if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
-                continue
-            else:
-                leftbits = 0
-                break
-        try:
-            next_c = table_a2b_base64[c]
-        except KeyError:
-            continue
-        quad_pos = (quad_pos + 1) & 0x03
-        leftchar = (leftchar << 6) | next_c
-        leftbits += 6
-        if leftbits >= 8:
-            leftbits -= 8
-            res.append((leftchar >> leftbits & 0xff))
-            leftchar &= ((1 << leftbits) - 1)
-    if leftbits != 0:
-        raise Error('Incorrect padding')
-    
-    return ''.join([chr(i) for i in res])
-    
-table_b2a_base64 = \
-"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-
-def b2a_base64(s):
-    length = len(s)
-    final_length = length % 3
-
-    def triples_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-            except IndexError:
-                s += '\0\0'
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-                return
-            s = s[3:]
-
-    
-    a = triples_gen(s[ :length - final_length])
-
-    result = [''.join(
-        [table_b2a_base64[( A >> 2                    ) & 0x3F],
-         table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
-         table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
-         table_b2a_base64[( C                         ) & 0x3F]])
-              for A, B, C in a]
-
-    final = s[length - final_length:]
-    if final_length == 0:
-        snippet = ''
-    elif final_length == 1:
-        a = ord(final[0])
-        snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
-                  table_b2a_base64[(a << 4 ) & 0x3F] + '=='
-    else:
-        a = ord(final[0])
-        b = ord(final[1])
-        snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
-                  table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
-                  table_b2a_base64[(b << 2) & 0x3F] + '='
-    return ''.join(result) + snippet + '\n'
-
-def a2b_qp(s, header=False):
-    inp = 0
-    odata = []
-    while inp < len(s):
-        if s[inp] == '=':
-            inp += 1
-            if inp >= len(s):
-                break
-            # Soft line breaks
-            if (s[inp] == '\n') or (s[inp] == '\r'):
-                if s[inp] != '\n':
-                    while inp < len(s) and s[inp] != '\n':
-                        inp += 1
-                if inp < len(s):
-                    inp += 1
-            elif s[inp] == '=':
-                # broken case from broken python qp
-                odata.append('=')
-                inp += 1
-            elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
-                ch = chr(int(s[inp:inp+2], 16))
-                inp += 2
-                odata.append(ch)
-            else:
-                odata.append('=')
-        elif header and s[inp] == '_':
-            odata.append(' ')
-            inp += 1
-        else:
-            odata.append(s[inp])
-            inp += 1
-    return ''.join(odata)
-
-def b2a_qp(data, quotetabs=False, istext=True, header=False):
-    """quotetabs=True means that tab and space characters are always
-       quoted.
-       istext=False means that \r and \n are treated as regular characters
-       header=True encodes space characters with '_' and requires
-       real '_' characters to be quoted.
-    """
-    MAXLINESIZE = 76
-
-    # See if this string is using CRLF line ends
-    lf = data.find('\n')
-    crlf = lf > 0 and data[lf-1] == '\r'
-
-    inp = 0
-    linelen = 0
-    odata = []
-    while inp < len(data):
-        c = data[inp]
-        if (c > '~' or
-            c == '=' or
-            (header and c == '_') or
-            (c == '.' and linelen == 0 and (inp+1 == len(data) or
-                                            data[inp+1] == '\n' or
-                                            data[inp+1] == '\r')) or
-            (not istext and (c == '\r' or c == '\n')) or
-            ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
-            (c <= ' ' and c != '\r' and c != '\n' and
-             (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
-            linelen += 3
-            if linelen >= MAXLINESIZE:
-                odata.append('=')
-                if crlf: odata.append('\r')
-                odata.append('\n')
-                linelen = 3
-            odata.append('=' + two_hex_digits(ord(c)))
-            inp += 1
-        else:
-            if (istext and
-                (c == '\n' or (inp+1 < len(data) and c == '\r' and
-                               data[inp+1] == '\n'))):
-                linelen = 0
-                # Protect against whitespace on end of line
-                if (len(odata) > 0 and
-                    (odata[-1] == ' ' or odata[-1] == '\t')):
-                    ch = ord(odata[-1])
-                    odata[-1] = '='
-                    odata.append(two_hex_digits(ch))
-
-                if crlf: odata.append('\r')
-                odata.append('\n')
-                if c == '\r':
-                    inp += 2
-                else:
-                    inp += 1
-            else:
-                if (inp + 1 < len(data) and
-                    data[inp+1] != '\n' and
-                    (linelen + 1) >= MAXLINESIZE):
-                    odata.append('=')
-                    if crlf: odata.append('\r')
-                    odata.append('\n')
-                    linelen = 0
-
-                linelen += 1
-                if header and c == ' ':
-                    c = '_'
-                odata.append(c)
-                inp += 1
-    return ''.join(odata)
-
-hex_numbers = '0123456789ABCDEF'
-def hex(n):
-    if n == 0:
-        return '0'
-    
-    if n < 0:
-        n = -n
-        sign = '-'
-    else:
-        sign = ''
-    arr = []
-
-    def hex_gen(n):
-        """ Yield a nibble at a time. """
-        while n:
-            yield n % 0x10
-            n = n / 0x10
-
-    for nibble in hex_gen(n):
-        arr = [hex_numbers[nibble]] + arr
-    return sign + ''.join(arr)
-
-def two_hex_digits(n):
-    return hex_numbers[n / 0x10] + hex_numbers[n % 0x10]
-    
-
-def strhex_to_int(s):
-    i = 0
-    for c in s:
-        i = i * 0x10 + hex_numbers.index(c)
-    return i
-
-hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
-
-DONE = 0x7f
-SKIP = 0x7e
-FAIL = 0x7d
-    
-table_a2b_hqx = [
-    #^@    ^A    ^B    ^C    ^D    ^E    ^F    ^G   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #\b    \t    \n    ^K    ^L    \r    ^N    ^O   
-    FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
-    #^P    ^Q    ^R    ^S    ^T    ^U    ^V    ^W   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #^X    ^Y    ^Z    ^[    ^\    ^]    ^^    ^_   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #      !     "     #     $     %     &     '   
-    FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
-    #(     )     *     +     ,     -     .     /   
-    0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
-    #0     1     2     3     4     5     6     7   
-    0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
-    #8     9     :     ;     <     =     >     ?   
-    0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #@     A     B     C     D     E     F     G   
-    0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
-    #H     I     J     K     L     M     N     O   
-    0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
-    #P     Q     R     S     T     U     V     W   
-    0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
-    #X     Y     Z     [     \     ]     ^     _   
-    0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
-    #`     a     b     c     d     e     f     g   
-    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
-    #h     i     j     k     l     m     n     o   
-    0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
-    #p     q     r     s     t     u     v     w   
-    0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #x     y     z     {     |     }     ~    ^?   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-]
-
-def a2b_hqx(s):
-    result = []
-
-    def quadruples_gen(s):
-        t = []
-        for c in s:
-            res = table_a2b_hqx[ord(c)]
-            if res == SKIP:
-                continue
-            elif res == FAIL:
-                raise Error('Illegal character')
-            elif res == DONE:
-                yield t
-                raise Done
-            else:
-                t.append(res)
-            if len(t) == 4:
-                yield t
-                t = []
-        yield t
-        
-    done = 0
-    try:
-        for snippet in quadruples_gen(s):
-            length = len(snippet)
-            if length == 4:
-                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) 
-                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) 
-                result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) 
-            elif length == 3:
-                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) 
-                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) 
-            elif length == 2:
-                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) 
-    except Done:
-        done = 1
-    except Error:
-        raise
-    return (''.join(result), done)
-
-def b2a_hqx(s):
-    result =[]
-
-    def triples_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-            except IndexError:
-                yield tuple([ord(c) for c in s])
-            s = s[3:]
-
-    for snippet in triples_gen(s):
-        length = len(snippet)
-        if length == 3:
-            result.append(
-                hqx_encoding[(snippet[0] & 0xfc) >> 2])
-            result.append(hqx_encoding[
-                ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
-            result.append(hqx_encoding[
-                (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
-            result.append(hqx_encoding[snippet[2] & 0x3f])
-        elif length == 2:
-            result.append(
-                hqx_encoding[(snippet[0] & 0xfc) >> 2])
-            result.append(hqx_encoding[
-                ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
-            result.append(hqx_encoding[
-                (snippet[1] & 0x0f) << 2])
-        elif length == 1:
-            result.append(
-                hqx_encoding[(snippet[0] & 0xfc) >> 2])
-            result.append(hqx_encoding[
-                ((snippet[0] & 0x03) << 4)])
-    return ''.join(result)
-
-crctab_hqx = [
-        0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
-        0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
-        0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
-        0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
-        0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
-        0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
-        0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
-        0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
-        0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
-        0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
-        0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
-        0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
-        0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
-        0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
-        0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
-        0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
-        0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
-        0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
-        0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
-        0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
-        0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
-        0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
-        0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
-        0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
-        0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
-        0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
-        0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
-        0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
-        0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
-        0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
-        0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
-        0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
-]
-
-def crc_hqx(s, crc):
-    for c in s:
-        crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
-
-    return crc
-
-def rlecode_hqx(s):
-    """
-    Run length encoding for binhex4.
-    The CPython implementation does not do run length encoding
-    of \x90 characters. This implementation does.
-    """
-    if not s:
-        return ''
-    result = []
-    prev = s[0]
-    count = 1
-    # Add a dummy character to get the loop to go one extra round.
-    # The dummy must be different from the last character of s.
-    # In the same step we remove the first character, which has
-    # already been stored in prev.
-    if s[-1] == '!':
-        s = s[1:] + '?'
-    else:
-        s = s[1:] + '!'
-        
-    for c in s:
-        if c == prev and count < 255:
-            count += 1
-        else:
-            if count == 1:
-                if prev != '\x90':
-                    result.append(prev)
-                else:
-                    result.extend(['\x90', '\x00'])
-            elif count < 4:
-                if prev != '\x90':
-                    result.extend([prev] * count)
-                else:
-                    result.extend(['\x90', '\x00'] * count)
-            else:
-                if prev != '\x90':
-                    result.extend([prev, '\x90', chr(count)])
-                else:
-                    result.extend(['\x90', '\x00', '\x90', chr(count)]) 
-            count = 1
-            prev = c
-        
-    return ''.join(result)
-
-def rledecode_hqx(s):
-    s = s.split('\x90')
-    result = [s[0]]
-    prev = s[0]
-    for snippet in s[1:]:
-        count = ord(snippet[0])
-        if count > 0:
-            result.append(prev[-1] * (count-1))
-            prev = snippet
-        else:
-            result.append('\x90')
-            prev = '\x90'
-        result.append(snippet[1:])
-
-    return ''.join(result)
-
-crc_32_tab = [
-    0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
-    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
-    0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
-    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
-    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
-    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
-    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
-    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
-    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
-    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
-    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
-    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
-    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
-    0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
-    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
-    0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
-    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
-    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
-    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
-    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
-    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
-    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
-    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
-    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
-    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
-    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
-    0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
-    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
-    0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
-    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
-    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
-    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
-    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
-    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
-    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
-    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
-    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
-    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
-    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
-    0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
-    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
-    0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
-    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
-    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
-    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
-    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
-    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
-    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
-    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
-    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
-    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
-    0x2d02ef8dL
-]
-
-def crc32(s, crc=0):
-    result = 0
-    crc = ~long(crc) & 0xffffffffL
-    for c in s:
-        crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
-        #/* Note:  (crc >> 8) MUST zero fill on left
-
-    result = crc ^ 0xffffffffL
-    
-    if result > 2**31:
-        result = ((result + 2**31) % 2**32) - 2**31
-
-    return result
-
-def b2a_hex(s):
-    result = []
-    for char in s:
-        c = (ord(char) >> 4) & 0xf
-        if c > 9:
-            c = c + ord('a') - 10
-        else:
-            c = c + ord('0')
-        result.append(chr(c))
-        c = ord(char) & 0xf
-        if c > 9:
-            c = c + ord('a') - 10
-        else:
-            c = c + ord('0')
-        result.append(chr(c))
-    return ''.join(result)
-
-hexlify = b2a_hex
-
-table_hex = [
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    0, 1, 2, 3,  4, 5, 6, 7,  8, 9,-1,-1, -1,-1,-1,-1,
-    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
-]
-
-
-def a2b_hex(t):
-    result = []
-
-    def pairs_gen(s):
-        while s:
-            try:
-                yield table_hex[ord(s[0])], table_hex[ord(s[1])]
-            except IndexError:
-                if len(s):
-                    raise TypeError('Odd-length string')
-                return
-            s = s[2:]
-
-    for a, b in pairs_gen(t):
-        if a < 0 or b < 0:
-            raise TypeError('Non-hexadecimal digit found')
-        result.append(chr((a << 4) + b))
-    return ''.join(result)
-    
-
-unhexlify = a2b_hex
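
The removed binascii.py builds each codec byte by byte in pure Python;
b2a_hex, for instance, splits every byte into two nibbles and maps them to
'0'-'9'/'a'-'f'.  A standalone sketch of that hex conversion (PyPy provides
the real binascii at interp-level):

    _hexdigits = '0123456789abcdef'

    def b2a_hex(data):
        # high nibble first, then low nibble: two characters per input byte
        out = []
        for ch in data:
            byte = ord(ch)
            out.append(_hexdigits[(byte >> 4) & 0xF])
            out.append(_hexdigits[byte & 0xF])
        return ''.join(out)

    def a2b_hex(text):
        if len(text) % 2:
            raise TypeError('Odd-length string')
        return ''.join(chr(int(text[i:i + 2], 16))
                       for i in range(0, len(text), 2))

    print(b2a_hex('hi!'))        # 686921
    print(a2b_hex('686921'))     # hi!
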
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -968,8 +968,7 @@
             self._checkOverflow(t.year)
             result = date(t.year, t.month, t.day)
             return result
-        raise TypeError
-        # XXX Should be 'return NotImplemented', but there's a bug in 2.2...
+        return NotImplemented    # note that this doesn't work on CPython 2.2
 
     __radd__ = __add__
 
diff --git a/lib_pypy/numpypy/core/fromnumeric.py b/lib_pypy/numpypy/core/fromnumeric.py
--- a/lib_pypy/numpypy/core/fromnumeric.py
+++ b/lib_pypy/numpypy/core/fromnumeric.py
@@ -411,7 +411,8 @@
             [3, 7]]])
 
     """
-    raise NotImplementedError('Waiting on interp level method')
+    swapaxes = a.swapaxes
+    return swapaxes(axis1, axis2)
 
 
 def transpose(a, axes=None):
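
The fromnumeric.py change above fills in the module-level swapaxes() by
delegating to the array's own method rather than raising NotImplementedError.
The same duck-typed delegation, shown with a stand-in object so it runs
without numpypy:

    def swapaxes(a, axis1, axis2):
        # module-level helper simply forwards to the method, as in the patch
        return a.swapaxes(axis1, axis2)

    class FakeArray(object):
        def __init__(self, shape):
            self.shape = shape
        def swapaxes(self, axis1, axis2):
            shape = list(self.shape)
            shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
            return FakeArray(tuple(shape))

    print(swapaxes(FakeArray((2, 3, 4)), 0, 2).shape)    # (4, 3, 2)
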
diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py
--- a/lib_pypy/numpypy/core/numeric.py
+++ b/lib_pypy/numpypy/core/numeric.py
@@ -306,6 +306,125 @@
     else:
         return multiarray.set_string_function(f, repr)
 
+def array_equal(a1, a2):
+    """
+    True if two arrays have the same shape and elements, False otherwise.
+
+    Parameters
+    ----------
+    a1, a2 : array_like
+        Input arrays.
+
+    Returns
+    -------
+    b : bool
+        Returns True if the arrays are equal.
+
+    See Also
+    --------
+    allclose: Returns True if two arrays are element-wise equal within a
+              tolerance.
+    array_equiv: Returns True if input arrays are shape consistent and all
+                 elements equal.
+
+    Examples
+    --------
+    >>> np.array_equal([1, 2], [1, 2])
+    True
+    >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
+    True
+    >>> np.array_equal([1, 2], [1, 2, 3])
+    False
+    >>> np.array_equal([1, 2], [1, 4])
+    False
+
+    """
+    try:
+        a1, a2 = asarray(a1), asarray(a2)
+    except:
+        return False
+    if a1.shape != a2.shape:
+        return False
+    return bool((a1 == a2).all())
+
+def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False):
+    """
+    Convert the input to an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.  This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('F' for FORTRAN)
+        memory representation.  Defaults to 'C'.
+    maskna : bool or None, optional
+        If this is set to True, it forces the array to have an NA mask.
+        If this is set to False, it forces the array to not have an NA
+        mask.
+    ownmaskna : bool, optional
+        If this is set to True, forces the array to have a mask which
+        it owns.
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`.  No copy is performed if the input
+        is already an ndarray.  If `a` is a subclass of ndarray, a base
+        class ndarray is returned.
+
+    See Also
+    --------
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array:
+
+    >>> a = [1, 2]
+    >>> np.asarray(a)
+    array([1, 2])
+
+    Existing arrays are not copied:
+
+    >>> a = np.array([1, 2])
+    >>> np.asarray(a) is a
+    True
+
+    If `dtype` is set, array is copied only if dtype does not match:
+
+    >>> a = np.array([1, 2], dtype=np.float32)
+    >>> np.asarray(a, dtype=np.float32) is a
+    True
+    >>> np.asarray(a, dtype=np.float64) is a
+    False
+
+    Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+    >>> issubclass(np.matrix, np.ndarray)
+    True
+    >>> a = np.matrix([[1, 2]])
+    >>> np.asarray(a) is a
+    False
+    >>> np.asanyarray(a) is a
+    True
+
+    """
+    return array(a, dtype, copy=False, order=order,
+                            maskna=maskna, ownmaskna=ownmaskna)
+
 set_string_function(array_str, 0)
 set_string_function(array_repr, 1)
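
(Aside, not part of the changeset: the intended semantics of the two helpers added above mirror NumPy's own API, which can serve as a quick reference:)

    import numpy as np

    a = np.array([1, 2, 3])
    assert np.asarray(a) is a                      # no copy for an existing ndarray
    assert np.array_equal([1, 2, 3], a)            # same shape and elements
    assert not np.array_equal([1, 2], [1, 2, 3])   # shape mismatch -> False
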
 
diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py
deleted file mode 100644
--- a/lib_pypy/pypy_test/test_binascii.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from __future__ import absolute_import
-import py
-from lib_pypy import binascii
-
-# Create binary test data
-data = "The quick brown fox jumps over the lazy dog.\r\n"
-# Be slow so we don't depend on other modules
-data += "".join(map(chr, xrange(256)))
-data += "\r\nHello world.\n"
-
-def test_exceptions():
-    # Check module exceptions
-    assert issubclass(binascii.Error, Exception)
-    assert issubclass(binascii.Incomplete, Exception)
-
-def test_functions():
-    # Check presence of all functions
-    funcs = []
-    for suffix in "base64", "hqx", "uu", "hex":
-        prefixes = ["a2b_", "b2a_"]
-        if suffix == "hqx":
-            prefixes.extend(["crc_", "rlecode_", "rledecode_"])
-        for prefix in prefixes:
-            name = prefix + suffix
-            assert callable(getattr(binascii, name))
-            py.test.raises(TypeError, getattr(binascii, name))
-    for name in ("hexlify", "unhexlify"):
-        assert callable(getattr(binascii, name))
-        py.test.raises(TypeError, getattr(binascii, name))
-
-def test_base64valid():
-    # Test base64 with valid data
-    MAX_BASE64 = 57
-    lines = []
-    for i in range(0, len(data), MAX_BASE64):
-        b = data[i:i+MAX_BASE64]
-        a = binascii.b2a_base64(b)
-        lines.append(a)
-    res = ""
-    for line in lines:
-        b = binascii.a2b_base64(line)
-        res = res + b
-    assert res == data
-
-def test_base64invalid():
-    # Test base64 with random invalid characters sprinkled throughout
-    # (This requires a new version of binascii.)
-    MAX_BASE64 = 57
-    lines = []
-    for i in range(0, len(data), MAX_BASE64):
-        b = data[i:i+MAX_BASE64]
-        a = binascii.b2a_base64(b)
-        lines.append(a)
-
-    fillers = ""
-    valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"
-    for i in xrange(256):
-        c = chr(i)
-        if c not in valid:
-            fillers += c
-    def addnoise(line):
-        noise = fillers
-        ratio = len(line) // len(noise)
-        res = ""
-        while line and noise:
-            if len(line) // len(noise) > ratio:
-                c, line = line[0], line[1:]
-            else:
-                c, noise = noise[0], noise[1:]
-            res += c
-        return res + noise + line
-    res = ""
-    for line in map(addnoise, lines):
-        b = binascii.a2b_base64(line)
-        res += b
-    assert res == data
-
-    # Test base64 with just invalid characters, which should return
-    # empty strings. TBD: shouldn't it raise an exception instead ?
-    assert binascii.a2b_base64(fillers) == ''
-
-def test_uu():
-    MAX_UU = 45
-    lines = []
-    for i in range(0, len(data), MAX_UU):
-        b = data[i:i+MAX_UU]
-        a = binascii.b2a_uu(b)
-        lines.append(a)
-    res = ""
-    for line in lines:
-        b = binascii.a2b_uu(line)
-        res += b
-    assert res == data
-
-    assert binascii.a2b_uu("\x7f") == "\x00"*31
-    assert binascii.a2b_uu("\x80") == "\x00"*32
-    assert binascii.a2b_uu("\xff") == "\x00"*31
-    py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00")
-    py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!")
-
-    py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!")
-
-def test_crc32():
-    crc = binascii.crc32("Test the CRC-32 of")
-    crc = binascii.crc32(" this string.", crc)
-    assert crc == 1571220330
-    
-    crc = binascii.crc32('frotz\n', 0)
-    assert crc == -372923920
-
-    py.test.raises(TypeError, binascii.crc32)
-
-def test_hex():
-    # test hexlification
-    s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000'
-    t = binascii.b2a_hex(s)
-    u = binascii.a2b_hex(t)
-    assert s == u
-    py.test.raises(TypeError, binascii.a2b_hex, t[:-1])
-    py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q')
-
-    # Verify the treatment of Unicode strings
-    assert binascii.hexlify(unicode('a', 'ascii')) == '61'
-
-def test_qp():
-    # A test for SF bug 534347 (segfaults without the proper fix)
-    try:
-        binascii.a2b_qp("", **{1:1})
-    except TypeError:
-        pass
-    else:
-        fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError")
-    assert binascii.a2b_qp("= ") == "= "
-    assert binascii.a2b_qp("==") == "="
-    assert binascii.a2b_qp("=AX") == "=AX"
-    py.test.raises(TypeError, binascii.b2a_qp, foo="bar")
-    assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00"
-    assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF"
-    target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF"
-    assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target
-
-def test_empty_string():
-    # A test for SF bug #1022953.  Make sure SystemError is not raised.
-    for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp',
-              'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx',
-              'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu',
-              'rledecode_hqx']:
-        f = getattr(binascii, n)
-        f('')
-    binascii.crc_hqx('', 0)
-
-def test_qp_bug_case():
-    assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy'
-    assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20'
-    assert binascii.b2a_qp('y'*76, False, False) == 'y'*76
-    assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20'
-
-def test_wrong_padding():
-    s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ'
-    py.test.raises(binascii.Error, binascii.a2b_base64, s)
-
-def test_crap_after_padding():
-    s = 'xxx=axxxx'
-    assert binascii.a2b_base64(s) == '\xc7\x1c'
-
-def test_wrong_args():
-    # this should grow as a way longer list
-    py.test.raises(TypeError, binascii.a2b_base64, 42)
diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py
deleted file mode 100644
--- a/lib_pypy/pypy_test/test_locale.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import absolute_import
-import py
-import sys
-
-from lib_pypy.ctypes_config_cache import rebuild
-rebuild.rebuild_one('locale.ctc.py')
-
-from lib_pypy import _locale
-
-
-def setup_module(mod):
-    if sys.platform == 'darwin':
-        py.test.skip("Locale support on MacOSX is minimal and cannot be tested")
-
-class TestLocale:
-    def setup_class(cls):
-        cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC)
-        if sys.platform.startswith("win"):
-            cls.tloc = "en"
-        elif sys.platform.startswith("freebsd"):
-            cls.tloc = "en_US.US-ASCII"
-        else:
-            cls.tloc = "en_US.UTF8"
-        try:
-            _locale.setlocale(_locale.LC_NUMERIC, cls.tloc)
-        except _locale.Error:
-            py.test.skip("test locale %s not supported" % cls.tloc)
-            
-    def teardown_class(cls):
-        _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale)
-
-    def test_format(self):
-        py.test.skip("XXX fix or kill me")
-
-        def testformat(formatstr, value, grouping = 0, output=None):
-            if output:
-                print "%s %% %s =? %s ..." %\
-                      (repr(formatstr), repr(value), repr(output)),
-            else:
-                print "%s %% %s works? ..." % (repr(formatstr), repr(value)),
-            result = locale.format(formatstr, value, grouping = grouping)
-            assert result == output
-
-        testformat("%f", 1024, grouping=1, output='1,024.000000')
-        testformat("%f", 102, grouping=1, output='102.000000')
-        testformat("%f", -42, grouping=1, output='-42.000000')
-        testformat("%+f", -42, grouping=1, output='-42.000000')
-        testformat("%20.f", -42, grouping=1, output='                 -42')
-        testformat("%+10.f", -4200, grouping=1, output='    -4,200')
-        testformat("%-10.f", 4200, grouping=1, output='4,200     ')
-
-    def test_getpreferredencoding(self):
-        py.test.skip("XXX fix or kill me")
-        # Invoke getpreferredencoding to make sure it does not cause exceptions
-        _locale.getpreferredencoding()
-
-    # Test BSD Rune locale's bug for isctype functions.
-    def test_bsd_bug(self):
-        def teststrop(s, method, output):
-            print "%s.%s() =? %s ..." % (repr(s), method, repr(output)),
-            result = getattr(s, method)()
-            assert result == output
-
-        oldlocale = _locale.setlocale(_locale.LC_CTYPE)
-        _locale.setlocale(_locale.LC_CTYPE, self.tloc)
-        try:
-            teststrop('\x20', 'isspace', True)
-            teststrop('\xa0', 'isspace', False)
-            teststrop('\xa1', 'isspace', False)
-            teststrop('\xc0', 'isalpha', False)
-            teststrop('\xc0', 'isalnum', False)
-            teststrop('\xc0', 'isupper', False)
-            teststrop('\xc0', 'islower', False)
-            teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc'])
-            teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0')
-            teststrop('\xcc\x85', 'lower', '\xcc\x85')
-            teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0')
-        finally:
-            _locale.setlocale(_locale.LC_CTYPE, oldlocale)
diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py
deleted file mode 100644
--- a/lib_pypy/pypy_test/test_struct_extra.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from __future__ import absolute_import
-from lib_pypy import struct 
-
-def test_simple():
-    morezeros = '\x00' * (struct.calcsize('l')-4)
-    assert struct.pack('<l', 16) == '\x10\x00\x00\x00' + morezeros
-    assert struct.pack('4s', 'WAVE') == 'WAVE'
-    assert struct.pack('<4sl', 'WAVE', 16) == 'WAVE\x10\x00\x00\x00' + morezeros
-    s = 'ABCD01234567\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00'
-    assert struct.unpack('<4s4H2lH', s) == ('ABCD', 0x3130, 0x3332, 0x3534,
-                                            0x3736, 1, 2, 3)
-
-def test_infinity():
-    INFINITY = 1e200 * 1e200
-    assert str(struct.unpack("!d", struct.pack("!d", INFINITY))[0]) \
-           == str(INFINITY)
-    assert str(struct.unpack("!d", struct.pack("!d", -INFINITY))[0]) \
-           == str(-INFINITY)
-
-def test_nan():
-    INFINITY = 1e200 * 1e200
-    NAN = INFINITY / INFINITY
-    assert str(struct.unpack("!d", '\xff\xf8\x00\x00\x00\x00\x00\x00')[0]) \
-           == str(NAN)
-    assert str(struct.unpack("!d", struct.pack("!d", NAN))[0]) == str(NAN)
diff --git a/lib_pypy/struct.py b/lib_pypy/struct.py
deleted file mode 100644
--- a/lib_pypy/struct.py
+++ /dev/null
@@ -1,417 +0,0 @@
-#
-# This module is a pure Python version of pypy.module.struct.
-# It is only imported if the vastly faster pypy.module.struct is not
-# compiled in.  For now we keep this version for reference and
-# because pypy.module.struct is not ootype-backend-friendly yet.
-#
-
-"""Functions to convert between Python values and C structs.
-Python strings are used to hold the data representing the C struct
-and also as format strings to describe the layout of data in the C struct.
-
-The optional first format char indicates byte order, size and alignment:
- @: native order, size & alignment (default)
- =: native order, std. size & alignment
- <: little-endian, std. size & alignment
- >: big-endian, std. size & alignment
- !: same as >
-
-The remaining chars indicate types of args and must match exactly;
-these can be preceded by a decimal repeat count:
-   x: pad byte (no data);
-   c:char;
-   b:signed byte;
-   B:unsigned byte;
-   h:short;
-   H:unsigned short;
-   i:int;
-   I:unsigned int;
-   l:long;
-   L:unsigned long;
-   f:float;
-   d:double.
-Special cases (preceding decimal count indicates length):
-   s:string (array of char); p: pascal string (with count byte).
-Special case (only available in native format):
-   P:an integer type that is wide enough to hold a pointer.
-Special case (not in native mode unless 'long long' in platform C):
-   q:long long;
-   Q:unsigned long long
-Whitespace between formats is ignored.
-
-The variable struct.error is an exception raised on errors."""
-
-import math, sys
-
-# TODO: XXX Find a way to get information on native sizes and alignments
-class StructError(Exception):
-    pass
-error = StructError
-def unpack_int(data,index,size,le):
-    bytes = [ord(b) for b in data[index:index+size]]
-    if le == 'little':
-        bytes.reverse()
-    number = 0L
-    for b in bytes:
-        number = number << 8 | b
-    return int(number)
-
-def unpack_signed_int(data,index,size,le):
-    number = unpack_int(data,index,size,le)
-    max = 2**(size*8)
-    if number > 2**(size*8 - 1) - 1:
-        number = int(-1*(max - number))
-    return number
-
-INFINITY = 1e200 * 1e200
-NAN = INFINITY / INFINITY
-
-def unpack_char(data,index,size,le):
-    return data[index:index+size]
-
-def pack_int(number,size,le):
-    x=number
-    res=[]
-    for i in range(size):
-        res.append(chr(x&0xff))
-        x >>= 8
-    if le == 'big':
-        res.reverse()
-    return ''.join(res)
-
-def pack_signed_int(number,size,le):
-    if not isinstance(number, (int,long)):
-        raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer"
-    if  number > 2**(8*size-1)-1 or number < -1*2**(8*size-1):
-        raise OverflowError,"Number:%i too large to convert" % number
-    return pack_int(number,size,le)
-
-def pack_unsigned_int(number,size,le):
-    if not isinstance(number, (int,long)):
-        raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer"
-    if number < 0:
-        raise TypeError,"can't convert negative long to unsigned"
-    if number > 2**(8*size)-1:
-        raise OverflowError,"Number:%i too large to convert" % number
-    return pack_int(number,size,le)
-
-def pack_char(char,size,le):
-    return str(char)
-
-def isinf(x):
-    return x != 0.0 and x / 2 == x
-def isnan(v):
-    return v != v*1.0 or (v == 1.0 and v == 2.0)
-
-def pack_float(x, size, le):
-    unsigned = float_pack(x, size)
-    result = []
-    for i in range(8):
-        result.append(chr((unsigned >> (i * 8)) & 0xFF))
-    if le == "big":
-        result.reverse()
-    return ''.join(result)
-
-def unpack_float(data, index, size, le):
-    binary = [data[i] for i in range(index, index + 8)]
-    if le == "big":
-        binary.reverse()
-    unsigned = 0
-    for i in range(8):
-        unsigned |= ord(binary[i]) << (i * 8)
-    return float_unpack(unsigned, size, le)
-
-def round_to_nearest(x):
-    """Python 3 style round:  round a float x to the nearest int, but
-    unlike the builtin Python 2.x round function:
-
-      - return an int, not a float
-      - do round-half-to-even, not round-half-away-from-zero.
-
-    We assume that x is finite and nonnegative; except wrong results
-    if you use this for negative x.
-
-    """
-    int_part = int(x)
-    frac_part = x - int_part
-    if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1:
-        int_part += 1
-    return int_part
-
-def float_unpack(Q, size, le):
-    """Convert a 32-bit or 64-bit integer created
-    by float_pack into a Python float."""
-
-    if size == 8:
-        MIN_EXP = -1021  # = sys.float_info.min_exp
-        MAX_EXP = 1024   # = sys.float_info.max_exp
-        MANT_DIG = 53    # = sys.float_info.mant_dig
-        BITS = 64
-    elif size == 4:
-        MIN_EXP = -125   # C's FLT_MIN_EXP
-        MAX_EXP = 128    # FLT_MAX_EXP
-        MANT_DIG = 24    # FLT_MANT_DIG
-        BITS = 32
-    else:
-        raise ValueError("invalid size value")
-
-    if Q >> BITS:
-         raise ValueError("input out of range")
-
-    # extract pieces
-    sign = Q >> BITS - 1
-    exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1
-    mant = Q & ((1 << MANT_DIG - 1) - 1)
-
-    if exp == MAX_EXP - MIN_EXP + 2:
-        # nan or infinity
-        result = float('nan') if mant else float('inf')
-    elif exp == 0:
-        # subnormal or zero
-        result = math.ldexp(float(mant), MIN_EXP - MANT_DIG)
-    else:
-        # normal
-        mant += 1 << MANT_DIG - 1
-        result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1)
-    return -result if sign else result
-
-
-def float_pack(x, size):
-    """Convert a Python float x into a 64-bit unsigned integer
-    with the same byte representation."""
-
-    if size == 8:
-        MIN_EXP = -1021  # = sys.float_info.min_exp
-        MAX_EXP = 1024   # = sys.float_info.max_exp
-        MANT_DIG = 53    # = sys.float_info.mant_dig
-        BITS = 64
-    elif size == 4:
-        MIN_EXP = -125   # C's FLT_MIN_EXP
-        MAX_EXP = 128    # FLT_MAX_EXP
-        MANT_DIG = 24    # FLT_MANT_DIG
-        BITS = 32
-    else:
-        raise ValueError("invalid size value")
-
-    sign = math.copysign(1.0, x) < 0.0
-    if math.isinf(x):
-        mant = 0
-        exp = MAX_EXP - MIN_EXP + 2
-    elif math.isnan(x):
-        mant = 1 << (MANT_DIG-2) # other values possible
-        exp = MAX_EXP - MIN_EXP + 2
-    elif x == 0.0:
-        mant = 0
-        exp = 0
-    else:
-        m, e = math.frexp(abs(x))  # abs(x) == m * 2**e
-        exp = e - (MIN_EXP - 1)
-        if exp > 0:
-            # Normal case.
-            mant = round_to_nearest(m * (1 << MANT_DIG))
-            mant -= 1 << MANT_DIG - 1
-        else:
-            # Subnormal case.
-            if exp + MANT_DIG - 1 >= 0:
-                mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1))
-            else:
-                mant = 0
-            exp = 0
-
-        # Special case: rounding produced a MANT_DIG-bit mantissa.
-        assert 0 <= mant <= 1 << MANT_DIG - 1
-        if mant == 1 << MANT_DIG - 1:
-            mant = 0
-            exp += 1
-
-        # Raise on overflow (in some circumstances, may want to return
-        # infinity instead).
-        if exp >= MAX_EXP - MIN_EXP + 2:
-             raise OverflowError("float too large to pack in this format")
-
-    # check constraints
-    assert 0 <= mant < 1 << MANT_DIG - 1
-    assert 0 <= exp <= MAX_EXP - MIN_EXP + 2
-    assert 0 <= sign <= 1
-    return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant
-
-
-big_endian_format = {
-    'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None},
-    'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char},
-    's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None},
-    'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None},
-    'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'f':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float},
-    'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float},
-    }
-default = big_endian_format
-formatmode={ '<' : (default, 'little'),
-             '>' : (default, 'big'),
-             '!' : (default, 'big'),
-             '=' : (default, sys.byteorder),
-             '@' : (default, sys.byteorder)
-            }
-
-def getmode(fmt):
-    try:
-        formatdef,endianness = formatmode[fmt[0]]
-        index = 1
-    except KeyError:
-        formatdef,endianness = formatmode['@']
-        index = 0
-    return formatdef,endianness,index
-def getNum(fmt,i):
-    num=None
-    cur = fmt[i]
-    while ('0'<= cur ) and ( cur <= '9'):
-        if num == None:
-            num = int(cur)
-        else:
-            num = 10*num + int(cur)
-        i += 1
-        cur = fmt[i]
-    return num,i
-
-def calcsize(fmt):
-    """calcsize(fmt) -> int
-    Return size of C struct described by format string fmt.
-    See struct.__doc__ for more on format strings."""
-
-    formatdef,endianness,i = getmode(fmt)
-    num = 0
-    result = 0
-    while i<len(fmt):
-        num,i = getNum(fmt,i)
-        cur = fmt[i]
-        try:
-            format = formatdef[cur]
-        except KeyError:
-            raise StructError,"%s is not a valid format"%cur
-        if num != None :
-            result += num*format['size']
-        else:
-            result += format['size']
-        num = 0
-        i += 1
-    return result
-
-def pack(fmt,*args):
-    """pack(fmt, v1, v2, ...) -> string
-       Return string containing values v1, v2, ... packed according to fmt.
-       See struct.__doc__ for more on format strings."""
-    formatdef,endianness,i = getmode(fmt)
-    args = list(args)
-    n_args = len(args)
-    result = []
-    while i<len(fmt):
-        num,i = getNum(fmt,i)
-        cur = fmt[i]
-        try:
-            format = formatdef[cur]
-        except KeyError:
-            raise StructError,"%s is not a valid format"%cur
-        if num == None :
-            num_s = 0
-            num = 1
-        else:
-            num_s = num
-
-        if cur == 'x':
-            result += ['\0'*num]
-        elif cur == 's':
-            if isinstance(args[0], str):
-                padding = num - len(args[0])
-                result += [args[0][:num] + '\0'*padding]
-                args.pop(0)
-            else:
-                raise StructError,"arg for string format not a string"
-        elif cur == 'p':
-            if isinstance(args[0], str):
-                padding = num - len(args[0]) - 1
-
-                if padding > 0:
-                    result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding]
-                else:
-                    if num<255:
-                        result += [chr(num-1) + args[0][:num-1]]
-                    else:
-                        result += [chr(255) + args[0][:num-1]]
-                args.pop(0)
-            else:
-                raise StructError,"arg for string format not a string"
-
-        else:
-            if len(args) < num:
-                raise StructError,"insufficient arguments to pack"
-            for var in args[:num]:
-                result += [format['pack'](var,format['size'],endianness)]
-            args=args[num:]
-        num = None
-        i += 1
-    if len(args) != 0:
-        raise StructError,"too many arguments for pack format"
-    return ''.join(result)
-
-def unpack(fmt,data):
-    """unpack(fmt, string) -> (v1, v2, ...)
-       Unpack the string, containing packed C structure data, according
-       to fmt.  Requires len(string)==calcsize(fmt).
-       See struct.__doc__ for more on format strings."""
-    formatdef,endianness,i = getmode(fmt)
-    j = 0
-    num = 0
-    result = []
-    length= calcsize(fmt)
-    if length != len (data):
-        raise StructError,"unpack str size does not match format"
-    while i<len(fmt):
-        num,i=getNum(fmt,i)
-        cur = fmt[i]
-        i += 1
-        try:
-            format = formatdef[cur]
-        except KeyError:
-            raise StructError,"%s is not a valid format"%cur
-
-        if not num :
-            num = 1
-
-        if cur == 'x':
-            j += num
-        elif cur == 's':
-            result.append(data[j:j+num])
-            j += num
-        elif cur == 'p':
-            n=ord(data[j])
-            if n >= num:
-                n = num-1
-            result.append(data[j+1:j+n+1])
-            j += num
-        else:
-            for n in range(num):
-                result += [format['unpack'](data,j,format['size'],endianness)]
-                j += format['size']
-
-    return tuple(result)
-
-def pack_into(fmt, buf, offset, *args):
-    data = pack(fmt, *args)
-    buffer(buf)[offset:offset+len(data)] = data
-
-def unpack_from(fmt, buf, offset=0):
-    size = calcsize(fmt)
-    data = buffer(buf)[offset:offset+size]
-    if len(data) != size:
-        raise error("unpack_from requires a buffer of at least %d bytes"
-                    % (size,))
-    return unpack(fmt, data)
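
(Aside, not part of the changeset: the removed pure-Python struct duplicated the built-in module, and the calls exercised by the deleted tests behave the same against it, e.g. on Python 2:)

    import struct

    assert struct.pack('<l', 16) == '\x10\x00\x00\x00'
    assert struct.unpack('<4sl', struct.pack('<4sl', 'WAVE', 16)) == ('WAVE', 16)
    assert struct.calcsize('!d') == 8
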
diff --git a/py/__init__.py b/py/__init__.py
--- a/py/__init__.py
+++ b/py/__init__.py
@@ -8,7 +8,7 @@
 
 (c) Holger Krekel and others, 2004-2010
 """
-__version__ = '1.4.4.dev1'
+__version__ = '1.4.7'
 
 from py import _apipkg
 
@@ -70,6 +70,11 @@
         'getrawcode'        : '._code.code:getrawcode',
         'patch_builtins'    : '._code.code:patch_builtins',
         'unpatch_builtins'  : '._code.code:unpatch_builtins',
+        '_AssertionError'   : '._code.assertion:AssertionError',
+        '_reinterpret_old'  : '._code.assertion:reinterpret_old',
+        '_reinterpret'      : '._code.assertion:reinterpret',
+        '_reprcompare'      : '._code.assertion:_reprcompare',
+        '_format_explanation' : '._code.assertion:_format_explanation',
     },
 
     # backports and additions of builtins
diff --git a/py/_builtin.py b/py/_builtin.py
--- a/py/_builtin.py
+++ b/py/_builtin.py
@@ -113,9 +113,12 @@
 
     # some backward compatibility helpers
     _basestring = str
-    def _totext(obj, encoding=None):
+    def _totext(obj, encoding=None, errors=None):
         if isinstance(obj, bytes):
-            obj = obj.decode(encoding)
+            if errors is None:
+                obj = obj.decode(encoding)
+            else:
+                obj = obj.decode(encoding, errors)
         elif not isinstance(obj, str):
             obj = str(obj)
         return obj
@@ -142,7 +145,7 @@
             del back
         elif locs is None:
             locs = globs
-        fp = open(fn, "rb")
+        fp = open(fn, "r")
         try:
             source = fp.read()
         finally:
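
(Aside, not part of the changeset: the new errors argument of _totext just forwards to bytes.decode, which is what makes lossy decoding possible. A minimal Python 2 sketch of the underlying behaviour:)

    data = "caf\xc3\xa9 \xff"                  # valid UTF-8 followed by a stray byte
    text = data.decode("utf-8", "replace")     # what errors="replace" enables
    assert u"\ufffd" in text                   # bad byte replaced instead of raising
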
diff --git a/py/_code/_assertionnew.py b/py/_code/_assertionnew.py
new file mode 100644
--- /dev/null
+++ b/py/_code/_assertionnew.py
@@ -0,0 +1,339 @@
+"""
+Find intermediate evaluation results in assert statements through builtin AST.
+This should replace _assertionold.py eventually.
+"""
+
+import sys
+import ast
+
+import py
+from py._code.assertion import _format_explanation, BuiltinAssertionError
+
+
+if sys.platform.startswith("java") and sys.version_info < (2, 5, 2):
+    # See http://bugs.jython.org/issue1497
+    _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+              "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+              "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+              "List", "Tuple")
+    _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+              "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+              "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+              "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+    _expr_nodes = set(getattr(ast, name) for name in _exprs)
+    _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+    def _is_ast_expr(node):
+        return node.__class__ in _expr_nodes
+    def _is_ast_stmt(node):
+        return node.__class__ in _stmt_nodes
+else:
+    def _is_ast_expr(node):
+        return isinstance(node, ast.expr)
+    def _is_ast_stmt(node):
+        return isinstance(node, ast.stmt)
+
+
+class Failure(Exception):
+    """Error found while interpreting AST."""
+
+    def __init__(self, explanation=""):
+        self.cause = sys.exc_info()
+        self.explanation = explanation
+
+
+def interpret(source, frame, should_fail=False):
+    mod = ast.parse(source)
+    visitor = DebugInterpreter(frame)
+    try:
+        visitor.visit(mod)
+    except Failure:
+        failure = sys.exc_info()[1]
+        return getfailure(failure)
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --no-assert)")
+
+def run(offending_line, frame=None):
+    if frame is None:
+        frame = py.code.Frame(sys._getframe(1))
+    return interpret(offending_line, frame)
+
+def getfailure(failure):
+    explanation = _format_explanation(failure.explanation)
+    value = failure.cause[1]
+    if str(value):
+        lines = explanation.splitlines()
+        if not lines:
+            lines.append("")
+        lines[0] += " << %s" % (value,)
+        explanation = "\n".join(lines)
+    text = "%s: %s" % (failure.cause[0].__name__, explanation)
+    if text.startswith("AssertionError: assert "):
+        text = text[16:]
+    return text
+
+
+operator_map = {
+    ast.BitOr : "|",
+    ast.BitXor : "^",
+    ast.BitAnd : "&",
+    ast.LShift : "<<",
+    ast.RShift : ">>",
+    ast.Add : "+",
+    ast.Sub : "-",
+    ast.Mult : "*",
+    ast.Div : "/",
+    ast.FloorDiv : "//",
+    ast.Mod : "%",
+    ast.Eq : "==",
+    ast.NotEq : "!=",
+    ast.Lt : "<",
+    ast.LtE : "<=",
+    ast.Gt : ">",
+    ast.GtE : ">=",
+    ast.Pow : "**",
+    ast.Is : "is",
+    ast.IsNot : "is not",
+    ast.In : "in",
+    ast.NotIn : "not in"
+}
+
+unary_map = {
+    ast.Not : "not %s",
+    ast.Invert : "~%s",
+    ast.USub : "-%s",
+    ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+    """Interpret AST nodes to glean useful debugging information."""
+
+    def __init__(self, frame):
+        self.frame = frame
+
+    def generic_visit(self, node):
+        # Fallback when we don't have a special implementation.
+        if _is_ast_expr(node):
+            mod = ast.Expression(node)
+            co = self._compile(mod)
+            try:
+                result = self.frame.eval(co)
+            except Exception:
+                raise Failure()
+            explanation = self.frame.repr(result)
+            return explanation, result
+        elif _is_ast_stmt(node):
+            mod = ast.Module([node])
+            co = self._compile(mod, "exec")
+            try:
+                self.frame.exec_(co)
+            except Exception:
+                raise Failure()
+            return None, None
+        else:
+            raise AssertionError("can't handle %s" %(node,))
+
+    def _compile(self, source, mode="eval"):
+        return compile(source, "<assertion interpretation>", mode)
+
+    def visit_Expr(self, expr):
+        return self.visit(expr.value)
+
+    def visit_Module(self, mod):
+        for stmt in mod.body:
+            self.visit(stmt)
+
+    def visit_Name(self, name):
+        explanation, result = self.generic_visit(name)
+        # See if the name is local.
+        source = "%r in locals() is not globals()" % (name.id,)
+        co = self._compile(source)
+        try:
+            local = self.frame.eval(co)
+        except Exception:
+            # have to assume it isn't
+            local = False
+        if not local:
+            return name.id, result
+        return explanation, result
+
+    def visit_Compare(self, comp):
+        left = comp.left
+        left_explanation, left_result = self.visit(left)
+        for op, next_op in zip(comp.ops, comp.comparators):
+            next_explanation, next_result = self.visit(next_op)
+            op_symbol = operator_map[op.__class__]
+            explanation = "%s %s %s" % (left_explanation, op_symbol,
+                                        next_explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+            co = self._compile(source)
+            try:
+                result = self.frame.eval(co, __exprinfo_left=left_result,
+                                         __exprinfo_right=next_result)
+            except Exception:
+                raise Failure(explanation)
+            try:
+                if not result:
+                    break
+            except KeyboardInterrupt:
+                raise
+            except:
+                break
+            left_explanation, left_result = next_explanation, next_result
+
+        rcomp = py.code._reprcompare
+        if rcomp:
+            res = rcomp(op_symbol, left_result, next_result)
+            if res:
+                explanation = res
+        return explanation, result
+
+    def visit_BoolOp(self, boolop):
+        is_or = isinstance(boolop.op, ast.Or)
+        explanations = []
+        for operand in boolop.values:
+            explanation, result = self.visit(operand)
+            explanations.append(explanation)
+            if result == is_or:
+                break
+        name = is_or and " or " or " and "
+        explanation = "(" + name.join(explanations) + ")"
+        return explanation, result
+
+    def visit_UnaryOp(self, unary):
+        pattern = unary_map[unary.op.__class__]
+        operand_explanation, operand_result = self.visit(unary.operand)
+        explanation = pattern % (operand_explanation,)
+        co = self._compile(pattern % ("__exprinfo_expr",))
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=operand_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_BinOp(self, binop):
+        left_explanation, left_result = self.visit(binop.left)
+        right_explanation, right_result = self.visit(binop.right)
+        symbol = operator_map[binop.op.__class__]
+        explanation = "(%s %s %s)" % (left_explanation, symbol,
+                                      right_explanation)
+        source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_left=left_result,
+                                     __exprinfo_right=right_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, result
+
+    def visit_Call(self, call):
+        func_explanation, func = self.visit(call.func)
+        arg_explanations = []
+        ns = {"__exprinfo_func" : func}
+        arguments = []
+        for arg in call.args:
+            arg_explanation, arg_result = self.visit(arg)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            arguments.append(arg_name)
+            arg_explanations.append(arg_explanation)
+        for keyword in call.keywords:
+            arg_explanation, arg_result = self.visit(keyword.value)
+            arg_name = "__exprinfo_%s" % (len(ns),)
+            ns[arg_name] = arg_result
+            keyword_source = "%s=%%s" % (keyword.arg)
+            arguments.append(keyword_source % (arg_name,))
+            arg_explanations.append(keyword_source % (arg_explanation,))
+        if call.starargs:
+            arg_explanation, arg_result = self.visit(call.starargs)
+            arg_name = "__exprinfo_star"
+            ns[arg_name] = arg_result
+            arguments.append("*%s" % (arg_name,))
+            arg_explanations.append("*%s" % (arg_explanation,))
+        if call.kwargs:
+            arg_explanation, arg_result = self.visit(call.kwargs)
+            arg_name = "__exprinfo_kwds"
+            ns[arg_name] = arg_result
+            arguments.append("**%s" % (arg_name,))
+            arg_explanations.append("**%s" % (arg_explanation,))
+        args_explained = ", ".join(arg_explanations)
+        explanation = "%s(%s)" % (func_explanation, args_explained)
+        args = ", ".join(arguments)
+        source = "__exprinfo_func(%s)" % (args,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, **ns)
+        except Exception:
+            raise Failure(explanation)
+        pattern = "%s\n{%s = %s\n}"
+        rep = self.frame.repr(result)
+        explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def _is_builtin_name(self, name):
+        pattern = "%r not in globals() and %r not in locals()"
+        source = pattern % (name.id, name.id)
+        co = self._compile(source)
+        try:
+            return self.frame.eval(co)
+        except Exception:
+            return False
+
+    def visit_Attribute(self, attr):
+        if not isinstance(attr.ctx, ast.Load):
+            return self.generic_visit(attr)
+        source_explanation, source_result = self.visit(attr.value)
+        explanation = "%s.%s" % (source_explanation, attr.attr)
+        source = "__exprinfo_expr.%s" % (attr.attr,)
+        co = self._compile(source)
+        try:
+            result = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            raise Failure(explanation)
+        explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+                                              self.frame.repr(result),
+                                              source_explanation, attr.attr)
+        # Check if the attr is from an instance.
+        source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+        source = source % (attr.attr,)
+        co = self._compile(source)
+        try:
+            from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+        except Exception:
+            from_instance = True
+        if from_instance:
+            rep = self.frame.repr(result)
+            pattern = "%s\n{%s = %s\n}"
+            explanation = pattern % (rep, rep, explanation)
+        return explanation, result
+
+    def visit_Assert(self, assrt):
+        test_explanation, test_result = self.visit(assrt.test)
+        if test_explanation.startswith("False\n{False =") and \
+                test_explanation.endswith("\n"):
+            test_explanation = test_explanation[15:-2]
+        explanation = "assert %s" % (test_explanation,)
+        if not test_result:
+            try:
+                raise BuiltinAssertionError
+            except Exception:
+                raise Failure(explanation)
+        return explanation, test_result
+
+    def visit_Assign(self, assign):
+        value_explanation, value_result = self.visit(assign.value)
+        explanation = "... = %s" % (value_explanation,)
+        name = ast.Name("__exprinfo_expr", ast.Load(),
+                        lineno=assign.value.lineno,
+                        col_offset=assign.value.col_offset)
+        new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+                                col_offset=assign.col_offset)
+        mod = ast.Module([new_assign])
+        co = self._compile(mod, "exec")
+        try:
+            self.frame.exec_(co, __exprinfo_expr=value_result)
+        except Exception:
+            raise Failure(explanation)
+        return explanation, value_result
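
(Aside, not part of the changeset: a rough sketch of driving the reinterpretation by hand with the run() helper defined above, assuming the vendored py library is importable as py._code._assertionnew:)

    import sys
    import py
    from py._code import _assertionnew

    x, y = 1, 2
    frame = py.code.Frame(sys._getframe())
    print _assertionnew.run("assert x + 1 == y * 3", frame)
    # prints something like:  assert (x + 1) == (y * 3)
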
diff --git a/py/_code/_assertionold.py b/py/_code/_assertionold.py
new file mode 100644
--- /dev/null
+++ b/py/_code/_assertionold.py
@@ -0,0 +1,555 @@
+import py
+import sys, inspect
+from compiler import parse, ast, pycodegen
+from py._code.assertion import BuiltinAssertionError, _format_explanation
+
+passthroughex = py.builtin._sysex
+
+class Failure:
+    def __init__(self, node):
+        self.exc, self.value, self.tb = sys.exc_info()
+        self.node = node
+
+class View(object):
+    """View base class.
+
+    If C is a subclass of View, then C(x) creates a proxy object around
+    the object x.  The actual class of the proxy is not C in general,
+    but a *subclass* of C determined by the rules below.  To avoid confusion
+    we call view class the class of the proxy (a subclass of C, so of View)
+    and object class the class of x.
+
+    Attributes and methods not found in the proxy are automatically read on x.
+    Other operations like setting attributes are performed on the proxy, as
+    determined by its view class.  The object x is available from the proxy
+    as its __obj__ attribute.
+
+    The view class selection is determined by the __view__ tuples and the
+    optional __viewkey__ method.  By default, the selected view class is the
+    most specific subclass of C whose __view__ mentions the class of x.
+    If no such subclass is found, the search proceeds with the parent
+    object classes.  For example, C(True) will first look for a subclass
+    of C with __view__ = (..., bool, ...) and only if it doesn't find any
+    look for one with __view__ = (..., int, ...), and then ..., object,...
+    If everything fails the class C itself is considered to be the default.
+
+    Alternatively, the view class selection can be driven by another aspect
+    of the object x, instead of the class of x, by overriding __viewkey__.
+    See last example at the end of this module.
+    """
+
+    _viewcache = {}
+    __view__ = ()
+
+    def __new__(rootclass, obj, *args, **kwds):
+        self = object.__new__(rootclass)
+        self.__obj__ = obj
+        self.__rootclass__ = rootclass
+        key = self.__viewkey__()
+        try:
+            self.__class__ = self._viewcache[key]
+        except KeyError:
+            self.__class__ = self._selectsubclass(key)
+        return self
+
+    def __getattr__(self, attr):
+        # attributes not found in the normal hierarchy rooted on View
+        # are looked up in the object's real class
+        return getattr(self.__obj__, attr)
+
+    def __viewkey__(self):
+        return self.__obj__.__class__
+
+    def __matchkey__(self, key, subclasses):
+        if inspect.isclass(key):
+            keys = inspect.getmro(key)
+        else:
+            keys = [key]
+        for key in keys:
+            result = [C for C in subclasses if key in C.__view__]
+            if result:
+                return result
+        return []
+
+    def _selectsubclass(self, key):
+        subclasses = list(enumsubclasses(self.__rootclass__))
+        for C in subclasses:
+            if not isinstance(C.__view__, tuple):
+                C.__view__ = (C.__view__,)
+        choices = self.__matchkey__(key, subclasses)
+        if not choices:
+            return self.__rootclass__
+        elif len(choices) == 1:
+            return choices[0]
+        else:
+            # combine the multiple choices
+            return type('?', tuple(choices), {})
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__rootclass__.__name__, self.__obj__)
+
+
+def enumsubclasses(cls):
+    for subcls in cls.__subclasses__():
+        for subsubclass in enumsubclasses(subcls):
+            yield subsubclass
+    yield cls
+
+
+class Interpretable(View):
+    """A parse tree node with a few extra methods."""
+    explanation = None
+
+    def is_builtin(self, frame):
+        return False
+
+    def eval(self, frame):
+        # fall-back for unknown expression nodes
+        try:
+            expr = ast.Expression(self.__obj__)
+            expr.filename = '<eval>'
+            self.__obj__.filename = '<eval>'
+            co = pycodegen.ExpressionCodeGenerator(expr).getCode()
+            result = frame.eval(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.result = result
+        self.explanation = self.explanation or frame.repr(self.result)
+
+    def run(self, frame):
+        # fall-back for unknown statement nodes
+        try:
+            expr = ast.Module(None, ast.Stmt([self.__obj__]))
+            expr.filename = '<run>'
+            co = pycodegen.ModuleCodeGenerator(expr).getCode()
+            frame.exec_(co)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+    def nice_explanation(self):
+        return _format_explanation(self.explanation)
+
+
+class Name(Interpretable):
+    __view__ = ast.Name
+
+    def is_local(self, frame):
+        source = '%r in locals() is not globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_global(self, frame):
+        source = '%r in globals()' % self.name
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def is_builtin(self, frame):
+        source = '%r not in locals() and %r not in globals()' % (
+            self.name, self.name)
+        try:
+            return frame.is_true(frame.eval(source))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        super(Name, self).eval(frame)
+        if not self.is_local(frame):
+            self.explanation = self.name
+
+class Compare(Interpretable):
+    __view__ = ast.Compare
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        for operation, expr2 in self.ops:
+            if hasattr(self, 'result'):
+                # shortcutting in chained expressions
+                if not frame.is_true(self.result):
+                    break
+            expr2 = Interpretable(expr2)
+            expr2.eval(frame)
+            self.explanation = "%s %s %s" % (
+                expr.explanation, operation, expr2.explanation)
+            source = "__exprinfo_left %s __exprinfo_right" % operation
+            try:
+                self.result = frame.eval(source,
+                                         __exprinfo_left=expr.result,
+                                         __exprinfo_right=expr2.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+            expr = expr2
+
+class And(Interpretable):
+    __view__ = ast.And
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if not frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' and '.join(explanations) + ')'
+
+class Or(Interpretable):
+    __view__ = ast.Or
+
+    def eval(self, frame):
+        explanations = []
+        for expr in self.nodes:
+            expr = Interpretable(expr)
+            expr.eval(frame)
+            explanations.append(expr.explanation)
+            self.result = expr.result
+            if frame.is_true(expr.result):
+                break
+        self.explanation = '(' + ' or '.join(explanations) + ')'
+
+
+# == Unary operations ==
+keepalive = []
+for astclass, astpattern in {
+    ast.Not    : 'not __exprinfo_expr',
+    ast.Invert : '(~__exprinfo_expr)',
+    }.items():
+
+    class UnaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            expr = Interpretable(self.expr)
+            expr.eval(frame)
+            self.explanation = astpattern.replace('__exprinfo_expr',
+                                                  expr.explanation)
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_expr=expr.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(UnaryArith)
+
+# == Binary operations ==
+for astclass, astpattern in {
+    ast.Add    : '(__exprinfo_left + __exprinfo_right)',
+    ast.Sub    : '(__exprinfo_left - __exprinfo_right)',
+    ast.Mul    : '(__exprinfo_left * __exprinfo_right)',
+    ast.Div    : '(__exprinfo_left / __exprinfo_right)',
+    ast.Mod    : '(__exprinfo_left % __exprinfo_right)',
+    ast.Power  : '(__exprinfo_left ** __exprinfo_right)',
+    }.items():
+
+    class BinaryArith(Interpretable):
+        __view__ = astclass
+
+        def eval(self, frame, astpattern=astpattern):
+            left = Interpretable(self.left)
+            left.eval(frame)
+            right = Interpretable(self.right)
+            right.eval(frame)
+            self.explanation = (astpattern
+                                .replace('__exprinfo_left',  left .explanation)
+                                .replace('__exprinfo_right', right.explanation))
+            try:
+                self.result = frame.eval(astpattern,
+                                         __exprinfo_left=left.result,
+                                         __exprinfo_right=right.result)
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+    keepalive.append(BinaryArith)
+
+
+class CallFunc(Interpretable):
+    __view__ = ast.CallFunc
+
+    def is_bool(self, frame):
+        source = 'isinstance(__exprinfo_value, bool)'
+        try:
+            return frame.is_true(frame.eval(source,
+                                            __exprinfo_value=self.result))
+        except passthroughex:
+            raise
+        except:
+            return False
+
+    def eval(self, frame):
+        node = Interpretable(self.node)
+        node.eval(frame)
+        explanations = []
+        vars = {'__exprinfo_fn': node.result}
+        source = '__exprinfo_fn('
+        for a in self.args:
+            if isinstance(a, ast.Keyword):
+                keyword = a.name
+                a = a.expr
+            else:
+                keyword = None
+            a = Interpretable(a)
+            a.eval(frame)
+            argname = '__exprinfo_%d' % len(vars)
+            vars[argname] = a.result
+            if keyword is None:
+                source += argname + ','
+                explanations.append(a.explanation)
+            else:
+                source += '%s=%s,' % (keyword, argname)
+                explanations.append('%s=%s' % (keyword, a.explanation))
+        if self.star_args:
+            star_args = Interpretable(self.star_args)
+            star_args.eval(frame)
+            argname = '__exprinfo_star'
+            vars[argname] = star_args.result
+            source += '*' + argname + ','
+            explanations.append('*' + star_args.explanation)
+        if self.dstar_args:
+            dstar_args = Interpretable(self.dstar_args)
+            dstar_args.eval(frame)
+            argname = '__exprinfo_kwds'
+            vars[argname] = dstar_args.result
+            source += '**' + argname + ','
+            explanations.append('**' + dstar_args.explanation)
+        self.explanation = "%s(%s)" % (
+            node.explanation, ', '.join(explanations))
+        if source.endswith(','):
+            source = source[:-1]
+        source += ')'
+        try:
+            self.result = frame.eval(source, **vars)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        if not node.is_builtin(frame) or not self.is_bool(frame):
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+class Getattr(Interpretable):
+    __view__ = ast.Getattr
+
+    def eval(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        source = '__exprinfo_expr.%s' % self.attrname
+        try:
+            self.result = frame.eval(source, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+        self.explanation = '%s.%s' % (expr.explanation, self.attrname)
+        # if the attribute comes from the instance, its value is interesting
+        source = ('hasattr(__exprinfo_expr, "__dict__") and '
+                  '%r in __exprinfo_expr.__dict__' % self.attrname)
+        try:
+            from_instance = frame.is_true(
+                frame.eval(source, __exprinfo_expr=expr.result))
+        except passthroughex:
+            raise
+        except:
+            from_instance = True
+        if from_instance:
+            r = frame.repr(self.result)
+            self.explanation = '%s\n{%s = %s\n}' % (r, r, self.explanation)
+
+# == Re-interpretation of full statements ==
+
+class Assert(Interpretable):
+    __view__ = ast.Assert
+
+    def run(self, frame):
+        test = Interpretable(self.test)
+        test.eval(frame)
+        # simplify 'assert False where False = ...'
+        if (test.explanation.startswith('False\n{False = ') and
+            test.explanation.endswith('\n}')):
+            test.explanation = test.explanation[15:-2]
+        # print the result as  'assert <explanation>'
+        self.result = test.result
+        self.explanation = 'assert ' + test.explanation
+        if not frame.is_true(test.result):
+            try:
+                raise BuiltinAssertionError
+            except passthroughex:
+                raise
+            except:
+                raise Failure(self)
+
+class Assign(Interpretable):
+    __view__ = ast.Assign
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = '... = ' + expr.explanation
+        # fall-back-run the rest of the assignment
+        ass = ast.Assign(self.nodes, ast.Name('__exprinfo_expr'))
+        mod = ast.Module(None, ast.Stmt([ass]))
+        mod.filename = '<run>'
+        co = pycodegen.ModuleCodeGenerator(mod).getCode()
+        try:
+            frame.exec_(co, __exprinfo_expr=expr.result)
+        except passthroughex:
+            raise
+        except:
+            raise Failure(self)
+
+class Discard(Interpretable):
+    __view__ = ast.Discard
+
+    def run(self, frame):
+        expr = Interpretable(self.expr)
+        expr.eval(frame)
+        self.result = expr.result
+        self.explanation = expr.explanation
+
+class Stmt(Interpretable):
+    __view__ = ast.Stmt
+
+    def run(self, frame):
+        for stmt in self.nodes:
+            stmt = Interpretable(stmt)
+            stmt.run(frame)
+
+
+def report_failure(e):
+    explanation = e.node.nice_explanation()
+    if explanation:
+        explanation = ", in: " + explanation
+    else:
+        explanation = ""
+    sys.stdout.write("%s: %s%s\n" % (e.exc.__name__, e.value, explanation))
+
+def check(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    expr = parse(s, 'eval')
+    assert isinstance(expr, ast.Expression)
+    node = Interpretable(expr.node)
+    try:
+        node.eval(frame)
+    except passthroughex:
+        raise
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+    else:
+        if not frame.is_true(node.result):
+            sys.stderr.write("assertion failed: %s\n" % node.nice_explanation())
+
+
+###########################################################
+# API / Entry points
+# #########################################################
+
+def interpret(source, frame, should_fail=False):
+    module = Interpretable(parse(source, 'exec').node)
+    #print "got module", module
+    if isinstance(frame, py.std.types.FrameType):
+        frame = py.code.Frame(frame)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        return getfailure(e)
+    except passthroughex:
+        raise
+    except:
+        import traceback
+        traceback.print_exc()
+    if should_fail:
+        return ("(assertion failed, but when it was re-run for "
+                "printing intermediate values, it did not fail.  Suggestions: "
+                "compute assert expression before the assert or use --nomagic)")
+    else:
+        return None
+
+def getmsg(excinfo):
+    if isinstance(excinfo, tuple):
+        excinfo = py.code.ExceptionInfo(excinfo)
+    #frame, line = gettbline(tb)
+    #frame = py.code.Frame(frame)
+    #return interpret(line, frame)
+
+    tb = excinfo.traceback[-1]
+    source = str(tb.statement).strip()
+    x = interpret(source, tb.frame, should_fail=True)
+    if not isinstance(x, str):
+        raise TypeError("interpret returned non-string %r" % (x,))
+    return x
+
+def getfailure(e):
+    explanation = e.node.nice_explanation()
+    if str(e.value):
+        lines = explanation.split('\n')
+        lines[0] += "  << %s" % (e.value,)
+        explanation = '\n'.join(lines)
+    text = "%s: %s" % (e.exc.__name__, explanation)
+    if text.startswith('AssertionError: assert '):
+        text = text[16:]
+    return text
+
+def run(s, frame=None):
+    if frame is None:
+        frame = sys._getframe(1)
+        frame = py.code.Frame(frame)
+    module = Interpretable(parse(s, 'exec').node)
+    try:
+        module.run(frame)
+    except Failure:
+        e = sys.exc_info()[1]
+        report_failure(e)
+
+
+if __name__ == '__main__':
+    # example:
+    def f():
+        return 5
+    def g():
+        return 3
+    def h(x):
+        return 'never'
+    check("f() * g() == 5")
+    check("not f()")
+    check("not (f() and g() or 0)")
+    check("f() == g()")
+    i = 4
+    check("i == f()")
+    check("len(f()) == 0")
+    check("isinstance(2+3+4, float)")
+
+    run("x = i")
+    check("x == 5")
+
+    run("assert not f(), 'oops'")
+    run("a, b, c = 1, 2")
+    run("a, b, c = f()")
+
+    check("max([f(),g()]) == 4")
+    check("'hello'[g()] == 'h'")
+    run("'guk%d' % h(f())")
diff --git a/py/_code/assertion.py b/py/_code/assertion.py
new file mode 100644
--- /dev/null
+++ b/py/_code/assertion.py
@@ -0,0 +1,94 @@
+import sys
+import py
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+
+_reprcompare = None # if set, will be called by assert reinterp for comparison ops
+
+def _format_explanation(explanation):
+    """This formats an explanation
+
+    Normally all embedded newlines are escaped; however, there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended
+    to cover nested explanations; see the function and attribute
+    explanations for examples (.visit_Call(), visit_Attribute()).  The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    raw_lines = (explanation or '').split('\n')
+    # escape newlines not followed by {, } and ~
+    lines = [raw_lines[0]]
+    for l in raw_lines[1:]:
+        if l.startswith('{') or l.startswith('}') or l.startswith('~'):
+            lines.append(l)
+        else:
+            lines[-1] += '\\n' + l
+
+    result = lines[:1]
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith('{'):
+            if stackcnt[-1]:
+                s = 'and   '
+            else:
+                s = 'where '
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(' +' + '  '*(len(stack)-1) + s + line[1:])
+        elif line.startswith('}'):
+            assert line.startswith('}')
+            stack.pop()
+            stackcnt.pop()
+            result[stack[-1]] += line[1:]
+        else:
+            assert line.startswith('~')
+            result.append('  '*len(stack) + line[1:])
+    assert len(stack) == 1
+    return '\n'.join(result)
+
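(Illustration, not part of the patch: a minimal sketch of what
_format_explanation() produces for a nested explanation, assuming the new
module is importable as py._code.assertion.  The input mirrors what the
reinterpreter builds when f() returned 5 but 4 was expected.)

    from py._code.assertion import _format_explanation

    explanation = "assert 5 == 4\n{5 = f()\n}"
    print(_format_explanation(explanation))
    # assert 5 == 4
    #  +  where 5 = f()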
+
+class AssertionError(BuiltinAssertionError):
+    def __init__(self, *args):
+        BuiltinAssertionError.__init__(self, *args)
+        if args:
+            try:
+                self.msg = str(args[0])
+            except py.builtin._sysex:
+                raise
+            except:
+                self.msg = "<[broken __repr__] %s at %0xd>" %(
+                    args[0].__class__, id(args[0]))
+        else:
+            f = py.code.Frame(sys._getframe(1))
+            try:
+                source = f.code.fullsource
+                if source is not None:
+                    try:
+                        source = source.getstatement(f.lineno, assertion=True)
+                    except IndexError:
+                        source = None
+                    else:
+                        source = str(source.deindent()).strip()
+            except py.error.ENOENT:
+                source = None
+                # this can also occur during reinterpretation, when the
+                # co_filename is set to "<run>".
+            if source:
+                self.msg = reinterpret(source, f, should_fail=True)
+            else:
+                self.msg = "<could not determine information>"
+            if not self.args:
+                self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+    AssertionError.__module__ = "builtins"
+    reinterpret_old = "old reinterpretation not available for py3"
+else:
+    from py._code._assertionold import interpret as reinterpret_old
+if sys.version_info >= (2, 6) or (sys.platform.startswith("java")):
+    from py._code._assertionnew import interpret as reinterpret
+else:
+    reinterpret = reinterpret_old
+
diff --git a/py/_code/code.py b/py/_code/code.py
--- a/py/_code/code.py
+++ b/py/_code/code.py
@@ -145,6 +145,17 @@
         return self.frame.f_locals
     locals = property(getlocals, None, None, "locals of underlaying frame")
 
+    def reinterpret(self):
+        """Reinterpret the failing statement and returns a detailed information
+           about what operations are performed."""
+        if self.exprinfo is None:
+            source = str(self.statement).strip()
+            x = py.code._reinterpret(source, self.frame, should_fail=True)
+            if not isinstance(x, str):
+                raise TypeError("interpret returned non-string %r" % (x,))
+            self.exprinfo = x
+        return self.exprinfo
+
     def getfirstlinesource(self):
         # on Jython this firstlineno can be -1 apparently
         return max(self.frame.code.firstlineno, 0)
@@ -158,13 +169,12 @@
         end = self.lineno
         try:
             _, end = source.getstatementrange(end)
-        except IndexError:
+        except (IndexError, ValueError):
             end = self.lineno + 1
         # heuristic to stop displaying source on e.g.
         #   if something:  # assume this causes a NameError
         #      # _this_ lines and the one
                #        below we don't want from entry.getsource()
-        end = min(end, len(source))
         for i in range(self.lineno, end):
             if source[i].rstrip().endswith(':'):
                 end = i + 1
@@ -273,7 +283,11 @@
         """
         cache = {}
         for i, entry in enumerate(self):
-            key = entry.frame.code.path, entry.lineno
+            # id for the code.raw is needed to work around
+            # the strange metaprogramming in the decorator lib from pypi
+            # which generates code objects that have hash/value equality
+            #XXX needs a test
+            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
             #print "checking for recursion at", key
             l = cache.setdefault(key, [])
             if l:
@@ -308,7 +322,7 @@
                     self._striptext = 'AssertionError: '
         self._excinfo = tup
         self.type, self.value, tb = self._excinfo
-        self.typename = getattr(self.type, "__name__", "???")
+        self.typename = self.type.__name__
         self.traceback = py.code.Traceback(tb)
 
     def __repr__(self):
@@ -347,14 +361,16 @@
             showlocals: show locals per traceback entry
             style: long|short|no|native traceback style
             tbfilter: hide entries (where __tracebackhide__ is true)
+
+            in case of style==native, tbfilter and showlocals is ignored.
         """
         if style == 'native':
-            import traceback
-            return ''.join(traceback.format_exception(
-                self.type,
-                self.value,
-                self.traceback[0]._rawentry,
-                ))
+            return ReprExceptionInfo(ReprTracebackNative(
+                py.std.traceback.format_exception(
+                    self.type,
+                    self.value,
+                    self.traceback[0]._rawentry,
+                )), self._getreprcrash())
 
         fmt = FormattedExcinfo(showlocals=showlocals, style=style,
             abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
@@ -452,7 +468,7 @@
     def repr_locals(self, locals):
         if self.showlocals:
             lines = []
-            keys = list(locals)
+            keys = [loc for loc in locals if loc[0] != "@"]
             keys.sort()
             for name in keys:
                 value = locals[name]
@@ -506,7 +522,10 @@
 
     def _makepath(self, path):
         if not self.abspath:
-            np = py.path.local().bestrelpath(path)
+            try:
+                np = py.path.local().bestrelpath(path)
+            except OSError:
+                return path
             if len(np) < len(str(path)):
                 path = np
         return path
@@ -595,6 +614,19 @@
         if self.extraline:
             tw.line(self.extraline)
 
+class ReprTracebackNative(ReprTraceback):
+    def __init__(self, tblines):
+        self.style = "native"
+        self.reprentries = [ReprEntryNative(tblines)]
+        self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+    def __init__(self, tblines):
+        self.lines = tblines
+
+    def toterminal(self, tw):
+        tw.write("".join(self.lines))
+
 class ReprEntry(TerminalRepr):
     localssep = "_ "
 
@@ -680,19 +712,26 @@
 
 oldbuiltins = {}
 
-def patch_builtins(compile=True):
-    """ put compile builtins to Python's builtins. """
+def patch_builtins(assertion=True, compile=True):
+    """ put compile and AssertionError builtins to Python's builtins. """
+    if assertion:
+        from py._code import assertion
+        l = oldbuiltins.setdefault('AssertionError', [])
+        l.append(py.builtin.builtins.AssertionError)
+        py.builtin.builtins.AssertionError = assertion.AssertionError
     if compile:
         l = oldbuiltins.setdefault('compile', [])
         l.append(py.builtin.builtins.compile)
         py.builtin.builtins.compile = py.code.compile
 
-def unpatch_builtins(compile=True):
+def unpatch_builtins(assertion=True, compile=True):
     """ remove compile and AssertionError builtins from Python builtins. """
+    if assertion:
+        py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
     if compile:
         py.builtin.builtins.compile = oldbuiltins['compile'].pop()
 
-def getrawcode(obj):
+def getrawcode(obj, trycall=True):
     """ return code object for given function. """
     try:
         return obj.__code__
@@ -701,5 +740,10 @@
         obj = getattr(obj, 'func_code', obj)
         obj = getattr(obj, 'f_code', obj)
         obj = getattr(obj, '__code__', obj)
+        if trycall and not hasattr(obj, 'co_firstlineno'):
+            if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+                x = getrawcode(obj.__call__, trycall=False)
+                if hasattr(x, 'co_firstlineno'):
+                    return x
         return obj
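(Rough usage sketch, not part of the patch; it assumes patch_builtins() is
exported as py.code.patch_builtins and that this runs from a plain script so
the source of the failing statement can be found.  The patched AssertionError
carries a reinterpreted explanation in its .msg attribute.)

    import sys
    import py

    py.code.patch_builtins()        # install py's AssertionError (and compile)
    try:
        x = 3
        assert x == 4
    except AssertionError:
        e = sys.exc_info()[1]
        print(e.msg)                # e.g. "assert 3 == 4"
    py.code.unpatch_builtins()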
 
diff --git a/py/_code/source.py b/py/_code/source.py
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -108,6 +108,7 @@
     def getstatementrange(self, lineno, assertion=False):
         """ return (start, end) tuple which spans the minimal
             statement region containing the given lineno.
+            raise an IndexError if no such statement range can be found.
         """
         # XXX there must be a better than these heuristic ways ...
         # XXX there may even be better heuristics :-)
@@ -116,6 +117,7 @@
 
         # 1. find the start of the statement
         from codeop import compile_command
+        end = None
         for start in range(lineno, -1, -1):
             if assertion:
                 line = self.lines[start]
@@ -139,7 +141,9 @@
                 trysource = self[start:end]
                 if trysource.isparseable():
                     return start, end
-        return start, len(self)
+        if end is None:
+            raise IndexError("no valid source range around line %d " % (lineno,))
+        return start, end
 
     def getblockend(self, lineno):
         # XXX
@@ -257,23 +261,29 @@
 
 
 def getfslineno(obj):
+    """ Return source location (path, lineno) for the given object.
+    If the source cannot be determined, return ("", -1).
+    """
     try:
         code = py.code.Code(obj)
     except TypeError:
-        # fallback to
-        fn = (py.std.inspect.getsourcefile(obj) or
-              py.std.inspect.getfile(obj))
+        try:
+            fn = (py.std.inspect.getsourcefile(obj) or
+                  py.std.inspect.getfile(obj))
+        except TypeError:
+            return "", -1
+
         fspath = fn and py.path.local(fn) or None
+        lineno = -1
         if fspath:
             try:
                 _, lineno = findsource(obj)
             except IOError:
-                lineno = None
-        else:
-            lineno = None
+                pass
     else:
         fspath = code.path
         lineno = code.firstlineno
+    assert isinstance(lineno, int)
     return fspath, lineno
 
 #
@@ -286,7 +296,7 @@
     except py.builtin._sysex:
         raise
     except:
-        return None, None
+        return None, -1
     source = Source()
     source.lines = [line.rstrip() for line in sourcelines]
     return source, lineno
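(Small sketch, not part of the patch, of the new getfslineno() contract,
assuming the helper is used directly from py._code.source.)

    from py._code.source import getfslineno

    def f():
        pass

    path, lineno = getfslineno(f)
    print(path, lineno)        # defining file and 0-based first line
    print(getfslineno(42))     # ("", -1): no source can be determined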
diff --git a/py/_error.py b/py/_error.py
--- a/py/_error.py
+++ b/py/_error.py
@@ -23,6 +23,7 @@
     2: errno.ENOENT,
     3: errno.ENOENT,
     17: errno.EEXIST,
+    13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable
     22: errno.ENOTDIR,
     267: errno.ENOTDIR,
     5: errno.EACCES,  # anything better?
diff --git a/py/_iniconfig.py b/py/_iniconfig.py
--- a/py/_iniconfig.py
+++ b/py/_iniconfig.py
@@ -103,6 +103,7 @@
     def _parseline(self, line, lineno):
         # comments
         line = line.split('#')[0].rstrip()
+        line = line.split(';')[0].rstrip()
         # blank lines
         if not line:
             return None, None
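(Illustration, not part of the patch, assuming the IniConfig(path, data=...)
constructor: with this change both '#' and ';' start a comment.)

    from py._iniconfig import IniConfig

    ini = IniConfig("example.ini", data="[section]\nname = value ; comment\n")
    assert ini["section"]["name"] == "value"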
diff --git a/py/_io/capture.py b/py/_io/capture.py
--- a/py/_io/capture.py
+++ b/py/_io/capture.py
@@ -12,7 +12,7 @@
     class TextIO(StringIO):
         def write(self, data):
             if not isinstance(data, unicode):
-                data = unicode(data, getattr(self, '_encoding', 'UTF-8'))
+                data = unicode(data, getattr(self, '_encoding', 'UTF-8'), 'replace')
             StringIO.write(self, data)
 else:
     TextIO = StringIO
@@ -258,6 +258,9 @@
                 f = getattr(self, name).tmpfile
                 f.seek(0)
                 res = f.read()
+                enc = getattr(f, 'encoding', None)
+                if enc:
+                    res = py.builtin._totext(res, enc, 'replace')
                 f.truncate(0)
                 f.seek(0)
             l.append(res)
diff --git a/py/_io/terminalwriter.py b/py/_io/terminalwriter.py
--- a/py/_io/terminalwriter.py
+++ b/py/_io/terminalwriter.py
@@ -105,6 +105,8 @@
                      Blue=44, Purple=45, Cyan=46, White=47,
                      bold=1, light=2, blink=5, invert=7)
 
+    _newline = None   # the last line printed
+
     # XXX deprecate stringio argument
     def __init__(self, file=None, stringio=False, encoding=None):
         if file is None:
@@ -112,11 +114,9 @@
                 self.stringio = file = py.io.TextIO()
             else:
                 file = py.std.sys.stdout
-                if hasattr(file, 'encoding'):
-                    encoding = file.encoding
         elif hasattr(file, '__call__'):
             file = WriteFile(file, encoding=encoding)
-        self.encoding = encoding
+        self.encoding = encoding or getattr(file, 'encoding', "utf-8")
         self._file = file
         self.fullwidth = get_terminal_width()
         self.hasmarkup = should_do_markup(file)
@@ -182,8 +182,31 @@
         return s
 
     def line(self, s='', **kw):
+        if self._newline == False:
+            self.write("\n")
         self.write(s, **kw)
         self.write('\n')
+        self._newline = True
+
+    def reline(self, line, **opts):
+        if not self.hasmarkup:
+            raise ValueError("cannot use rewrite-line without terminal")
+        if not self._newline:
+            self.write("\r")
+        self.write(line, **opts)
+        # see if we need to fill up some spaces at the end
+        # xxx have a more exact lastlinelen working from self.write?
+        lenline = len(line)
+        try:
+            lastlen = self._lastlinelen
+        except AttributeError:
+            pass
+        else:
+            if lenline < lastlen:
+                self.write(" " * (lastlen - lenline + 1))
+        self._lastlinelen = lenline
+        self._newline = False
+
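(Quick sketch, not part of the patch, of how the new reline() is meant to be
used; it needs a markup-capable terminal, otherwise it raises ValueError.)

    import py

    tw = py.io.TerminalWriter()
    for i in range(1, 11):
        tw.reline("collecting item %d of 10" % i)   # rewrite the same line
    tw.line()                                       # finish with a newline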
 
 class Win32ConsoleWriter(TerminalWriter):
     def write(self, s, **kw):
@@ -280,10 +303,10 @@
     SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
     SetConsoleTextAttribute.argtypes = [wintypes.HANDLE, wintypes.WORD]
     SetConsoleTextAttribute.restype = wintypes.BOOL
-        
+
     _GetConsoleScreenBufferInfo = \
         ctypes.windll.kernel32.GetConsoleScreenBufferInfo
-    _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE, 
+    _GetConsoleScreenBufferInfo.argtypes = [wintypes.HANDLE,
                                 ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO)]
     _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
     def GetConsoleInfo(handle):
diff --git a/py/_path/common.py b/py/_path/common.py
--- a/py/_path/common.py
+++ b/py/_path/common.py
@@ -64,7 +64,10 @@
                 else:
                     if bool(value) ^ bool(meth()) ^ invert:
                         return False
-            except (py.error.ENOENT, py.error.ENOTDIR):
+            except (py.error.ENOENT, py.error.ENOTDIR, py.error.EBUSY):
+                # EBUSY does not feel entirely correct,
+                # but it is kind of necessary since ENOMEDIUM
+                # is not accessible in Python
                 for name in self._depend_on_existence:
                     if name in kw:
                         if kw.get(name):
@@ -368,6 +371,5 @@
         else:
             name = str(path) # path.strpath # XXX svn?
             pattern = '*' + path.sep + pattern
-        from fnmatch import fnmatch
-        return fnmatch(name, pattern)
+        return py.std.fnmatch.fnmatch(name, pattern)
 
diff --git a/py/_path/local.py b/py/_path/local.py
--- a/py/_path/local.py
+++ b/py/_path/local.py
@@ -157,14 +157,16 @@
         return str(self) < str(other)
 
     def samefile(self, other):
-        """ return True if 'other' references the same file as 'self'. """
-        if not iswin32:
-            return py.error.checked_call(
-                    os.path.samefile, str(self), str(other))
+        """ return True if 'other' references the same file as 'self'.
+        """
+        if not isinstance(other, py.path.local):
+            other = os.path.abspath(str(other))
         if self == other:
             return True
-        other = os.path.abspath(str(other))
-        return self == other
+        if iswin32:
+            return False # there is no samefile on win32
+        return py.error.checked_call(
+                os.path.samefile, str(self), str(other))
 
     def remove(self, rec=1, ignore_errors=False):
         """ remove a file or directory (or a directory tree if rec=1).
@@ -539,7 +541,11 @@
                 if self.basename != "__init__.py":
                     modfile = modfile[:-12]
 
-            if not self.samefile(modfile):
+            try:
+                issame = self.samefile(modfile)
+            except py.error.ENOENT:
+                issame = False
+            if not issame:
                 raise self.ImportMismatchError(modname, modfile, self)
             return mod
         else:
diff --git a/py/_path/svnurl.py b/py/_path/svnurl.py
--- a/py/_path/svnurl.py
+++ b/py/_path/svnurl.py
@@ -233,6 +233,8 @@
                 e = sys.exc_info()[1]
                 if e.err.find('non-existent in that revision') != -1:
                     raise py.error.ENOENT(self, e.err)
+                elif e.err.find("E200009:") != -1:
+                    raise py.error.ENOENT(self, e.err)
                 elif e.err.find('File not found') != -1:
                     raise py.error.ENOENT(self, e.err)
                 elif e.err.find('not part of a repository')!=-1:
diff --git a/py/_path/svnwc.py b/py/_path/svnwc.py
--- a/py/_path/svnwc.py
+++ b/py/_path/svnwc.py
@@ -482,10 +482,13 @@
         except py.process.cmdexec.Error:
             e = sys.exc_info()[1]
             strerr = e.err.lower()
-            if strerr.find('file not found') != -1:
+            if strerr.find('not found') != -1:
+                raise py.error.ENOENT(self)
+            elif strerr.find("E200009:") != -1:
                 raise py.error.ENOENT(self)
             if (strerr.find('file exists') != -1 or
                 strerr.find('file already exists') != -1 or
+                strerr.find('w150002:') != -1 or
                 strerr.find("can't create directory") != -1):
                 raise py.error.EEXIST(self)
             raise
@@ -593,7 +596,7 @@
         out = self._authsvn('lock').strip()
         if not out:
             # warning or error, raise exception
-            raise Exception(out[4:])
+            raise ValueError("unknown error in svn lock command")
 
     def unlock(self):
         """ unset a previously set lock """
@@ -1066,6 +1069,8 @@
                 modrev = '?'
                 author = '?'
                 date = ''
+            elif itemstatus == "replaced":
+                pass
             else:
                 #print entryel.toxml()
                 commitel = entryel.getElementsByTagName('commit')[0]
@@ -1148,7 +1153,11 @@
             raise  ValueError("Not a versioned resource")
             #raise ValueError, "Not a versioned resource %r" % path
         self.kind = d['nodekind'] == 'directory' and 'dir' or d['nodekind']
-        self.rev = int(d['revision'])
+        try:
+            self.rev = int(d['revision'])
+        except KeyError:
+            self.rev = None
+
         self.path = py.path.local(d['path'])
         self.size = self.path.size()
         if 'lastchangedrev' in d:
diff --git a/py/_xmlgen.py b/py/_xmlgen.py
--- a/py/_xmlgen.py
+++ b/py/_xmlgen.py
@@ -52,7 +52,7 @@
     def unicode(self, indent=2):
         l = []
         SimpleUnicodeVisitor(l.append, indent).visit(self)
-        return "".join(l)
+        return u("").join(l)
 
     def __repr__(self):
         name = self.__class__.__name__
@@ -122,11 +122,13 @@
                 if visitmethod is not None:
                     break
             else:
-                visitmethod = self.object
+                visitmethod = self.__object
             self.cache[cls] = visitmethod
         visitmethod(node)
 
-    def object(self, obj):
+    # the default fallback handler is marked private
+    # to avoid clashes with the tag name object
+    def __object(self, obj):
         #self.write(obj)
         self.write(escape(unicode(obj)))
 
@@ -136,7 +138,8 @@
     def list(self, obj):
         assert id(obj) not in self.visited
         self.visited[id(obj)] = 1
-        map(self.visit, obj)
+        for elem in obj:
+            self.visit(elem)
 
     def Tag(self, tag):
         assert id(tag) not in self.visited
@@ -181,7 +184,11 @@
             value = getattr(attrs, name)
             if name.endswith('_'):
                 name = name[:-1]
-            return ' %s="%s"' % (name, escape(unicode(value)))
+            if isinstance(value, raw):
+                insert = value.uniobj
+            else:
+                insert = escape(unicode(value))
+            return ' %s="%s"' % (name, insert)
 
     def getstyle(self, tag):
         """ return attribute list suitable for styling. """
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -483,7 +483,7 @@
                 return a_str.strip(' ')
             elif n == 1:
                 return a_str.rstrip(' ')
-            else: 
+            else:
                 return a_str.lstrip(' ')
         s = a.build_types(f, [int, annmodel.SomeString(no_nul=True)])
         assert s.no_nul
@@ -3737,6 +3737,25 @@
         s = a.build_types(f, [int])
         assert s.listdef.listitem.range_step == 0
 
+    def test_specialize_arg_memo(self):
+        @objectmodel.specialize.memo()
+        def g(n):
+            return n
+        @objectmodel.specialize.arg(0)
+        def f(i):
+            return g(i)
+        def main(i):
+            if i == 2:
+                return f(2)
+            elif i == 3:
+                return f(3)
+            else:
+                raise NotImplementedError
+
+        a = self.RPythonAnnotator()
+        s = a.build_types(main, [int])
+        assert isinstance(s, annmodel.SomeInteger)
+
 
 def g(n):
     return [0,1,2,n]
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -158,6 +158,12 @@
 .. __: http://morepypy.blogspot.com/2008/02/python-finalizers-semantics-part-1.html
 .. __: http://morepypy.blogspot.com/2008/02/python-finalizers-semantics-part-2.html
 
+Note that this difference might show up indirectly in some cases.  For
+example, a generator left pending in the middle is --- again ---
+garbage-collected later in PyPy than in CPython.  You can see the
+difference if the ``yield`` keyword it is suspended at is itself
+enclosed in a ``try:`` or a ``with:`` block.
+
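(Illustration, not part of the patch: the finally: block of a suspended
generator runs as soon as the last reference goes away on CPython, but only
at some later GC point on PyPy.)

    import gc

    def gen():
        try:
            yield 1
        finally:
            print("cleanup")

    g = gen()
    g.next()       # suspend inside the try: block
    del g          # CPython runs the finally: here (refcount hits zero)
    gc.collect()   # on PyPy the finally: typically runs only around here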
 Using the default GC called ``minimark``, the built-in function ``id()``
 works like it does in CPython.  With other GCs it returns numbers that
 are not real addresses (because an object can move around several times)
diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/discussion/win64_todo.txt
@@ -0,0 +1,9 @@
+2011-11-04
+ll_os.py has a problem with the file rwin32.py.
+Temporarily disabled for the win64_gborg branch. This needs to be
+investigated and re-enabled.
+Resolved, enabled.
+
+2011-11-05
+test_typed.py needs explicit tests to ensure that we
+handle word sizes right.
\ No newline at end of file
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -103,21 +103,13 @@
 
 * A concurrent garbage collector (a lot of work)
 
-Remove the GIL
---------------
+STM, a.k.a. "remove the GIL"
+----------------------------
 
-This is a major task that requires lots of thinking. However, few subprojects
-can be potentially specified, unless a better plan can be thought out:
+Removing the GIL --- or more precisely, a GIL-less thread-less solution ---
+is `now work in progress.`__  Contributions welcome.
 
-* A thread-aware garbage collector
-
-* Better RPython primitives for dealing with concurrency
-
-* JIT passes to remove locks on objects
-
-* (maybe) implement locking in Python interpreter
-
-* alternatively, look at Software Transactional Memory
+.. __: http://pypy.org/tmdonate.html
 
 Introduce new benchmarks
 ------------------------
@@ -157,6 +149,22 @@
 exported.  This would give us a one-size-fits-all generic .so file to be
 imported by any application that wants to load .so files :-)
 
+Optimising cpyext (CPython C-API compatibility layer)
+-----------------------------------------------------
+
+A lot of work has gone into PyPy's implementation of CPython's C-API over
+the last years to let it reach a practical level of compatibility, so that
+C extensions for CPython work on PyPy without major rewrites. However,
+there are still many edges and corner cases where it misbehaves, and it has
+not received any substantial optimisation so far.
+
+The objective of this project is to fix bugs in cpyext and to optimise
+several performance critical parts of it, such as the reference counting
+support and other heavily used C-API functions. The net result would be to
+have CPython extensions run much faster on PyPy than they currently do, or
+to make them work at all if they currently don't. A part of this work would
+be to get cpyext into a shape where it supports running Cython generated
+extensions.
 
 .. _`issue tracker`: http://bugs.pypy.org
 .. _`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev
diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst
--- a/pypy/doc/sandbox.rst
+++ b/pypy/doc/sandbox.rst
@@ -82,7 +82,10 @@
 
 In pypy/translator/goal::
 
-   ./translate.py --sandbox targetpypystandalone.py
+   ./translate.py -O2 --sandbox targetpypystandalone.py
+
+If you don't have a regular PyPy installed you should get one, since
+translating with it is faster; you can also run ``python translate.py`` instead.
 
 
 To run it, use the tools in the pypy/translator/sandbox directory::
diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst
--- a/pypy/doc/stackless.rst
+++ b/pypy/doc/stackless.rst
@@ -199,17 +199,11 @@
 The following features (present in some past Stackless version of PyPy)
 are for the time being not supported any more:
 
-* Tasklets and channels (currently ``stackless.py`` seems to import,
-  but you have tasklets on top of coroutines on top of greenlets on
-  top of continulets on top of stacklets, and it's probably not too
-  hard to cut two of these levels by adapting ``stackless.py`` to
-  use directly continulets)
-
 * Coroutines (could be rewritten at app-level)
 
-* Pickling and unpickling continulets (*)
-
-* Continuing execution of a continulet in a different thread (*)
+* Continuing execution of a continulet in a different thread
+  (but if it is "simple enough", you can pickle it and unpickle it
+  in the other thread).
 
 * Automatic unlimited stack (must be emulated__ so far)
 
@@ -217,15 +211,6 @@
 
 .. __: `recursion depth limit`_
 
-(*) Pickling, as well as changing threads, could be implemented by using
-a "soft" stack switching mode again.  We would get either "hard" or
-"soft" switches, similarly to Stackless Python 3rd version: you get a
-"hard" switch (like now) when the C stack contains non-trivial C frames
-to save, and a "soft" switch (like previously) when it contains only
-simple calls from Python to Python.  Soft-switched continulets would
-also consume a bit less RAM, and the switch might be a bit faster too
-(unsure about that; what is the Stackless Python experience?).
-
 
 Recursion depth limit
 +++++++++++++++++++++
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
new file mode 100644
--- /dev/null
+++ b/pypy/doc/tool/makecontributor.py
@@ -0,0 +1,133 @@
+import py
+import sys
+from collections import defaultdict
+import operator
+import re
+import mercurial.localrepo
+import mercurial.ui
+
+ROOT = py.path.local(__file__).join('..', '..', '..', '..')
+author_re = re.compile('(.*) <.*>')
+pair_programming_re = re.compile(r'^\((.*?)\)')
+excluded = set(["pypy", "convert-repo"])
+
+alias = {
+    'Anders Chrigstrom': ['arre'],
+    'Antonio Cuni': ['antocuni', 'anto'],
+    'Armin Rigo': ['arigo', 'arfigo', 'armin', 'arigato'],
+    'Maciej Fijalkowski': ['fijal'],
+    'Carl Friedrich Bolz': ['cfbolz', 'cf'],
+    'Samuele Pedroni': ['pedronis', 'samuele', 'samule'],
+    'Michael Hudson': ['mwh'],
+    'Holger Krekel': ['hpk', 'holger krekel', 'holger', 'hufpk'],
+    "Amaury Forgeot d'Arc": ['afa'],
+    'Alex Gaynor': ['alex', 'agaynor'],
+    'David Schneider': ['bivab', 'david'],
+    'Christian Tismer': ['chris', 'christian', 'tismer',
+                         'tismer at christia-wjtqxl.localdomain'],
+    'Benjamin Peterson': ['benjamin'],
+    'Hakan Ardo': ['hakan', 'hakanardo'],
+    'Niklaus Haldimann': ['nik'],
+    'Alexander Schremmer': ['xoraxax'],
+    'Anders Hammarquist': ['iko'],
+    'David Edelsohn': ['edelsoh', 'edelsohn'],
+    'Niko Matsakis': ['niko'],
+    'Jakub Gustak': ['jlg'],
+    'Guido Wesdorp': ['guido'],
+    'Michael Foord': ['mfoord'],
+    'Mark Pearse': ['mwp'],
+    'Toon Verwaest': ['tverwaes'],
+    'Eric van Riet Paap': ['ericvrp'],
+    'Jacob Hallen': ['jacob', 'jakob'],
+    'Anders Lehmann': ['ale', 'anders'],
+    'Bert Freudenberg': ['bert'],
+    'Boris Feigin': ['boris', 'boria'],
+    'Valentino Volonghi': ['valentino', 'dialtone'],
+    'Aurelien Campeas': ['aurelien', 'aureliene'],
+    'Adrien Di Mascio': ['adim'],
+    'Jacek Generowicz': ['Jacek', 'jacek'],
+    'Jim Hunziker': ['landtuna at gmail.com'],
+    'Kristjan Valur Jonsson': ['kristjan at kristjan-lp.ccp.ad.local'],
+    'Laura Creighton': ['lac'],
+    'Aaron Iles': ['aliles'],
+    'Ludovic Aubry': ['ludal', 'ludovic'],
+    'Lukas Diekmann': ['l.diekmann', 'ldiekmann'],
+    'Matti Picus': ['Matti Picus matti.picus at gmail.com',
+                    'matthp', 'mattip', 'mattip>'],
+    'Michael Cheng': ['mikefc'],
+    'Richard Emslie': ['rxe'],
+    'Roberto De Ioris': ['roberto at goyle', 'roberto at mrspurr'],
+    'Sven Hager': ['hager'],
+    'Tomo Cocoa': ['cocoatomo'],
+    }
+
+alias_map = {}
+for name, nicks in alias.iteritems():
+    for nick in nicks:
+        alias_map[nick] = name
+
+def get_canonical_author(name):
+    match = author_re.match(name)
+    if match:
+        name = match.group(1)
+    return alias_map.get(name, name)
+
+ignored_nicknames = defaultdict(int)
+
+def get_more_authors(log):
+    match = pair_programming_re.match(log)
+    if not match:
+        return set()
+    ignore_words = ['around', 'consulting', 'yesterday', 'for a bit', 'thanks',
+                    'in-progress', 'bits of', 'even a little', 'floating',]
+    sep_words = ['and', ';', '+', '/', 'with special  by']
+    nicknames = match.group(1)
+    for word in ignore_words:
+        nicknames = nicknames.replace(word, '')
+    for word in sep_words:
+        nicknames = nicknames.replace(word, ',')
+    nicknames = [nick.strip().lower() for nick in nicknames.split(',')]
+    authors = set()
+    for nickname in nicknames:
+        author = alias_map.get(nickname)
+        if not author:
+            ignored_nicknames[nickname] += 1
+        else:
+            authors.add(author)
+    return authors
+
+def main(show_numbers):
+    ui = mercurial.ui.ui()
+    repo = mercurial.localrepo.localrepository(ui, str(ROOT))
+    authors_count = defaultdict(int)
+    for i in repo:
+        ctx = repo[i]
+        authors = set()
+        authors.add(get_canonical_author(ctx.user()))
+        authors.update(get_more_authors(ctx.description()))
+        for author in authors:
+            if author not in excluded:
+                authors_count[author] += 1
+
+    # uncomment the next lines to get the list of nicknamed which could not be
+    # parsed from commit logs
+    ## items = ignored_nicknames.items()
+    ## items.sort(key=operator.itemgetter(1), reverse=True)
+    ## for name, n in items:
+    ##     if show_numbers:
+    ##         print '%5d %s' % (n, name)
+    ##     else:
+    ##         print name
+                
+    items = authors_count.items()
+    items.sort(key=operator.itemgetter(1), reverse=True)
+    for name, n in items:
+        if show_numbers:
+            print '%5d %s' % (n, name)
+        else:
+            print name
+
+if __name__ == '__main__':
+    show_numbers = '-n' in sys.argv
+    main(show_numbers)
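(Illustration, not part of the patch, assuming mercurial is installed so the
module above imports cleanly: the pair-programming parsing resolves nicknames
from the leading parenthesized part of a commit message.)

    log = "(arigo, fijal) hg merge default"
    print(get_more_authors(log))
    # e.g. set(['Armin Rigo', 'Maciej Fijalkowski'])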
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -18,7 +18,8 @@
 Edition.  Other configurations may work as well.
 
 The translation scripts will set up the appropriate environment variables
-for the compiler.  They will attempt to locate the same compiler version that
+for the compiler, so you do not need to run vcvars before translation.  
+They will attempt to locate the same compiler version that
 was used to build the Python interpreter doing the
 translation.  Failing that, they will pick the most recent Visual Studio
 compiler they can find.  In addition, the target architecture
@@ -26,7 +27,7 @@
 using a 32 bit Python and vice versa.
 
 **Note:** PyPy is currently not supported for 64 bit Windows, and translation
-will be aborted in this case.
+will fail in this case.
 
 The compiler is all you need to build pypy-c, but it will miss some
 modules that relies on third-party libraries.  See below how to get
@@ -57,7 +58,8 @@
 install third-party libraries.  We chose to install them in the parent
 directory of the pypy checkout.  For example, if you installed pypy in
 ``d:\pypy\trunk\`` (This directory contains a README file), the base
-directory is ``d:\pypy``.
+directory is ``d:\pypy``. You may choose a different location by setting the
+INCLUDE, LIB and PATH (for DLLs) environment variables.
 
 The Boehm garbage collector
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -126,18 +128,54 @@
 ------------------------
 
 You can compile pypy with the mingw compiler, using the --cc=mingw32 option;
-mingw.exe must be on the PATH.
+gcc.exe must be on the PATH. If the --cc flag value does not begin with "ming", it should be
+the name of a valid gcc-derived compiler, e.g. x86_64-w64-mingw32-gcc for the 64 bit
+compiler creating a 64 bit target.
 
-libffi for the mingw32 compiler
+libffi for the mingw compiler
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-To enable the _rawffi (and ctypes) module, you need to compile a mingw32
-version of libffi.  I downloaded the `libffi source files`_, and extracted
-them in the base directory.  Then run::
+To enable the _rawffi (and ctypes) module, you need to compile a mingw
+version of libffi.  Here is one way to do this, which should allow you to try
+to build for win64 or win32:
+
+#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw
+#. If you do not use cygwin, you will need msys to provide make, 
+   autoconf tools and other goodies.
+
+    #. Download and unzip a `msys for mingw`_, say into c:\msys
+    #. Edit the c:\msys\etc\fstab file to mount c:\mingw
+
+#. Download the `libffi source files`_ and extract
+   them in the base directory.
+#. Run c:\msys\msys.bat or a cygwin shell which should make you
+   feel better since it is a shell prompt with shell tools.
+#. From inside the shell, cd to the libffi directory and do::
 
     sh ./configure
     make
     cp .libs/libffi-5.dll <somewhere on the PATH>
 
+If you can't find the dll, and libtool issued a warning about
+"undefined symbols not allowed", you will need to edit the libffi
+Makefile in the toplevel directory. Add the flag -no-undefined to
+the definition of libffi_la_LDFLAGS.
+
+If you wish to experiment with win64, you must run configure with flags::
+
+    sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32
+
+or such, depending on your mingw64 download.
+
+hacking on PyPy with the mingw compiler
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Since hacking on PyPy means running tests, you will need a way to specify
+the mingw compiler when hacking (as opposed to translating). As of
+March 2012, --cc is not a valid option for pytest.py. However, if you set the
+CC environment variable, it will be used to select the compiler.
+
+.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds
+.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds
+.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29   
 .. _`libffi source files`: http://sourceware.org/libffi/
 .. _`RPython translation toolchain`: translation.html
diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/you-want-to-help.rst
@@ -0,0 +1,86 @@
+
+You want to help with PyPy, now what?
+=====================================
+
+PyPy is a very large project that has a reputation of being hard to dive into.
+Some of this reputation is warranted, some of it is purely accidental. There are three
+important lessons that everyone willing to contribute should learn:
+
+* PyPy has layers. There are many pieces of architecture that are very well
+  separated from each other. More about this below, but often the manifestation
+  of this is that things are at a different layer than you would expect them
+  to be. For example, if you are looking for the JIT implementation, you will
+  not find it in the implementation of the Python programming language.
+
+* Because of the above, we are very serious about Test Driven Development.
+  It's not only what we believe in; PyPy's architecture also works
+  very well with TDD and not so well without it. Often
+  the development means progressing in an unrelated corner, one unittest
+  at a time; and then flipping a giant switch, bringing it all together.
+  (It generally works out of the box.  If it doesn't, then we didn't
+  write enough unit tests.)  It's worth repeating: PyPy's
+  approach is great if you do TDD, not so great otherwise.
+
+* PyPy uses an entirely different set of tools - most of them included
+  in the PyPy repository. There is no Makefile, nor autoconf. More on this below.
+
+Architecture
+============
+
+PyPy has layers. The 100-mile view:
+
+* `RPython`_ is the language in which we write interpreters. Not the entire
+  PyPy project is written in RPython, only the parts that are compiled in
+  the translation process. The interesting point is that RPython has no parser,
+  it's compiled from the live python objects, which make it possible to do
+  all kinds of metaprogramming during import time. In short, Python is a meta
+  programming language for RPython.
+
+  The RPython standard library is to be found in the ``rlib`` subdirectory.
+
+.. _`RPython`: coding-guide.html#RPython
+
+* The translation toolchain - this is the part that takes care of translating
+  RPython to flow graphs and then to C. There is more in the `architecture`_
+  document written about it.
+
+  It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``.
+
+.. _`architecture`: architecture.html 
+
+* Python Interpreter
+
+  xxx
+
+* Python modules
+
+  xxx
+
+* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the
+  interpreter written in RPython, rather than the user program that it
+  interprets.  As a result it applies to any interpreter, i.e. any
+  language.  But getting it to work correctly is not trivial: it
+  requires a small number of precise "hints" and possibly some small
+  refactorings of the interpreter.  The JIT itself also has several
+  almost-independent parts: the tracer itself in ``jit/metainterp``, the
+  optimizer in ``jit/metainterp/optimizer`` that optimizes a list of
+  residual operations, and the backend in ``jit/backend/<machine-name>``
+  that turns it into machine code.  Writing a new backend is a
+  traditional way to get into the project.
+
+.. _`we have a tracing JIT`: jit/index.html
+
+* Garbage Collectors (GC): as you can notice if you are used to CPython's
+  C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code.
+  `Garbage collection in PyPy`_ is inserted
+  during translation.  Moreover, this is not reference counting; it is a real
+  GC written as more RPython code.  The best one we have so far is in
+  ``rpython/memory/gc/minimark.py``.
+
+.. _`Garbage collection in PyPy`: garbage_collection.html
+
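(Short sketch of the import-time metaprogramming mentioned in the RPython
item above; illustrative only, not taken from the PyPy sources.)

    def make_getter(index):
        # plain Python code runs at import time; only the returned
        # function needs to be valid RPython
        def getter(obj):
            return obj.fields[index]
        getter.func_name = "get_field_%d" % index
        return getter

    get_field_0 = make_getter(0)
    get_field_1 = make_getter(1)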
+
+Toolset
+=======
+
+xxx
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -85,6 +85,10 @@
     Collects the arguments of a function call.
 
     Instances should be considered immutable.
+
+    Some parts of this class are written in a slightly convoluted style to help
+    the JIT. It is really crucial to get this right, because Python's argument
+    semantics are complex, but calls occur everywhere.
     """
 
     ###  Construction  ###
@@ -169,9 +173,17 @@
     def _combine_starstarargs_wrapped(self, w_starstararg):
         # unpack the ** arguments
         space = self.space
+        keywords, values_w = space.view_as_kwargs(w_starstararg)
+        if keywords is not None: # this path also taken for empty dicts
+            if self.keywords is None:
+                self.keywords = keywords[:] # copy to make non-resizable
+                self.keywords_w = values_w[:]
+            else:
+                self._check_not_duplicate_kwargs(keywords, values_w)
+                self.keywords = self.keywords + keywords
+                self.keywords_w = self.keywords_w + values_w
+            return not jit.isconstant(len(self.keywords))
         if space.isinstance_w(w_starstararg, space.w_dict):
-            if not space.is_true(w_starstararg):
-                return False # don't call unpackiterable - it's jit-opaque
             keys_w = space.unpackiterable(w_starstararg)
         else:
             try:
@@ -186,11 +198,8 @@
                                    "a mapping, not %s" % (typename,)))
                 raise
             keys_w = space.unpackiterable(w_keys)
-        if keys_w:
-            self._do_combine_starstarargs_wrapped(keys_w, w_starstararg)
-            return True
-        else:
-            return False    # empty dict; don't disable the JIT
+        self._do_combine_starstarargs_wrapped(keys_w, w_starstararg)
+        return True
 
     def _do_combine_starstarargs_wrapped(self, keys_w, w_starstararg):
         space = self.space
@@ -227,6 +236,20 @@
             self.keywords_w = self.keywords_w + keywords_w
         self.keyword_names_w = keys_w
 
+    @jit.look_inside_iff(lambda self, keywords, keywords_w:
+            jit.isconstant(len(keywords) and
+            jit.isconstant(self.keywords)))
+    def _check_not_duplicate_kwargs(self, keywords, keywords_w):
+        # looks quadratic, but the JIT should remove all of it nicely.
+        # Also, all the lists should be small
+        for key in keywords:
+            for otherkey in self.keywords:
+                if otherkey == key:
+                    raise operationerrfmt(self.space.w_TypeError,
+                                          "got multiple values "
+                                          "for keyword argument "
+                                          "'%s'", key)
+
     def fixedunpack(self, argcount):
         """The simplest argument parsing: get the 'argcount' arguments,
         or raise a real ValueError if the length is wrong."""
@@ -385,7 +408,7 @@
 
         # collect extra keyword arguments into the **kwarg
         if has_kwarg:
-            w_kwds = self.space.newdict()
+            w_kwds = self.space.newdict(kwargs=True)
             if num_remainingkwds:
                 #
                 limit = len(keywords)
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -10,16 +10,6 @@
 from pypy.interpreter.astcompiler import ast, consts
 
 
-try:
-    all
-except NameError:
-    def all(iterable):
-        for x in iterable:
-            if not x:
-                return False
-        return True
-
-
 class TestAstBuilder:
 
     def setup_class(cls):
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -296,6 +296,7 @@
         self.check_signal_action = None   # changed by the signal module
         self.user_del_action = UserDelAction(self)
         self.frame_trace_action = FrameTraceAction(self)
+        self._code_of_sys_exc_info = None
 
         from pypy.interpreter.pycode import cpython_magic, default_magic
         self.our_magic = default_magic
@@ -467,9 +468,9 @@
                 if name not in modules:
                     modules.append(name)
 
-        # a bit of custom logic: time2 or rctime take precedence over time
+        # a bit of custom logic: rctime take precedence over time
         # XXX this could probably be done as a "requires" in the config
-        if ('time2' in modules or 'rctime' in modules) and 'time' in modules:
+        if 'rctime' in modules and 'time' in modules:
             modules.remove('time')
 
         if not self.config.objspace.nofaking:
@@ -913,6 +914,12 @@
         """
         return None
 
+    def view_as_kwargs(self, w_dict):
+        """ if w_dict is a kwargs-dict, return two lists, one of unwrapped
+        strings and one of wrapped values. Otherwise return (None, None).
+        """
+        return (None, None)
+
     def newlist_str(self, list_s):
         return self.newlist([self.wrap(s) for s in list_s])
 
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -159,6 +159,7 @@
         #operationerr.print_detailed_traceback(self.space)
 
     def _convert_exc(self, operr):
+        # Only for the flow object space
         return operr
 
     def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!!
@@ -171,6 +172,11 @@
             frame = self.getnextframe_nohidden(frame)
         return None
 
+    def set_sys_exc_info(self, operror):
+        frame = self.gettopframe_nohidden()
+        if frame:     # else, the exception goes nowhere and is lost
+            frame.last_exception = operror
+
     def settrace(self, w_func):
         """Set the global trace function."""
         if self.space.is_w(w_func, self.space.w_None):
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -113,6 +113,12 @@
         from pypy.interpreter.pycode import PyCode
 
         code = self.getcode() # hook for the jit
+        #
+        if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info
+                                and nargs == 0):
+            from pypy.module.sys.vm import exc_info_direct
+            return exc_info_direct(self.space, frame)
+        #
         fast_natural_arity = code.fast_natural_arity
         if nargs == fast_natural_arity:
             if nargs == 0:
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -874,6 +874,12 @@
             fn.add_to_table()
         if gateway.as_classmethod:
             fn = ClassMethod(space.wrap(fn))
+        #
+        from pypy.module.sys.vm import exc_info
+        if code._bltin is exc_info:
+            assert space._code_of_sys_exc_info is None
+            space._code_of_sys_exc_info = code
+        #
         return fn
 
 
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -2,34 +2,39 @@
 from pypy.interpreter import unicodehelper
 from pypy.rlib.rstring import StringBuilder
 
-def parsestr(space, encoding, s, unicode_literals=False):
-    # compiler.transformer.Transformer.decode_literal depends on what 
-    # might seem like minor details of this function -- changes here 
-    # must be reflected there.
+def parsestr(space, encoding, s, unicode_literal=False):
+    """Parses a string or unicode literal, and return a wrapped value.
+
+    If encoding=iso-8859-1, the source string is also in this encoding.
+    If encoding=None, the source string is ascii only.
+    In other cases, the source string is in utf-8 encoding.
+
+    When a bytes string is returned, it will be encoded with the
+    original encoding.
+
+    Yes, it's very inefficient.
+    Yes, CPython has very similar code.
+    """
 
     # we use ps as "pointer to s"
     # q is the virtual last char index of the string
     ps = 0
     quote = s[ps]
     rawmode = False
-    unicode = unicode_literals
 
     # string decoration handling
-    o = ord(quote)
-    isalpha = (o>=97 and o<=122) or (o>=65 and o<=90)
-    if isalpha or quote == '_':
-        if quote == 'b' or quote == 'B':
-            ps += 1
-            quote = s[ps]
-            unicode = False
-        elif quote == 'u' or quote == 'U':
-            ps += 1
-            quote = s[ps]
-            unicode = True
-        if quote == 'r' or quote == 'R':
-            ps += 1
-            quote = s[ps]
-            rawmode = True
+    if quote == 'b' or quote == 'B':
+        ps += 1
+        quote = s[ps]
+        unicode_literal = False
+    elif quote == 'u' or quote == 'U':
+        ps += 1
+        quote = s[ps]
+        unicode_literal = True
+    if quote == 'r' or quote == 'R':
+        ps += 1
+        quote = s[ps]
+        rawmode = True
     if quote != "'" and quote != '"':
         raise_app_valueerror(space,
                              'Internal error: parser passed unquoted literal')
@@ -46,21 +51,28 @@
                                         'unmatched triple quotes in literal')
         q -= 2
 
-    if unicode: # XXX Py_UnicodeFlag is ignored for now
+    if unicode_literal: # XXX Py_UnicodeFlag is ignored for now
         if encoding is None or encoding == "iso-8859-1":
+            # 'unicode_escape' expects latin-1 bytes, string is ready.
             buf = s
             bufp = ps
             bufq = q
             u = None
         else:
-            # "\XX" may become "\u005c\uHHLL" (12 bytes)
+            # String is utf8-encoded, but 'unicode_escape' expects
+            # latin-1, so multibyte sequences must be escaped.
             lis = [] # using a list to assemble the value
             end = q
+            # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes)
             while ps < end:
                 if s[ps] == '\\':
                     lis.append(s[ps])
                     ps += 1
                     if ord(s[ps]) & 0x80:
+                        # A multibyte sequence will follow; it will be
+                        # escaped like \u1234. To avoid confusion with
+                        # the backslash we just wrote, we emit "\u005c"
+                        # instead.
                         lis.append("u005c")
                 if ord(s[ps]) & 0x80: # XXX inefficient
                     w, ps = decode_utf8(space, s, ps, end, "utf-16-be")
@@ -86,13 +98,11 @@
 
     need_encoding = (encoding is not None and
                      encoding != "utf-8" and encoding != "iso-8859-1")
-    # XXX add strchr like interface to rtyper
     assert 0 <= ps <= q
     substr = s[ps : q]
     if rawmode or '\\' not in s[ps:]:
         if need_encoding:
             w_u = space.wrap(unicodehelper.PyUnicode_DecodeUTF8(space, substr))
-            #w_v = space.wrap(space.unwrap(w_u).encode(encoding)) this works
             w_v = unicodehelper.PyUnicode_AsEncodedString(space, w_u, space.wrap(encoding))
             return w_v
         else:
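
A rough pure-Python (2.x) sketch of the escaping trick described in the
comments above; the helper name is illustrative and not part of the PyPy
sources.  Multibyte utf-8 sequences (and a backslash that directly precedes
one) are rewritten so that the result can be decoded with 'unicode_escape':

    def escape_utf8_for_unicode_escape(s):
        out = []
        i = 0
        while i < len(s):
            if s[i] == '\\':
                out.append(s[i])
                i += 1
                if i < len(s) and ord(s[i]) & 0x80:
                    out.append('u005c')      # the bare backslash becomes \u005c
            if i < len(s) and ord(s[i]) & 0x80:
                n = 1                        # length of the utf-8 sequence
                while i + n < len(s) and ord(s[i + n]) & 0xC0 == 0x80:
                    n += 1
                u = s[i:i + n].decode('utf-8')
                out.append(u.encode('unicode_escape'))   # e.g. the text \xe9 or \u1234
                i += n
            elif i < len(s):
                out.append(s[i])
                i += 1
        return ''.join(out)

The returned string can then be decoded with 'unicode_escape', like the
buffer that parsestr() assembles.
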
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -75,7 +75,10 @@
     def unpackiterable(self, it):
         return list(it)
 
-    def newdict(self):
+    def view_as_kwargs(self, x):
+        return None, None
+
+    def newdict(self, kwargs=False):
         return {}
 
     def newlist(self, l=[]):
@@ -488,6 +491,57 @@
         assert len(l) == 1
         assert l[0] == space.wrap(5)
 
+    def test_starstarargs_special(self):
+        class kwargs(object):
+            def __init__(self, k, v):
+                self.k = k
+                self.v = v
+        class MyDummySpace(DummySpace):
+            def view_as_kwargs(self, kw):
+                if isinstance(kw, kwargs):
+                    return kw.k, kw.v
+                return None, None
+        space = MyDummySpace()
+        for i in range(3):
+            kwds = [("c", 3)]
+            kwds_w = dict(kwds[:i])
+            keywords = kwds_w.keys()
+            keywords_w = kwds_w.values()
+            rest = dict(kwds[i:])
+            w_kwds = kwargs(rest.keys(), rest.values())
+            if i == 2:
+                w_kwds = None
+            assert len(keywords) == len(keywords_w)
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None]
+            args._match_signature(None, l, Signature(["a", "b", "c"]), defaults_w=[4])
+            assert l == [1, 2, 3]
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            args._match_signature(None, l, Signature(["a", "b", "b1", "c"]), defaults_w=[4, 5])
+            assert l == [1, 2, 4, 3]
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            args._match_signature(None, l, Signature(["a", "b", "c", "d"]), defaults_w=[4, 5])
+            assert l == [1, 2, 3, 5]
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            py.test.raises(ArgErr, args._match_signature, None, l,
+                           Signature(["c", "b", "a", "d"]), defaults_w=[4, 5])
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None, None]
+            py.test.raises(ArgErr, args._match_signature, None, l,
+                           Signature(["a", "b", "c1", "d"]), defaults_w=[4, 5])
+            args = Arguments(space, [1, 2], keywords[:], keywords_w[:], w_starstararg=w_kwds)
+            l = [None, None, None]
+            args._match_signature(None, l, Signature(["a", "b"], None, "**"))
+            assert l == [1, 2, {'c': 3}]
+        excinfo = py.test.raises(OperationError, Arguments, space, [], ["a"],
+                                 [1], w_starstararg=kwargs(["a"], [2]))
+        assert excinfo.value.w_type is TypeError
+
+
+
 class TestErrorHandling(object):
     def test_missing_args(self):
         # got_nargs, nkwds, expected_nargs, has_vararg, has_kwarg,
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -312,8 +312,8 @@
             mods = space.get_builtinmodule_to_install()
             
             assert '__pypy__' in mods                # real builtin
-            assert 'array' not in mods               # in lib_pypy
-            assert 'faked+array' not in mods         # in lib_pypy
+            assert '_functools' not in mods               # in lib_pypy
+            assert 'faked+_functools' not in mods         # in lib_pypy
             assert 'this_doesnt_exist' not in mods   # not in lib_pypy
             assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in
                                                      # ALL_BUILTIN_MODULES
diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
--- a/pypy/interpreter/test/test_zzpickle_and_slow.py
+++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
@@ -75,6 +75,7 @@
 class AppTestInterpObjectPickling:
     pytestmark = py.test.mark.skipif("config.option.runappdirect")
     def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=['struct'])
         _attach_helpers(cls.space)
 
     def teardown_class(cls):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -321,7 +321,7 @@
             
             def user_setup(self, space, w_subtype):
                 self.w__dict__ = space.newdict(
-                    instance=True, classofinstance=w_subtype)
+                    instance=True)
                 base_user_setup(self, space, w_subtype)
 
             def setclass(self, space, w_subtype):
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
--- a/pypy/jit/backend/llgraph/llimpl.py
+++ b/pypy/jit/backend/llgraph/llimpl.py
@@ -1797,6 +1797,7 @@
         if specialize_as_constant:
             def specialize_call(self, hop):
                 llvalue = func(hop.args_s[0].const)
+                hop.exception_cannot_occur()
                 return hop.inputconst(lltype.typeOf(llvalue), llvalue)
         else:
             # specialize as direct_call
@@ -1813,6 +1814,7 @@
                     sm = ootype._static_meth(FUNCTYPE, _name=func.__name__, _callable=func)
                     cfunc = hop.inputconst(FUNCTYPE, sm)
                 args_v = hop.inputargs(*hop.args_r)
+                hop.exception_is_here()
                 return hop.genop('direct_call', [cfunc] + args_v, hop.r_result)
 
 
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -27,6 +27,12 @@
 def constfloat(x):
     return ConstFloat(longlong.getfloatstorage(x))
 
+def boxlonglong(ll):
+    if longlong.is_64_bit:
+        return BoxInt(ll)
+    else:
+        return BoxFloat(ll)
+
 
 class Runner(object):
 
@@ -1623,6 +1629,11 @@
                                      [boxfloat(2.5)], t).value
         assert res == longlong2float.float2longlong(2.5)
 
+        bytes = longlong2float.float2longlong(2.5)
+        res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT,
+                                     [boxlonglong(res)], 'float').value
+        assert longlong.getrealfloat(res) == 2.5
+
     def test_ooops_non_gc(self):
         x = lltype.malloc(lltype.Struct('x'), flavor='raw')
         v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x))
diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py
--- a/pypy/jit/backend/test/test_random.py
+++ b/pypy/jit/backend/test/test_random.py
@@ -328,6 +328,15 @@
     def produce_into(self, builder, r):
         self.put(builder, [r.choice(builder.intvars)])
 
+class CastLongLongToFloatOperation(AbstractFloatOperation):
+    def produce_into(self, builder, r):
+        if longlong.is_64_bit:
+            self.put(builder, [r.choice(builder.intvars)])
+        else:
+            if not builder.floatvars:
+                raise CannotProduceOperation
+            self.put(builder, [r.choice(builder.floatvars)])
+
 class CastFloatToIntOperation(AbstractFloatOperation):
     def produce_into(self, builder, r):
         if not builder.floatvars:
@@ -450,6 +459,7 @@
 OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT))
 OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT))
 OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG))
+OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT))
 
 OperationBuilder.OPERATIONS = OPERATIONS
 
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -1251,6 +1251,15 @@
         else:
             self.mov(loc0, resloc)
 
+    def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc):
+        loc0, = arglocs
+        if longlong.is_64_bit:
+            assert isinstance(resloc, RegLoc)
+            assert isinstance(loc0, RegLoc)
+            self.mc.MOVD(resloc, loc0)
+        else:
+            self.mov(loc0, resloc)
+
     def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc):
         guard_opnum = guard_op.getopnum()
         self.mc.CMP(arglocs[0], imm0)
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -773,10 +773,24 @@
             self.Perform(op, [loc0], loc1)
             self.xrm.possibly_free_var(op.getarg(0))
         else:
-            loc0 = self.xrm.loc(op.getarg(0))
+            arg0 = op.getarg(0)
+            loc0 = self.xrm.loc(arg0)
+            loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0])
+            self.Perform(op, [loc0], loc1)
+            self.xrm.possibly_free_var(arg0)
+
+    def consider_convert_longlong_bytes_to_float(self, op):
+        if longlong.is_64_bit:
+            loc0 = self.rm.make_sure_var_in_reg(op.getarg(0))
             loc1 = self.xrm.force_allocate_reg(op.result)
             self.Perform(op, [loc0], loc1)
-            self.xrm.possibly_free_var(op.getarg(0))
+            self.rm.possibly_free_var(op.getarg(0))
+        else:
+            arg0 = op.getarg(0)
+            loc0 = self.xrm.make_sure_var_in_reg(arg0)
+            loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0])
+            self.Perform(op, [loc0], loc1)
+            self.xrm.possibly_free_var(arg0)
 
     def _consider_llong_binop_xx(self, op):
         # must force both arguments into xmm registers, because we don't
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
--- a/pypy/jit/backend/x86/rx86.py
+++ b/pypy/jit/backend/x86/rx86.py
@@ -601,7 +601,9 @@
     CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A',
                           register(1, 8), stack_bp(2))
 
-    # These work on machine sized registers.
+    # These work on machine sized registers, so MOVD is actually MOVQ
+    # when running on 64 bits.  Note a bug in the Intel documentation:
+    # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html
     MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0')
     MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0')
     MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2))
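
As a small illustration of the comment above (not PyPy code): the 66 0F 6E
opcode moves 32 bits and is spelled MOVD, but with the REX.W bit set it moves
64 bits and assemblers spell it MOVQ.  A sketch of the register-to-register
encoding, assuming low registers only (no REX.R/REX.B extension bits):

    def encode_movd_xr(xmm, gpr, w):
        # 66 [REX.W] 0F 6E /r : gpr -> xmm, 32-bit (MOVD) or 64-bit (MOVQ)
        modrm = 0xC0 | (xmm << 3) | gpr              # mod=11: register-register
        return bytearray([0x66] + ([0x48] if w else []) + [0x0F, 0x6E, modrm])

    assert encode_movd_xr(0, 0, False) == bytearray([0x66, 0x0F, 0x6E, 0xC0])        # movd %eax, %xmm0
    assert encode_movd_xr(0, 0, True) == bytearray([0x66, 0x48, 0x0F, 0x6E, 0xC0])   # movq %rax, %xmm0
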
diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
--- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
+++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
@@ -182,6 +182,12 @@
         filename  = str(testdir.join(FILENAME  % methname))
         g = open(inputname, 'w')
         g.write('\x09.string "%s"\n' % BEGIN_TAG)
+        #
+        if instrname == 'MOVD' and self.WORD == 8:
+            instrname = 'MOVQ'
+            if argmodes == 'xb':
+                py.test.skip('"as" uses an undocumented alternate encoding??')
+        #
         for args in args_lists:
             suffix = ""
     ##        all = instr.as_all_suffixes
@@ -229,9 +235,6 @@
                 # movq $xxx, %rax => movl $xxx, %eax
                 suffix = 'l'
                 ops[1] = reduce_to_32bit(ops[1])
-            if instrname.lower() == 'movd':
-                ops[0] = reduce_to_32bit(ops[0])
-                ops[1] = reduce_to_32bit(ops[1])
             #
             op = '\t%s%s %s%s' % (instrname.lower(), suffix,
                                   ', '.join(ops), following)
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py
--- a/pypy/jit/codewriter/jtransform.py
+++ b/pypy/jit/codewriter/jtransform.py
@@ -295,6 +295,7 @@
         return op
 
     rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite
+    rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite
 
     # ----------
     # Various kinds of calls
diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py
--- a/pypy/jit/codewriter/test/test_flatten.py
+++ b/pypy/jit/codewriter/test/test_flatten.py
@@ -968,20 +968,22 @@
             int_return %i2
         """, transform=True)
 
-    def test_convert_float_bytes_to_int(self):
-        from pypy.rlib.longlong2float import float2longlong
+    def test_convert_float_bytes(self):
+        from pypy.rlib.longlong2float import float2longlong, longlong2float
         def f(x):
-            return float2longlong(x)
+            ll = float2longlong(x)
+            return longlong2float(ll)
         if longlong.is_64_bit:
-            result_var = "%i0"
-            return_op = "int_return"
+            tmp_var = "%i0"
+            result_var = "%f1"
         else:
-            result_var = "%f1"
-            return_op = "float_return"
+            tmp_var = "%f1"
+            result_var = "%f2"
         self.encoding_test(f, [25.0], """
-            convert_float_bytes_to_longlong %%f0 -> %(result_var)s
-            %(return_op)s %(result_var)s
-        """ % {"result_var": result_var, "return_op": return_op})
+            convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s
+            convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s
+            float_return %(result_var)s
+        """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True)
 
 
 def check_force_cast(FROM, TO, operations, value):
diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py
--- a/pypy/jit/metainterp/blackhole.py
+++ b/pypy/jit/metainterp/blackhole.py
@@ -672,6 +672,11 @@
         a = longlong.getrealfloat(a)
         return longlong2float.float2longlong(a)
 
+    @arguments(LONGLONG_TYPECODE, returns="f")
+    def bhimpl_convert_longlong_bytes_to_float(a):
+        a = longlong2float.longlong2float(a)
+        return longlong.getfloatstorage(a)
+
     # ----------
     # control flow operations
 
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -224,6 +224,7 @@
                     'float_neg', 'float_abs',
                     'cast_ptr_to_int', 'cast_int_to_ptr',
                     'convert_float_bytes_to_longlong',
+                    'convert_longlong_bytes_to_float',
                     ]:
         exec py.code.Source('''
             @arguments("box")
@@ -1222,7 +1223,7 @@
     def run_one_step(self):
         # Execute the frame forward.  This method contains a loop that leaves
         # whenever the 'opcode_implementations' (which is one of the 'opimpl_'
-        # methods) returns True.  This is the case when the current frame
+        # methods) raises ChangeFrame.  This is the case when the current frame
         # changes, due to a call or a return.
         try:
             staticdata = self.metainterp.staticdata
diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py
--- a/pypy/jit/metainterp/resoperation.py
+++ b/pypy/jit/metainterp/resoperation.py
@@ -420,6 +420,7 @@
     'CAST_FLOAT_TO_SINGLEFLOAT/1',
     'CAST_SINGLEFLOAT_TO_FLOAT/1',
     'CONVERT_FLOAT_BYTES_TO_LONGLONG/1',
+    'CONVERT_LONGLONG_BYTES_TO_FLOAT/1',
     #
     'INT_LT/2b',
     'INT_LE/2b',
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -1,3 +1,4 @@
+import math
 import sys
 
 import py
@@ -15,7 +16,7 @@
     loop_invariant, elidable, promote, jit_debug, assert_green,
     AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff,
     isconstant, isvirtual, promote_string, set_param, record_known_class)
-from pypy.rlib.longlong2float import float2longlong
+from pypy.rlib.longlong2float import float2longlong, longlong2float
 from pypy.rlib.rarithmetic import ovfcheck, is_valid_int
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rpython.ootypesystem import ootype
@@ -3795,15 +3796,15 @@
         res = self.interp_operations(g, [1])
         assert res == 3
 
-    def test_float2longlong(self):
+    def test_float_bytes(self):
         def f(n):
-            return float2longlong(n)
+            ll = float2longlong(n)
+            return longlong2float(ll)
 
         for x in [2.5, float("nan"), -2.5, float("inf")]:
             # There are tests elsewhere to verify the correctness of this.
-            expected = float2longlong(x)
             res = self.interp_operations(f, [x])
-            assert longlong.getfloatstorage(res) == expected
+            assert res == x or math.isnan(x) and math.isnan(res)
 
 
 class TestLLtype(BaseLLtypeTests, LLJitMixin):
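
For reference, the conversions exercised by this test have a simple
plain-Python counterpart: reinterpreting the 8 bytes of an IEEE-754 double as
a signed 64-bit integer and back.  A minimal sketch using the struct module
(these helpers are illustrative, not the rlib implementation):

    import struct

    def float2longlong_py(x):
        # reinterpret the bits of a double as a signed 64-bit integer
        return struct.unpack('<q', struct.pack('<d', x))[0]

    def longlong2float_py(ll):
        # inverse: reinterpret the 64 bits as a double again
        return struct.unpack('<d', struct.pack('<q', ll))[0]

    assert longlong2float_py(float2longlong_py(2.5)) == 2.5
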
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -16,13 +16,15 @@
     appleveldefs = {}
     interpleveldefs = {}
     if sys.platform.startswith("linux"):
+        from pypy.module.__pypy__ import interp_time
         interpleveldefs["clock_gettime"] = "interp_time.clock_gettime"
         interpleveldefs["clock_getres"] = "interp_time.clock_getres"
         for name in [
             "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW",
             "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID"
         ]:
-            interpleveldefs[name] = "space.wrap(interp_time.%s)" % name
+            if getattr(interp_time, name) is not None:
+                interpleveldefs[name] = "space.wrap(interp_time.%s)" % name
 
 
 class Module(MixedModule):
diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py
--- a/pypy/module/__pypy__/interp_time.py
+++ b/pypy/module/__pypy__/interp_time.py
@@ -1,3 +1,4 @@
+from __future__ import with_statement
 import sys
 
 from pypy.interpreter.error import exception_from_errno
diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py
--- a/pypy/module/_ast/test/test_ast.py
+++ b/pypy/module/_ast/test/test_ast.py
@@ -1,9 +1,10 @@
 import py
-
+from pypy.conftest import gettestobjspace
 
 class AppTestAST:
 
     def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=['struct'])
         cls.w_ast = cls.space.appexec([], """():
     import _ast
     return _ast""")
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -4,7 +4,7 @@
 
 class AppTestCodecs:
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('unicodedata',))
+        space = gettestobjspace(usemodules=('unicodedata', 'struct'))
         cls.space = space
 
     def test_register_noncallable(self):
diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py
--- a/pypy/module/_continuation/test/test_zpickle.py
+++ b/pypy/module/_continuation/test/test_zpickle.py
@@ -106,8 +106,9 @@
     version = 0
 
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=('_continuation',),
+        cls.space = gettestobjspace(usemodules=('_continuation', 'struct'),
                                     CALL_METHOD=True)
+        cls.space.config.translation.continuation = True
         cls.space.appexec([], """():
             global continulet, A, __name__
 
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py
--- a/pypy/module/_hashlib/test/test_hashlib.py
+++ b/pypy/module/_hashlib/test/test_hashlib.py
@@ -3,7 +3,7 @@
 
 class AppTestHashlib:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['_hashlib'])
+        cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct'])
 
     def test_simple(self):
         import _hashlib
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -341,9 +341,13 @@
 
     def add(self, w_iobase):
         assert w_iobase.streamholder is None
-        holder = StreamHolder(w_iobase)
-        w_iobase.streamholder = holder
-        self.streams[holder] = None
+        if rweakref.has_weakref_support():
+            holder = StreamHolder(w_iobase)
+            w_iobase.streamholder = holder
+            self.streams[holder] = None
+        #else:
+        #   no support for weakrefs, so we simply ignore it and
+        #   will not get autoflushing
 
     def remove(self, w_iobase):
         holder = w_iobase.streamholder
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -158,7 +158,7 @@
 
 class AppTestOpen:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['_io', '_locale'])
+        cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct'])
         tmpfile = udir.join('tmpfile').ensure()
         cls.w_tmpfile = cls.space.wrap(str(tmpfile))
 
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -185,6 +185,7 @@
             if subentry is not None:
                 subentry._stop(tt, it)
 
+ at jit.elidable_promote()
 def create_spec(space, w_arg):
     if isinstance(w_arg, Method):
         w_function = w_arg.w_function
diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py
--- a/pypy/module/_multiprocessing/test/test_connection.py
+++ b/pypy/module/_multiprocessing/test/test_connection.py
@@ -92,7 +92,8 @@
 
 class AppTestSocketConnection(BaseConnectionTest):
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal'))
+        space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal',
+                                            'struct', 'array'))
         cls.space = space
         cls.w_connections = space.newlist([])
 
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -6,7 +6,7 @@
 from pypy.rpython.lltypesystem import lltype, rffi
 
 def setup_module(mod):
-    mod.space = gettestobjspace(usemodules=['_socket', 'array'])
+    mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct'])
     global socket
     import socket
     mod.w_socket = space.appexec([], "(): import _socket as m; return m")
@@ -372,10 +372,9 @@
     def test_socket_connect(self):
         import _socket, os
         s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
-        # XXX temporarily we use python.org to test, will have more robust tests
-        # in the absence of a network connection later when more parts of the
-        # socket API are implemented.  Currently skip the test if there is no
-        # connection.
+        # it would be nice to have a test which works even if there is no
+        # network connection. However, this one is "good enough" for now. Skip
+        # it if there is no connection.
         try:
             s.connect(("www.python.org", 80))
         except _socket.gaierror, ex:
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -432,7 +432,8 @@
                     raise _ssl_seterror(self.space, self, length)
                 try:
                     # this is actually an immutable bytes sequence
-                    return self.space.wrap(rffi.charp2str(buf_ptr[0]))
+                    return self.space.wrap(rffi.charpsize2str(buf_ptr[0],
+                                                              length))
                 finally:
                     libssl_OPENSSL_free(buf_ptr[0])
         else:
diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py
--- a/pypy/module/_ssl/test/test_ssl.py
+++ b/pypy/module/_ssl/test/test_ssl.py
@@ -90,7 +90,7 @@
 
 class AppTestConnectedSSL:
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('_ssl', '_socket'))
+        space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct'))
         cls.space = space
 
     def setup_method(self, method):
@@ -179,7 +179,7 @@
     # to exercise the poll() calls
 
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('_ssl', '_socket'))
+        space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct'))
         cls.space = space
         cls.space.appexec([], """():
             import socket; socket.setdefaulttimeout(1)
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -11,7 +11,7 @@
 from pypy.objspace.std.register_all import register_all
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rlib.unroll import unrolling_iterable
-from pypy.rlib.objectmodel import specialize
+from pypy.rlib.objectmodel import specialize, keepalive_until_here
 from pypy.rpython.lltypesystem import lltype, rffi
 
 
@@ -145,18 +145,24 @@
 unroll_typecodes = unrolling_iterable(types.keys())
 
 class ArrayBuffer(RWBuffer):
-    def __init__(self, data, bytes):
-        self.data = data
-        self.len = bytes
+    def __init__(self, array):
+        self.array = array
 
     def getlength(self):
-        return self.len
+        return self.array.len * self.array.itemsize
 
     def getitem(self, index):
-        return self.data[index]
+        array = self.array
+        data = array._charbuf_start()
+        char = data[index]
+        array._charbuf_stop()
+        return char
 
     def setitem(self, index, char):
-        self.data[index] = char
+        array = self.array
+        data = array._charbuf_start()
+        data[index] = char
+        array._charbuf_stop()
 
 
 def make_array(mytype):
@@ -278,9 +284,10 @@
             oldlen = self.len
             new = len(s) / mytype.bytes
             self.setlen(oldlen + new)
-            cbuf = self.charbuf()
+            cbuf = self._charbuf_start()
             for i in range(len(s)):
                 cbuf[oldlen * mytype.bytes + i] = s[i]
+            self._charbuf_stop()
 
         def fromlist(self, w_lst):
             s = self.len
@@ -310,8 +317,11 @@
             else:
                 self.fromsequence(w_iterable)
 
-        def charbuf(self):
-            return  rffi.cast(rffi.CCHARP, self.buffer)
+        def _charbuf_start(self):
+            return rffi.cast(rffi.CCHARP, self.buffer)
+
+        def _charbuf_stop(self):
+            keepalive_until_here(self)
 
         def w_getitem(self, space, idx):
             item = self.buffer[idx]
@@ -530,8 +540,10 @@
         self.fromstring(space.str_w(w_s))
 
     def array_tostring__Array(space, self):
-        cbuf = self.charbuf()
-        return self.space.wrap(rffi.charpsize2str(cbuf, self.len * mytype.bytes))
+        cbuf = self._charbuf_start()
+        s = rffi.charpsize2str(cbuf, self.len * mytype.bytes)
+        self._charbuf_stop()
+        return self.space.wrap(s)
 
     def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n):
         if not isinstance(w_f, W_File):
@@ -613,8 +625,7 @@
     # Misc methods
 
     def buffer__Array(space, self):
-        b = ArrayBuffer(self.charbuf(), self.len * mytype.bytes)
-        return space.wrap(b)
+        return space.wrap(ArrayBuffer(self))
 
     def array_buffer_info__Array(space, self):
         w_ptr = space.wrap(rffi.cast(lltype.Unsigned, self.buffer))
@@ -649,7 +660,7 @@
             raise OperationError(space.w_RuntimeError, space.wrap(msg))
         if self.len == 0:
             return
-        bytes = self.charbuf()
+        bytes = self._charbuf_start()
         tmp = [bytes[0]] * mytype.bytes
         for start in range(0, self.len * mytype.bytes, mytype.bytes):
             stop = start + mytype.bytes - 1
@@ -657,6 +668,7 @@
                 tmp[i] = bytes[start + i]
             for i in range(mytype.bytes):
                 bytes[stop - i] = tmp[i]
+        self._charbuf_stop()
 
     def repr__Array(space, self):
         if self.len == 0:
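
The point of the new _charbuf_start()/_charbuf_stop() pair is that the buffer
never caches a raw pointer: it re-fetches the storage on every access and
only relies on it between the two calls, with _charbuf_stop() standing for
keepalive_until_here(self).  A plain-Python stand-in for that access pattern
(the class names are made up for illustration; this is only a sketch):

    class SketchArray(object):
        def __init__(self, data):
            self._storage = bytearray(data)   # may be reallocated as the array grows

        def _charbuf_start(self):
            return self._storage              # view of the *current* storage

        def _charbuf_stop(self):
            pass                              # stands in for keepalive_until_here(self)

    class SketchArrayBuffer(object):
        def __init__(self, array):
            self.array = array                # hold the owner, never its storage

        def getitem(self, index):
            data = self.array._charbuf_start()
            char = chr(data[index])
            self.array._charbuf_stop()
            return char
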
diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py
--- a/pypy/module/array/test/test_array.py
+++ b/pypy/module/array/test/test_array.py
@@ -433,7 +433,25 @@
         a = self.array('h', 'Hi')
         buf = buffer(a)
         assert buf[1] == 'i'
-        #raises(TypeError, buf.__setitem__, 1, 'o')
+
+    def test_buffer_write(self):
+        a = self.array('c', 'hello')
+        buf = buffer(a)
+        print repr(buf)
+        try:
+            buf[3] = 'L'
+        except TypeError:
+            skip("buffer(array) returns a read-only buffer on CPython")
+        assert a.tostring() == 'helLo'
+
+    def test_buffer_keepalive(self):
+        buf = buffer(self.array('c', 'text'))
+        assert buf[2] == 'x'
+        #
+        a = self.array('c', 'foobarbaz')
+        buf = buffer(a)
+        a.fromstring('some extra text')
+        assert buf[:] == 'foobarbazsome extra text'
 
     def test_list_methods(self):
         assert repr(self.array('i')) == "array('i')"
diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py
--- a/pypy/module/cpyext/bufferobject.py
+++ b/pypy/module/cpyext/bufferobject.py
@@ -2,8 +2,10 @@
 from pypy.module.cpyext.api import (
     cpython_api, Py_ssize_t, cpython_struct, bootstrap_function,
     PyObjectFields, PyObject)
-from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef
+from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref
 from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer
+from pypy.interpreter.error import OperationError
+from pypy.module.array.interp_array import ArrayBuffer
 
 
 PyBufferObjectStruct = lltype.ForwardReference()
@@ -41,26 +43,38 @@
         py_buf.c_b_offset = w_obj.offset
         w_obj = w_obj.buffer
 
+    # If w_obj already allocated a fixed buffer, use it, and keep a
+    # reference to w_obj.
+    # Otherwise, b_base stays NULL, and we own the b_ptr.
+
     if isinstance(w_obj, StringBuffer):
-        py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value)
-        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str()))
+        py_buf.c_b_base = lltype.nullptr(PyObject.TO)
+        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value))
+        py_buf.c_b_size = w_obj.getlength()
+    elif isinstance(w_obj, ArrayBuffer):
+        w_base = w_obj.array
+        py_buf.c_b_base = make_ref(space, w_base)
+        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.array._charbuf_start())
         py_buf.c_b_size = w_obj.getlength()
     else:
-        raise Exception("Fail fail fail fail fail")
+        raise OperationError(space.w_NotImplementedError, space.wrap(
+            "buffer flavor not supported"))
 
 
 def buffer_realize(space, py_obj):
     """
     Creates the buffer in the PyPy interpreter from a cpyext representation.
     """
-    raise Exception("realize fail fail fail")
-
+    raise OperationError(space.w_NotImplementedError, space.wrap(
+        "Don't know how to realize a buffer"))
 
 
 @cpython_api([PyObject], lltype.Void, external=False)
 def buffer_dealloc(space, py_obj):
     py_buf = rffi.cast(PyBufferObject, py_obj)
-    Py_DecRef(space, py_buf.c_b_base)
-    rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr))
+    if py_buf.c_b_base:
+        Py_DecRef(space, py_buf.c_b_base)
+    else:
+        rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr))
     from pypy.module.cpyext.object import PyObject_dealloc
     PyObject_dealloc(space, py_obj)
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,7 +29,7 @@
 #define PY_VERSION		"2.7.2"
 
 /* PyPy version as a string */
-#define PYPY_VERSION "1.8.1"
+#define PYPY_VERSION "1.9.1"
 
 /* Subversion Revision number of this file (not of the repository).
  * Empty since Mercurial migration. */
diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py
--- a/pypy/module/cpyext/pyerrors.py
+++ b/pypy/module/cpyext/pyerrors.py
@@ -2,6 +2,7 @@
 
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.interpreter.error import OperationError
+from pypy.interpreter import pytraceback
 from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING
 from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning
 from pypy.module.cpyext.pyobject import (
@@ -315,3 +316,65 @@
     It may be called without holding the interpreter lock."""
     space.check_signal_action.set_interrupt()
 
+ at cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void)
+def PyErr_GetExcInfo(space, ptype, pvalue, ptraceback):
+    """---Cython extension---
+
+    Retrieve the exception info, as known from ``sys.exc_info()``.  This
+    refers to an exception that was already caught, not to an exception
+    that was freshly raised.  Returns new references for the three
+    objects, any of which may be *NULL*.  Does not modify the exception
+    info state.
+
+    .. note::
+
+       This function is not normally used by code that wants to handle
+       exceptions.  Rather, it can be used when code needs to save and
+       restore the exception state temporarily.  Use
+       :c:func:`PyErr_SetExcInfo` to restore or clear the exception
+       state.
+    """
+    ec = space.getexecutioncontext()
+    operror = ec.sys_exc_info()
+    if operror:
+        ptype[0] = make_ref(space, operror.w_type)
+        pvalue[0] = make_ref(space, operror.get_w_value(space))
+        ptraceback[0] = make_ref(space, space.wrap(operror.get_traceback()))
+    else:
+        ptype[0] = lltype.nullptr(PyObject.TO)
+        pvalue[0] = lltype.nullptr(PyObject.TO)
+        ptraceback[0] = lltype.nullptr(PyObject.TO)
+
+ at cpython_api([PyObject, PyObject, PyObject], lltype.Void)
+def PyErr_SetExcInfo(space, w_type, w_value, w_traceback):
+    """---Cython extension---
+
+    Set the exception info, as known from ``sys.exc_info()``.  This refers
+    to an exception that was already caught, not to an exception that was
+    freshly raised.  This function steals the references of the arguments.
+    To clear the exception state, pass *NULL* for all three arguments.
+    For general rules about the three arguments, see :c:func:`PyErr_Restore`.
+ 
+    .. note::
+ 
+       This function is not normally used by code that wants to handle
+       exceptions.  Rather, it can be used when code needs to save and
+       restore the exception state temporarily.  Use
+       :c:func:`PyErr_GetExcInfo` to read the exception state.
+    """
+    if w_value is None or space.is_w(w_value, space.w_None):
+        operror = None
+    else:
+        tb = None
+        if w_traceback is not None:
+            try:
+                tb = pytraceback.check_traceback(space, w_traceback, '?')
+            except OperationError:    # catch and ignore bogus objects
+                pass
+        operror = OperationError(w_type, w_value, tb)
+    #
+    ec = space.getexecutioncontext()
+    ec.set_sys_exc_info(operror)
+    Py_DecRef(space, w_type)
+    Py_DecRef(space, w_value)
+    Py_DecRef(space, w_traceback)
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -167,14 +167,16 @@
     if rffi.cast(lltype.Signed, res) == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
 
+# Warning, confusing function name (like CPython).  Used only for sq_contains.
 def wrap_objobjproc(space, w_self, w_args, func):
     func_target = rffi.cast(objobjproc, func)
     check_num_args(space, w_args, 1)
     w_value, = space.fixedview(w_args)
     res = generic_cpy_call(space, func_target, w_self, w_value)
-    if rffi.cast(lltype.Signed, res) == -1:
+    res = rffi.cast(lltype.Signed, res)
+    if res == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
-    return space.wrap(res)
+    return space.wrap(bool(res))
 
 def wrap_objobjargproc(space, w_self, w_args, func):
     func_target = rffi.cast(objobjargproc, func)
@@ -183,7 +185,7 @@
     res = generic_cpy_call(space, func_target, w_self, w_key, w_value)
     if rffi.cast(lltype.Signed, res) == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
-    return space.wrap(res)
+    return space.w_None
 
 def wrap_delitem(space, w_self, w_args, func):
     func_target = rffi.cast(objobjargproc, func)
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -2253,24 +2253,6 @@
     """Concat two strings giving a new Unicode string."""
     raise NotImplementedError
 
- at cpython_api([PyObject, PyObject, Py_ssize_t], PyObject)
-def PyUnicode_Split(space, s, sep, maxsplit):
-    """Split a string giving a list of Unicode strings.  If sep is NULL, splitting
-    will be done at all whitespace substrings.  Otherwise, splits occur at the given
-    separator.  At most maxsplit splits will be done.  If negative, no limit is
-    set.  Separators are not included in the resulting list.
-
-    This function used an int type for maxsplit. This might require
-    changes in your code for properly supporting 64-bit systems."""
-    raise NotImplementedError
-
- at cpython_api([PyObject, rffi.INT_real], PyObject)
-def PyUnicode_Splitlines(space, s, keepend):
-    """Split a Unicode string at line breaks, returning a list of Unicode strings.
-    CRLF is considered to be one line break.  If keepend is 0, the Line break
-    characters are not included in the resulting strings."""
-    raise NotImplementedError
-
 @cpython_api([PyObject, PyObject, rffi.CCHARP], PyObject)
 def PyUnicode_Translate(space, str, table, errors):
     """Translate a string by applying a character mapping table to it and return the
@@ -2287,29 +2269,6 @@
     use the default error handling."""
     raise NotImplementedError
 
- at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2)
-def PyUnicode_Find(space, str, substr, start, end, direction):
-    """Return the first position of substr in str*[*start:end] using the given
-    direction (direction == 1 means to do a forward search, direction == -1 a
-    backward search).  The return value is the index of the first match; a value of
-    -1 indicates that no match was found, and -2 indicates that an error
-    occurred and an exception has been set.
-
-    This function used an int type for start and end. This
-    might require changes in your code for properly supporting 64-bit
-    systems."""
-    raise NotImplementedError
-
- at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1)
-def PyUnicode_Count(space, str, substr, start, end):
-    """Return the number of non-overlapping occurrences of substr in
-    str[start:end].  Return -1 if an error occurred.
-
-    This function returned an int type and used an int
-    type for start and end. This might require changes in your code for
-    properly supporting 64-bit systems."""
-    raise NotImplementedError
-
 @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject)
 def PyUnicode_RichCompare(space, left, right, op):
     """Rich compare two unicode strings and return one of the following:
diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py
--- a/pypy/module/cpyext/test/conftest.py
+++ b/pypy/module/cpyext/test/conftest.py
@@ -10,7 +10,7 @@
     return False
 
 def pytest_funcarg__space(request):
-    return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+    return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
 
 def pytest_funcarg__api(request):
     return request.cls.api
diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c
--- a/pypy/module/cpyext/test/foo.c
+++ b/pypy/module/cpyext/test/foo.c
@@ -176,6 +176,8 @@
     {NULL}  /* Sentinel */
 };
 
+PyDoc_STRVAR(foo_doc, "foo is for testing.");
+
 static PyTypeObject footype = {
     PyVarObject_HEAD_INIT(NULL, 0)
     "foo.foo",               /*tp_name*/
@@ -198,7 +200,7 @@
     (setattrofunc)foo_setattro, /*tp_setattro*/
     0,                       /*tp_as_buffer*/
     Py_TPFLAGS_DEFAULT,      /*tp_flags*/
-    0,                       /*tp_doc*/
+    foo_doc,                 /*tp_doc*/
     0,                       /*tp_traverse*/
     0,                       /*tp_clear*/
     0,                       /*tp_richcompare*/
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -19,7 +19,8 @@
 
 class BaseApiTest(LeakCheckingTest):
     def setup_class(cls):
-        cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi',
+                                                        'array'])
 
         # warm up reference counts:
         # - the posix module allocates a HCRYPTPROV on Windows
diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py
--- a/pypy/module/cpyext/test/test_arraymodule.py
+++ b/pypy/module/cpyext/test/test_arraymodule.py
@@ -1,3 +1,4 @@
+from pypy.conftest import gettestobjspace
 from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
 
 import py
diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py
--- a/pypy/module/cpyext/test/test_bufferobject.py
+++ b/pypy/module/cpyext/test/test_bufferobject.py
@@ -48,3 +48,17 @@
             ])
         b = module.buffer_new()
         raises(AttributeError, getattr, b, 'x')
+
+    def test_array_buffer(self):
+        module = self.import_extension('foo', [
+            ("roundtrip", "METH_O",
+             """
+                 PyBufferObject *buf = (PyBufferObject *)args;
+                 return PyString_FromStringAndSize(buf->b_ptr, buf->b_size);
+             """),
+            ])
+        import array
+        a = array.array('c', 'text')
+        b = buffer(a)
+        assert module.roundtrip(b) == 'text'
+        
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -35,7 +35,7 @@
 
 class AppTestApi:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
         from pypy.rlib.libffi import get_libc_name
         cls.w_libc = cls.space.wrap(get_libc_name())
 
@@ -165,8 +165,9 @@
         return leaking
 
 class AppTestCpythonExtensionBase(LeakCheckingTest):
+    
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
         cls.space.getbuiltinmodule("cpyext")
         from pypy.module.imp.importing import importhook
         importhook(cls.space, "os") # warm up reference counts
diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py
--- a/pypy/module/cpyext/test/test_import.py
+++ b/pypy/module/cpyext/test/test_import.py
@@ -19,7 +19,7 @@
                                          space.wrap('__name__'))) == 'foobar'
 
     def test_getmoduledict(self, space, api):
-        testmod = "binascii"
+        testmod = "_functools"
         w_pre_dict = api.PyImport_GetModuleDict()
         assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod)))
 
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -218,3 +218,51 @@
             assert e.filename == "blyf"
             assert e.errno == errno.EBADF
             assert e.strerror == os.strerror(errno.EBADF)
+
+    def test_GetSetExcInfo(self):
+        import sys
+        module = self.import_extension('foo', [
+            ("getset_exc_info", "METH_VARARGS",
+             r'''
+             PyObject *type, *val, *tb;
+             PyObject *new_type, *new_val, *new_tb;
+             PyObject *result;
+
+             if (!PyArg_ParseTuple(args, "OOO", &new_type, &new_val, &new_tb))
+                 return NULL;
+
+             PyErr_GetExcInfo(&type, &val, &tb);
+
+             Py_INCREF(new_type);
+             Py_INCREF(new_val);
+             Py_INCREF(new_tb);
+             PyErr_SetExcInfo(new_type, new_val, new_tb);
+
+             result = Py_BuildValue("OOO",
+                                    type ? type : Py_None,
+                                    val  ? val  : Py_None,
+                                    tb   ? tb   : Py_None);
+             Py_XDECREF(type);
+             Py_XDECREF(val);
+             Py_XDECREF(tb);
+             return result;
+             '''
+             ),
+            ])
+        try:
+            raise ValueError(5)
+        except ValueError, old_exc:
+            new_exc = TypeError("TEST")
+            orig_sys_exc_info = sys.exc_info()
+            orig_exc_info = module.getset_exc_info(new_exc.__class__,
+                                                   new_exc, None)
+            new_sys_exc_info = sys.exc_info()
+            new_exc_info = module.getset_exc_info(*orig_exc_info)
+            reset_sys_exc_info = sys.exc_info()
+
+            assert orig_exc_info[0] is old_exc.__class__
+            assert orig_exc_info[1] is old_exc
+            assert orig_exc_info == orig_sys_exc_info
+            assert orig_exc_info == reset_sys_exc_info
+            assert new_exc_info == (new_exc.__class__, new_exc, None)
+            assert new_exc_info == new_sys_exc_info
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -20,6 +20,7 @@
         assert type(obj) is module.fooType
         print "type of obj has type", type(type(obj))
         print "type of type of obj has type", type(type(type(obj)))
+        assert module.fooType.__doc__ == "foo is for testing."
 
     def test_typeobject_method_descriptor(self):
         module = self.import_module(name='foo')
@@ -414,8 +415,11 @@
             static int
             mp_ass_subscript(PyObject *self, PyObject *key, PyObject *value)
             {
-                PyErr_SetNone(PyExc_ZeroDivisionError);
-                return -1;
+                if (PyInt_Check(key)) {
+                    PyErr_SetNone(PyExc_ZeroDivisionError);
+                    return -1;
+                }
+                return 0;
             }
             PyMappingMethods tp_as_mapping;
             static PyTypeObject Foo_Type = {
@@ -425,6 +429,36 @@
             ''')
         obj = module.new_obj()
         raises(ZeroDivisionError, obj.__setitem__, 5, None)
+        res = obj.__setitem__('foo', None)
+        assert res is None
+
+    def test_sq_contains(self):
+        module = self.import_extension('foo', [
+           ("new_obj", "METH_NOARGS",
+            '''
+                PyObject *obj;
+                Foo_Type.tp_as_sequence = &tp_as_sequence;
+                tp_as_sequence.sq_contains = sq_contains;
+                if (PyType_Ready(&Foo_Type) < 0) return NULL;
+                obj = PyObject_New(PyObject, &Foo_Type);
+                return obj;
+            '''
+            )],
+            '''
+            static int
+            sq_contains(PyObject *self, PyObject *value)
+            {
+                return 42;
+            }
+            PySequenceMethods tp_as_sequence;
+            static PyTypeObject Foo_Type = {
+                PyVarObject_HEAD_INIT(NULL, 0)
+                "foo.foo",
+            };
+            ''')
+        obj = module.new_obj()
+        res = "foo" in obj
+        assert res is True
 
     def test_tp_iter(self):
         module = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -457,3 +457,31 @@
         assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 1, 5, -1) == 1
         self.raises(space, api, TypeError,
                     api.PyUnicode_Tailmatch, w_str, space.wrap(3), 2, 10, 1)
+
+    def test_count(self, space, api):
+        w_str = space.wrap(u"abcabdab")
+        assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), 0, -1) == 2
+        assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), 0, 2) == 1
+        assert api.PyUnicode_Count(w_str, space.wrap(u"ab"), -5, 30) == 2
+
+    def test_find(self, space, api):
+        w_str = space.wrap(u"abcabcd")
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 7, 1) == 2
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 3, 7, 1) == 5
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 7, -1) == 5
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 3, 7, -1) == 5
+        assert api.PyUnicode_Find(w_str, space.wrap(u"c"), 0, 4, -1) == 2
+        assert api.PyUnicode_Find(w_str, space.wrap(u"z"), 0, 4, -1) == -1
+
+    def test_split(self, space, api):
+        w_str = space.wrap(u"a\nb\nc\nd")
+        assert "[u'a', u'b', u'c', u'd']" == space.unwrap(space.repr(
+                api.PyUnicode_Split(w_str, space.wrap('\n'), -1)))
+        assert r"[u'a', u'b', u'c\nd']" == space.unwrap(space.repr(
+                api.PyUnicode_Split(w_str, space.wrap('\n'), 2)))
+        assert r"[u'a', u'b', u'c d']" == space.unwrap(space.repr(
+                api.PyUnicode_Split(space.wrap(u'a\nb  c d'), None, 2)))
+        assert "[u'a', u'b', u'c', u'd']" == space.unwrap(space.repr(
+                api.PyUnicode_Splitlines(w_str, 0)))
+        assert r"[u'a\n', u'b\n', u'c\n', u'd']" == space.unwrap(space.repr(
+                api.PyUnicode_Splitlines(w_str, 1)))
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -307,6 +307,8 @@
         if not space.is_true(space.issubtype(self, space.w_type)):
             self.flag_cpytype = True
         self.flag_heaptype = False
+        if pto.c_tp_doc:
+            self.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc))
 
 @bootstrap_function
 def init_typeobject(space):
@@ -624,7 +626,6 @@
     Creates an interpreter type from a PyTypeObject structure.
     """
     # missing:
-    # setting __doc__ if not defined and tp_doc defined
     # inheriting tp_as_* slots
     # unsupported:
     # tp_mro, tp_subclasses
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -598,3 +598,46 @@
     else:
         return stringtype.stringendswith(str, substr, start, end)
 
+ at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t], Py_ssize_t, error=-1)
+def PyUnicode_Count(space, w_str, w_substr, start, end):
+    """Return the number of non-overlapping occurrences of substr in
+    str[start:end].  Return -1 if an error occurred."""
+    w_count = space.call_method(w_str, "count", w_substr,
+                                space.wrap(start), space.wrap(end))
+    return space.int_w(w_count)
+
+ at cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real],
+             Py_ssize_t, error=-2)
+def PyUnicode_Find(space, w_str, w_substr, start, end, direction):
+    """Return the first position of substr in str*[*start:end] using
+    the given direction (direction == 1 means to do a forward search,
+    direction == -1 a backward search).  The return value is the index
+    of the first match; a value of -1 indicates that no match was
+    found, and -2 indicates that an error occurred and an exception
+    has been set."""
+    if rffi.cast(lltype.Signed, direction) > 0:
+        w_pos = space.call_method(w_str, "find", w_substr,
+                                  space.wrap(start), space.wrap(end))
+    else:
+        w_pos = space.call_method(w_str, "rfind", w_substr,
+                                  space.wrap(start), space.wrap(end))
+    return space.int_w(w_pos)
+
+ at cpython_api([PyObject, PyObject, Py_ssize_t], PyObject)
+def PyUnicode_Split(space, w_str, w_sep, maxsplit):
+    """Split a string giving a list of Unicode strings.  If sep is
+    NULL, splitting will be done at all whitespace substrings.
+    Otherwise, splits occur at the given separator.  At most maxsplit
+    splits will be done.  If negative, no limit is set.  Separators
+    are not included in the resulting list."""
+    if w_sep is None:
+        w_sep = space.w_None
+    return space.call_method(w_str, "split", w_sep, space.wrap(maxsplit))
+
+ at cpython_api([PyObject, rffi.INT_real], PyObject)
+def PyUnicode_Splitlines(space, w_str, keepend):
+    """Split a Unicode string at line breaks, returning a list of
+    Unicode strings.  CRLF is considered to be one line break.  If
+    keepend is 0, the line break characters are not included in the
+    resulting strings."""
+    return space.call_method(w_str, "splitlines", space.wrap(keepend))
diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py
--- a/pypy/module/fcntl/test/test_fcntl.py
+++ b/pypy/module/fcntl/test/test_fcntl.py
@@ -13,7 +13,7 @@
 
 class AppTestFcntl:
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('fcntl', 'array'))
+        space = gettestobjspace(usemodules=('fcntl', 'array', 'struct'))
         cls.space = space
         tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_'))
         cls.w_tmp = space.wrap(tmpprefix)
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -987,6 +987,10 @@
             os.environ['LANG'] = oldlang
 
 class AppTestImportHooks(object):
+
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=('struct',))
+    
     def test_meta_path(self):
         tried_imports = []
         class Importer(object):
diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
--- a/pypy/module/itertools/test/test_itertools.py
+++ b/pypy/module/itertools/test/test_itertools.py
@@ -891,7 +891,7 @@
 
 class AppTestItertools27:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['itertools'])
+        cls.space = gettestobjspace(usemodules=['itertools', 'struct'])
         if cls.space.is_true(cls.space.appexec([], """():
             import sys; return sys.version_info < (2, 7)
             """)):
diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py
--- a/pypy/module/math/test/test_math.py
+++ b/pypy/module/math/test/test_math.py
@@ -6,7 +6,7 @@
 
 class AppTestMath:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['math'])
+        cls.space = gettestobjspace(usemodules=['math', 'struct'])
         cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES)
         cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host)
 
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -29,6 +29,7 @@
         'flatiter': 'interp_numarray.W_FlatIterator',
         'isna': 'interp_numarray.isna',
         'concatenate': 'interp_numarray.concatenate',
+        'repeat': 'interp_numarray.repeat',
 
         'set_string_function': 'appbridge.set_string_function',
 
@@ -99,9 +100,12 @@
         ("exp2", "exp2"),
         ("expm1", "expm1"),
         ("fabs", "fabs"),
+        ("fmax", "fmax"),
+        ("fmin", "fmin"),
         ("fmod", "fmod"),
         ("floor", "floor"),
         ("ceil", "ceil"),
+        ("trunc", "trunc"),
         ("greater", "greater"),
         ("greater_equal", "greater_equal"),
         ("less", "less"),
@@ -122,12 +126,16 @@
         ("sinh", "sinh"),
         ("subtract", "subtract"),
         ('sqrt', 'sqrt'),
+        ('square', 'square'),
         ("tan", "tan"),
         ("tanh", "tanh"),
         ('bitwise_and', 'bitwise_and'),
         ('bitwise_or', 'bitwise_or'),
         ('bitwise_xor', 'bitwise_xor'),
         ('bitwise_not', 'invert'),
+        ('left_shift', 'left_shift'),
+        ('right_shift', 'right_shift'),
+        ('invert', 'invert'),
         ('isnan', 'isnan'),
         ('isinf', 'isinf'),
         ('isneginf', 'isneginf'),
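
The micronumpy ufunc table above gains fmax/fmin, trunc, square, left_shift,
right_shift and a directly exposed invert, plus a top-level repeat.  CPython's
numpy is used below purely to illustrate the semantics the new _numpypy names
are expected to match (a reference sketch, not a claim about the PyPy
implementation's current coverage):

    import numpy as np

    a = np.array([1, 4, 9])
    print(np.fmax(a, 5))                     # [5 5 9]
    print(np.square(a))                      # [ 1 16 81]
    print(np.trunc(np.array([1.7, -1.7])))   # [ 1. -1.]
    print(np.left_shift(a, 1))               # [ 2  8 18]
    print(np.repeat(a, 2))                   # [1 1 4 4 9 9]
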
diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py
--- a/pypy/module/micronumpy/app_numpy.py
+++ b/pypy/module/micronumpy/app_numpy.py
@@ -16,7 +16,7 @@
         a[i][i] = 1
     return a
 
-def sum(a,axis=None):
+def sum(a,axis=None, out=None):
     '''sum(a, axis=None)
     Sum of array elements over a given axis.
 
@@ -43,17 +43,17 @@
     # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements.
     if not hasattr(a, "sum"):
         a = _numpypy.array(a)
-    return a.sum(axis)
+    return a.sum(axis=axis, out=out)
 
-def min(a, axis=None):
+def min(a, axis=None, out=None):
     if not hasattr(a, "min"):
         a = _numpypy.array(a)
-    return a.min(axis)
+    return a.min(axis=axis, out=out)
 
-def max(a, axis=None):
+def max(a, axis=None, out=None):
     if not hasattr(a, "max"):
         a = _numpypy.array(a)
-    return a.max(axis)
+    return a.max(axis=axis, out=out)
 
 def arange(start, stop=None, step=1, dtype=None):
     '''arange([start], stop[, step], dtype=None)
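
sum/min/max above now thread an optional out argument through to the
corresponding array method.  The forwarding pattern, with CPython's numpy
standing in for _numpypy (a sketch of the idea, not the module code):

    import numpy as np

    def my_sum(a, axis=None, out=None):
        # coerce to an array if needed, then forward both keywords so the
        # result can also be written into 'out'
        if not hasattr(a, "sum"):
            a = np.array(a)
        return a.sum(axis=axis, out=out)

    a = np.arange(6).reshape(2, 3)
    buf = np.zeros(3, dtype=a.dtype)
    my_sum(a, axis=0, out=buf)
    print(buf)                               # [3 5 7]
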
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py
--- a/pypy/module/micronumpy/interp_boxes.py
+++ b/pypy/module/micronumpy/interp_boxes.py
@@ -62,21 +62,24 @@
         return space.wrap(dtype.itemtype.bool(self))
 
     def _binop_impl(ufunc_name):
-        def impl(self, space, w_other):
+        def impl(self, space, w_other, w_out=None):
             from pypy.module.micronumpy import interp_ufuncs
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                            [self, w_other, w_out])
         return func_with_new_name(impl, "binop_%s_impl" % ufunc_name)
 
     def _binop_right_impl(ufunc_name):
-        def impl(self, space, w_other):
+        def impl(self, space, w_other, w_out=None):
             from pypy.module.micronumpy import interp_ufuncs
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, 
+                                                            [w_other, self, w_out])
         return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name)
 
     def _unaryop_impl(ufunc_name):
-        def impl(self, space):
+        def impl(self, space, w_out=None):
             from pypy.module.micronumpy import interp_ufuncs
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                                    [self, w_out])
         return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name)
 
     descr_add = _binop_impl("add")
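
The box operators above are produced by small closure factories that now also
accept an optional w_out.  A toy version of the pattern in plain Python, with
__name__ assignment approximating func_with_new_name and a hypothetical
_dispatch standing in for the interp_ufuncs call:

    import operator

    _UFUNCS = {"add": operator.add, "multiply": operator.mul}

    def _dispatch(name, args):
        # stand-in for interp_ufuncs.get(space).<name>.call(space, args)
        left, right, out = args
        result = _UFUNCS[name](left.value, right)
        if out is not None:
            out.value = result               # write into the output box too
        return result

    def _binop_impl(ufunc_name):
        def impl(self, other, out=None):
            return _dispatch(ufunc_name, [self, other, out])
        impl.__name__ = "binop_%s_impl" % ufunc_name
        return impl

    class Box(object):
        def __init__(self, value):
            self.value = value
        descr_add = _binop_impl("add")
        descr_mul = _binop_impl("multiply")

    target = Box(0)
    result = Box(2).descr_add(3, out=target)
    print(result)          # 5
    print(target.value)    # 5
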
diff --git a/pypy/module/micronumpy/interp_dtype.py b/pypy/module/micronumpy/interp_dtype.py
--- a/pypy/module/micronumpy/interp_dtype.py
+++ b/pypy/module/micronumpy/interp_dtype.py
@@ -359,6 +359,7 @@
             name="int64",
             char="q",
             w_box_type=space.gettypefor(interp_boxes.W_Int64Box),
+            alternate_constructors=[space.w_long],
         )
         self.w_uint64dtype = W_Dtype(
             types.UInt64(),
@@ -386,23 +387,6 @@
             alternate_constructors=[space.w_float],
             aliases=["float"],
         )
-        self.w_longlongdtype = W_Dtype(
-            types.Int64(),
-            num=9,
-            kind=SIGNEDLTR,
-            name='int64',
-            char='q',
-            w_box_type = space.gettypefor(interp_boxes.W_LongLongBox),
-            alternate_constructors=[space.w_long],
-        )
-        self.w_ulonglongdtype = W_Dtype(
-            types.UInt64(),
-            num=10,
-            kind=UNSIGNEDLTR,
-            name='uint64',
-            char='Q',
-            w_box_type = space.gettypefor(interp_boxes.W_ULongLongBox),
-        )
         self.w_stringdtype = W_Dtype(
             types.StringType(1),
             num=18,
@@ -435,17 +419,19 @@
             self.w_booldtype, self.w_int8dtype, self.w_uint8dtype,
             self.w_int16dtype, self.w_uint16dtype, self.w_int32dtype,
             self.w_uint32dtype, self.w_longdtype, self.w_ulongdtype,
-            self.w_longlongdtype, self.w_ulonglongdtype,
+            self.w_int64dtype, self.w_uint64dtype,
             self.w_float32dtype,
             self.w_float64dtype, self.w_stringdtype, self.w_unicodedtype,
             self.w_voiddtype,
         ]
-        self.dtypes_by_num_bytes = sorted(
+        self.float_dtypes_by_num_bytes = sorted(
             (dtype.itemtype.get_element_size(), dtype)
-            for dtype in self.builtin_dtypes
+            for dtype in [self.w_float32dtype, self.w_float64dtype]
         )
         self.dtypes_by_name = {}
-        for dtype in self.builtin_dtypes:
+        # we reverse, so the stuff with lower numbers override stuff with
+        # higher numbers
+        for dtype in reversed(self.builtin_dtypes):
             self.dtypes_by_name[dtype.name] = dtype
             can_name = dtype.kind + str(dtype.itemtype.get_element_size())
             self.dtypes_by_name[can_name] = dtype
@@ -473,7 +459,7 @@
             'LONG': self.w_longdtype,
             'UNICODE': self.w_unicodedtype,
             #'OBJECT',
-            'ULONGLONG': self.w_ulonglongdtype,
+            'ULONGLONG': self.w_uint64dtype,
             'STRING': self.w_stringdtype,
             #'CDOUBLE',
             #'DATETIME',
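
Two things happen in the dtype cache above: the separate longlong/ulonglong
dtypes collapse into int64/uint64, and dtypes_by_name is now filled by walking
the builtin list in reverse, so that when several dtypes share a canonical
name the one listed first wins.  A minimal illustration of that priority trick
(the names here are made up):

    builtin = [("bool", "b1"), ("int8", "i1"), ("int64", "i8"), ("intp", "i8")]
    by_canonical = {}
    # fill from the end: lower-priority (later) entries get overwritten
    for name, canonical in reversed(builtin):
        by_canonical[canonical] = name
    print(by_canonical["i8"])                # 'int64', not 'intp'
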
diff --git a/pypy/module/micronumpy/interp_iter.py b/pypy/module/micronumpy/interp_iter.py
--- a/pypy/module/micronumpy/interp_iter.py
+++ b/pypy/module/micronumpy/interp_iter.py
@@ -269,7 +269,7 @@
 
     def apply_transformations(self, arr, transformations):
         v = BaseIterator.apply_transformations(self, arr, transformations)
-        if len(arr.shape) == 1:
+        if len(arr.shape) == 1 and len(v.res_shape) == 1:
             return OneDimIterator(self.offset, self.strides[0],
                                   self.res_shape[0])
         return v
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -83,8 +83,9 @@
         return space.wrap(W_NDimArray(shape[:], dtype=dtype))
 
     def _unaryop_impl(ufunc_name):
-        def impl(self, space):
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self])
+        def impl(self, space, w_out=None):
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                                [self, w_out])
         return func_with_new_name(impl, "unaryop_%s_impl" % ufunc_name)
 
     descr_pos = _unaryop_impl("positive")
@@ -93,8 +94,9 @@
     descr_invert = _unaryop_impl("invert")
 
     def _binop_impl(ufunc_name):
-        def impl(self, space, w_other):
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [self, w_other])
+        def impl(self, space, w_other, w_out=None):
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space,
+                                                        [self, w_other, w_out])
         return func_with_new_name(impl, "binop_%s_impl" % ufunc_name)
 
     descr_add = _binop_impl("add")
@@ -124,12 +126,12 @@
         return space.newtuple([w_quotient, w_remainder])
 
     def _binop_right_impl(ufunc_name):
-        def impl(self, space, w_other):
+        def impl(self, space, w_other, w_out=None):
             w_other = scalar_w(space,
                 interp_ufuncs.find_dtype_for_scalar(space, w_other, self.find_dtype()),
                 w_other
             )
-            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self])
+            return getattr(interp_ufuncs.get(space), ufunc_name).call(space, [w_other, self, w_out])
         return func_with_new_name(impl, "binop_right_%s_impl" % ufunc_name)
 
     descr_radd = _binop_right_impl("add")
@@ -152,13 +154,21 @@
         return space.newtuple([w_quotient, w_remainder])
 
     def _reduce_ufunc_impl(ufunc_name, promote_to_largest=False):
-        def impl(self, space, w_axis=None):
+        def impl(self, space, w_axis=None, w_out=None):
             if space.is_w(w_axis, space.w_None):
                 axis = -1
             else:
                 axis = space.int_w(w_axis)
+            if space.is_w(w_out, space.w_None) or not w_out:
+                out = None
+            elif not isinstance(w_out, BaseArray):
+                raise OperationError(space.w_TypeError, space.wrap( 
+                        'output must be an array'))
+            else:
+                out = w_out
             return getattr(interp_ufuncs.get(space), ufunc_name).reduce(space,
-                                        self, True, promote_to_largest, axis)
+                                        self, True, promote_to_largest, axis,
+                                                                   False, out)
         return func_with_new_name(impl, "reduce_%s_impl" % ufunc_name)
 
     descr_sum = _reduce_ufunc_impl("add")
@@ -213,6 +223,7 @@
     def descr_dot(self, space, w_other):
         other = convert_to_array(space, w_other)
         if isinstance(other, Scalar):
+            #Note: w_out is not modified, this is numpy compliant.
             return self.descr_mul(space, other)
         elif len(self.shape) < 2 and len(other.shape) < 2:
             w_res = self.descr_mul(space, other)
@@ -502,7 +513,30 @@
             arr = concrete.copy(space)
             arr.setshape(space, new_shape)
         return arr
-
+       
+    @unwrap_spec(axis1=int, axis2=int)
+    def descr_swapaxes(self, space, axis1, axis2):
+        """a.swapaxes(axis1, axis2)
+    
+        Return a view of the array with `axis1` and `axis2` interchanged.
+    
+        Refer to `numpy.swapaxes` for full documentation.
+    
+        See Also
+        --------
+        numpy.swapaxes : equivalent function
+        """
+        concrete = self.get_concrete()
+        shape = concrete.shape[:]
+        strides = concrete.strides[:]
+        backstrides = concrete.backstrides[:]
+        shape[axis1], shape[axis2] = shape[axis2], shape[axis1]   
+        strides[axis1], strides[axis2] = strides[axis2], strides[axis1]
+        backstrides[axis1], backstrides[axis2] = backstrides[axis2], backstrides[axis1] 
+        arr = W_NDimSlice(concrete.start, strides, 
+                           backstrides, shape, concrete)
+        return space.wrap(arr)   
+                                      
     def descr_tolist(self, space):
         if len(self.shape) == 0:
             assert isinstance(self, Scalar)
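
descr_swapaxes above builds a view by exchanging the per-axis shape, strides
and backstrides, which is how numpy's swapaxes returns a view onto the same
storage.  For reference, the behaviour it aims to mirror, shown with CPython's
numpy:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    b = a.swapaxes(0, 1)          # shape (3, 2): axes 0 and 1 exchanged
    print(b.shape, b.strides)     # strides swapped, no data copied
    b[0, 0] = 99
    print(a[0, 0])                # 99: the swapped array is a view
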
@@ -514,14 +548,14 @@
             )
         return w_result
 
-    def descr_mean(self, space, w_axis=None):
+    def descr_mean(self, space, w_axis=None, w_out=None):
         if space.is_w(w_axis, space.w_None):
             w_axis = space.wrap(-1)
             w_denom = space.wrap(support.product(self.shape))
         else:
             dim = space.int_w(w_axis)
             w_denom = space.wrap(self.shape[dim])
-        return space.div(self.descr_sum_promote(space, w_axis), w_denom)
+        return space.div(self.descr_sum_promote(space, w_axis, w_out), w_denom)
 
     def descr_var(self, space, w_axis=None):
         return get_appbridge_cache(space).call_method(space, '_var', self,
@@ -662,6 +696,10 @@
     def compute_first_step(self, sig, frame):
         pass
 
+    @unwrap_spec(repeats=int)
+    def descr_repeat(self, space, repeats, w_axis=None):
+        return repeat(space, self, repeats, w_axis)
+
 def convert_to_array(space, w_obj):
     if isinstance(w_obj, BaseArray):
         return w_obj
@@ -714,11 +752,12 @@
     """
     Class for representing virtual arrays, such as binary ops or ufuncs
     """
-    def __init__(self, name, shape, res_dtype):
+    def __init__(self, name, shape, res_dtype, out_arg=None):
         BaseArray.__init__(self, shape)
         self.forced_result = None
         self.res_dtype = res_dtype
         self.name = name
+        self.res = out_arg
         self.size = support.product(self.shape) * res_dtype.get_size()
 
     def _del_sources(self):
@@ -727,13 +766,18 @@
         raise NotImplementedError
 
     def compute(self):
-        ra = ResultArray(self, self.shape, self.res_dtype)
+        ra = ResultArray(self, self.shape, self.res_dtype, self.res)
         loop.compute(ra)
+        if self.res:
+            broadcast_dims = len(self.res.shape) - len(self.shape)
+            chunks = [Chunk(0,0,0,0)] * broadcast_dims + \
+                     [Chunk(0, i, 1, i) for i in self.shape]
+            return Chunks(chunks).apply(self.res)
         return ra.left
 
     def force_if_needed(self):
         if self.forced_result is None:
-            self.forced_result = self.compute()
+            self.forced_result = self.compute().get_concrete()
             self._del_sources()
 
     def get_concrete(self):
@@ -773,8 +817,9 @@
 
 
 class Call1(VirtualArray):
-    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values):
-        VirtualArray.__init__(self, name, shape, res_dtype)
+    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, values,
+                                                            out_arg=None):
+        VirtualArray.__init__(self, name, shape, res_dtype, out_arg)
         self.values = values
         self.size = values.size
         self.ufunc = ufunc
@@ -786,6 +831,12 @@
     def create_sig(self):
         if self.forced_result is not None:
             return self.forced_result.create_sig()
+        if self.shape != self.values.shape:
+            #This happens if out arg is used
+            return signature.BroadcastUfunc(self.ufunc, self.name,
+                                            self.calc_dtype,
+                                            self.values.create_sig(),
+                                            self.res.create_sig())
         return signature.Call1(self.ufunc, self.name, self.calc_dtype,
                                self.values.create_sig())
 
@@ -793,8 +844,9 @@
     """
     Intermediate class for performing binary operations.
     """
-    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right):
-        VirtualArray.__init__(self, name, shape, res_dtype)
+    def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right,
+            out_arg=None):
+        VirtualArray.__init__(self, name, shape, res_dtype, out_arg)
         self.ufunc = ufunc
         self.left = left
         self.right = right
@@ -832,8 +884,13 @@
         Call2.__init__(self, None, 'assign', shape, dtype, dtype, res, child)
 
     def create_sig(self):
-        return signature.ResultSignature(self.res_dtype, self.left.create_sig(),
-                                         self.right.create_sig())
+        if self.left.shape != self.right.shape:
+            sig = signature.BroadcastResultSignature(self.res_dtype,
+                        self.left.create_sig(), self.right.create_sig())
+        else:
+            sig = signature.ResultSignature(self.res_dtype, 
+                        self.left.create_sig(), self.right.create_sig())
+        return sig
 
 class ToStringArray(Call1):
     def __init__(self, child):
@@ -842,9 +899,9 @@
         self.s = StringBuilder(child.size * self.item_size)
         Call1.__init__(self, None, 'tostring', child.shape, dtype, dtype,
                        child)
-        self.res = W_NDimArray([1], dtype, 'C')
-        self.res_casted = rffi.cast(rffi.CArrayPtr(lltype.Char),
-                                    self.res.storage)
+        self.res_str = W_NDimArray([1], dtype, order='C')
+        self.res_str_casted = rffi.cast(rffi.CArrayPtr(lltype.Char),
+                                    self.res_str.storage)
 
     def create_sig(self):
         return signature.ToStringSignature(self.calc_dtype,
@@ -950,7 +1007,7 @@
 
     def setitem(self, item, value):
         self.invalidated()
-        self.dtype.setitem(self, item, value)
+        self.dtype.setitem(self, item, value.convert_to(self.dtype))
 
     def calc_strides(self, shape):
         dtype = self.find_dtype()
@@ -1125,7 +1182,8 @@
 
 @unwrap_spec(subok=bool, copy=bool, ownmaskna=bool)
 def array(space, w_item_or_iterable, w_dtype=None, w_order=None,
-          subok=True, copy=True, w_maskna=None, ownmaskna=False):
+          subok=True, copy=True, w_maskna=None, ownmaskna=False,
+          w_ndmin=None):
     # find scalar
     if w_maskna is None:
         w_maskna = space.w_None
@@ -1170,8 +1228,13 @@
                 break
         if dtype is None:
             dtype = interp_dtype.get_dtype_cache(space).w_float64dtype
+    shapelen = len(shape)
+    if w_ndmin is not None and not space.is_w(w_ndmin, space.w_None):
+        ndmin = space.int_w(w_ndmin)
+        if ndmin > shapelen:
+            shape = [1] * (ndmin - shapelen) + shape
+            shapelen = ndmin
     arr = W_NDimArray(shape[:], dtype=dtype, order=order)
-    shapelen = len(shape)
     arr_iter = arr.create_iter()
     # XXX we might want to have a jitdriver here
     for i in range(len(elems_w)):
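
array() above grows an ndmin argument: when the detected shape has fewer than
ndmin dimensions, length-1 axes are prepended.  For reference, the numpy
behaviour being matched, plus the padding expressed as plain list arithmetic:

    import numpy as np

    print(np.array([1, 2, 3], ndmin=3).shape)    # (1, 1, 3)

    shape, ndmin = [3], 3
    if ndmin > len(shape):
        shape = [1] * (ndmin - len(shape)) + shape
    print(shape)                                 # [1, 1, 3]
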
@@ -1225,6 +1288,31 @@
         return convert_to_array(space, w_obj2).descr_dot(space, w_arr)
     return w_arr.descr_dot(space, w_obj2)
 
+@unwrap_spec(repeats=int)
+def repeat(space, w_arr, repeats, w_axis=None):
+    arr = convert_to_array(space, w_arr)
+    if space.is_w(w_axis, space.w_None):
+        arr = arr.descr_flatten(space).get_concrete()
+        orig_size = arr.shape[0]
+        shape = [arr.shape[0] * repeats]
+        res = W_NDimArray(shape, arr.find_dtype())
+        for i in range(repeats):
+            Chunks([Chunk(i, shape[0] - repeats + i, repeats,
+                          orig_size)]).apply(res).setslice(space, arr)
+    else:
+        arr = arr.get_concrete()
+        axis = space.int_w(w_axis)
+        shape = arr.shape[:]
+        chunks = [Chunk(0, i, 1, i) for i in shape]
+        orig_size = shape[axis]
+        shape[axis] *= repeats
+        res = W_NDimArray(shape, arr.find_dtype())
+        for i in range(repeats):
+            chunks[axis] = Chunk(i, shape[axis] - repeats + i, repeats,
+                                 orig_size)
+            Chunks(chunks).apply(res).setslice(space, arr)
+    return res
+
 @unwrap_spec(axis=int)
 def concatenate(space, w_args, axis=0):
     args_w = space.listview(w_args)
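
In the axis=None branch of repeat() above, copy number i of the flattened
input is written into a strided slice of the result; that is what
Chunk(i, shape[0] - repeats + i, repeats, orig_size) expresses.  The same idea
with numpy slicing (a sketch, not the RPython Chunk machinery):

    import numpy as np

    def repeat_flat(arr, repeats):
        flat = np.asarray(arr).ravel()
        res = np.empty(flat.shape[0] * repeats, dtype=flat.dtype)
        for k in range(repeats):
            # copy number k lands on every repeats-th slot, offset by k
            res[k::repeats] = flat
        return res

    print(repeat_flat([1, 2, 3], 2))             # [1 1 2 2 3 3]
    print(np.repeat(np.array([1, 2, 3]), 2))     # same result
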
@@ -1347,9 +1435,11 @@
     copy = interp2app(BaseArray.descr_copy),
     flatten = interp2app(BaseArray.descr_flatten),
     reshape = interp2app(BaseArray.descr_reshape),
+    swapaxes = interp2app(BaseArray.descr_swapaxes),
     tolist = interp2app(BaseArray.descr_tolist),
     take = interp2app(BaseArray.descr_take),
     compress = interp2app(BaseArray.descr_compress),
+    repeat = interp2app(BaseArray.descr_repeat),
 )
 
 
diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
--- a/pypy/module/micronumpy/interp_ufuncs.py
+++ b/pypy/module/micronumpy/interp_ufuncs.py
@@ -3,9 +3,11 @@
 from pypy.interpreter.gateway import interp2app, unwrap_spec, NoneNotWrapped
 from pypy.interpreter.typedef import TypeDef, GetSetProperty, interp_attrproperty
 from pypy.module.micronumpy import interp_boxes, interp_dtype, support, loop
+from pypy.rlib import jit
 from pypy.rlib.rarithmetic import LONG_BIT
 from pypy.tool.sourcetools import func_with_new_name
 
+
 class W_Ufunc(Wrappable):
     _attrs_ = ["name", "promote_to_float", "promote_bools", "identity"]
     _immutable_fields_ = ["promote_to_float", "promote_bools", "name"]
@@ -28,26 +30,38 @@
         return self.identity
 
     def descr_call(self, space, __args__):
+        from interp_numarray import BaseArray
         args_w, kwds_w = __args__.unpack()
         # it occurs to me that we don't support any datatypes that
         # require casting, change it later when we do
         kwds_w.pop('casting', None)
         w_subok = kwds_w.pop('subok', None)
         w_out = kwds_w.pop('out', space.w_None)
-        if ((w_subok is not None and space.is_true(w_subok)) or
-            not space.is_w(w_out, space.w_None)):
+        # Setup a default value for out
+        if space.is_w(w_out, space.w_None):
+            out = None
+        else:
+            out = w_out
+        if (w_subok is not None and space.is_true(w_subok)):
             raise OperationError(space.w_NotImplementedError,
                                  space.wrap("parameters unsupported"))
         if kwds_w or len(args_w) < self.argcount:
             raise OperationError(space.w_ValueError,
                 space.wrap("invalid number of arguments")
             )
-        elif len(args_w) > self.argcount:
-            # The extra arguments should actually be the output array, but we
-            # don't support that yet.
+        elif (len(args_w) > self.argcount and out is not None) or \
+             (len(args_w) > self.argcount + 1):
             raise OperationError(space.w_TypeError,
                 space.wrap("invalid number of arguments")
             )
+        # Override the default out value, if it has been provided in w_wargs
+        if len(args_w) > self.argcount:
+            out = args_w[-1]
+        else:
+            args_w = args_w[:] + [out]
+        if out is not None and not isinstance(out, BaseArray):
+            raise OperationError(space.w_TypeError, space.wrap(
+                                            'output must be an array'))
         return self.call(space, args_w)
 
     @unwrap_spec(skipna=bool, keepdims=bool)
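
descr_call above now accepts the output target either as the out keyword or
as one extra positional argument, but not both, and pads args_w so self.call
always receives the out slot.  The argument juggling reduced to plain Python
(a sketch; the two error types follow the hunk above):

    def ufunc_call_args(argcount, args, kwargs):
        args = list(args)
        out = kwargs.pop('out', None)
        if kwargs or len(args) < argcount:
            raise ValueError("invalid number of arguments")
        if (len(args) > argcount and out is not None) or len(args) > argcount + 1:
            raise TypeError("invalid number of arguments")
        if len(args) > argcount:
            out = args[-1]               # out passed positionally
        else:
            args = args + [out]          # keep the out slot at a fixed index
        return args, out

    print(ufunc_call_args(2, (1, 2), {'out': 'buf'}))   # ([1, 2, 'buf'], 'buf')
    print(ufunc_call_args(2, (1, 2, 'buf'), {}))        # ([1, 2, 'buf'], 'buf')
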
@@ -105,28 +119,33 @@
         array([[ 1,  5],
                [ 9, 13]])
         """
-        if not space.is_w(w_out, space.w_None):
-            raise OperationError(space.w_NotImplementedError, space.wrap(
-                "out not supported"))
+        from pypy.module.micronumpy.interp_numarray import BaseArray
         if w_axis is None:
             axis = 0
         elif space.is_w(w_axis, space.w_None):
             axis = -1
         else:
             axis = space.int_w(w_axis)
-        return self.reduce(space, w_obj, False, False, axis, keepdims)
+        if space.is_w(w_out, space.w_None):
+            out = None
+        elif not isinstance(w_out, BaseArray):
+            raise OperationError(space.w_TypeError, space.wrap(
+                                                'output must be an array'))
+        else:
+            out = w_out
+        return self.reduce(space, w_obj, False, False, axis, keepdims, out)
 
-    def reduce(self, space, w_obj, multidim, promote_to_largest, dim,
-               keepdims=False):
+    def reduce(self, space, w_obj, multidim, promote_to_largest, axis,
+               keepdims=False, out=None):
         from pypy.module.micronumpy.interp_numarray import convert_to_array, \
-                                                           Scalar, ReduceArray
+                                             Scalar, ReduceArray, W_NDimArray
         if self.argcount != 2:
             raise OperationError(space.w_ValueError, space.wrap("reduce only "
                 "supported for binary functions"))
         assert isinstance(self, W_Ufunc2)
         obj = convert_to_array(space, w_obj)
-        if dim >= len(obj.shape):
-            raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % dim))
+        if axis >= len(obj.shape):
+            raise OperationError(space.w_ValueError, space.wrap("axis(=%d) out of bounds" % axis))
         if isinstance(obj, Scalar):
             raise OperationError(space.w_TypeError, space.wrap("cannot reduce "
                 "on a scalar"))
@@ -144,21 +163,55 @@
         if self.identity is None and size == 0:
             raise operationerrfmt(space.w_ValueError, "zero-size array to "
                     "%s.reduce without identity", self.name)
-        if shapelen > 1 and dim >= 0:
-            return self.do_axis_reduce(obj, dtype, dim, keepdims)
-        arr = ReduceArray(self.func, self.name, self.identity, obj, dtype)
-        return loop.compute(arr)
+        if shapelen > 1 and axis >= 0:
+            if keepdims:
+                shape = obj.shape[:axis] + [1] + obj.shape[axis + 1:]
+            else:
+                shape = obj.shape[:axis] + obj.shape[axis + 1:]
+            if out:
+                #Test for shape agreement
+                if len(out.shape) > len(shape):
+                    raise operationerrfmt(space.w_ValueError,
+                        'output parameter for reduction operation %s' +
+                        ' has too many dimensions', self.name)
+                elif len(out.shape) < len(shape):
+                    raise operationerrfmt(space.w_ValueError,
+                        'output parameter for reduction operation %s' +
+                        ' does not have enough dimensions', self.name)
+                elif out.shape != shape:
+                    raise operationerrfmt(space.w_ValueError,
+                        'output parameter shape mismatch, expecting [%s]' +
+                        ' , got [%s]',
+                        ",".join([str(x) for x in shape]),
+                        ",".join([str(x) for x in out.shape]),
+                        )
+                #Test for dtype agreement, perhaps create an intermediate
+                #if out.dtype != dtype:
+                #    raise OperationError(space.w_TypeError, space.wrap(
+                #        "mismatched  dtypes"))
+                return self.do_axis_reduce(obj, out.find_dtype(), axis, out)
+            else:
+                result = W_NDimArray(shape, dtype)
+                return self.do_axis_reduce(obj, dtype, axis, result)
+        if out:
+            if len(out.shape)>0:
+                raise operationerrfmt(space.w_ValueError, "output parameter "
+                              "for reduction operation %s has too many"
+                              " dimensions",self.name)
+            arr = ReduceArray(self.func, self.name, self.identity, obj,
+                                                            out.find_dtype())
+            val = loop.compute(arr)
+            assert isinstance(out, Scalar)
+            out.value = val
+        else:
+            arr = ReduceArray(self.func, self.name, self.identity, obj, dtype)
+            val = loop.compute(arr)
+        return val
 
-    def do_axis_reduce(self, obj, dtype, dim, keepdims):
-        from pypy.module.micronumpy.interp_numarray import AxisReduce,\
-             W_NDimArray
-        if keepdims:
-            shape = obj.shape[:dim] + [1] + obj.shape[dim + 1:]
-        else:
-            shape = obj.shape[:dim] + obj.shape[dim + 1:]
-        result = W_NDimArray(shape, dtype)
+    def do_axis_reduce(self, obj, dtype, axis, result):
+        from pypy.module.micronumpy.interp_numarray import AxisReduce
         arr = AxisReduce(self.func, self.name, self.identity, obj.shape, dtype,
-                         result, obj, dim)
+                         result, obj, axis)
         loop.compute(arr)
         return arr.left
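
reduce() above now takes an out target and checks its dimensionality and shape
against the reduced shape before writing into it.  The behaviour it is
converging towards, shown with CPython's numpy ufunc reduce (reference only):

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    out = np.zeros(3, dtype=a.dtype)
    np.add.reduce(a, axis=0, out=out)    # reduction result is written into out
    print(out)                           # [3 5 7]
    # an out whose shape does not match the reduced shape is rejected, which
    # is what the dimension/shape checks in the hunk above implement
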
 
@@ -176,24 +229,55 @@
         self.bool_result = bool_result
 
     def call(self, space, args_w):
-        from pypy.module.micronumpy.interp_numarray import (Call1,
-            convert_to_array, Scalar)
-
-        [w_obj] = args_w
+        from pypy.module.micronumpy.interp_numarray import (Call1, BaseArray,
+            convert_to_array, Scalar, shape_agreement)
+        if len(args_w)<2:
+            [w_obj] = args_w
+            out = None
+        else:
+            [w_obj, out] = args_w
+            if space.is_w(out, space.w_None):
+                out = None
         w_obj = convert_to_array(space, w_obj)
         calc_dtype = find_unaryop_result_dtype(space,
                                   w_obj.find_dtype(),
                                   promote_to_float=self.promote_to_float,
                                   promote_bools=self.promote_bools)
-        if self.bool_result:
+        if out:
+            if not isinstance(out, BaseArray):
+                raise OperationError(space.w_TypeError, space.wrap(
+                                                'output must be an array'))
+            res_dtype = out.find_dtype()
+        elif self.bool_result:
             res_dtype = interp_dtype.get_dtype_cache(space).w_booldtype
         else:
             res_dtype = calc_dtype
         if isinstance(w_obj, Scalar):
-            return space.wrap(self.func(calc_dtype, w_obj.value.convert_to(calc_dtype)))
-
-        w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype, res_dtype,
-                      w_obj)
+            arr = self.func(calc_dtype, w_obj.value.convert_to(calc_dtype))
+            if isinstance(out,Scalar):
+                out.value = arr
+            elif isinstance(out, BaseArray):
+                out.fill(space, arr)
+            else:
+                out = arr
+            return space.wrap(out)
+        if out:
+            assert isinstance(out, BaseArray) # For translation
+            broadcast_shape =  shape_agreement(space, w_obj.shape, out.shape)
+            if not broadcast_shape or broadcast_shape != out.shape:
+                raise operationerrfmt(space.w_ValueError,
+                    'output parameter shape mismatch, could not broadcast [%s]' +
+                    ' to [%s]',
+                    ",".join([str(x) for x in w_obj.shape]),
+                    ",".join([str(x) for x in out.shape]),
+                    )
+            w_res = Call1(self.func, self.name, out.shape, calc_dtype,
+                                         res_dtype, w_obj, out)
+            #Force it immediately
+            w_res.get_concrete()
+        else:
+            w_res = Call1(self.func, self.name, w_obj.shape, calc_dtype,
+                                         res_dtype, w_obj)
         w_obj.add_invalidates(w_res)
         return w_res
 
@@ -210,34 +294,63 @@
         self.func = func
         self.comparison_func = comparison_func
 
+    @jit.unroll_safe
     def call(self, space, args_w):
         from pypy.module.micronumpy.interp_numarray import (Call2,
-            convert_to_array, Scalar, shape_agreement)
-
-        [w_lhs, w_rhs] = args_w
+            convert_to_array, Scalar, shape_agreement, BaseArray)
+        if len(args_w) > 2:
+            [w_lhs, w_rhs, w_out] = args_w
+        else:
+            [w_lhs, w_rhs] = args_w
+            w_out = None
         w_lhs = convert_to_array(space, w_lhs)
         w_rhs = convert_to_array(space, w_rhs)
-        calc_dtype = find_binop_result_dtype(space,
-            w_lhs.find_dtype(), w_rhs.find_dtype(),
-            int_only=self.int_only,
-            promote_to_float=self.promote_to_float,
-            promote_bools=self.promote_bools,
-        )
+        if space.is_w(w_out, space.w_None) or w_out is None:
+            out = None
+            calc_dtype = find_binop_result_dtype(space,
+                w_lhs.find_dtype(), w_rhs.find_dtype(),
+                int_only=self.int_only,
+                promote_to_float=self.promote_to_float,
+                promote_bools=self.promote_bools,

