[pypy-commit] pypy pytest: merge from default

RonnyPfannschmidt noreply@buildbot.pypy.org
Mon Apr 9 11:40:31 CEST 2012


Author: Ronny Pfannschmidt <Ronny.Pfannschmidt@gmx.de>
Branch: pytest
Changeset: r54265:026db0b5babb
Date: 2012-04-09 11:39 +0200
http://bitbucket.org/pypy/pypy/changeset/026db0b5babb/

Log:	merge from default

diff too long, truncating to 10000 out of 28906 lines

diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -93,6 +93,7 @@
     return result
 
 def parse_plain(graph_id, plaincontent, links={}, fixedfont=False):
+    plaincontent = plaincontent.replace('\r\n', '\n')    # fix Windows EOL
     lines = plaincontent.splitlines(True)
     for i in range(len(lines)-2, -1, -1):
         if lines[i].endswith('\\\n'):   # line ending in '\'
diff --git a/lib-python/2.7/SimpleXMLRPCServer.py b/lib-python/2.7/SimpleXMLRPCServer.py
--- a/lib-python/2.7/SimpleXMLRPCServer.py
+++ b/lib-python/2.7/SimpleXMLRPCServer.py
@@ -486,7 +486,10 @@
             L = []
             while size_remaining:
                 chunk_size = min(size_remaining, max_chunk_size)
-                L.append(self.rfile.read(chunk_size))
+                chunk = self.rfile.read(chunk_size)
+                if not chunk:
+                    break
+                L.append(chunk)
                 size_remaining -= len(L[-1])
             data = ''.join(L)
 
diff --git a/lib-python/2.7/test/test_xmlrpc.py b/lib-python/2.7/test/test_xmlrpc.py
--- a/lib-python/2.7/test/test_xmlrpc.py
+++ b/lib-python/2.7/test/test_xmlrpc.py
@@ -308,7 +308,7 @@
         global ADDR, PORT, URL
         ADDR, PORT = serv.socket.getsockname()
         #connect to IP address directly.  This avoids socket.create_connection()
-        #trying to connect to to "localhost" using all address families, which
+        #trying to connect to "localhost" using all address families, which
         #causes slowdown e.g. on vista which supports AF_INET6.  The server listens
         #on AF_INET only.
         URL = "http://%s:%d"%(ADDR, PORT)
@@ -367,7 +367,7 @@
         global ADDR, PORT, URL
         ADDR, PORT = serv.socket.getsockname()
         #connect to IP address directly.  This avoids socket.create_connection()
-        #trying to connect to to "localhost" using all address families, which
+        #trying to connect to "localhost" using all address families, which
         #causes slowdown e.g. on vista which supports AF_INET6.  The server listens
         #on AF_INET only.
         URL = "http://%s:%d"%(ADDR, PORT)
@@ -472,6 +472,9 @@
                 # protocol error; provide additional information in test output
                 self.fail("%s\n%s" % (e, getattr(e, "headers", "")))
 
+    def test_unicode_host(self):
+        server = xmlrpclib.ServerProxy(u"http://%s:%d/RPC2"%(ADDR, PORT))
+        self.assertEqual(server.add("a", u"\xe9"), u"a\xe9")
 
     # [ch] The test 404 is causing lots of false alarms.
     def XXXtest_404(self):
@@ -586,6 +589,12 @@
         # This avoids waiting for the socket timeout.
         self.test_simple1()
 
+    def test_partial_post(self):
+        # Check that a partial POST doesn't make the server loop: issue #14001.
+        conn = httplib.HTTPConnection(ADDR, PORT)
+        conn.request('POST', '/RPC2 HTTP/1.0\r\nContent-Length: 100\r\n\r\nbye')
+        conn.close()
+
 class MultiPathServerTestCase(BaseServerTestCase):
     threadFunc = staticmethod(http_multi_server)
     request_count = 2
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -311,7 +311,7 @@
     RegrTest('test_mimetypes.py'),
     RegrTest('test_MimeWriter.py', core=False),
     RegrTest('test_minidom.py'),
-    RegrTest('test_mmap.py'),
+    RegrTest('test_mmap.py', usemodules="mmap"),
     RegrTest('test_module.py', core=True),
     RegrTest('test_modulefinder.py'),
     RegrTest('test_msilib.py', skip=only_win32),
diff --git a/lib-python/modified-2.7/distutils/command/bdist_wininst.py b/lib-python/modified-2.7/distutils/command/bdist_wininst.py
--- a/lib-python/modified-2.7/distutils/command/bdist_wininst.py
+++ b/lib-python/modified-2.7/distutils/command/bdist_wininst.py
@@ -298,7 +298,8 @@
                              bitmaplen,        # number of bytes in bitmap
                              )
         file.write(header)
-        file.write(open(arcname, "rb").read())
+        with open(arcname, "rb") as arcfile:
+            file.write(arcfile.read())
 
     # create_exe()
 
diff --git a/lib-python/modified-2.7/distutils/sysconfig_pypy.py b/lib-python/modified-2.7/distutils/sysconfig_pypy.py
--- a/lib-python/modified-2.7/distutils/sysconfig_pypy.py
+++ b/lib-python/modified-2.7/distutils/sysconfig_pypy.py
@@ -60,6 +60,7 @@
     g['EXE'] = ""
     g['SO'] = _get_so_extension() or ".so"
     g['SOABI'] = g['SO'].rsplit('.')[0]
+    g['LIBDIR'] = os.path.join(sys.prefix, 'lib')
 
     global _config_vars
     _config_vars = g
diff --git a/lib-python/modified-2.7/opcode.py b/lib-python/modified-2.7/opcode.py
--- a/lib-python/modified-2.7/opcode.py
+++ b/lib-python/modified-2.7/opcode.py
@@ -192,5 +192,6 @@
 def_op('LOOKUP_METHOD', 201)          # Index in name list
 hasname.append(201)
 def_op('CALL_METHOD', 202)            # #args not including 'self'
+def_op('BUILD_LIST_FROM_ARG', 203)
 
 del def_op, name_op, jrel_op, jabs_op
diff --git a/lib-python/modified-2.7/site.py b/lib-python/modified-2.7/site.py
--- a/lib-python/modified-2.7/site.py
+++ b/lib-python/modified-2.7/site.py
@@ -550,9 +550,18 @@
                 "'import usercustomize' failed; use -v for traceback"
 
 
+def import_builtin_stuff():
+    """PyPy specific: pre-import a few built-in modules, because
+    some programs actually rely on them to be in sys.modules :-("""
+    import exceptions
+    if 'zipimport' in sys.builtin_module_names:
+        import zipimport
+
+
 def main():
     global ENABLE_USER_SITE
 
+    import_builtin_stuff()
     abs__file__()
     known_paths = removeduppaths()
     if (os.name == "posix" and sys.path and
diff --git a/lib-python/modified-2.7/test/test_dis.py b/lib-python/modified-2.7/test/test_dis.py
new file mode 100644
--- /dev/null
+++ b/lib-python/modified-2.7/test/test_dis.py
@@ -0,0 +1,150 @@
+# Minimal tests for dis module
+
+from test.test_support import run_unittest
+import unittest
+import sys
+import dis
+import StringIO
+
+
+def _f(a):
+    print a
+    return 1
+
+dis_f = """\
+ %-4d         0 LOAD_FAST                0 (a)
+              3 PRINT_ITEM
+              4 PRINT_NEWLINE
+
+ %-4d         5 LOAD_CONST               1 (1)
+              8 RETURN_VALUE
+"""%(_f.func_code.co_firstlineno + 1,
+     _f.func_code.co_firstlineno + 2)
+
+
+def bug708901():
+    for res in range(1,
+                     10):
+        pass
+
+dis_bug708901 = """\
+ %-4d         0 SETUP_LOOP              23 (to 26)
+              3 LOAD_GLOBAL              0 (range)
+              6 LOAD_CONST               1 (1)
+
+ %-4d         9 LOAD_CONST               2 (10)
+             12 CALL_FUNCTION            2
+             15 GET_ITER
+        >>   16 FOR_ITER                 6 (to 25)
+             19 STORE_FAST               0 (res)
+
+ %-4d        22 JUMP_ABSOLUTE           16
+        >>   25 POP_BLOCK
+        >>   26 LOAD_CONST               0 (None)
+             29 RETURN_VALUE
+"""%(bug708901.func_code.co_firstlineno + 1,
+     bug708901.func_code.co_firstlineno + 2,
+     bug708901.func_code.co_firstlineno + 3)
+
+
+def bug1333982(x=[]):
+    assert 0, ([s for s in x] +
+              1)
+    pass
+
+dis_bug1333982 = """\
+ %-4d         0 LOAD_CONST               1 (0)
+              3 POP_JUMP_IF_TRUE        38
+              6 LOAD_GLOBAL              0 (AssertionError)
+              9 LOAD_FAST                0 (x)
+             12 BUILD_LIST_FROM_ARG      0
+             15 GET_ITER
+        >>   16 FOR_ITER                12 (to 31)
+             19 STORE_FAST               1 (s)
+             22 LOAD_FAST                1 (s)
+             25 LIST_APPEND              2
+             28 JUMP_ABSOLUTE           16
+
+ %-4d   >>   31 LOAD_CONST               2 (1)
+             34 BINARY_ADD
+             35 RAISE_VARARGS            2
+
+ %-4d   >>   38 LOAD_CONST               0 (None)
+             41 RETURN_VALUE
+"""%(bug1333982.func_code.co_firstlineno + 1,
+     bug1333982.func_code.co_firstlineno + 2,
+     bug1333982.func_code.co_firstlineno + 3)
+
+_BIG_LINENO_FORMAT = """\
+%3d           0 LOAD_GLOBAL              0 (spam)
+              3 POP_TOP
+              4 LOAD_CONST               0 (None)
+              7 RETURN_VALUE
+"""
+
+class DisTests(unittest.TestCase):
+    def do_disassembly_test(self, func, expected):
+        s = StringIO.StringIO()
+        save_stdout = sys.stdout
+        sys.stdout = s
+        dis.dis(func)
+        sys.stdout = save_stdout
+        got = s.getvalue()
+        # Trim trailing blanks (if any).
+        lines = got.split('\n')
+        lines = [line.rstrip() for line in lines]
+        expected = expected.split("\n")
+        import difflib
+        if expected != lines:
+            self.fail(
+                "events did not match expectation:\n" +
+                "\n".join(difflib.ndiff(expected,
+                                        lines)))
+
+    def test_opmap(self):
+        self.assertEqual(dis.opmap["STOP_CODE"], 0)
+        self.assertIn(dis.opmap["LOAD_CONST"], dis.hasconst)
+        self.assertIn(dis.opmap["STORE_NAME"], dis.hasname)
+
+    def test_opname(self):
+        self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST")
+
+    def test_boundaries(self):
+        self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG)
+        self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT)
+
+    def test_dis(self):
+        self.do_disassembly_test(_f, dis_f)
+
+    def test_bug_708901(self):
+        self.do_disassembly_test(bug708901, dis_bug708901)
+
+    def test_bug_1333982(self):
+        # This one is checking bytecodes generated for an `assert` statement,
+        # so fails if the tests are run with -O.  Skip this test then.
+        if __debug__:
+            self.do_disassembly_test(bug1333982, dis_bug1333982)
+
+    def test_big_linenos(self):
+        def func(count):
+            namespace = {}
+            func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"])
+            exec func in namespace
+            return namespace['foo']
+
+        # Test all small ranges
+        for i in xrange(1, 300):
+            expected = _BIG_LINENO_FORMAT % (i + 2)
+            self.do_disassembly_test(func(i), expected)
+
+        # Test some larger ranges too
+        for i in xrange(300, 5000, 10):
+            expected = _BIG_LINENO_FORMAT % (i + 2)
+            self.do_disassembly_test(func(i), expected)
+
+def test_main():
+    run_unittest(DisTests)
+
+
+if __name__ == "__main__":
+    test_main()
diff --git a/lib-python/modified-2.7/test/test_set.py b/lib-python/modified-2.7/test/test_set.py
--- a/lib-python/modified-2.7/test/test_set.py
+++ b/lib-python/modified-2.7/test/test_set.py
@@ -1568,7 +1568,7 @@
             for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
                 for g in (G, I, Ig, L, R):
                     expected = meth(data)
-                    actual = meth(G(data))
+                    actual = meth(g(data))
                     if isinstance(expected, bool):
                         self.assertEqual(actual, expected)
                     else:
diff --git a/lib_pypy/_csv.py b/lib_pypy/_csv.py
--- a/lib_pypy/_csv.py
+++ b/lib_pypy/_csv.py
@@ -414,7 +414,7 @@
 
     def _parse_add_char(self, c):
         if len(self.field) + len(c) > _field_limit:
-            raise Error("field larget than field limit (%d)" % (_field_limit))
+            raise Error("field larger than field limit (%d)" % (_field_limit))
         self.field += c
         
 
diff --git a/lib_pypy/_ctypes/builtin.py b/lib_pypy/_ctypes/builtin.py
--- a/lib_pypy/_ctypes/builtin.py
+++ b/lib_pypy/_ctypes/builtin.py
@@ -31,24 +31,20 @@
     arg = cobj._get_buffer_value()
     return _rawffi.wcharp2rawunicode(arg, lgt)
 
-class ErrorObject(local):
-    def __init__(self):
-        self.errno = 0
-        self.winerror = 0
-_error_object = ErrorObject()
+_err = local()
 
 def get_errno():
-    return _error_object.errno
+    return getattr(_err, "errno", 0)
 
 def set_errno(errno):
-    old_errno = _error_object.errno
-    _error_object.errno = errno
+    old_errno = get_errno()
+    _err.errno = errno
     return old_errno
 
 def get_last_error():
-    return _error_object.winerror
+    return getattr(_err, "winerror", 0)
 
 def set_last_error(winerror):
-    old_winerror = _error_object.winerror
-    _error_object.winerror = winerror
+    old_winerror = get_last_error()
+    _err.winerror = winerror
     return old_winerror
diff --git a/lib_pypy/_ctypes/function.py b/lib_pypy/_ctypes/function.py
--- a/lib_pypy/_ctypes/function.py
+++ b/lib_pypy/_ctypes/function.py
@@ -3,7 +3,7 @@
 from _ctypes.primitive import SimpleType, _SimpleCData
 from _ctypes.basics import ArgumentError, keepalive_key
 from _ctypes.basics import is_struct_shape
-from _ctypes.builtin import set_errno, set_last_error
+from _ctypes.builtin import get_errno, set_errno, get_last_error, set_last_error
 import _rawffi
 import _ffi
 import sys
@@ -350,16 +350,24 @@
     def _call_funcptr(self, funcptr, *newargs):
 
         if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO:
-            set_errno(_rawffi.get_errno())
+            tmp = _rawffi.get_errno()
+            _rawffi.set_errno(get_errno())
+            set_errno(tmp)
         if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR:
-            set_last_error(_rawffi.get_last_error())
+            tmp = _rawffi.get_last_error()
+            _rawffi.set_last_error(get_last_error())
+            set_last_error(tmp)
         try:
             result = funcptr(*newargs)
         finally:
             if self._flags_ & _rawffi.FUNCFLAG_USE_ERRNO:
-                set_errno(_rawffi.get_errno())
+                tmp = _rawffi.get_errno()
+                _rawffi.set_errno(get_errno())
+                set_errno(tmp)
             if self._flags_ & _rawffi.FUNCFLAG_USE_LASTERROR:
-                set_last_error(_rawffi.get_last_error())
+                tmp = _rawffi.get_last_error()
+                _rawffi.set_last_error(get_last_error())
+                set_last_error(tmp)
         #
         try:
             return self._build_result(self._restype_, result, newargs)
diff --git a/lib_pypy/_locale.py b/lib_pypy/_locale.py
deleted file mode 100644
--- a/lib_pypy/_locale.py
+++ /dev/null
@@ -1,337 +0,0 @@
-# ctypes implementation of _locale module by Victor Stinner, 2008-03-27
-
-# ------------------------------------------------------------
-#  Note that we also have our own interp-level implementation
-# ------------------------------------------------------------
-
-"""
-Support for POSIX locales.
-"""
-
-from ctypes import (Structure, POINTER, create_string_buffer,
-    c_ubyte, c_int, c_char_p, c_wchar_p, c_size_t)
-from ctypes_support import standard_c_lib as libc
-from ctypes_support import get_errno
-
-# load the platform-specific cache made by running locale.ctc.py
-from ctypes_config_cache._locale_cache import *
-
-try: from __pypy__ import builtinify
-except ImportError: builtinify = lambda f: f
-
-
-# Ubuntu Gusty i386 structure
-class lconv(Structure):
-    _fields_ = (
-        # Numeric (non-monetary) information.
-        ("decimal_point", c_char_p),    # Decimal point character.
-        ("thousands_sep", c_char_p),    # Thousands separator.
-
-        # Each element is the number of digits in each group;
-        # elements with higher indices are farther left.
-        # An element with value CHAR_MAX means that no further grouping is done.
-        # An element with value 0 means that the previous element is used
-        # for all groups farther left.  */
-        ("grouping", c_char_p),
-
-        # Monetary information.
-
-        # First three chars are a currency symbol from ISO 4217.
-        # Fourth char is the separator.  Fifth char is '\0'.
-        ("int_curr_symbol", c_char_p),
-        ("currency_symbol", c_char_p),   # Local currency symbol.
-        ("mon_decimal_point", c_char_p), # Decimal point character.
-        ("mon_thousands_sep", c_char_p), # Thousands separator.
-        ("mon_grouping", c_char_p),      # Like `grouping' element (above).
-        ("positive_sign", c_char_p),     # Sign for positive values.
-        ("negative_sign", c_char_p),     # Sign for negative values.
-        ("int_frac_digits", c_ubyte),    # Int'l fractional digits.
-        ("frac_digits", c_ubyte),        # Local fractional digits.
-        # 1 if currency_symbol precedes a positive value, 0 if succeeds.
-        ("p_cs_precedes", c_ubyte),
-        # 1 iff a space separates currency_symbol from a positive value.
-        ("p_sep_by_space", c_ubyte),
-        # 1 if currency_symbol precedes a negative value, 0 if succeeds.
-        ("n_cs_precedes", c_ubyte),
-        # 1 iff a space separates currency_symbol from a negative value.
-        ("n_sep_by_space", c_ubyte),
-
-        # Positive and negative sign positions:
-        # 0 Parentheses surround the quantity and currency_symbol.
-        # 1 The sign string precedes the quantity and currency_symbol.
-        # 2 The sign string follows the quantity and currency_symbol.
-        # 3 The sign string immediately precedes the currency_symbol.
-        # 4 The sign string immediately follows the currency_symbol.
-        ("p_sign_posn", c_ubyte),
-        ("n_sign_posn", c_ubyte),
-        # 1 if int_curr_symbol precedes a positive value, 0 if succeeds.
-        ("int_p_cs_precedes", c_ubyte),
-        # 1 iff a space separates int_curr_symbol from a positive value.
-        ("int_p_sep_by_space", c_ubyte),
-        # 1 if int_curr_symbol precedes a negative value, 0 if succeeds.
-        ("int_n_cs_precedes", c_ubyte),
-        # 1 iff a space separates int_curr_symbol from a negative value.
-        ("int_n_sep_by_space", c_ubyte),
-         # Positive and negative sign positions:
-         # 0 Parentheses surround the quantity and int_curr_symbol.
-         # 1 The sign string precedes the quantity and int_curr_symbol.
-         # 2 The sign string follows the quantity and int_curr_symbol.
-         # 3 The sign string immediately precedes the int_curr_symbol.
-         # 4 The sign string immediately follows the int_curr_symbol.
-        ("int_p_sign_posn", c_ubyte),
-        ("int_n_sign_posn", c_ubyte),
-    )
-
-_setlocale = libc.setlocale
-_setlocale.argtypes = (c_int, c_char_p)
-_setlocale.restype = c_char_p
-
-_localeconv = libc.localeconv
-_localeconv.argtypes = None
-_localeconv.restype = POINTER(lconv)
-
-_strcoll = libc.strcoll
-_strcoll.argtypes = (c_char_p, c_char_p)
-_strcoll.restype = c_int
-
-_wcscoll = libc.wcscoll
-_wcscoll.argtypes = (c_wchar_p, c_wchar_p)
-_wcscoll.restype = c_int
-
-_strxfrm = libc.strxfrm
-_strxfrm.argtypes = (c_char_p, c_char_p, c_size_t)
-_strxfrm.restype = c_size_t
-
-HAS_LIBINTL = hasattr(libc, 'gettext')
-if HAS_LIBINTL:
-    _gettext = libc.gettext
-    _gettext.argtypes = (c_char_p,)
-    _gettext.restype = c_char_p
-
-    _dgettext = libc.dgettext
-    _dgettext.argtypes = (c_char_p, c_char_p)
-    _dgettext.restype = c_char_p
-
-    _dcgettext = libc.dcgettext
-    _dcgettext.argtypes = (c_char_p, c_char_p, c_int)
-    _dcgettext.restype = c_char_p
-
-    _textdomain = libc.textdomain
-    _textdomain.argtypes = (c_char_p,)
-    _textdomain.restype = c_char_p
-
-    _bindtextdomain = libc.bindtextdomain
-    _bindtextdomain.argtypes = (c_char_p, c_char_p)
-    _bindtextdomain.restype = c_char_p
-
-    HAS_BIND_TEXTDOMAIN_CODESET = hasattr(libc, 'bindtextdomain_codeset')
-    if HAS_BIND_TEXTDOMAIN_CODESET:
-        _bind_textdomain_codeset = libc.bindtextdomain_codeset
-        _bind_textdomain_codeset.argtypes = (c_char_p, c_char_p)
-        _bind_textdomain_codeset.restype = c_char_p
-
-class Error(Exception):
-    pass
-
-def fixup_ulcase():
-    import string
-    #import strop
-
-    # create uppercase map string
-    ul = []
-    for c in xrange(256):
-        c = chr(c)
-        if c.isupper():
-            ul.append(c)
-    ul = ''.join(ul)
-    string.uppercase = ul
-    #strop.uppercase = ul
-
-    # create lowercase string
-    ul = []
-    for c in xrange(256):
-        c = chr(c)
-        if c.islower():
-            ul.append(c)
-    ul = ''.join(ul)
-    string.lowercase = ul
-    #strop.lowercase = ul
-
-    # create letters string
-    ul = []
-    for c in xrange(256):
-        c = chr(c)
-        if c.isalpha():
-            ul.append(c)
-    ul = ''.join(ul)
-    string.letters = ul
-
-@builtinify
-def setlocale(category, locale=None):
-    "(integer,string=None) -> string. Activates/queries locale processing."
-    if locale:
-        # set locale
-        result = _setlocale(category, locale)
-        if not result:
-            raise Error("unsupported locale setting")
-
-        # record changes to LC_CTYPE
-        if category in (LC_CTYPE, LC_ALL):
-            fixup_ulcase()
-    else:
-        # get locale
-        result = _setlocale(category, None)
-        if not result:
-            raise Error("locale query failed")
-    return result
-
-def _copy_grouping(text):
-    groups = [ ord(group) for group in text ]
-    if groups:
-        groups.append(0)
-    return groups
-
-@builtinify
-def localeconv():
-    "() -> dict. Returns numeric and monetary locale-specific parameters."
-
-    # if LC_NUMERIC is different in the C library, use saved value
-    lp = _localeconv()
-    l = lp.contents
-
-    # hopefully, the localeconv result survives the C library calls
-    # involved herein
-
-    # Numeric information
-    result = {
-        "decimal_point": l.decimal_point,
-        "thousands_sep": l.thousands_sep,
-        "grouping": _copy_grouping(l.grouping),
-        "int_curr_symbol": l.int_curr_symbol,
-        "currency_symbol": l.currency_symbol,
-        "mon_decimal_point": l.mon_decimal_point,
-        "mon_thousands_sep": l.mon_thousands_sep,
-        "mon_grouping": _copy_grouping(l.mon_grouping),
-        "positive_sign": l.positive_sign,
-        "negative_sign": l.negative_sign,
-        "int_frac_digits": l.int_frac_digits,
-        "frac_digits": l.frac_digits,
-        "p_cs_precedes": l.p_cs_precedes,
-        "p_sep_by_space": l.p_sep_by_space,
-        "n_cs_precedes": l.n_cs_precedes,
-        "n_sep_by_space": l.n_sep_by_space,
-        "p_sign_posn": l.p_sign_posn,
-        "n_sign_posn": l.n_sign_posn,
-    }
-    return result
-
-@builtinify
-def strcoll(s1, s2):
-    "string,string -> int. Compares two strings according to the locale."
-
-    # If both arguments are byte strings, use strcoll.
-    if isinstance(s1, str) and isinstance(s2, str):
-        return _strcoll(s1, s2)
-
-    # If neither argument is unicode, it's an error.
-    if not isinstance(s1, unicode) and not isinstance(s2, unicode):
-        raise ValueError("strcoll arguments must be strings")
-
-    # Convert the non-unicode argument to unicode.
-    s1 = unicode(s1)
-    s2 = unicode(s2)
-
-    # Collate the strings.
-    return _wcscoll(s1, s2)
-
-@builtinify
-def strxfrm(s):
-    "string -> string. Returns a string that behaves for cmp locale-aware."
-
-    # assume no change in size, first
-    n1 = len(s) + 1
-    buf = create_string_buffer(n1)
-    n2 = _strxfrm(buf, s, n1) + 1
-    if n2 > n1:
-        # more space needed
-        buf = create_string_buffer(n2)
-        _strxfrm(buf, s, n2)
-    return buf.value
-
-@builtinify
-def getdefaultlocale():
-    # TODO: Port code from CPython for Windows and Mac OS
-    raise NotImplementedError()
-
-if HAS_LANGINFO:
-    _nl_langinfo = libc.nl_langinfo
-    _nl_langinfo.argtypes = (nl_item,)
-    _nl_langinfo.restype = c_char_p
-
-    def nl_langinfo(key):
-        """nl_langinfo(key) -> string
-        Return the value for the locale information associated with key."""
-        # Check whether this is a supported constant. GNU libc sometimes
-        # returns numeric values in the char* return value, which would
-        # crash PyString_FromString.
-        result = _nl_langinfo(key)
-        if result is not None:
-            return result
-        raise ValueError("unsupported langinfo constant")
-
-if HAS_LIBINTL:
-    @builtinify
-    def gettext(msg):
-        """gettext(msg) -> string
-        Return translation of msg."""
-        return _gettext(msg)
-
-    @builtinify
-    def dgettext(domain, msg):
-        """dgettext(domain, msg) -> string
-        Return translation of msg in domain."""
-        return _dgettext(domain, msg)
-
-    @builtinify
-    def dcgettext(domain, msg, category):
-        """dcgettext(domain, msg, category) -> string
-        Return translation of msg in domain and category."""
-        return _dcgettext(domain, msg, category)
-
-    @builtinify
-    def textdomain(domain):
-        """textdomain(domain) -> string
-        Set the C library's textdomain to domain, returning the new domain."""
-        return _textdomain(domain)
-
-    @builtinify
-    def bindtextdomain(domain, dir):
-        """bindtextdomain(domain, dir) -> string
-        Bind the C library's domain to dir."""
-        dirname = _bindtextdomain(domain, dir)
-        if not dirname:
-            errno = get_errno()
-            raise OSError(errno)
-        return dirname
-
-    if HAS_BIND_TEXTDOMAIN_CODESET:
-        @builtinify
-        def bind_textdomain_codeset(domain, codeset):
-            """bind_textdomain_codeset(domain, codeset) -> string
-            Bind the C library's domain to codeset."""
-            codeset = _bind_textdomain_codeset(domain, codeset)
-            if codeset:
-                return codeset
-            return None
-
-__all__ = (
-    'Error',
-    'setlocale', 'localeconv', 'strxfrm', 'strcoll',
-) + ALL_CONSTANTS
-if HAS_LIBINTL:
-    __all__ += ('gettext', 'dgettext', 'dcgettext', 'textdomain',
-                'bindtextdomain')
-    if HAS_BIND_TEXTDOMAIN_CODESET:
-        __all__ += ('bind_textdomain_codeset',)
-if HAS_LANGINFO:
-    __all__ += ('nl_langinfo',)
diff --git a/lib_pypy/array.py b/lib_pypy/array.py
deleted file mode 100644
--- a/lib_pypy/array.py
+++ /dev/null
@@ -1,531 +0,0 @@
-"""This module defines an object type which can efficiently represent
-an array of basic values: characters, integers, floating point
-numbers.  Arrays are sequence types and behave very much like lists,
-except that the type of objects stored in them is constrained.  The
-type is specified at object creation time by using a type code, which
-is a single character.  The following type codes are defined:
-
-    Type code   C Type             Minimum size in bytes 
-    'c'         character          1 
-    'b'         signed integer     1 
-    'B'         unsigned integer   1 
-    'u'         Unicode character  2 
-    'h'         signed integer     2 
-    'H'         unsigned integer   2 
-    'i'         signed integer     2 
-    'I'         unsigned integer   2 
-    'l'         signed integer     4 
-    'L'         unsigned integer   4 
-    'f'         floating point     4 
-    'd'         floating point     8 
-
-The constructor is:
-
-array(typecode [, initializer]) -- create a new array
-"""
-
-from struct import calcsize, pack, pack_into, unpack_from
-import operator
-
-# the buffer-like object to use internally: trying from
-# various places in order...
-try:
-    import _rawffi                    # a reasonable implementation based
-    _RAWARRAY = _rawffi.Array('c')    # on raw_malloc, and providing a
-    def bytebuffer(size):             # real address
-        return _RAWARRAY(size, autofree=True)
-    def getbufaddress(buf):
-        return buf.buffer
-except ImportError:
-    try:
-        from __pypy__ import bytebuffer     # a reasonable implementation
-        def getbufaddress(buf):             # compatible with oo backends,
-            return 0                        # but no address
-    except ImportError:
-        # not running on PyPy.  Fall back to ctypes...
-        import ctypes
-        bytebuffer = ctypes.create_string_buffer
-        def getbufaddress(buf):
-            voidp = ctypes.cast(ctypes.pointer(buf), ctypes.c_void_p)
-            return voidp.value
-
-# ____________________________________________________________
-
-TYPECODES = "cbBuhHiIlLfd"
-
-class array(object):
-    """array(typecode [, initializer]) -> array
-    
-    Return a new array whose items are restricted by typecode, and
-    initialized from the optional initializer value, which must be a list,
-    string. or iterable over elements of the appropriate type.
-    
-    Arrays represent basic values and behave very much like lists, except
-    the type of objects stored in them is constrained.
-    
-    Methods:
-    
-    append() -- append a new item to the end of the array
-    buffer_info() -- return information giving the current memory info
-    byteswap() -- byteswap all the items of the array
-    count() -- return number of occurences of an object
-    extend() -- extend array by appending multiple elements from an iterable
-    fromfile() -- read items from a file object
-    fromlist() -- append items from the list
-    fromstring() -- append items from the string
-    index() -- return index of first occurence of an object
-    insert() -- insert a new item into the array at a provided position
-    pop() -- remove and return item (default last)
-    read() -- DEPRECATED, use fromfile()
-    remove() -- remove first occurence of an object
-    reverse() -- reverse the order of the items in the array
-    tofile() -- write all items to a file object
-    tolist() -- return the array converted to an ordinary list
-    tostring() -- return the array converted to a string
-    write() -- DEPRECATED, use tofile()
-    
-    Attributes:
-    
-    typecode -- the typecode character used to create the array
-    itemsize -- the length in bytes of one array item
-    """
-    __slots__ = ["typecode", "itemsize", "_data", "_descriptor", "__weakref__"]
-
-    def __new__(cls, typecode, initializer=[], **extrakwds):
-        self = object.__new__(cls)
-        if cls is array and extrakwds:
-            raise TypeError("array() does not take keyword arguments")
-        if not isinstance(typecode, str) or len(typecode) != 1:
-            raise TypeError(
-                     "array() argument 1 must be char, not %s" % type(typecode))
-        if typecode not in TYPECODES:
-            raise ValueError(
-                  "bad typecode (must be one of %s)" % ', '.join(TYPECODES))
-        self._data = bytebuffer(0)
-        self.typecode = typecode
-        self.itemsize = calcsize(typecode)
-        if isinstance(initializer, list):
-            self.fromlist(initializer)
-        elif isinstance(initializer, str):
-            self.fromstring(initializer)
-        elif isinstance(initializer, unicode) and self.typecode == "u":
-            self.fromunicode(initializer)
-        else:
-            self.extend(initializer)
-        return self
-
-    def _clear(self):
-        self._data = bytebuffer(0)
-
-    ##### array-specific operations
-
-    def fromfile(self, f, n):
-        """Read n objects from the file object f and append them to the end of
-        the array. Also called as read."""
-        if not isinstance(f, file):
-            raise TypeError("arg1 must be open file")
-        size = self.itemsize * n
-        item = f.read(size)
-        if len(item) < size:
-            raise EOFError("not enough items in file")
-        self.fromstring(item)
-
-    def fromlist(self, l):
-        """Append items to array from list."""
-        if not isinstance(l, list):
-            raise TypeError("arg must be list")
-        self._fromiterable(l)
-        
-    def fromstring(self, s):
-        """Appends items from the string, interpreting it as an array of machine
-        values, as if it had been read from a file using the fromfile()
-        method."""
-        if isinstance(s, unicode):
-            s = str(s)
-        self._frombuffer(s)
-
-    def _frombuffer(self, s):
-        length = len(s)
-        if length % self.itemsize != 0:
-            raise ValueError("string length not a multiple of item size")
-        boundary = len(self._data)
-        newdata = bytebuffer(boundary + length)
-        newdata[:boundary] = self._data
-        newdata[boundary:] = s
-        self._data = newdata
-
-    def fromunicode(self, ustr):
-        """Extends this array with data from the unicode string ustr. The array
-        must be a type 'u' array; otherwise a ValueError is raised. Use
-        array.fromstring(ustr.encode(...)) to append Unicode data to an array of
-        some other type."""
-        if not self.typecode == "u":
-            raise ValueError(
-                          "fromunicode() may only be called on type 'u' arrays")
-        # XXX the following probable bug is not emulated:
-        # CPython accepts a non-unicode string or a buffer, and then
-        # behaves just like fromstring(), except that it strangely truncates
-        # string arguments at multiples of the unicode byte size.
-        # Let's only accept unicode arguments for now.
-        if not isinstance(ustr, unicode):
-            raise TypeError("fromunicode() argument should probably be "
-                            "a unicode string")
-        # _frombuffer() does the currect thing using
-        # the buffer behavior of unicode objects
-        self._frombuffer(buffer(ustr))
-
-    def tofile(self, f):
-        """Write all items (as machine values) to the file object f.  Also
-        called as write."""
-        if not isinstance(f, file):
-            raise TypeError("arg must be open file")
-        f.write(self.tostring())
-        
-    def tolist(self):
-        """Convert array to an ordinary list with the same items."""
-        count = len(self._data) // self.itemsize
-        return list(unpack_from('%d%s' % (count, self.typecode), self._data))
-
-    def tostring(self):
-        return self._data[:]
-
-    def __buffer__(self):
-        return buffer(self._data)
-
-    def tounicode(self):
-        """Convert the array to a unicode string. The array must be a type 'u'
-        array; otherwise a ValueError is raised. Use array.tostring().decode()
-        to obtain a unicode string from an array of some other type."""
-        if self.typecode != "u":
-            raise ValueError("tounicode() may only be called on type 'u' arrays")
-        # XXX performance is not too good
-        return u"".join(self.tolist())
-
-    def byteswap(self):
-        """Byteswap all items of the array.  If the items in the array are not
-        1, 2, 4, or 8 bytes in size, RuntimeError is raised."""
-        if self.itemsize not in [1, 2, 4, 8]:
-            raise RuntimeError("byteswap not supported for this array")
-        # XXX slowish
-        itemsize = self.itemsize
-        bytes = self._data
-        for start in range(0, len(bytes), itemsize):
-            stop = start + itemsize
-            bytes[start:stop] = bytes[start:stop][::-1]
-
-    def buffer_info(self):
-        """Return a tuple (address, length) giving the current memory address
-        and the length in items of the buffer used to hold array's contents. The
-        length should be multiplied by the itemsize attribute to calculate the
-        buffer length in bytes. On PyPy the address might be meaningless
-        (returned as 0), depending on the available modules."""
-        return (getbufaddress(self._data), len(self))
-    
-    read = fromfile
-
-    write = tofile
-
-    ##### general object protocol
-    
-    def __repr__(self):
-        if len(self._data) == 0:
-            return "array('%s')" % self.typecode
-        elif self.typecode == "c":
-            return "array('%s', %s)" % (self.typecode, repr(self.tostring()))
-        elif self.typecode == "u":
-            return "array('%s', %s)" % (self.typecode, repr(self.tounicode()))
-        else:
-            return "array('%s', %s)" % (self.typecode, repr(self.tolist()))
-
-    def __copy__(self):
-        a = array(self.typecode)
-        a._data = bytebuffer(len(self._data))
-        a._data[:] = self._data
-        return a
-
-    def __eq__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) == buffer(other._data)
-        else:
-            return self.tolist() == other.tolist()
-
-    def __ne__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) != buffer(other._data)
-        else:
-            return self.tolist() != other.tolist()
-
-    def __lt__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) < buffer(other._data)
-        else:
-            return self.tolist() < other.tolist()
-
-    def __gt__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) > buffer(other._data)
-        else:
-            return self.tolist() > other.tolist()
-
-    def __le__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) <= buffer(other._data)
-        else:
-            return self.tolist() <= other.tolist()
-
-    def __ge__(self, other):
-        if not isinstance(other, array):
-            return NotImplemented
-        if self.typecode == 'c':
-            return buffer(self._data) >= buffer(other._data)
-        else:
-            return self.tolist() >= other.tolist()
-
-    def __reduce__(self):
-        dict = getattr(self, '__dict__', None)
-        data = self.tostring()
-        if data:
-            initargs = (self.typecode, data)
-        else:
-            initargs = (self.typecode,)
-        return (type(self), initargs, dict)
-
-    ##### list methods
-    
-    def append(self, x):
-        """Append new value x to the end of the array."""
-        self._frombuffer(pack(self.typecode, x))
-
-    def count(self, x):
-        """Return number of occurences of x in the array."""
-        return operator.countOf(self, x)
-
-    def extend(self, iterable):
-        """Append items to the end of the array."""
-        if isinstance(iterable, array) \
-                                    and not self.typecode == iterable.typecode:
-            raise TypeError("can only extend with array of same kind")
-        self._fromiterable(iterable)
-
-    def index(self, x):
-        """Return index of first occurence of x in the array."""
-        return operator.indexOf(self, x)
-    
-    def insert(self, i, x):
-        """Insert a new item x into the array before position i."""
-        seqlength = len(self)
-        if i < 0:
-            i += seqlength
-            if i < 0:
-                i = 0
-        elif i > seqlength:
-            i = seqlength
-        boundary = i * self.itemsize
-        data = pack(self.typecode, x)
-        newdata = bytebuffer(len(self._data) + len(data))
-        newdata[:boundary] = self._data[:boundary]
-        newdata[boundary:boundary+self.itemsize] = data
-        newdata[boundary+self.itemsize:] = self._data[boundary:]
-        self._data = newdata
-        
-    def pop(self, i=-1):
-        """Return the i-th element and delete it from the array. i defaults to
-        -1."""
-        seqlength = len(self)
-        if i < 0:
-            i += seqlength
-        if not (0 <= i < seqlength):
-            raise IndexError(i)
-        boundary = i * self.itemsize
-        result = unpack_from(self.typecode, self._data, boundary)[0]
-        newdata = bytebuffer(len(self._data) - self.itemsize)
-        newdata[:boundary] = self._data[:boundary]
-        newdata[boundary:] = self._data[boundary+self.itemsize:]
-        self._data = newdata
-        return result
-        
-    def remove(self, x):
-        """Remove the first occurence of x in the array."""
-        self.pop(self.index(x))
-        
-    def reverse(self):
-        """Reverse the order of the items in the array."""
-        lst = self.tolist()
-        lst.reverse()
-        self._clear()
-        self.fromlist(lst)
-
-    ##### list protocol
-    
-    def __len__(self):
-        return len(self._data) // self.itemsize
-    
-    def __add__(self, other):
-        if not isinstance(other, array):
-            raise TypeError("can only append array to array")
-        if self.typecode != other.typecode:
-            raise TypeError("bad argument type for built-in operation")
-        return array(self.typecode, buffer(self._data) + buffer(other._data))
-
-    def __mul__(self, repeat):
-        return array(self.typecode, buffer(self._data) * repeat)
-
-    __rmul__ = __mul__
-
-    def __getitem__(self, i):
-        seqlength = len(self)
-        if isinstance(i, slice):
-            start, stop, step = i.indices(seqlength)
-            if step != 1:
-                sublist = self.tolist()[i]    # fall-back
-                return array(self.typecode, sublist)
-            if start < 0:
-                start = 0
-            if stop < start:
-                stop = start
-            assert stop <= seqlength
-            return array(self.typecode, self._data[start * self.itemsize :
-                                                   stop * self.itemsize])
-        else:
-            if i < 0:
-                i += seqlength
-            if self.typecode == 'c':  # speed trick
-                return self._data[i]
-            if not (0 <= i < seqlength):
-                raise IndexError(i)
-            boundary = i * self.itemsize
-            return unpack_from(self.typecode, self._data, boundary)[0]
-
-    def __getslice__(self, i, j):
-        return self.__getitem__(slice(i, j))
-
-    def __setitem__(self, i, x):
-        if isinstance(i, slice):
-            if (not isinstance(x, array)
-                or self.typecode != x.typecode):
-                raise TypeError("can only assign array of same kind"
-                                " to array slice")
-            seqlength = len(self)
-            start, stop, step = i.indices(seqlength)
-            if step != 1:
-                sublist = self.tolist()    # fall-back
-                sublist[i] = x.tolist()
-                self._clear()
-                self.fromlist(sublist)
-                return
-            if start < 0:
-                start = 0
-            if stop < start:
-                stop = start
-            assert stop <= seqlength
-            boundary1 = start * self.itemsize
-            boundary2 = stop * self.itemsize
-            boundary2new = boundary1 + len(x._data)
-            if boundary2 == boundary2new:
-                self._data[boundary1:boundary2] = x._data
-            else:
-                newdata = bytebuffer(len(self._data) + boundary2new-boundary2)
-                newdata[:boundary1] = self._data[:boundary1]
-                newdata[boundary1:boundary2new] = x._data
-                newdata[boundary2new:] = self._data[boundary2:]
-                self._data = newdata
-        else:
-            seqlength = len(self)
-            if i < 0:
-                i += seqlength
-            if self.typecode == 'c':  # speed trick
-                self._data[i] = x
-                return
-            if not (0 <= i < seqlength):
-                raise IndexError(i)
-            boundary = i * self.itemsize
-            pack_into(self.typecode, self._data, boundary, x)
-
-    def __setslice__(self, i, j, x):
-        self.__setitem__(slice(i, j), x)
-
-    def __delitem__(self, i):
-        if isinstance(i, slice):
-            seqlength = len(self)
-            start, stop, step = i.indices(seqlength)
-            if start < 0:
-                start = 0
-            if stop < start:
-                stop = start
-            assert stop <= seqlength
-            if step != 1:
-                sublist = self.tolist()    # fall-back
-                del sublist[i]
-                self._clear()
-                self.fromlist(sublist)
-                return
-            dellength = stop - start
-            boundary1 = start * self.itemsize
-            boundary2 = stop * self.itemsize
-            newdata = bytebuffer(len(self._data) - (boundary2-boundary1))
-            newdata[:boundary1] = self._data[:boundary1]
-            newdata[boundary1:] = self._data[boundary2:]
-            self._data = newdata
-        else:            
-            seqlength = len(self)
-            if i < 0:
-                i += seqlength
-            if not (0 <= i < seqlength):
-                raise IndexError(i)
-            boundary = i * self.itemsize
-            newdata = bytebuffer(len(self._data) - self.itemsize)
-            newdata[:boundary] = self._data[:boundary]
-            newdata[boundary:] = self._data[boundary+self.itemsize:]
-            self._data = newdata
-
-    def __delslice__(self, i, j):
-        self.__delitem__(slice(i, j))
-
-    def __contains__(self, item):
-        for x in self:
-            if x == item:
-                return True
-        return False
-
-    def __iadd__(self, other):
-        if not isinstance(other, array):
-            raise TypeError("can only extend array with array")
-        self.extend(other)
-        return self
-
-    def __imul__(self, repeat):
-        newdata = buffer(self._data) * repeat
-        self._data = bytebuffer(len(newdata))
-        self._data[:] = newdata
-        return self
-
-    def __iter__(self):
-        p = 0
-        typecode = self.typecode
-        itemsize = self.itemsize
-        while p < len(self._data):
-            yield unpack_from(typecode, self._data, p)[0]
-            p += itemsize
-
-    ##### internal methods
-
-    def _fromiterable(self, iterable):
-        iterable = tuple(iterable)
-        n = len(iterable)
-        boundary = len(self._data)
-        newdata = bytebuffer(boundary + n * self.itemsize)
-        newdata[:boundary] = self._data
-        pack_into('%d%s' % (n, self.typecode), newdata, boundary, *iterable)
-        self._data = newdata
-
-ArrayType = array
diff --git a/lib_pypy/binascii.py b/lib_pypy/binascii.py
deleted file mode 100644
--- a/lib_pypy/binascii.py
+++ /dev/null
@@ -1,720 +0,0 @@
-"""A pure Python implementation of binascii.
-
-Rather slow and buggy in corner cases.
-PyPy provides an RPython version too.
-"""
-
-class Error(Exception):
-    pass
-
-class Done(Exception):
-    pass
-
-class Incomplete(Exception):
-    pass
-
-def a2b_uu(s):
-    if not s:
-        return ''
-    
-    length = (ord(s[0]) - 0x20) % 64
-
-    def quadruplets_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
-            except IndexError:
-                s += '   '
-                yield ord(s[0]), ord(s[1]), ord(s[2]), ord(s[3])
-                return
-            s = s[4:]
-
-    try:
-        result = [''.join(
-            [chr((A - 0x20) << 2 | (((B - 0x20) >> 4) & 0x3)),
-            chr(((B - 0x20) & 0xf) << 4 | (((C - 0x20) >> 2) & 0xf)),
-            chr(((C - 0x20) & 0x3) << 6 | ((D - 0x20) & 0x3f))
-            ]) for A, B, C, D in quadruplets_gen(s[1:].rstrip())]
-    except ValueError:
-        raise Error('Illegal char')
-    result = ''.join(result)
-    trailingdata = result[length:]
-    if trailingdata.strip('\x00'):
-        raise Error('Trailing garbage')
-    result = result[:length]
-    if len(result) < length:
-        result += ((length - len(result)) * '\x00')
-    return result
-
-                               
-def b2a_uu(s):
-    length = len(s)
-    if length > 45:
-        raise Error('At most 45 bytes at once')
-
-    def triples_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-            except IndexError:
-                s += '\0\0'
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-                return
-            s = s[3:]
-
-    result = [''.join(
-        [chr(0x20 + (( A >> 2                    ) & 0x3F)),
-         chr(0x20 + (((A << 4) | ((B >> 4) & 0xF)) & 0x3F)),
-         chr(0x20 + (((B << 2) | ((C >> 6) & 0x3)) & 0x3F)),
-         chr(0x20 + (( C                         ) & 0x3F))])
-              for A, B, C in triples_gen(s)]
-    return chr(ord(' ') + (length & 077)) + ''.join(result) + '\n'
-
-
-table_a2b_base64 = {
-    'A': 0,
-    'B': 1,
-    'C': 2,
-    'D': 3,
-    'E': 4,
-    'F': 5,
-    'G': 6,
-    'H': 7,
-    'I': 8,
-    'J': 9,
-    'K': 10,
-    'L': 11,
-    'M': 12,
-    'N': 13,
-    'O': 14,
-    'P': 15,
-    'Q': 16,
-    'R': 17,
-    'S': 18,
-    'T': 19,
-    'U': 20,
-    'V': 21,
-    'W': 22,
-    'X': 23,
-    'Y': 24,
-    'Z': 25,
-    'a': 26,
-    'b': 27,
-    'c': 28,
-    'd': 29,
-    'e': 30,
-    'f': 31,
-    'g': 32,
-    'h': 33,
-    'i': 34,
-    'j': 35,
-    'k': 36,
-    'l': 37,
-    'm': 38,
-    'n': 39,
-    'o': 40,
-    'p': 41,
-    'q': 42,
-    'r': 43,
-    's': 44,
-    't': 45,
-    'u': 46,
-    'v': 47,
-    'w': 48,
-    'x': 49,
-    'y': 50,
-    'z': 51,
-    '0': 52,
-    '1': 53,
-    '2': 54,
-    '3': 55,
-    '4': 56,
-    '5': 57,
-    '6': 58,
-    '7': 59,
-    '8': 60,
-    '9': 61,
-    '+': 62,
-    '/': 63,
-    '=': 0,
-}
-
-
-def a2b_base64(s):
-    if not isinstance(s, (str, unicode)):
-        raise TypeError("expected string or unicode, got %r" % (s,))
-    s = s.rstrip()
-    # clean out all invalid characters, this also strips the final '=' padding
-    # check for correct padding
-
-    def next_valid_char(s, pos):
-        for i in range(pos + 1, len(s)):
-            c = s[i]
-            if c < '\x7f':
-                try:
-                    table_a2b_base64[c]
-                    return c
-                except KeyError:
-                    pass
-        return None
-    
-    quad_pos = 0
-    leftbits = 0
-    leftchar = 0
-    res = []
-    for i, c in enumerate(s):
-        if c > '\x7f' or c == '\n' or c == '\r' or c == ' ':
-            continue
-        if c == '=':
-            if quad_pos < 2 or (quad_pos == 2 and next_valid_char(s, i) != '='):
-                continue
-            else:
-                leftbits = 0
-                break
-        try:
-            next_c = table_a2b_base64[c]
-        except KeyError:
-            continue
-        quad_pos = (quad_pos + 1) & 0x03
-        leftchar = (leftchar << 6) | next_c
-        leftbits += 6
-        if leftbits >= 8:
-            leftbits -= 8
-            res.append((leftchar >> leftbits & 0xff))
-            leftchar &= ((1 << leftbits) - 1)
-    if leftbits != 0:
-        raise Error('Incorrect padding')
-    
-    return ''.join([chr(i) for i in res])
-    
-table_b2a_base64 = \
-"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-
-def b2a_base64(s):
-    length = len(s)
-    final_length = length % 3
-
-    def triples_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-            except IndexError:
-                s += '\0\0'
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-                return
-            s = s[3:]
-
-    
-    a = triples_gen(s[ :length - final_length])
-
-    result = [''.join(
-        [table_b2a_base64[( A >> 2                    ) & 0x3F],
-         table_b2a_base64[((A << 4) | ((B >> 4) & 0xF)) & 0x3F],
-         table_b2a_base64[((B << 2) | ((C >> 6) & 0x3)) & 0x3F],
-         table_b2a_base64[( C                         ) & 0x3F]])
-              for A, B, C in a]
-
-    final = s[length - final_length:]
-    if final_length == 0:
-        snippet = ''
-    elif final_length == 1:
-        a = ord(final[0])
-        snippet = table_b2a_base64[(a >> 2 ) & 0x3F] + \
-                  table_b2a_base64[(a << 4 ) & 0x3F] + '=='
-    else:
-        a = ord(final[0])
-        b = ord(final[1])
-        snippet = table_b2a_base64[(a >> 2) & 0x3F] + \
-                  table_b2a_base64[((a << 4) | (b >> 4) & 0xF) & 0x3F] + \
-                  table_b2a_base64[(b << 2) & 0x3F] + '='
-    return ''.join(result) + snippet + '\n'
-
-def a2b_qp(s, header=False):
-    inp = 0
-    odata = []
-    while inp < len(s):
-        if s[inp] == '=':
-            inp += 1
-            if inp >= len(s):
-                break
-            # Soft line breaks
-            if (s[inp] == '\n') or (s[inp] == '\r'):
-                if s[inp] != '\n':
-                    while inp < len(s) and s[inp] != '\n':
-                        inp += 1
-                if inp < len(s):
-                    inp += 1
-            elif s[inp] == '=':
-                # broken case from broken python qp
-                odata.append('=')
-                inp += 1
-            elif s[inp] in hex_numbers and s[inp + 1] in hex_numbers:
-                ch = chr(int(s[inp:inp+2], 16))
-                inp += 2
-                odata.append(ch)
-            else:
-                odata.append('=')
-        elif header and s[inp] == '_':
-            odata.append(' ')
-            inp += 1
-        else:
-            odata.append(s[inp])
-            inp += 1
-    return ''.join(odata)
-
-def b2a_qp(data, quotetabs=False, istext=True, header=False):
-    """quotetabs=True means that tab and space characters are always
-       quoted.
-       istext=False means that \r and \n are treated as regular characters
-       header=True encodes space characters with '_' and requires
-       real '_' characters to be quoted.
-    """
-    MAXLINESIZE = 76
-
-    # See if this string is using CRLF line ends
-    lf = data.find('\n')
-    crlf = lf > 0 and data[lf-1] == '\r'
-
-    inp = 0
-    linelen = 0
-    odata = []
-    while inp < len(data):
-        c = data[inp]
-        if (c > '~' or
-            c == '=' or
-            (header and c == '_') or
-            (c == '.' and linelen == 0 and (inp+1 == len(data) or
-                                            data[inp+1] == '\n' or
-                                            data[inp+1] == '\r')) or
-            (not istext and (c == '\r' or c == '\n')) or
-            ((c == '\t' or c == ' ') and (inp + 1 == len(data))) or
-            (c <= ' ' and c != '\r' and c != '\n' and
-             (quotetabs or (not quotetabs and (c != '\t' and c != ' '))))):
-            linelen += 3
-            if linelen >= MAXLINESIZE:
-                odata.append('=')
-                if crlf: odata.append('\r')
-                odata.append('\n')
-                linelen = 3
-            odata.append('=' + two_hex_digits(ord(c)))
-            inp += 1
-        else:
-            if (istext and
-                (c == '\n' or (inp+1 < len(data) and c == '\r' and
-                               data[inp+1] == '\n'))):
-                linelen = 0
-                # Protect against whitespace on end of line
-                if (len(odata) > 0 and
-                    (odata[-1] == ' ' or odata[-1] == '\t')):
-                    ch = ord(odata[-1])
-                    odata[-1] = '='
-                    odata.append(two_hex_digits(ch))
-
-                if crlf: odata.append('\r')
-                odata.append('\n')
-                if c == '\r':
-                    inp += 2
-                else:
-                    inp += 1
-            else:
-                if (inp + 1 < len(data) and
-                    data[inp+1] != '\n' and
-                    (linelen + 1) >= MAXLINESIZE):
-                    odata.append('=')
-                    if crlf: odata.append('\r')
-                    odata.append('\n')
-                    linelen = 0
-
-                linelen += 1
-                if header and c == ' ':
-                    c = '_'
-                odata.append(c)
-                inp += 1
-    return ''.join(odata)
-
-hex_numbers = '0123456789ABCDEF'
-def hex(n):
-    if n == 0:
-        return '0'
-    
-    if n < 0:
-        n = -n
-        sign = '-'
-    else:
-        sign = ''
-    arr = []
-
-    def hex_gen(n):
-        """ Yield a nibble at a time. """
-        while n:
-            yield n % 0x10
-            n = n / 0x10
-
-    for nibble in hex_gen(n):
-        arr = [hex_numbers[nibble]] + arr
-    return sign + ''.join(arr)
-
-def two_hex_digits(n):
-    return hex_numbers[n / 0x10] + hex_numbers[n % 0x10]
-    
-
-def strhex_to_int(s):
-    i = 0
-    for c in s:
-        i = i * 0x10 + hex_numbers.index(c)
-    return i
-
-hqx_encoding = '!"#$%&\'()*+,-012345689 at ABCDEFGHIJKLMNPQRSTUVXYZ[`abcdefhijklmpqr'
-
-DONE = 0x7f
-SKIP = 0x7e
-FAIL = 0x7d
-    
-table_a2b_hqx = [
-    #^@    ^A    ^B    ^C    ^D    ^E    ^F    ^G   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #\b    \t    \n    ^K    ^L    \r    ^N    ^O   
-    FAIL, FAIL, SKIP, FAIL, FAIL, SKIP, FAIL, FAIL,
-    #^P    ^Q    ^R    ^S    ^T    ^U    ^V    ^W   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #^X    ^Y    ^Z    ^[    ^\    ^]    ^^    ^_   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #      !     "     #     $     %     &     '   
-    FAIL, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
-    #(     )     *     +     ,     -     .     /   
-    0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, FAIL, FAIL,
-    #0     1     2     3     4     5     6     7   
-    0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, FAIL,
-    #8     9     :     ;     <     =     >     ?   
-    0x14, 0x15, DONE, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #@     A     B     C     D     E     F     G   
-    0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D,
-    #H     I     J     K     L     M     N     O   
-    0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, 0x24, FAIL,
-    #P     Q     R     S     T     U     V     W   
-    0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, FAIL,
-    #X     Y     Z     [     \     ]     ^     _   
-    0x2C, 0x2D, 0x2E, 0x2F, FAIL, FAIL, FAIL, FAIL,
-    #`     a     b     c     d     e     f     g   
-    0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, FAIL,
-    #h     i     j     k     l     m     n     o   
-    0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, FAIL, FAIL,
-    #p     q     r     s     t     u     v     w   
-    0x3D, 0x3E, 0x3F, FAIL, FAIL, FAIL, FAIL, FAIL,
-    #x     y     z     {     |     }     ~    ^?   
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-    FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL, FAIL,
-]
-
-def a2b_hqx(s):
-    result = []
-
-    def quadruples_gen(s):
-        t = []
-        for c in s:
-            res = table_a2b_hqx[ord(c)]
-            if res == SKIP:
-                continue
-            elif res == FAIL:
-                raise Error('Illegal character')
-            elif res == DONE:
-                yield t
-                raise Done
-            else:
-                t.append(res)
-            if len(t) == 4:
-                yield t
-                t = []
-        yield t
-        
-    done = 0
-    try:
-        for snippet in quadruples_gen(s):
-            length = len(snippet)
-            if length == 4:
-                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) 
-                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) 
-                result.append(chr(((snippet[2] & 0x03) << 6) | (snippet[3]))) 
-            elif length == 3:
-                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) 
-                result.append(chr(((snippet[1] & 0x0f) << 4) | (snippet[2] >> 2))) 
-            elif length == 2:
-                result.append(chr(((snippet[0] & 0x3f) << 2) | (snippet[1] >> 4))) 
-    except Done:
-        done = 1
-    except Error:
-        raise
-    return (''.join(result), done)
-
-def b2a_hqx(s):
-    result =[]
-
-    def triples_gen(s):
-        while s:
-            try:
-                yield ord(s[0]), ord(s[1]), ord(s[2])
-            except IndexError:
-                yield tuple([ord(c) for c in s])
-            s = s[3:]
-
-    for snippet in triples_gen(s):
-        length = len(snippet)
-        if length == 3:
-            result.append(
-                hqx_encoding[(snippet[0] & 0xfc) >> 2])
-            result.append(hqx_encoding[
-                ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
-            result.append(hqx_encoding[
-                (snippet[1] & 0x0f) << 2 | ((snippet[2] & 0xc0) >> 6)])
-            result.append(hqx_encoding[snippet[2] & 0x3f])
-        elif length == 2:
-            result.append(
-                hqx_encoding[(snippet[0] & 0xfc) >> 2])
-            result.append(hqx_encoding[
-                ((snippet[0] & 0x03) << 4) | ((snippet[1] & 0xf0) >> 4)])
-            result.append(hqx_encoding[
-                (snippet[1] & 0x0f) << 2])
-        elif length == 1:
-            result.append(
-                hqx_encoding[(snippet[0] & 0xfc) >> 2])
-            result.append(hqx_encoding[
-                ((snippet[0] & 0x03) << 4)])
-    return ''.join(result)
-
-crctab_hqx = [
-        0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
-        0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
-        0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
-        0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
-        0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
-        0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
-        0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
-        0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
-        0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
-        0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
-        0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
-        0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
-        0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
-        0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
-        0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
-        0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
-        0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
-        0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
-        0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
-        0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
-        0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
-        0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
-        0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
-        0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
-        0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
-        0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
-        0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
-        0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
-        0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
-        0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
-        0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
-        0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
-]
-
-def crc_hqx(s, crc):
-    for c in s:
-        crc = ((crc << 8) & 0xff00) ^ crctab_hqx[((crc >> 8) & 0xff) ^ ord(c)]
-
-    return crc
-
-def rlecode_hqx(s):
-    """
-    Run length encoding for binhex4.
-    The CPython implementation does not do run length encoding
-    of \x90 characters. This implementation does.
-    """
-    if not s:
-        return ''
-    result = []
-    prev = s[0]
-    count = 1
-    # Add a dummy character to get the loop to go one extra round.
-    # The dummy must be different from the last character of s.
-    # In the same step we remove the first character, which has
-    # already been stored in prev.
-    if s[-1] == '!':
-        s = s[1:] + '?'
-    else:
-        s = s[1:] + '!'
-        
-    for c in s:
-        if c == prev and count < 255:
-            count += 1
-        else:
-            if count == 1:
-                if prev != '\x90':
-                    result.append(prev)
-                else:
-                    result.extend(['\x90', '\x00'])
-            elif count < 4:
-                if prev != '\x90':
-                    result.extend([prev] * count)
-                else:
-                    result.extend(['\x90', '\x00'] * count)
-            else:
-                if prev != '\x90':
-                    result.extend([prev, '\x90', chr(count)])
-                else:
-                    result.extend(['\x90', '\x00', '\x90', chr(count)]) 
-            count = 1
-            prev = c
-        
-    return ''.join(result)
-
-def rledecode_hqx(s):
-    s = s.split('\x90')
-    result = [s[0]]
-    prev = s[0]
-    for snippet in s[1:]:
-        count = ord(snippet[0])
-        if count > 0:
-            result.append(prev[-1] * (count-1))
-            prev = snippet
-        else:
-            result.append('\x90')
-            prev = '\x90'
-        result.append(snippet[1:])
-
-    return ''.join(result)
-
-crc_32_tab = [
-    0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
-    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
-    0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
-    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
-    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
-    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
-    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
-    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
-    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
-    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
-    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
-    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
-    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
-    0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
-    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
-    0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
-    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
-    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
-    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
-    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
-    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
-    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
-    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
-    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
-    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
-    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
-    0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
-    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
-    0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
-    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
-    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
-    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
-    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
-    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
-    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
-    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
-    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
-    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
-    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
-    0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
-    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
-    0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
-    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
-    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
-    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
-    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
-    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
-    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
-    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
-    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
-    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
-    0x2d02ef8dL
-]
-
-def crc32(s, crc=0):
-    result = 0
-    crc = ~long(crc) & 0xffffffffL
-    for c in s:
-        crc = crc_32_tab[(crc ^ long(ord(c))) & 0xffL] ^ (crc >> 8)
-        # Note: (crc >> 8) MUST zero fill on left
-
-    result = crc ^ 0xffffffffL
-    
-    if result > 2**31:
-        result = ((result + 2**31) % 2**32) - 2**31
-
-    return result
-
-def b2a_hex(s):
-    result = []
-    for char in s:
-        c = (ord(char) >> 4) & 0xf
-        if c > 9:
-            c = c + ord('a') - 10
-        else:
-            c = c + ord('0')
-        result.append(chr(c))
-        c = ord(char) & 0xf
-        if c > 9:
-            c = c + ord('a') - 10
-        else:
-            c = c + ord('0')
-        result.append(chr(c))
-    return ''.join(result)
-
-hexlify = b2a_hex
-
-table_hex = [
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    0, 1, 2, 3,  4, 5, 6, 7,  8, 9,-1,-1, -1,-1,-1,-1,
-    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,10,11,12, 13,14,15,-1, -1,-1,-1,-1, -1,-1,-1,-1,
-    -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1, -1,-1,-1,-1
-]
-
-
-def a2b_hex(t):
-    result = []
-
-    def pairs_gen(s):
-        while s:
-            try:
-                yield table_hex[ord(s[0])], table_hex[ord(s[1])]
-            except IndexError:
-                if len(s):
-                    raise TypeError('Odd-length string')
-                return
-            s = s[2:]
-
-    for a, b in pairs_gen(t):
-        if a < 0 or b < 0:
-            raise TypeError('Non-hexadecimal digit found')
-        result.append(chr((a << 4) + b))
-    return ''.join(result)
-    
-
-unhexlify = a2b_hex
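
The module removed above mirrors the stdlib binascii API, so a rough sanity
sketch of its main entry points (expected values taken from the tests removed
further below) looks like:

    import binascii   # stdlib or the pure-Python module above

    # hexlify/unhexlify (b2a_hex/a2b_hex) are inverses
    assert binascii.unhexlify(binascii.hexlify('hello')) == 'hello'

    # quoted-printable: bytes outside the printable range become =XX escapes
    assert binascii.b2a_qp('\xff\r\n\xff\n\xff') == '=FF\r\n=FF\r\n=FF'

    # crc32 can be chained across chunks
    crc = binascii.crc32('Test the CRC-32 of')
    assert binascii.crc32(' this string.', crc) == 1571220330
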
diff --git a/lib_pypy/cPickle.py b/lib_pypy/cPickle.py
--- a/lib_pypy/cPickle.py
+++ b/lib_pypy/cPickle.py
@@ -2,16 +2,95 @@
 # One-liner implementation of cPickle
 #
 
-from pickle import *
+from pickle import Pickler, dump, dumps, PickleError, PicklingError, UnpicklingError, _EmptyClass
 from pickle import __doc__, __version__, format_version, compatible_formats
+from types import *
+from copy_reg import dispatch_table
+from copy_reg import _extension_registry, _inverted_registry, _extension_cache
+import marshal, struct, sys
 
 try: from __pypy__ import builtinify
 except ImportError: builtinify = lambda f: f
 
+# These are purely informational; no code uses these.
+format_version = "2.0"                  # File format version we write
+compatible_formats = ["1.0",            # Original protocol 0
+                      "1.1",            # Protocol 0 with INST added
+                      "1.2",            # Original protocol 1
+                      "1.3",            # Protocol 1 with BINFLOAT added
+                      "2.0",            # Protocol 2
+                      ]                 # Old format versions we can read
+
+# Keep in synch with cPickle.  This is the highest protocol number we
+# know how to read.
+HIGHEST_PROTOCOL = 2
 
 BadPickleGet = KeyError
 UnpickleableError = PicklingError
 
+MARK            = ord('(')   # push special markobject on stack
+STOP            = ord('.')   # every pickle ends with STOP
+POP             = ord('0')   # discard topmost stack item
+POP_MARK        = ord('1')   # discard stack top through topmost markobject
+DUP             = ord('2')   # duplicate top stack item
+FLOAT           = ord('F')   # push float object; decimal string argument
+INT             = ord('I')   # push integer or bool; decimal string argument
+BININT          = ord('J')   # push four-byte signed int
+BININT1         = ord('K')   # push 1-byte unsigned int
+LONG            = ord('L')   # push long; decimal string argument
+BININT2         = ord('M')   # push 2-byte unsigned int
+NONE            = ord('N')   # push None
+PERSID          = ord('P')   # push persistent object; id is taken from string arg
+BINPERSID       = ord('Q')   #  "       "         "  ;  "  "   "     "  stack
+REDUCE          = ord('R')   # apply callable to argtuple, both on stack
+STRING          = ord('S')   # push string; NL-terminated string argument
+BINSTRING       = ord('T')   # push string; counted binary string argument
+SHORT_BINSTRING = ord('U')   #  "     "   ;    "      "       "      " < 256 bytes
+UNICODE         = ord('V')   # push Unicode string; raw-unicode-escaped'd argument
+BINUNICODE      = ord('X')   #   "     "       "  ; counted UTF-8 string argument
+APPEND          = ord('a')   # append stack top to list below it
+BUILD           = ord('b')   # call __setstate__ or __dict__.update()
+GLOBAL          = ord('c')   # push self.find_class(modname, name); 2 string args
+DICT            = ord('d')   # build a dict from stack items
+EMPTY_DICT      = ord('}')   # push empty dict
+APPENDS         = ord('e')   # extend list on stack by topmost stack slice
+GET             = ord('g')   # push item from memo on stack; index is string arg
+BINGET          = ord('h')   #   "    "    "    "   "   "  ;   "    " 1-byte arg
+INST            = ord('i')   # build & push class instance
+LONG_BINGET     = ord('j')   # push item from memo on stack; index is 4-byte arg
+LIST            = ord('l')   # build list from topmost stack items
+EMPTY_LIST      = ord(']')   # push empty list
+OBJ             = ord('o')   # build & push class instance
+PUT             = ord('p')   # store stack top in memo; index is string arg
+BINPUT          = ord('q')   #   "     "    "   "   " ;   "    " 1-byte arg
+LONG_BINPUT     = ord('r')   #   "     "    "   "   " ;   "    " 4-byte arg
+SETITEM         = ord('s')   # add key+value pair to dict
+TUPLE           = ord('t')   # build tuple from topmost stack items
+EMPTY_TUPLE     = ord(')')   # push empty tuple
+SETITEMS        = ord('u')   # modify dict by adding topmost key+value pairs
+BINFLOAT        = ord('G')   # push float; arg is 8-byte float encoding
+
+TRUE            = 'I01\n'  # not an opcode; see INT docs in pickletools.py
+FALSE           = 'I00\n'  # not an opcode; see INT docs in pickletools.py
+
+# Protocol 2
+
+PROTO           = ord('\x80')  # identify pickle protocol
+NEWOBJ          = ord('\x81')  # build object by applying cls.__new__ to argtuple
+EXT1            = ord('\x82')  # push object from extension registry; 1-byte index
+EXT2            = ord('\x83')  # ditto, but 2-byte index
+EXT4            = ord('\x84')  # ditto, but 4-byte index
+TUPLE1          = ord('\x85')  # build 1-tuple from stack top
+TUPLE2          = ord('\x86')  # build 2-tuple from two topmost stack items
+TUPLE3          = ord('\x87')  # build 3-tuple from three topmost stack items
+NEWTRUE         = ord('\x88')  # push True
+NEWFALSE        = ord('\x89')  # push False
+LONG1           = ord('\x8a')  # push long from < 256 bytes
+LONG4           = ord('\x8b')  # push really big long
+
+_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
+
+
 # ____________________________________________________________
 # XXX some temporary dark magic to produce pickled dumps that are
 #     closer to the ones produced by cPickle in CPython
@@ -44,3 +123,474 @@
     file = StringIO()
     Pickler(file, protocol).dump(obj)
     return file.getvalue()
+
+# Why use struct.pack() for pickling but marshal.loads() for
+# unpickling?  struct.pack() is 40% faster than marshal.dumps(), but
+# marshal.loads() is twice as fast as struct.unpack()!
+mloads = marshal.loads
+
+# Unpickling machinery
+
+class Unpickler(object):
+
+    def __init__(self, file):
+        """This takes a file-like object for reading a pickle data stream.
+
+        The protocol version of the pickle is detected automatically, so no
+        proto argument is needed.
+
+        The file-like object must have two methods, a read() method that
+        takes an integer argument, and a readline() method that requires no
+        arguments.  Both methods should return a string.  Thus file-like
+        object can be a file object opened for reading, a StringIO object,
+        or any other custom object that meets this interface.
+        """
+        self.readline = file.readline
+        self.read = file.read
+        self.memo = {}
+
+    def load(self):
+        """Read a pickled object representation from the open file.
+
+        Return the reconstituted object hierarchy specified in the file.
+        """
+        self.mark = object() # any new unique object
+        self.stack = []
+        self.append = self.stack.append
+        try:
+            key = ord(self.read(1))
+            while key != STOP:
+                self.dispatch[key](self)
+                key = ord(self.read(1))
+        except TypeError:
+            if self.read(1) == '':
+                raise EOFError
+            raise
+        return self.stack.pop()
+
+    # Return largest index k such that self.stack[k] is self.mark.
+    # If the stack doesn't contain a mark, eventually raises IndexError.
+    # This could be sped up by maintaining another stack of indices at which
+    # the mark appears.  For that matter, the latter stack would suffice,
+    # and we wouldn't need to push mark objects on self.stack at all.
+    # Doing so is probably a good thing, though, since if the pickle is
+    # corrupt (or hostile) we may get a clue from finding self.mark embedded
+    # in unpickled objects.
+    def marker(self):
+        k = len(self.stack)-1
+        while self.stack[k] is not self.mark: k -= 1
+        return k
+
+    dispatch = {}
+
+    def load_proto(self):
+        proto = ord(self.read(1))
+        if not 0 <= proto <= 2:
+            raise ValueError, "unsupported pickle protocol: %d" % proto
+    dispatch[PROTO] = load_proto
+
+    def load_persid(self):
+        pid = self.readline()[:-1]
+        self.append(self.persistent_load(pid))
+    dispatch[PERSID] = load_persid
+
+    def load_binpersid(self):
+        pid = self.stack.pop()
+        self.append(self.persistent_load(pid))
+    dispatch[BINPERSID] = load_binpersid
+
+    def load_none(self):
+        self.append(None)
+    dispatch[NONE] = load_none
+
+    def load_false(self):
+        self.append(False)
+    dispatch[NEWFALSE] = load_false
+
+    def load_true(self):
+        self.append(True)
+    dispatch[NEWTRUE] = load_true
+
+    def load_int(self):
+        data = self.readline()
+        if data == FALSE[1:]:
+            val = False
+        elif data == TRUE[1:]:
+            val = True
+        else:
+            try:
+                val = int(data)
+            except ValueError:
+                val = long(data)
+        self.append(val)
+    dispatch[INT] = load_int
+
+    def load_binint(self):
+        self.append(mloads('i' + self.read(4)))
+    dispatch[BININT] = load_binint
+
+    def load_binint1(self):
+        self.append(ord(self.read(1)))
+    dispatch[BININT1] = load_binint1
+
+    def load_binint2(self):
+        self.append(mloads('i' + self.read(2) + '\000\000'))
+    dispatch[BININT2] = load_binint2
+
+    def load_long(self):
+        self.append(long(self.readline()[:-1], 0))
+    dispatch[LONG] = load_long
+
+    def load_long1(self):
+        n = ord(self.read(1))
+        bytes = self.read(n)
+        self.append(decode_long(bytes))
+    dispatch[LONG1] = load_long1
+
+    def load_long4(self):
+        n = mloads('i' + self.read(4))
+        bytes = self.read(n)
+        self.append(decode_long(bytes))
+    dispatch[LONG4] = load_long4
+
+    def load_float(self):
+        self.append(float(self.readline()[:-1]))
+    dispatch[FLOAT] = load_float
+
+    def load_binfloat(self, unpack=struct.unpack):
+        self.append(unpack('>d', self.read(8))[0])
+    dispatch[BINFLOAT] = load_binfloat
+
+    def load_string(self):
+        rep = self.readline()
+        if len(rep) < 3:
+            raise ValueError, "insecure string pickle"
+        if rep[0] == "'" == rep[-2]:
+            rep = rep[1:-2]
+        elif rep[0] == '"' == rep[-2]:
+            rep = rep[1:-2]
+        else:
+            raise ValueError, "insecure string pickle"
+        self.append(rep.decode("string-escape"))
+    dispatch[STRING] = load_string
+
+    def load_binstring(self):
+        L = mloads('i' + self.read(4))
+        self.append(self.read(L))
+    dispatch[BINSTRING] = load_binstring
+
+    def load_unicode(self):
+        self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
+    dispatch[UNICODE] = load_unicode
+
+    def load_binunicode(self):
+        L = mloads('i' + self.read(4))
+        self.append(unicode(self.read(L),'utf-8'))
+    dispatch[BINUNICODE] = load_binunicode
+
+    def load_short_binstring(self):
+        L = ord(self.read(1))
+        self.append(self.read(L))
+    dispatch[SHORT_BINSTRING] = load_short_binstring
+
+    def load_tuple(self):
+        k = self.marker()
+        self.stack[k:] = [tuple(self.stack[k+1:])]
+    dispatch[TUPLE] = load_tuple
+
+    def load_empty_tuple(self):
+        self.stack.append(())
+    dispatch[EMPTY_TUPLE] = load_empty_tuple
+
+    def load_tuple1(self):
+        self.stack[-1] = (self.stack[-1],)
+    dispatch[TUPLE1] = load_tuple1
+
+    def load_tuple2(self):
+        self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE2] = load_tuple2
+
+    def load_tuple3(self):
+        self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
+    dispatch[TUPLE3] = load_tuple3
+
+    def load_empty_list(self):
+        self.stack.append([])
+    dispatch[EMPTY_LIST] = load_empty_list
+
+    def load_empty_dictionary(self):
+        self.stack.append({})
+    dispatch[EMPTY_DICT] = load_empty_dictionary
+
+    def load_list(self):
+        k = self.marker()
+        self.stack[k:] = [self.stack[k+1:]]
+    dispatch[LIST] = load_list
+
+    def load_dict(self):
+        k = self.marker()
+        d = {}
+        items = self.stack[k+1:]
+        for i in range(0, len(items), 2):
+            key = items[i]
+            value = items[i+1]
+            d[key] = value
+        self.stack[k:] = [d]
+    dispatch[DICT] = load_dict
+
+    # INST and OBJ differ only in how they get a class object.  It's not
+    # only sensible to do the rest in a common routine, the two routines
+    # previously diverged and grew different bugs.
+    # klass is the class to instantiate, and k points to the topmost mark
+    # object, following which are the arguments for klass.__init__.
+    def _instantiate(self, klass, k):
+        args = tuple(self.stack[k+1:])
+        del self.stack[k:]
+        instantiated = 0
+        if (not args and
+                type(klass) is ClassType and
+                not hasattr(klass, "__getinitargs__")):
+            try:
+                value = _EmptyClass()
+                value.__class__ = klass
+                instantiated = 1
+            except RuntimeError:
+                # In restricted execution, assignment to inst.__class__ is
+                # prohibited
+                pass
+        if not instantiated:
+            try:
+                value = klass(*args)
+            except TypeError, err:
+                raise TypeError, "in constructor for %s: %s" % (
+                    klass.__name__, str(err)), sys.exc_info()[2]
+        self.append(value)
+
+    def load_inst(self):
+        module = self.readline()[:-1]
+        name = self.readline()[:-1]
+        klass = self.find_class(module, name)
+        self._instantiate(klass, self.marker())
+    dispatch[INST] = load_inst
+
+    def load_obj(self):
+        # Stack is ... markobject classobject arg1 arg2 ...
+        k = self.marker()
+        klass = self.stack.pop(k+1)
+        self._instantiate(klass, k)
+    dispatch[OBJ] = load_obj
+
+    def load_newobj(self):
+        args = self.stack.pop()
+        cls = self.stack[-1]
+        obj = cls.__new__(cls, *args)
+        self.stack[-1] = obj
+    dispatch[NEWOBJ] = load_newobj
+
+    def load_global(self):
+        module = self.readline()[:-1]
+        name = self.readline()[:-1]
+        klass = self.find_class(module, name)
+        self.append(klass)
+    dispatch[GLOBAL] = load_global
+
+    def load_ext1(self):
+        code = ord(self.read(1))
+        self.get_extension(code)
+    dispatch[EXT1] = load_ext1
+
+    def load_ext2(self):
+        code = mloads('i' + self.read(2) + '\000\000')
+        self.get_extension(code)
+    dispatch[EXT2] = load_ext2
+
+    def load_ext4(self):
+        code = mloads('i' + self.read(4))
+        self.get_extension(code)
+    dispatch[EXT4] = load_ext4
+
+    def get_extension(self, code):
+        nil = []
+        obj = _extension_cache.get(code, nil)
+        if obj is not nil:
+            self.append(obj)
+            return
+        key = _inverted_registry.get(code)
+        if not key:
+            raise ValueError("unregistered extension code %d" % code)
+        obj = self.find_class(*key)
+        _extension_cache[code] = obj
+        self.append(obj)
+
+    def find_class(self, module, name):
+        # Subclasses may override this
+        __import__(module)
+        mod = sys.modules[module]
+        klass = getattr(mod, name)
+        return klass
+
+    def load_reduce(self):
+        args = self.stack.pop()
+        func = self.stack[-1]
+        value = self.stack[-1](*args)
+        self.stack[-1] = value
+    dispatch[REDUCE] = load_reduce
+
+    def load_pop(self):
+        del self.stack[-1]
+    dispatch[POP] = load_pop
+
+    def load_pop_mark(self):
+        k = self.marker()
+        del self.stack[k:]
+    dispatch[POP_MARK] = load_pop_mark
+
+    def load_dup(self):
+        self.append(self.stack[-1])
+    dispatch[DUP] = load_dup
+
+    def load_get(self):
+        self.append(self.memo[self.readline()[:-1]])
+    dispatch[GET] = load_get
+
+    def load_binget(self):
+        i = ord(self.read(1))
+        self.append(self.memo[repr(i)])
+    dispatch[BINGET] = load_binget
+
+    def load_long_binget(self):
+        i = mloads('i' + self.read(4))
+        self.append(self.memo[repr(i)])
+    dispatch[LONG_BINGET] = load_long_binget
+
+    def load_put(self):
+        self.memo[self.readline()[:-1]] = self.stack[-1]
+    dispatch[PUT] = load_put
+
+    def load_binput(self):
+        i = ord(self.read(1))
+        self.memo[repr(i)] = self.stack[-1]
+    dispatch[BINPUT] = load_binput
+
+    def load_long_binput(self):
+        i = mloads('i' + self.read(4))
+        self.memo[repr(i)] = self.stack[-1]
+    dispatch[LONG_BINPUT] = load_long_binput
+
+    def load_append(self):
+        value = self.stack.pop()
+        self.stack[-1].append(value)
+    dispatch[APPEND] = load_append
+
+    def load_appends(self):
+        stack = self.stack
+        mark = self.marker()
+        lst = stack[mark - 1]
+        lst.extend(stack[mark + 1:])
+        del stack[mark:]
+    dispatch[APPENDS] = load_appends
+
+    def load_setitem(self):
+        stack = self.stack
+        value = stack.pop()
+        key = stack.pop()
+        dict = stack[-1]
+        dict[key] = value
+    dispatch[SETITEM] = load_setitem
+
+    def load_setitems(self):
+        stack = self.stack
+        mark = self.marker()
+        dict = stack[mark - 1]
+        for i in range(mark + 1, len(stack), 2):
+            dict[stack[i]] = stack[i + 1]
+
+        del stack[mark:]
+    dispatch[SETITEMS] = load_setitems
+
+    def load_build(self):
+        stack = self.stack
+        state = stack.pop()
+        inst = stack[-1]
+        setstate = getattr(inst, "__setstate__", None)
+        if setstate:
+            setstate(state)
+            return
+        slotstate = None
+        if isinstance(state, tuple) and len(state) == 2:
+            state, slotstate = state
+        if state:
+            try:
+                d = inst.__dict__
+                try:
+                    for k, v in state.iteritems():
+                        d[intern(k)] = v
+                # keys in state don't have to be strings
+                # don't blow up, but don't go out of our way
+                except TypeError:
+                    d.update(state)
+
+            except RuntimeError:
+                # XXX In restricted execution, the instance's __dict__
+                # is not accessible.  Use the old way of unpickling
+                # the instance variables.  This is a semantic
+                # difference when unpickling in restricted
+                # vs. unrestricted modes.
+                # Note, however, that cPickle has never tried to do the
+                # .update() business, and always uses
+                #     PyObject_SetItem(inst.__dict__, key, value) in a
+                # loop over state.items().
+                for k, v in state.items():
+                    setattr(inst, k, v)
+        if slotstate:
+            for k, v in slotstate.items():
+                setattr(inst, k, v)
+    dispatch[BUILD] = load_build
+
+    def load_mark(self):
+        self.append(self.mark)
+    dispatch[MARK] = load_mark
+
+#from pickle import decode_long
+
+def decode_long(data):
+    r"""Decode a long from a two's complement little-endian binary string.
+
+    >>> decode_long('')
+    0L
+    >>> decode_long("\xff\x00")
+    255L
+    >>> decode_long("\xff\x7f")
+    32767L
+    >>> decode_long("\x00\xff")
+    -256L
+    >>> decode_long("\x00\x80")
+    -32768L
+    >>> decode_long("\x80")
+    -128L
+    >>> decode_long("\x7f")
+    127L
+    """
+
+    nbytes = len(data)
+    if nbytes == 0:
+        return 0L
+    ind = nbytes - 1
+    while ind and ord(data[ind]) == 0:
+        ind -= 1
+    n = ord(data[ind])
+    while ind:
+        n <<= 8
+        ind -= 1
+        if ord(data[ind]):
+            n += ord(data[ind])
+    if ord(data[nbytes - 1]) >= 128:
+        n -= 1L << (nbytes << 3)
+    return n
+
+def load(f):
+    return Unpickler(f).load()
+
+def loads(str):
+    f = StringIO(str)
+    return Unpickler(f).load()
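
As a minimal sketch of how the Unpickler added above is driven (assuming
lib_pypy/cPickle.py is importable as cPickle; Python 2 syntax):

    from StringIO import StringIO
    import cPickle

    obj = {'hello': (1, 2.5, u'caf\xe9')}
    data = cPickle.dumps(obj, 2)                     # Pickler side, unchanged
    copy = cPickle.Unpickler(StringIO(data)).load()  # new app-level Unpickler
    assert copy == obj
    assert cPickle.loads(data) == obj                # loads() shortcut above
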
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -968,8 +968,7 @@
             self._checkOverflow(t.year)
             result = date(t.year, t.month, t.day)
             return result
-        raise TypeError
-        # XXX Should be 'return NotImplemented', but there's a bug in 2.2...
+        return NotImplemented    # note that this doesn't work on CPython 2.2
 
     __radd__ = __add__
 
@@ -1032,8 +1031,8 @@
     def __setstate(self, string):
         if len(string) != 4 or not (1 <= ord(string[2]) <= 12):
             raise TypeError("not enough arguments")
-        yhi, ylo, self._month, self._day = map(ord, string)
-        self._year = yhi * 256 + ylo
+        self._month, self._day = ord(string[2]), ord(string[3])
+        self._year = ord(string[0]) * 256 + ord(string[1])
 
     def __reduce__(self):
         return (self.__class__, self._getstate())
@@ -1421,9 +1420,10 @@
     def __setstate(self, string, tzinfo):
         if len(string) != 6 or ord(string[0]) >= 24:
             raise TypeError("an integer is required")
-        self._hour, self._minute, self._second, us1, us2, us3 = \
-                                                            map(ord, string)
-        self._microsecond = (((us1 << 8) | us2) << 8) | us3
+        self._hour, self._minute, self._second = ord(string[0]), \
+                                                 ord(string[1]), ord(string[2])
+        self._microsecond = (((ord(string[3]) << 8) | \
+                            ord(string[4])) << 8) | ord(string[5])
         self._tzinfo = tzinfo
 
     def __reduce__(self):
@@ -1903,10 +1903,11 @@
             return (basestate, self._tzinfo)
 
     def __setstate(self, string, tzinfo):
-        (yhi, ylo, self._month, self._day, self._hour,
-         self._minute, self._second, us1, us2, us3) = map(ord, string)
-        self._year = yhi * 256 + ylo
-        self._microsecond = (((us1 << 8) | us2) << 8) | us3
+        (self._month, self._day, self._hour, self._minute,
+            self._second) = (ord(string[2]), ord(string[3]), ord(string[4]),
+                             ord(string[5]), ord(string[6]))
+        self._year = ord(string[0]) * 256 + ord(string[1])
+        self._microsecond = (((ord(string[7]) << 8) | ord(string[8])) << 8) | ord(string[9])
         self._tzinfo = tzinfo
 
     def __reduce__(self):
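
The rewritten __setstate code above just indexes the packed state string
directly; for the 4-byte date state the layout is (year-high, year-low,
month, day), e.g. with made-up values:

    string = chr(2012 // 256) + chr(2012 % 256) + chr(4) + chr(9)
    year  = ord(string[0]) * 256 + ord(string[1])   # 2012
    month = ord(string[2])                          # 4
    day   = ord(string[3])                          # 9
    assert (year, month, day) == (2012, 4, 9)
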
diff --git a/lib_pypy/numpypy/core/numeric.py b/lib_pypy/numpypy/core/numeric.py
--- a/lib_pypy/numpypy/core/numeric.py
+++ b/lib_pypy/numpypy/core/numeric.py
@@ -6,7 +6,7 @@
 import _numpypy as multiarray # ARGH
 from numpypy.core.arrayprint import array2string
 
-
+newaxis = None
 
 def asanyarray(a, dtype=None, order=None, maskna=None, ownmaskna=False):
     """
@@ -306,6 +306,125 @@
     else:
         return multiarray.set_string_function(f, repr)
 
+def array_equal(a1, a2):
+    """
+    True if two arrays have the same shape and elements, False otherwise.
+
+    Parameters
+    ----------
+    a1, a2 : array_like
+        Input arrays.
+
+    Returns
+    -------
+    b : bool
+        Returns True if the arrays are equal.
+
+    See Also
+    --------
+    allclose: Returns True if two arrays are element-wise equal within a
+              tolerance.
+    array_equiv: Returns True if input arrays are shape consistent and all
+                 elements equal.
+
+    Examples
+    --------
+    >>> np.array_equal([1, 2], [1, 2])
+    True
+    >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
+    True
+    >>> np.array_equal([1, 2], [1, 2, 3])
+    False
+    >>> np.array_equal([1, 2], [1, 4])
+    False
+
+    """
+    try:
+        a1, a2 = asarray(a1), asarray(a2)
+    except:
+        return False
+    if a1.shape != a2.shape:
+        return False
+    return bool((a1 == a2).all())
+
+def asarray(a, dtype=None, order=None, maskna=None, ownmaskna=False):
+    """
+    Convert the input to an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data, in any form that can be converted to an array.  This
+        includes lists, lists of tuples, tuples, tuples of tuples, tuples
+        of lists and ndarrays.
+    dtype : data-type, optional
+        By default, the data-type is inferred from the input data.
+    order : {'C', 'F'}, optional
+        Whether to use row-major ('C') or column-major ('F' for FORTRAN)
+        memory representation.  Defaults to 'C'.
+    maskna : bool or None, optional
+        If this is set to True, it forces the array to have an NA mask.
+        If this is set to False, it forces the array to not have an NA
+        mask.
+    ownmaskna : bool, optional
+        If this is set to True, forces the array to have a mask which
+        it owns.
+
+    Returns
+    -------
+    out : ndarray
+        Array interpretation of `a`.  No copy is performed if the input
+        is already an ndarray.  If `a` is a subclass of ndarray, a base
+        class ndarray is returned.
+
+    See Also
+    --------
+    asanyarray : Similar function which passes through subclasses.
+    ascontiguousarray : Convert input to a contiguous array.
+    asfarray : Convert input to a floating point ndarray.
+    asfortranarray : Convert input to an ndarray with column-major
+                     memory order.
+    asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+    fromiter : Create an array from an iterator.
+    fromfunction : Construct an array by executing a function on grid
+                   positions.
+
+    Examples
+    --------
+    Convert a list into an array:
+
+    >>> a = [1, 2]
+    >>> np.asarray(a)
+    array([1, 2])
+
+    Existing arrays are not copied:
+
+    >>> a = np.array([1, 2])
+    >>> np.asarray(a) is a
+    True
+
+    If `dtype` is set, array is copied only if dtype does not match:
+
+    >>> a = np.array([1, 2], dtype=np.float32)
+    >>> np.asarray(a, dtype=np.float32) is a
+    True
+    >>> np.asarray(a, dtype=np.float64) is a
+    False
+
+    Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+    >>> issubclass(np.matrix, np.ndarray)
+    True
+    >>> a = np.matrix([[1, 2]])
+    >>> np.asarray(a) is a
+    False
+    >>> np.asanyarray(a) is a
+    True
+
+    """
+    return array(a, dtype, copy=False, order=order,
+                            maskna=maskna, ownmaskna=ownmaskna)
+
 set_string_function(array_str, 0)
 set_string_function(array_repr, 1)
 
@@ -319,4 +438,4 @@
 False_ = bool_(False)
 True_ = bool_(True)
 e = math.e
-pi = math.pi
\ No newline at end of file
+pi = math.pi
diff --git a/lib_pypy/pypy_test/test_binascii.py b/lib_pypy/pypy_test/test_binascii.py
deleted file mode 100644
--- a/lib_pypy/pypy_test/test_binascii.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from __future__ import absolute_import
-import py
-from lib_pypy import binascii
-
-# Create binary test data
-data = "The quick brown fox jumps over the lazy dog.\r\n"
-# Be slow so we don't depend on other modules
-data += "".join(map(chr, xrange(256)))
-data += "\r\nHello world.\n"
-
-def test_exceptions():
-    # Check module exceptions
-    assert issubclass(binascii.Error, Exception)
-    assert issubclass(binascii.Incomplete, Exception)
-
-def test_functions():
-    # Check presence of all functions
-    funcs = []
-    for suffix in "base64", "hqx", "uu", "hex":
-        prefixes = ["a2b_", "b2a_"]
-        if suffix == "hqx":
-            prefixes.extend(["crc_", "rlecode_", "rledecode_"])
-        for prefix in prefixes:
-            name = prefix + suffix
-            assert callable(getattr(binascii, name))
-            py.test.raises(TypeError, getattr(binascii, name))
-    for name in ("hexlify", "unhexlify"):
-        assert callable(getattr(binascii, name))
-        py.test.raises(TypeError, getattr(binascii, name))
-
-def test_base64valid():
-    # Test base64 with valid data
-    MAX_BASE64 = 57
-    lines = []
-    for i in range(0, len(data), MAX_BASE64):
-        b = data[i:i+MAX_BASE64]
-        a = binascii.b2a_base64(b)
-        lines.append(a)
-    res = ""
-    for line in lines:
-        b = binascii.a2b_base64(line)
-        res = res + b
-    assert res == data
-
-def test_base64invalid():
-    # Test base64 with random invalid characters sprinkled throughout
-    # (This requires a new version of binascii.)
-    MAX_BASE64 = 57
-    lines = []
-    for i in range(0, len(data), MAX_BASE64):
-        b = data[i:i+MAX_BASE64]
-        a = binascii.b2a_base64(b)
-        lines.append(a)
-
-    fillers = ""
-    valid = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/"
-    for i in xrange(256):
-        c = chr(i)
-        if c not in valid:
-            fillers += c
-    def addnoise(line):
-        noise = fillers
-        ratio = len(line) // len(noise)
-        res = ""
-        while line and noise:
-            if len(line) // len(noise) > ratio:
-                c, line = line[0], line[1:]
-            else:
-                c, noise = noise[0], noise[1:]
-            res += c
-        return res + noise + line
-    res = ""
-    for line in map(addnoise, lines):
-        b = binascii.a2b_base64(line)
-        res += b
-    assert res == data
-
-    # Test base64 with just invalid characters, which should return
-    # empty strings. TBD: shouldn't it raise an exception instead ?
-    assert binascii.a2b_base64(fillers) == ''
-
-def test_uu():
-    MAX_UU = 45
-    lines = []
-    for i in range(0, len(data), MAX_UU):
-        b = data[i:i+MAX_UU]
-        a = binascii.b2a_uu(b)
-        lines.append(a)
-    res = ""
-    for line in lines:
-        b = binascii.a2b_uu(line)
-        res += b
-    assert res == data
-
-    assert binascii.a2b_uu("\x7f") == "\x00"*31
-    assert binascii.a2b_uu("\x80") == "\x00"*32
-    assert binascii.a2b_uu("\xff") == "\x00"*31
-    py.test.raises(binascii.Error, binascii.a2b_uu, "\xff\x00")
-    py.test.raises(binascii.Error, binascii.a2b_uu, "!!!!")
-
-    py.test.raises(binascii.Error, binascii.b2a_uu, 46*"!")
-
-def test_crc32():
-    crc = binascii.crc32("Test the CRC-32 of")
-    crc = binascii.crc32(" this string.", crc)
-    assert crc == 1571220330
-    
-    crc = binascii.crc32('frotz\n', 0)
-    assert crc == -372923920
-
-    py.test.raises(TypeError, binascii.crc32)
-
-def test_hex():
-    # test hexlification
-    s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000'
-    t = binascii.b2a_hex(s)
-    u = binascii.a2b_hex(t)
-    assert s == u
-    py.test.raises(TypeError, binascii.a2b_hex, t[:-1])
-    py.test.raises(TypeError, binascii.a2b_hex, t[:-1] + 'q')
-
-    # Verify the treatment of Unicode strings
-    assert binascii.hexlify(unicode('a', 'ascii')) == '61'
-
-def test_qp():
-    # A test for SF bug 534347 (segfaults without the proper fix)
-    try:
-        binascii.a2b_qp("", **{1:1})
-    except TypeError:
-        pass
-    else:
-        fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError")
-    assert binascii.a2b_qp("= ") == "= "
-    assert binascii.a2b_qp("==") == "="
-    assert binascii.a2b_qp("=AX") == "=AX"
-    py.test.raises(TypeError, binascii.b2a_qp, foo="bar")
-    assert binascii.a2b_qp("=00\r\n=00") == "\x00\r\n\x00"
-    assert binascii.b2a_qp("\xff\r\n\xff\n\xff") == "=FF\r\n=FF\r\n=FF"
-    target = "0"*75+"=\r\n=FF\r\n=FF\r\n=FF"
-    assert binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff") == target
-
-def test_empty_string():
-    # A test for SF bug #1022953.  Make sure SystemError is not raised.
-    for n in ['b2a_qp', 'a2b_hex', 'b2a_base64', 'a2b_uu', 'a2b_qp',
-              'b2a_hex', 'unhexlify', 'hexlify', 'crc32', 'b2a_hqx',
-              'a2b_hqx', 'a2b_base64', 'rlecode_hqx', 'b2a_uu',
-              'rledecode_hqx']:
-        f = getattr(binascii, n)
-        f('')
-    binascii.crc_hqx('', 0)
-
-def test_qp_bug_case():
-    assert binascii.b2a_qp('y'*77, False, False) == 'y'*75 + '=\nyy'
-    assert binascii.b2a_qp(' '*77, False, False) == ' '*75 + '=\n =20'
-    assert binascii.b2a_qp('y'*76, False, False) == 'y'*76
-    assert binascii.b2a_qp(' '*76, False, False) == ' '*75 + '=\n=20'
-
-def test_wrong_padding():
-    s = 'CSixpLDtKSC/7Liuvsax4iC6uLmwMcijIKHaILzSwd/H0SC8+LCjwLsgv7W/+Mj3IQ'
-    py.test.raises(binascii.Error, binascii.a2b_base64, s)
-
-def test_crap_after_padding():
-    s = 'xxx=axxxx'
-    assert binascii.a2b_base64(s) == '\xc7\x1c'
-
-def test_wrong_args():
-    # this should grow as a way longer list
-    py.test.raises(TypeError, binascii.a2b_base64, 42)
diff --git a/lib_pypy/pypy_test/test_locale.py b/lib_pypy/pypy_test/test_locale.py
deleted file mode 100644
--- a/lib_pypy/pypy_test/test_locale.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from __future__ import absolute_import
-import py
-import sys
-
-from lib_pypy.ctypes_config_cache import rebuild
-rebuild.rebuild_one('locale.ctc.py')
-
-from lib_pypy import _locale
-
-
-def setup_module(mod):
-    if sys.platform == 'darwin':
-        py.test.skip("Locale support on MacOSX is minimal and cannot be tested")
-
-class TestLocale:
-    def setup_class(cls):
-        cls.oldlocale = _locale.setlocale(_locale.LC_NUMERIC)
-        if sys.platform.startswith("win"):
-            cls.tloc = "en"
-        elif sys.platform.startswith("freebsd"):
-            cls.tloc = "en_US.US-ASCII"
-        else:
-            cls.tloc = "en_US.UTF8"
-        try:
-            _locale.setlocale(_locale.LC_NUMERIC, cls.tloc)
-        except _locale.Error:
-            py.test.skip("test locale %s not supported" % cls.tloc)
-            
-    def teardown_class(cls):
-        _locale.setlocale(_locale.LC_NUMERIC, cls.oldlocale)
-
-    def test_format(self):
-        py.test.skip("XXX fix or kill me")
-
-        def testformat(formatstr, value, grouping = 0, output=None):
-            if output:
-                print "%s %% %s =? %s ..." %\
-                      (repr(formatstr), repr(value), repr(output)),
-            else:
-                print "%s %% %s works? ..." % (repr(formatstr), repr(value)),
-            result = locale.format(formatstr, value, grouping = grouping)
-            assert result == output
-
-        testformat("%f", 1024, grouping=1, output='1,024.000000')
-        testformat("%f", 102, grouping=1, output='102.000000')
-        testformat("%f", -42, grouping=1, output='-42.000000')
-        testformat("%+f", -42, grouping=1, output='-42.000000')
-        testformat("%20.f", -42, grouping=1, output='                 -42')
-        testformat("%+10.f", -4200, grouping=1, output='    -4,200')
-        testformat("%-10.f", 4200, grouping=1, output='4,200     ')
-
-    def test_getpreferredencoding(self):
-        py.test.skip("XXX fix or kill me")
-        # Invoke getpreferredencoding to make sure it does not cause exceptions
-        _locale.getpreferredencoding()
-
-    # Test BSD Rune locale's bug for isctype functions.
-    def test_bsd_bug(self):
-        def teststrop(s, method, output):
-            print "%s.%s() =? %s ..." % (repr(s), method, repr(output)),
-            result = getattr(s, method)()
-            assert result == output
-
-        oldlocale = _locale.setlocale(_locale.LC_CTYPE)
-        _locale.setlocale(_locale.LC_CTYPE, self.tloc)
-        try:
-            teststrop('\x20', 'isspace', True)
-            teststrop('\xa0', 'isspace', False)
-            teststrop('\xa1', 'isspace', False)
-            teststrop('\xc0', 'isalpha', False)
-            teststrop('\xc0', 'isalnum', False)
-            teststrop('\xc0', 'isupper', False)
-            teststrop('\xc0', 'islower', False)
-            teststrop('\xec\xa0\xbc', 'split', ['\xec\xa0\xbc'])
-            teststrop('\xed\x95\xa0', 'strip', '\xed\x95\xa0')
-            teststrop('\xcc\x85', 'lower', '\xcc\x85')
-            teststrop('\xed\x95\xa0', 'upper', '\xed\x95\xa0')
-        finally:
-            _locale.setlocale(_locale.LC_CTYPE, oldlocale)
diff --git a/lib_pypy/pypy_test/test_site_extra.py b/lib_pypy/pypy_test/test_site_extra.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/pypy_test/test_site_extra.py
@@ -0,0 +1,13 @@
+import sys, os
+
+
+def test_preimported_modules():
+    lst = ['__builtin__', '_codecs', '_warnings', 'codecs', 'encodings',
+           'exceptions', 'signal', 'sys', 'zipimport']
+    g = os.popen("'%s' -c 'import sys; print sorted(sys.modules)'" %
+                 (sys.executable,))
+    real_data = g.read()
+    g.close()
+    for name in lst:
+        quoted_name = repr(name)
+        assert quoted_name in real_data
diff --git a/lib_pypy/pypy_test/test_struct_extra.py b/lib_pypy/pypy_test/test_struct_extra.py
deleted file mode 100644
--- a/lib_pypy/pypy_test/test_struct_extra.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from __future__ import absolute_import
-from lib_pypy import struct 
-
-def test_simple():
-    morezeros = '\x00' * (struct.calcsize('l')-4)
-    assert struct.pack('<l', 16) == '\x10\x00\x00\x00' + morezeros
-    assert struct.pack('4s', 'WAVE') == 'WAVE'
-    assert struct.pack('<4sl', 'WAVE', 16) == 'WAVE\x10\x00\x00\x00' + morezeros
-    s = 'ABCD01234567\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00'
-    assert struct.unpack('<4s4H2lH', s) == ('ABCD', 0x3130, 0x3332, 0x3534,
-                                            0x3736, 1, 2, 3)
-
-def test_infinity():
-    INFINITY = 1e200 * 1e200
-    assert str(struct.unpack("!d", struct.pack("!d", INFINITY))[0]) \
-           == str(INFINITY)
-    assert str(struct.unpack("!d", struct.pack("!d", -INFINITY))[0]) \
-           == str(-INFINITY)
-
-def test_nan():
-    INFINITY = 1e200 * 1e200
-    NAN = INFINITY / INFINITY
-    assert str(struct.unpack("!d", '\xff\xf8\x00\x00\x00\x00\x00\x00')[0]) \
-           == str(NAN)
-    assert str(struct.unpack("!d", struct.pack("!d", NAN))[0]) == str(NAN)
diff --git a/lib_pypy/struct.py b/lib_pypy/struct.py
deleted file mode 100644
--- a/lib_pypy/struct.py
+++ /dev/null
@@ -1,417 +0,0 @@
-#
-# This module is a pure Python version of pypy.module.struct.
-# It is only imported if the vastly faster pypy.module.struct is not
-# compiled in.  For now we keep this version for reference and
-# because pypy.module.struct is not ootype-backend-friendly yet.
-#
-
-"""Functions to convert between Python values and C structs.
-Python strings are used to hold the data representing the C struct
-and also as format strings to describe the layout of data in the C struct.
-
-The optional first format char indicates byte order, size and alignment:
- @: native order, size & alignment (default)
- =: native order, std. size & alignment
- <: little-endian, std. size & alignment
- >: big-endian, std. size & alignment
- !: same as >
-
-The remaining chars indicate types of args and must match exactly;
-these can be preceded by a decimal repeat count:
-   x: pad byte (no data);
-   c:char;
-   b:signed byte;
-   B:unsigned byte;
-   h:short;
-   H:unsigned short;
-   i:int;
-   I:unsigned int;
-   l:long;
-   L:unsigned long;
-   f:float;
-   d:double.
-Special cases (preceding decimal count indicates length):
-   s:string (array of char); p: pascal string (with count byte).
-Special case (only available in native format):
-   P:an integer type that is wide enough to hold a pointer.
-Special case (not in native mode unless 'long long' in platform C):
-   q:long long;
-   Q:unsigned long long
-Whitespace between formats is ignored.
-
-The variable struct.error is an exception raised on errors."""
-
-import math, sys
-
-# TODO: XXX Find a way to get information on native sizes and alignments
-class StructError(Exception):
-    pass
-error = StructError
-def unpack_int(data,index,size,le):
-    bytes = [ord(b) for b in data[index:index+size]]
-    if le == 'little':
-        bytes.reverse()
-    number = 0L
-    for b in bytes:
-        number = number << 8 | b
-    return int(number)
-
-def unpack_signed_int(data,index,size,le):
-    number = unpack_int(data,index,size,le)
-    max = 2**(size*8)
-    if number > 2**(size*8 - 1) - 1:
-        number = int(-1*(max - number))
-    return number
-
-INFINITY = 1e200 * 1e200
-NAN = INFINITY / INFINITY
-
-def unpack_char(data,index,size,le):
-    return data[index:index+size]
-
-def pack_int(number,size,le):
-    x=number
-    res=[]
-    for i in range(size):
-        res.append(chr(x&0xff))
-        x >>= 8
-    if le == 'big':
-        res.reverse()
-    return ''.join(res)
-
-def pack_signed_int(number,size,le):
-    if not isinstance(number, (int,long)):
-        raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer"
-    if  number > 2**(8*size-1)-1 or number < -1*2**(8*size-1):
-        raise OverflowError,"Number:%i too large to convert" % number
-    return pack_int(number,size,le)
-
-def pack_unsigned_int(number,size,le):
-    if not isinstance(number, (int,long)):
-        raise StructError,"argument for i,I,l,L,q,Q,h,H must be integer"
-    if number < 0:
-        raise TypeError,"can't convert negative long to unsigned"
-    if number > 2**(8*size)-1:
-        raise OverflowError,"Number:%i too large to convert" % number
-    return pack_int(number,size,le)
-
-def pack_char(char,size,le):
-    return str(char)
-
-def isinf(x):
-    return x != 0.0 and x / 2 == x
-def isnan(v):
-    return v != v*1.0 or (v == 1.0 and v == 2.0)
-
-def pack_float(x, size, le):
-    unsigned = float_pack(x, size)
-    result = []
-    for i in range(8):
-        result.append(chr((unsigned >> (i * 8)) & 0xFF))
-    if le == "big":
-        result.reverse()
-    return ''.join(result)
-
-def unpack_float(data, index, size, le):
-    binary = [data[i] for i in range(index, index + 8)]
-    if le == "big":
-        binary.reverse()
-    unsigned = 0
-    for i in range(8):
-        unsigned |= ord(binary[i]) << (i * 8)
-    return float_unpack(unsigned, size, le)
-
-def round_to_nearest(x):
-    """Python 3 style round:  round a float x to the nearest int, but
-    unlike the builtin Python 2.x round function:
-
-      - return an int, not a float
-      - do round-half-to-even, not round-half-away-from-zero.
-
-    We assume that x is finite and nonnegative; except wrong results
-    if you use this for negative x.
-
-    """
-    int_part = int(x)
-    frac_part = x - int_part
-    if frac_part > 0.5 or frac_part == 0.5 and int_part & 1 == 1:
-        int_part += 1
-    return int_part
-
-def float_unpack(Q, size, le):
-    """Convert a 32-bit or 64-bit integer created
-    by float_pack into a Python float."""
-
-    if size == 8:
-        MIN_EXP = -1021  # = sys.float_info.min_exp
-        MAX_EXP = 1024   # = sys.float_info.max_exp
-        MANT_DIG = 53    # = sys.float_info.mant_dig
-        BITS = 64
-    elif size == 4:
-        MIN_EXP = -125   # C's FLT_MIN_EXP
-        MAX_EXP = 128    # FLT_MAX_EXP
-        MANT_DIG = 24    # FLT_MANT_DIG
-        BITS = 32
-    else:
-        raise ValueError("invalid size value")
-
-    if Q >> BITS:
-         raise ValueError("input out of range")
-
-    # extract pieces
-    sign = Q >> BITS - 1
-    exp = (Q & ((1 << BITS - 1) - (1 << MANT_DIG - 1))) >> MANT_DIG - 1
-    mant = Q & ((1 << MANT_DIG - 1) - 1)
-
-    if exp == MAX_EXP - MIN_EXP + 2:
-        # nan or infinity
-        result = float('nan') if mant else float('inf')
-    elif exp == 0:
-        # subnormal or zero
-        result = math.ldexp(float(mant), MIN_EXP - MANT_DIG)
-    else:
-        # normal
-        mant += 1 << MANT_DIG - 1
-        result = math.ldexp(float(mant), exp + MIN_EXP - MANT_DIG - 1)
-    return -result if sign else result
-
-
-def float_pack(x, size):
-    """Convert a Python float x into a 64-bit unsigned integer
-    with the same byte representation."""
-
-    if size == 8:
-        MIN_EXP = -1021  # = sys.float_info.min_exp
-        MAX_EXP = 1024   # = sys.float_info.max_exp
-        MANT_DIG = 53    # = sys.float_info.mant_dig
-        BITS = 64
-    elif size == 4:
-        MIN_EXP = -125   # C's FLT_MIN_EXP
-        MAX_EXP = 128    # FLT_MAX_EXP
-        MANT_DIG = 24    # FLT_MANT_DIG
-        BITS = 32
-    else:
-        raise ValueError("invalid size value")
-
-    sign = math.copysign(1.0, x) < 0.0
-    if math.isinf(x):
-        mant = 0
-        exp = MAX_EXP - MIN_EXP + 2
-    elif math.isnan(x):
-        mant = 1 << (MANT_DIG-2) # other values possible
-        exp = MAX_EXP - MIN_EXP + 2
-    elif x == 0.0:
-        mant = 0
-        exp = 0
-    else:
-        m, e = math.frexp(abs(x))  # abs(x) == m * 2**e
-        exp = e - (MIN_EXP - 1)
-        if exp > 0:
-            # Normal case.
-            mant = round_to_nearest(m * (1 << MANT_DIG))
-            mant -= 1 << MANT_DIG - 1
-        else:
-            # Subnormal case.
-            if exp + MANT_DIG - 1 >= 0:
-                mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1))
-            else:
-                mant = 0
-            exp = 0
-
-        # Special case: rounding produced a MANT_DIG-bit mantissa.
-        assert 0 <= mant <= 1 << MANT_DIG - 1
-        if mant == 1 << MANT_DIG - 1:
-            mant = 0
-            exp += 1
-
-        # Raise on overflow (in some circumstances, may want to return
-        # infinity instead).
-        if exp >= MAX_EXP - MIN_EXP + 2:
-             raise OverflowError("float too large to pack in this format")
-
-    # check constraints
-    assert 0 <= mant < 1 << MANT_DIG - 1
-    assert 0 <= exp <= MAX_EXP - MIN_EXP + 2
-    assert 0 <= sign <= 1
-    return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant
-
-
-big_endian_format = {
-    'x':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None},
-    'b':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'B':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'c':{ 'size' : 1, 'alignment' : 0, 'pack' : pack_char, 'unpack' : unpack_char},
-    's':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None},
-    'p':{ 'size' : 1, 'alignment' : 0, 'pack' : None, 'unpack' : None},
-    'h':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'H':{ 'size' : 2, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'i':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'I':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'l':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'L':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_signed_int, 'unpack' : unpack_signed_int},
-    'Q':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_unsigned_int, 'unpack' : unpack_int},
-    'f':{ 'size' : 4, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float},
-    'd':{ 'size' : 8, 'alignment' : 0, 'pack' : pack_float, 'unpack' : unpack_float},
-    }
-default = big_endian_format
-formatmode={ '<' : (default, 'little'),
-             '>' : (default, 'big'),
-             '!' : (default, 'big'),
-             '=' : (default, sys.byteorder),
-             '@' : (default, sys.byteorder)
-            }
-
-def getmode(fmt):
-    try:
-        formatdef,endianness = formatmode[fmt[0]]
-        index = 1
-    except KeyError:
-        formatdef,endianness = formatmode['@']
-        index = 0
-    return formatdef,endianness,index
-def getNum(fmt,i):
-    num=None
-    cur = fmt[i]
-    while ('0'<= cur ) and ( cur <= '9'):
-        if num == None:
-            num = int(cur)
-        else:
-            num = 10*num + int(cur)
-        i += 1
-        cur = fmt[i]
-    return num,i
-
-def calcsize(fmt):
-    """calcsize(fmt) -> int
-    Return size of C struct described by format string fmt.
-    See struct.__doc__ for more on format strings."""
-
-    formatdef,endianness,i = getmode(fmt)
-    num = 0
-    result = 0
-    while i<len(fmt):
-        num,i = getNum(fmt,i)
-        cur = fmt[i]
-        try:
-            format = formatdef[cur]
-        except KeyError:
-            raise StructError,"%s is not a valid format"%cur
-        if num != None :
-            result += num*format['size']
-        else:
-            result += format['size']
-        num = 0
-        i += 1
-    return result
-
-def pack(fmt,*args):
-    """pack(fmt, v1, v2, ...) -> string
-       Return string containing values v1, v2, ... packed according to fmt.
-       See struct.__doc__ for more on format strings."""
-    formatdef,endianness,i = getmode(fmt)
-    args = list(args)
-    n_args = len(args)
-    result = []
-    while i<len(fmt):
-        num,i = getNum(fmt,i)
-        cur = fmt[i]
-        try:
-            format = formatdef[cur]
-        except KeyError:
-            raise StructError,"%s is not a valid format"%cur
-        if num == None :
-            num_s = 0
-            num = 1
-        else:
-            num_s = num
-
-        if cur == 'x':
-            result += ['\0'*num]
-        elif cur == 's':
-            if isinstance(args[0], str):
-                padding = num - len(args[0])
-                result += [args[0][:num] + '\0'*padding]
-                args.pop(0)
-            else:
-                raise StructError,"arg for string format not a string"
-        elif cur == 'p':
-            if isinstance(args[0], str):
-                padding = num - len(args[0]) - 1
-
-                if padding > 0:
-                    result += [chr(len(args[0])) + args[0][:num-1] + '\0'*padding]
-                else:
-                    if num<255:
-                        result += [chr(num-1) + args[0][:num-1]]
-                    else:
-                        result += [chr(255) + args[0][:num-1]]
-                args.pop(0)
-            else:
-                raise StructError,"arg for string format not a string"
-
-        else:
-            if len(args) < num:
-                raise StructError,"insufficient arguments to pack"
-            for var in args[:num]:
-                result += [format['pack'](var,format['size'],endianness)]
-            args=args[num:]
-        num = None
-        i += 1
-    if len(args) != 0:
-        raise StructError,"too many arguments for pack format"
-    return ''.join(result)
-
-def unpack(fmt,data):
-    """unpack(fmt, string) -> (v1, v2, ...)
-       Unpack the string, containing packed C structure data, according
-       to fmt.  Requires len(string)==calcsize(fmt).
-       See struct.__doc__ for more on format strings."""
-    formatdef,endianness,i = getmode(fmt)
-    j = 0
-    num = 0
-    result = []
-    length= calcsize(fmt)
-    if length != len (data):
-        raise StructError,"unpack str size does not match format"
-    while i<len(fmt):
-        num,i=getNum(fmt,i)
-        cur = fmt[i]
-        i += 1
-        try:
-            format = formatdef[cur]
-        except KeyError:
-            raise StructError,"%s is not a valid format"%cur
-
-        if not num :
-            num = 1
-
-        if cur == 'x':
-            j += num
-        elif cur == 's':
-            result.append(data[j:j+num])
-            j += num
-        elif cur == 'p':
-            n=ord(data[j])
-            if n >= num:
-                n = num-1
-            result.append(data[j+1:j+n+1])
-            j += num
-        else:
-            for n in range(num):
-                result += [format['unpack'](data,j,format['size'],endianness)]
-                j += format['size']
-
-    return tuple(result)
-
-def pack_into(fmt, buf, offset, *args):
-    data = pack(fmt, *args)
-    buffer(buf)[offset:offset+len(data)] = data
-
-def unpack_from(fmt, buf, offset=0):
-    size = calcsize(fmt)
-    data = buffer(buf)[offset:offset+size]
-    if len(data) != size:
-        raise error("unpack_from requires a buffer of at least %d bytes"
-                    % (size,))
-    return unpack(fmt, data)
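For reference, the pure-Python module removed above mirrors the interface of
the standard struct module, so the format strings documented in its docstring
can be exercised with the built-in module.  A minimal sketch (not part of the
patch):

    import struct

    # '<' = little-endian, standard sizes: short, short, long -> 2+2+4 bytes
    packed = struct.pack('<hhl', 1, 2, 3)
    assert packed == '\x01\x00\x02\x00\x03\x00\x00\x00'
    assert struct.calcsize('<hhl') == 8
    assert struct.unpack('<hhl', packed) == (1, 2, 3)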
diff --git a/pypy/__init__.py b/pypy/__init__.py
--- a/pypy/__init__.py
+++ b/pypy/__init__.py
@@ -1,1 +1,16 @@
 # Empty
+
+# XXX Should be empty again, soon.
+# XXX hack for win64:
+# This patch must stay here until the END OF STAGE 1
+# When all tests work, this branch will be merged
+# and the branch stage 2 is started, where we remove this patch.
+import sys
+if hasattr(sys, "maxsize"):
+    if sys.maxint != sys.maxsize:
+        sys.maxint = sys.maxsize
+        import warnings
+        warnings.warn("""\n
+---> This win64 port is now in stage 1: sys.maxint was modified.
+---> When pypy/__init__.py becomes empty again, we have reached stage 2.
+""")
diff --git a/pypy/annotation/builtin.py b/pypy/annotation/builtin.py
--- a/pypy/annotation/builtin.py
+++ b/pypy/annotation/builtin.py
@@ -37,7 +37,11 @@
     try:
         realresult = func(*args)
     except (ValueError, OverflowError):
-        return s_ImpossibleValue   # no possible answer for this precise input
+        # no possible answer for this precise input.  Be conservative
+        # and keep the computation non-constant.  Example:
+        # unichr(constant-that-doesn't-fit-16-bits) on platforms where
+        # the underlying Python has sys.maxunicode == 0xffff.
+        return s_result
     s_realresult = immutablevalue(realresult)
     if not s_result.contains(s_realresult):
         raise Exception("%s%r returned %r, which is not contained in %s" % (
@@ -163,7 +167,7 @@
                         r.const = False
                 return r
                 
-            assert not issubclass(typ, (int,long)) or typ in (bool, int), (
+            assert not issubclass(typ, (int, long)) or typ in (bool, int, long), (
                 "for integers only isinstance(.,int|r_uint) are supported")
  
             if s_obj.is_constant():
@@ -297,7 +301,7 @@
 def robjmodel_instantiate(s_clspbc):
     assert isinstance(s_clspbc, SomePBC)
     clsdef = None
-    more_than_one = len(s_clspbc.descriptions)
+    more_than_one = len(s_clspbc.descriptions) > 1
     for desc in s_clspbc.descriptions:
         cdef = desc.getuniqueclassdef()
         if more_than_one:
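The comment added in the first hunk above refers to narrow-Unicode builds of
the host Python; a small sketch of the behaviour it describes (assuming a
Python 2 interpreter):

    import sys

    if sys.maxunicode == 0xffff:        # narrow build
        try:
            unichr(0x10000)             # does not fit 16 bits
        except ValueError:
            pass                        # no constant answer is possible here
    else:                               # wide build
        assert len(unichr(0x10000)) == 1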
diff --git a/pypy/annotation/classdef.py b/pypy/annotation/classdef.py
--- a/pypy/annotation/classdef.py
+++ b/pypy/annotation/classdef.py
@@ -134,13 +134,19 @@
             if self.name not in homedef.classdesc.all_enforced_attrs:
                 self.attr_allowed = False
                 if not self.readonly:
-                    raise NoSuchAttrError(homedef, self.name)
+                    raise NoSuchAttrError(
+                        "setting forbidden attribute %r on %r" % (
+                        self.name, homedef))
 
     def modified(self, classdef='?'):
         self.readonly = False
         if not self.attr_allowed:
-            raise NoSuchAttrError(classdef, self.name)
-
+            raise NoSuchAttrError(
+                "Attribute %r on %r should be read-only.\n" % (self.name,
+                                                               classdef) +
+                "This error can be caused by another 'getattr' that promoted\n"
+                "the attribute here; the list of read locations is:\n" +
+                '\n'.join([str(loc[0]) for loc in self.read_locations]))
 
 class ClassDef(object):
     "Wraps a user class."
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py
--- a/pypy/annotation/description.py
+++ b/pypy/annotation/description.py
@@ -398,7 +398,6 @@
             cls = pyobj
             base = object
             baselist = list(cls.__bases__)
-            baselist.reverse()
 
             # special case: skip BaseException in Python 2.5, and pretend
             # that all exceptions ultimately inherit from Exception instead
@@ -408,17 +407,27 @@
             elif baselist == [py.builtin.BaseException]:
                 baselist = [Exception]
 
+            mixins_before = []
+            mixins_after = []
             for b1 in baselist:
                 if b1 is object:
                     continue
                 if b1.__dict__.get('_mixin_', False):
-                    self.add_mixin(b1)
+                    if base is object:
+                        mixins_before.append(b1)
+                    else:
+                        mixins_after.append(b1)
                 else:
                     assert base is object, ("multiple inheritance only supported "
                                             "with _mixin_: %r" % (cls,))
                     base = b1
+            if mixins_before and mixins_after:
+                raise Exception("unsupported: class %r has mixin bases both"
+                                " before and after the regular base" % (self,))
+            self.add_mixins(mixins_after, check_not_in=base)
+            self.add_mixins(mixins_before)
+            self.add_sources_for_class(cls)
 
-            self.add_sources_for_class(cls)
             if base is not object:
                 self.basedesc = bookkeeper.getdesc(base)
 
@@ -480,14 +489,30 @@
                 return
         self.classdict[name] = Constant(value)
 
-    def add_mixin(self, base):
-        for subbase in base.__bases__:
-            if subbase is object:
-                continue
-            assert subbase.__dict__.get("_mixin_", False), ("Mixin class %r has non"
-                "mixin base class %r" % (base, subbase))
-            self.add_mixin(subbase)
-        self.add_sources_for_class(base, mixin=True)
+    def add_mixins(self, mixins, check_not_in=object):
+        if not mixins:
+            return
+        A = type('tmp', tuple(mixins) + (object,), {})
+        mro = A.__mro__
+        assert mro[0] is A and mro[-1] is object
+        mro = mro[1:-1]
+        #
+        skip = set()
+        def add(cls):
+            if cls is not object:
+                for base in cls.__bases__:
+                    add(base)
+                for name in cls.__dict__:
+                    skip.add(name)
+        add(check_not_in)
+        #
+        for base in reversed(mro):
+            assert base.__dict__.get("_mixin_", False), ("Mixin class %r has non"
+                "mixin base class %r" % (mixins, base))
+            for name, value in base.__dict__.items():
+                if name in skip:
+                    continue
+                self.add_source_attribute(name, value, mixin=True)
 
     def add_sources_for_class(self, cls, mixin=False):
         for name, value in cls.__dict__.items():
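The new add_mixins() above orders the mixin bases by building a throwaway
class and reading its method resolution order.  A rough sketch of that trick,
using hypothetical mixin classes:

    class A(object):
        _mixin_ = True
    class B(A):
        _mixin_ = True
    class C(A):
        _mixin_ = True

    tmp = type('tmp', (B, C, object), {})
    mro = tmp.__mro__[1:-1]        # drop 'tmp' itself and 'object'
    assert mro == (B, C, A)        # attributes are then added in reversed(mro)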
diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py
--- a/pypy/annotation/model.py
+++ b/pypy/annotation/model.py
@@ -786,12 +786,15 @@
 #
 # safety check that no-one is trying to make annotation and translation
 # faster by providing the -O option to Python.
-try:
-    assert False
-except AssertionError:
-    pass   # fine
-else:
-    raise RuntimeError("The annotator relies on 'assert' statements from the\n"
+import os
+if "WINGDB_PYTHON" not in os.environ:
+    # ...but avoiding this boring check in the IDE
+    try:
+        assert False
+    except AssertionError:
+        pass   # fine
+    else:
+        raise RuntimeError("The annotator relies on 'assert' statements from the\n"
                      "\tannotated program: you cannot run it with 'python -O'.")
 
 # this has the side-effect of registering the unary and binary operations
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -1,15 +1,12 @@
 from __future__ import with_statement
-import autopath
 import py.test
 import sys
 from pypy import conftest
-from pypy.tool.udir import udir
 
 from pypy.annotation import model as annmodel
 from pypy.annotation.annrpython import RPythonAnnotator as _RPythonAnnotator
 from pypy.translator.translator import graphof as tgraphof
 from pypy.annotation import policy
-from pypy.annotation import specialize
 from pypy.annotation.listdef import ListDef, ListChangeUnallowed
 from pypy.annotation.dictdef import DictDef
 from pypy.objspace.flow.model import *
@@ -2431,6 +2428,93 @@
         assert isinstance(s.items[1], annmodel.SomeChar)
         assert isinstance(s.items[2], annmodel.SomeChar)
 
+    def test_mixin_first(self):
+        class Mixin(object):
+            _mixin_ = True
+            def foo(self): return 4
+        class Base(object):
+            def foo(self): return 5
+        class Concrete(Mixin, Base):
+            pass
+        def f():
+            return Concrete().foo()
+
+        assert f() == 4
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [])
+        assert s.const == 4
+
+    def test_mixin_last(self):
+        class Mixin(object):
+            _mixin_ = True
+            def foo(self): return 4
+        class Base(object):
+            def foo(self): return 5
+        class Concrete(Base, Mixin):
+            pass
+        def f():
+            return Concrete().foo()
+
+        assert f() == 5
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [])
+        assert s.const == 5
+
+    def test_mixin_concrete(self):
+        class Mixin(object):
+            _mixin_ = True
+            def foo(self): return 4
+        class Concrete(Mixin):
+            def foo(self): return 5
+        def f():
+            return Concrete().foo()
+
+        assert f() == 5
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [])
+        assert s.const == 5
+
+    def test_multiple_mixins_mro(self):
+        # an obscure situation, but it occurred in module/micronumpy/types.py
+        class A(object):
+            _mixin_ = True
+            def foo(self): return 1
+        class B(A):
+            _mixin_ = True
+            def foo(self): return 2
+        class C(A):
+            _mixin_ = True
+        class D(B, C):
+            _mixin_ = True
+        class Concrete(D):
+            pass
+        def f():
+            return Concrete().foo()
+
+        assert f() == 2
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [])
+        assert s.const == 2
+
+    def test_multiple_mixins_mro_2(self):
+        class A(object):
+            _mixin_ = True
+            def foo(self): return 1
+        class B(A):
+            _mixin_ = True
+            def foo(self): return 2
+        class C(A):
+            _mixin_ = True
+        class Concrete(C, B):
+            pass
+        def f():
+            return Concrete().foo()
+
+        assert f() == 2
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [])
+        assert s.const == 2
+
     def test___class___attribute(self):
         class Base(object): pass
         class A(Base): pass
@@ -2469,6 +2553,26 @@
         s = a.build_types(f, [int])
         assert s.knowntype == int
 
+    def test_slots_reads(self):
+        class A(object):
+            __slots__ = ()
+        class B(A):
+            def __init__(self, x):
+                self.x = x
+        def f(x):
+            if x:
+                a = A()
+            else:
+                a = B(x)
+            return a.x   # should explode here
+
+        a = self.RPythonAnnotator()
+        e = py.test.raises(Exception, a.build_types, f, [int])
+        # this should explode on reading the attribute 'a.x', but it can
+        # sometimes explode on 'self.x = x', which does not make much sense.
+        # But it looks hard to fix in general: we don't know yet during 'a.x'
+        # if the attribute x will be read-only or read-write.
+
     def test_unboxed_value(self):
         class A(object):
             __slots__ = ()
diff --git a/pypy/bin/rpython b/pypy/bin/rpython
new file mode 100755
--- /dev/null
+++ b/pypy/bin/rpython
@@ -0,0 +1,18 @@
+#!/usr/bin/env pypy
+
+"""RPython translation usage:
+
+rpython <translation options> target <targetoptions>
+
+run with --help for more information
+"""
+
+import sys
+from pypy.translator.goal.translate import main
+
+# no implicit targets
+if len(sys.argv) == 1:
+    print __doc__
+    sys.exit(1)
+
+main()
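Usage would presumably mirror the existing translate.py driver, with the
translation options given before the target, for example:

    pypy/bin/rpython --help
    pypy/bin/rpython -O2 targetpypystandalone.py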
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -176,9 +176,6 @@
                cmdline="--translationmodules",
                suggests=[("objspace.allworkingmodules", False)]),
 
-    BoolOption("geninterp", "specify whether geninterp should be used",
-               default=False),
-
     BoolOption("logbytecodes",
                "keep track of bytecode usage",
                default=False),
@@ -392,10 +389,6 @@
             config.objspace.std.suggest(withsmalllong=True)
         # xxx other options? ropes maybe?
 
-    # completely disable geninterp in a level 0 translation
-    if level == '0':
-        config.objspace.suggest(geninterp=False)
-
     # some optimizations have different effects depending on the typesystem
     if type_system == 'ootype':
         config.objspace.std.suggest(multimethods="doubledispatch")
diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py
--- a/pypy/config/translationoption.py
+++ b/pypy/config/translationoption.py
@@ -182,11 +182,6 @@
 
     # Flags of the TranslationContext:
     BoolOption("simplifying", "Simplify flow graphs", default=True),
-    BoolOption("builtins_can_raise_exceptions",
-               "When true, assume any call to a 'simple' builtin such as "
-               "'hex' can raise an arbitrary exception",
-               default=False,
-               cmdline=None),
     BoolOption("list_comprehension_operations",
                "When true, look for and special-case the sequence of "
                "operations that results from a list comprehension and "
diff --git a/pypy/doc/discussion/win64_todo.txt b/pypy/doc/discussion/win64_todo.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/discussion/win64_todo.txt
@@ -0,0 +1,9 @@
+2011-11-04
+ll_os.py has a problem with the file rwin32.py.
+Temporarily disabled for the win64_gborg branch. This needs to be
+investigated and re-enabled.
+Resolved, enabled.
+
+2011-11-05
+test_typed.py needs explicit tests to ensure that we
+handle word sizes right.
\ No newline at end of file
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -103,21 +103,13 @@
 
 * A concurrent garbage collector (a lot of work)
 
-Remove the GIL
---------------
+STM, a.k.a. "remove the GIL"
+----------------------------
 
-This is a major task that requires lots of thinking. However, few subprojects
-can be potentially specified, unless a better plan can be thought out:
+Removing the GIL --- or more precisely, a GIL-less thread-less solution ---
+is `now work in progress.`__  Contributions welcome.
 
-* A thread-aware garbage collector
-
-* Better RPython primitives for dealing with concurrency
-
-* JIT passes to remove locks on objects
-
-* (maybe) implement locking in Python interpreter
-
-* alternatively, look at Software Transactional Memory
+.. __: http://pypy.org/tmdonate.html
 
 Introduce new benchmarks
 ------------------------
@@ -157,6 +149,22 @@
 exported.  This would give us a one-size-fits-all generic .so file to be
 imported by any application that wants to load .so files :-)
 
+Optimising cpyext (CPython C-API compatibility layer)
+-----------------------------------------------------
+
+A lot of work has gone into PyPy's implementation of CPython's C-API over
+the last years to let it reach a practical level of compatibility, so that
+C extensions for CPython work on PyPy without major rewrites. However,
+there are still many edges and corner cases where it misbehaves, and it has
+not received any substantial optimisation so far.
+
+The objective of this project is to fix bugs in cpyext and to optimise
+several performance critical parts of it, such as the reference counting
+support and other heavily used C-API functions. The net result would be to
+have CPython extensions run much faster on PyPy than they currently do, or
+to make them work at all if they currently don't. A part of this work would
+be to get cpyext into a shape where it supports running Cython generated
+extensions.
 
 .. _`issue tracker`: http://bugs.pypy.org
 .. _`mailing list`: http://mail.python.org/mailman/listinfo/pypy-dev
diff --git a/pypy/doc/sandbox.rst b/pypy/doc/sandbox.rst
--- a/pypy/doc/sandbox.rst
+++ b/pypy/doc/sandbox.rst
@@ -82,7 +82,10 @@
 
 In pypy/translator/goal::
 
-   ./translate.py --sandbox targetpypystandalone.py
+   ./translate.py -O2 --sandbox targetpypystandalone.py
+
+If you don't have a regular PyPy installed, you should get one, because it
+translates much faster, but you can also run ``python translate.py`` instead.
 
 
 To run it, use the tools in the pypy/translator/sandbox directory::
diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst
--- a/pypy/doc/stackless.rst
+++ b/pypy/doc/stackless.rst
@@ -199,17 +199,11 @@
 The following features (present in some past Stackless version of PyPy)
 are for the time being not supported any more:
 
-* Tasklets and channels (currently ``stackless.py`` seems to import,
-  but you have tasklets on top of coroutines on top of greenlets on
-  top of continulets on top of stacklets, and it's probably not too
-  hard to cut two of these levels by adapting ``stackless.py`` to
-  use directly continulets)
-
 * Coroutines (could be rewritten at app-level)
 
-* Pickling and unpickling continulets (*)
-
-* Continuing execution of a continulet in a different thread (*)
+* Continuing execution of a continulet in a different thread
+  (but if it is "simple enough", you can pickle it and unpickle it
+  in the other thread).
 
 * Automatic unlimited stack (must be emulated__ so far)
 
@@ -217,15 +211,6 @@
 
 .. __: `recursion depth limit`_
 
-(*) Pickling, as well as changing threads, could be implemented by using
-a "soft" stack switching mode again.  We would get either "hard" or
-"soft" switches, similarly to Stackless Python 3rd version: you get a
-"hard" switch (like now) when the C stack contains non-trivial C frames
-to save, and a "soft" switch (like previously) when it contains only
-simple calls from Python to Python.  Soft-switched continulets would
-also consume a bit less RAM, and the switch might be a bit faster too
-(unsure about that; what is the Stackless Python experience?).
-
 
 Recursion depth limit
 +++++++++++++++++++++
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
new file mode 100644
--- /dev/null
+++ b/pypy/doc/tool/makecontributor.py
@@ -0,0 +1,133 @@
+import py
+import sys
+from collections import defaultdict
+import operator
+import re
+import mercurial.localrepo
+import mercurial.ui
+
+ROOT = py.path.local(__file__).join('..', '..', '..', '..')
+author_re = re.compile('(.*) <.*>')
+pair_programming_re = re.compile(r'^\((.*?)\)')
+excluded = set(["pypy", "convert-repo"])
+
+alias = {
+    'Anders Chrigstrom': ['arre'],
+    'Antonio Cuni': ['antocuni', 'anto'],
+    'Armin Rigo': ['arigo', 'arfigo', 'armin', 'arigato'],
+    'Maciej Fijalkowski': ['fijal'],
+    'Carl Friedrich Bolz': ['cfbolz', 'cf'],
+    'Samuele Pedroni': ['pedronis', 'samuele', 'samule'],
+    'Michael Hudson': ['mwh'],
+    'Holger Krekel': ['hpk', 'holger krekel', 'holger', 'hufpk'],
+    "Amaury Forgeot d'Arc": ['afa'],
+    'Alex Gaynor': ['alex', 'agaynor'],
+    'David Schneider': ['bivab', 'david'],
+    'Christian Tismer': ['chris', 'christian', 'tismer',
+                         'tismer at christia-wjtqxl.localdomain'],
+    'Benjamin Peterson': ['benjamin'],
+    'Hakan Ardo': ['hakan', 'hakanardo'],
+    'Niklaus Haldimann': ['nik'],
+    'Alexander Schremmer': ['xoraxax'],
+    'Anders Hammarquist': ['iko'],
+    'David Edelsohn': ['edelsoh', 'edelsohn'],
+    'Niko Matsakis': ['niko'],
+    'Jakub Gustak': ['jlg'],
+    'Guido Wesdorp': ['guido'],
+    'Michael Foord': ['mfoord'],
+    'Mark Pearse': ['mwp'],
+    'Toon Verwaest': ['tverwaes'],
+    'Eric van Riet Paap': ['ericvrp'],
+    'Jacob Hallen': ['jacob', 'jakob'],
+    'Anders Lehmann': ['ale', 'anders'],
+    'Bert Freudenberg': ['bert'],
+    'Boris Feigin': ['boris', 'boria'],
+    'Valentino Volonghi': ['valentino', 'dialtone'],
+    'Aurelien Campeas': ['aurelien', 'aureliene'],
+    'Adrien Di Mascio': ['adim'],
+    'Jacek Generowicz': ['Jacek', 'jacek'],
+    'Jim Hunziker': ['landtuna at gmail.com'],
+    'Kristjan Valur Jonsson': ['kristjan at kristjan-lp.ccp.ad.local'],
+    'Laura Creighton': ['lac'],
+    'Aaron Iles': ['aliles'],
+    'Ludovic Aubry': ['ludal', 'ludovic'],
+    'Lukas Diekmann': ['l.diekmann', 'ldiekmann'],
+    'Matti Picus': ['Matti Picus matti.picus at gmail.com',
+                    'matthp', 'mattip', 'mattip>'],
+    'Michael Cheng': ['mikefc'],
+    'Richard Emslie': ['rxe'],
+    'Roberto De Ioris': ['roberto at goyle',
+                         'roberto at mrspurr'],
+    'Sven Hager': ['hager'],
+    'Tomo Cocoa': ['cocoatomo'],
+    }
+
+alias_map = {}
+for name, nicks in alias.iteritems():
+    for nick in nicks:
+        alias_map[nick] = name
+
+def get_canonical_author(name):
+    match = author_re.match(name)
+    if match:
+        name = match.group(1)
+    return alias_map.get(name, name)
+
+ignored_nicknames = defaultdict(int)
+
+def get_more_authors(log):
+    match = pair_programming_re.match(log)
+    if not match:
+        return set()
+    ignore_words = ['around', 'consulting', 'yesterday', 'for a bit', 'thanks',
+                    'in-progress', 'bits of', 'even a little', 'floating',]
+    sep_words = ['and', ';', '+', '/', 'with special  by']
+    nicknames = match.group(1)
+    for word in ignore_words:
+        nicknames = nicknames.replace(word, '')
+    for word in sep_words:
+        nicknames = nicknames.replace(word, ',')
+    nicknames = [nick.strip().lower() for nick in nicknames.split(',')]
+    authors = set()
+    for nickname in nicknames:
+        author = alias_map.get(nickname)
+        if not author:
+            ignored_nicknames[nickname] += 1
+        else:
+            authors.add(author)
+    return authors
+
+def main(show_numbers):
+    ui = mercurial.ui.ui()
+    repo = mercurial.localrepo.localrepository(ui, str(ROOT))
+    authors_count = defaultdict(int)
+    for i in repo:
+        ctx = repo[i]
+        authors = set()
+        authors.add(get_canonical_author(ctx.user()))
+        authors.update(get_more_authors(ctx.description()))
+        for author in authors:
+            if author not in excluded:
+                authors_count[author] += 1
+
+    # uncomment the next lines to get the list of nicknames which could not be
+    # parsed from commit logs
+    ## items = ignored_nicknames.items()
+    ## items.sort(key=operator.itemgetter(1), reverse=True)
+    ## for name, n in items:
+    ##     if show_numbers:
+    ##         print '%5d %s' % (n, name)
+    ##     else:
+    ##         print name
+                
+    items = authors_count.items()
+    items.sort(key=operator.itemgetter(1), reverse=True)
+    for name, n in items:
+        if show_numbers:
+            print '%5d %s' % (n, name)
+        else:
+            print name
+
+if __name__ == '__main__':
+    show_numbers = '-n' in sys.argv
+    main(show_numbers)
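Based on the argument handling at the end of the script, a typical invocation
from a PyPy checkout would be something like:

    python pypy/doc/tool/makecontributor.py        # names only, by commit count
    python pypy/doc/tool/makecontributor.py -n     # prefix each name with its count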
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -18,7 +18,8 @@
 Edition.  Other configurations may work as well.
 
 The translation scripts will set up the appropriate environment variables
-for the compiler.  They will attempt to locate the same compiler version that
+for the compiler, so you do not need to run vcvars before translation.  
+They will attempt to locate the same compiler version that
 was used to build the Python interpreter doing the
 translation.  Failing that, they will pick the most recent Visual Studio
 compiler they can find.  In addition, the target architecture
@@ -26,7 +27,7 @@
 using a 32 bit Python and vice versa.
 
 **Note:** PyPy is currently not supported for 64 bit Windows, and translation
-will be aborted in this case.
+will fail in this case.
 
 The compiler is all you need to build pypy-c, but it will miss some
 modules that relies on third-party libraries.  See below how to get
@@ -57,7 +58,8 @@
 install third-party libraries.  We chose to install them in the parent
 directory of the pypy checkout.  For example, if you installed pypy in
 ``d:\pypy\trunk\`` (This directory contains a README file), the base
-directory is ``d:\pypy``.
+directory is ``d:\pypy``. You may choose different values by setting the
+INCLUDE, LIB and PATH (for DLLs) environment variables.
 
 The Boehm garbage collector
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -126,18 +128,54 @@
 ------------------------
 
 You can compile pypy with the mingw compiler, using the --cc=mingw32 option;
-mingw.exe must be on the PATH.
+gcc.exe must be on the PATH. If the --cc flag does not begin with "ming", it should be
+the name of a valid gcc-derivative compiler, e.g. x86_64-w64-mingw32-gcc for the 64 bit
+compiler creating a 64 bit target.
 
-libffi for the mingw32 compiler
+libffi for the mingw compiler
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-To enable the _rawffi (and ctypes) module, you need to compile a mingw32
-version of libffi.  I downloaded the `libffi source files`_, and extracted
-them in the base directory.  Then run::
+To enable the _rawffi (and ctypes) module, you need to compile a mingw
+version of libffi.  Here is one way to do this, which should allow you to try
+to build for win64 or win32:
+
+#. Download and unzip a `mingw32 build`_ or `mingw64 build`_, say into c:\mingw
+#. If you do not use cygwin, you will need msys to provide make, 
+   autoconf tools and other goodies.
+
+    #. Download and unzip a `msys for mingw`_, say into c:\msys
+    #. Edit the c:\msys\etc\fstab file to mount c:\mingw
+
+#. Download and unzip the `libffi source files`_, and extract
+   them in the base directory.  
+#. Run c:\msys\msys.bat or a cygwin shell which should make you
+   feel better since it is a shell prompt with shell tools.
+#. From inside the shell, cd to the libffi directory and do::
 
     sh ./configure
     make
     cp .libs/libffi-5.dll <somewhere on the PATH>
 
+If you can't find the dll, and libtool issued a warning about
+"undefined symbols not allowed", you will need to edit the libffi
+Makefile in the toplevel directory. Add the flag -no-undefined to
+the definition of libffi_la_LDFLAGS.
+
+If you wish to experiment with win64, you must run configure with flags::
+
+    sh ./configure --build=x86_64-w64-mingw32 --host=x86_64-w64-mingw32
+
+or such, depending on your mingw64 download.
+
+Hacking on PyPy with the mingw compiler
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Since hacking on PyPy means running tests, you will need a way to specify
+the mingw compiler when hacking (as opposed to translating). As of
+March 2012, --cc is not a valid option for pytest.py. However, setting the
+environment variable CC allows you to choose a compiler.
+
+.. _`mingw32 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win32/Automated%20Builds
+.. _`mingw64 build`: http://sourceforge.net/projects/mingw-w64/files/Toolchains%20targetting%20Win64/Automated%20Builds
+.. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29   
 .. _`libffi source files`: http://sourceware.org/libffi/
 .. _`RPython translation toolchain`: translation.html
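Following the "hacking" paragraph above, selecting the mingw compiler for test
runs would presumably go through the CC environment variable, e.g. from an
msys or cygwin shell (the compiler name and test directory are only examples):

    export CC=x86_64-w64-mingw32-gcc
    python pytest.py pypy/rlib/test/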
diff --git a/pypy/doc/you-want-to-help.rst b/pypy/doc/you-want-to-help.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/you-want-to-help.rst
@@ -0,0 +1,86 @@
+
+You want to help with PyPy, now what?
+=====================================
+
+PyPy is a very large project that has a reputation for being hard to dive into.
+Some of this reputation is warranted, some of it is purely accidental. There are three
+important lessons that everyone willing to contribute should learn:
+
+* PyPy has layers. There are many pieces of architecture that are very well
+  separated from each other. More about this below, but often the manifestation
+  of this is that things are at a different layer than you would expect them
+  to be. For example if you are looking for the JIT implementation, you will
+  not find it in the implementation of the Python programming language.
+
+* Because of the above, we are very serious about Test Driven Development.
+  It's not only what we believe in; PyPy's architecture also works very
+  well with TDD and not so well without it. Often development means
+  progressing in an unrelated corner, one unit test at a time, and then
+  flipping a giant switch that brings it all together.
+  (It generally works out of the box.  If it doesn't, then we didn't
+  write enough unit tests.)  It's worth repeating: the PyPy
+  approach is great if you do TDD, not so great otherwise.
+
+* PyPy uses an entirely different set of tools - most of them included
+  in the PyPy repository. There is no Makefile, nor autoconf. More on this below.
+
+Architecture
+============
+
+PyPy has layers. The 100-mile view:
+
+* `RPython`_ is the language in which we write interpreters. Not the entire
+  PyPy project is written in RPython, only the parts that are compiled in
+  the translation process. The interesting point is that RPython has no parser:
+  it is compiled from live Python objects, which makes it possible to do
+  all kinds of metaprogramming at import time. In short, Python is a
+  metaprogramming language for RPython.
+
+  The RPython standard library is to be found in the ``rlib`` subdirectory.
+
+.. _`RPython`: coding-guide.html#RPython
+
+* The translation toolchain - this is the part that takes care of translating
+  RPython to flow graphs and then to C. There is more in the `architecture`_
+  document written about it.
+
+  It mostly lives in ``rpython``, ``annotator`` and ``objspace/flow``.
+
+.. _`architecture`: architecture.html 
+
+* Python Interpreter
+
+  xxx
+
+* Python modules
+
+  xxx
+
+* Just-in-Time Compiler (JIT): `we have a tracing JIT`_ that traces the
+  interpreter written in RPython, rather than the user program that it
+  interprets.  As a result it applies to any interpreter, i.e. any
+  language.  But getting it to work correctly is not trivial: it
+  requires a small number of precise "hints" and possibly some small
+  refactorings of the interpreter.  The JIT itself also has several
+  almost-independent parts: the tracer itself in ``jit/metainterp``, the
+  optimizer in ``jit/metainterp/optimizer`` that optimizes a list of
+  residual operations, and the backend in ``jit/backend/<machine-name>``
+  that turns it into machine code.  Writing a new backend is a
+  traditional way to get into the project.
+
+.. _`we have a tracing JIT`: jit/index.html
+
+* Garbage Collectors (GC): as you can notice if you are used to CPython's
+  C code, there are no ``Py_INCREF/Py_DECREF`` equivalents in RPython code.
+  `Garbage collection in PyPy`_ is inserted
+  during translation.  Moreover, this is not reference counting; it is a real
+  GC written as more RPython code.  The best one we have so far is in
+  ``rpython/memory/gc/minimark.py``.
+
+.. _`Garbage collection in PyPy`: garbage_collection.html
+
+
+Toolset
+=======
+
+xxx
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -610,6 +610,8 @@
     ops.JUMP_IF_FALSE_OR_POP : 0,
     ops.POP_JUMP_IF_TRUE : -1,
     ops.POP_JUMP_IF_FALSE : -1,
+
+    ops.BUILD_LIST_FROM_ARG: 1,
 }
 
 
diff --git a/pypy/interpreter/astcompiler/codegen.py b/pypy/interpreter/astcompiler/codegen.py
--- a/pypy/interpreter/astcompiler/codegen.py
+++ b/pypy/interpreter/astcompiler/codegen.py
@@ -965,7 +965,7 @@
         self.emit_op_arg(ops.CALL_METHOD, (kwarg_count << 8) | arg_count)
         return True
 
-    def _listcomp_generator(self, gens, gen_index, elt):
+    def _listcomp_generator(self, gens, gen_index, elt, single=False):
         start = self.new_block()
         skip = self.new_block()
         if_cleanup = self.new_block()
@@ -973,6 +973,8 @@
         gen = gens[gen_index]
         assert isinstance(gen, ast.comprehension)
         gen.iter.walkabout(self)
+        if single:
+            self.emit_op_arg(ops.BUILD_LIST_FROM_ARG, 0)
         self.emit_op(ops.GET_ITER)
         self.use_next_block(start)
         self.emit_jump(ops.FOR_ITER, anchor)
@@ -998,8 +1000,12 @@
 
     def visit_ListComp(self, lc):
         self.update_position(lc.lineno)
-        self.emit_op_arg(ops.BUILD_LIST, 0)
-        self._listcomp_generator(lc.generators, 0, lc.elt)
+        if len(lc.generators) != 1 or lc.generators[0].ifs:
+            single = False
+            self.emit_op_arg(ops.BUILD_LIST, 0)
+        else:
+            single = True
+        self._listcomp_generator(lc.generators, 0, lc.elt, single=single)
 
     def _comp_generator(self, node, generators, gen_index):
         start = self.new_block()
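The visit_ListComp() change above only uses the new opcode for the simplest
form of list comprehension: a single generator with no "if" clauses.  A quick
sketch of which source forms qualify (seq is just an example iterable):

    seq = range(10)

    [x * 2 for x in seq]                  # single generator, no ifs: BUILD_LIST_FROM_ARG
    [x for x in seq if x > 0]             # has an 'if' clause: plain BUILD_LIST
    [x + y for x in seq for y in seq]     # two generators: plain BUILD_LIST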
diff --git a/pypy/interpreter/astcompiler/test/test_astbuilder.py b/pypy/interpreter/astcompiler/test/test_astbuilder.py
--- a/pypy/interpreter/astcompiler/test/test_astbuilder.py
+++ b/pypy/interpreter/astcompiler/test/test_astbuilder.py
@@ -10,16 +10,6 @@
 from pypy.interpreter.astcompiler import ast, consts
 
 
-try:
-    all
-except NameError:
-    def all(iterable):
-        for x in iterable:
-            if not x:
-                return False
-        return True
-
-
 class TestAstBuilder:
 
     def setup_class(cls):
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -58,7 +58,8 @@
         w_res = pyco_expr.exec_host_bytecode(w_dict, w_dict)
         res = space.str_w(space.repr(w_res))
         if not isinstance(expected, float):
-            assert res == repr(expected)
+            noL = lambda expr: expr.replace('L', '')
+            assert noL(res) == noL(repr(expected))
         else:
             # Float representation can vary a bit between interpreter
             # versions, compare the numbers instead.
@@ -908,3 +909,17 @@
             return d['f'](5)
         """)
         assert 'generator' in space.str_w(space.repr(w_generator))
+        
+    def test_list_comprehension(self):
+        source = "def f(): [i for i in l]"
+        source2 = "def f(): [i for i in l for j in l]"
+        source3 = "def f(): [i for i in l if i]"
+        counts = self.count_instructions(source)
+        assert ops.BUILD_LIST not in counts
+        assert counts[ops.BUILD_LIST_FROM_ARG] == 1
+        counts = self.count_instructions(source2)
+        assert counts[ops.BUILD_LIST] == 1
+        assert ops.BUILD_LIST_FROM_ARG not in counts
+        counts = self.count_instructions(source3)
+        assert counts[ops.BUILD_LIST] == 1
+        assert ops.BUILD_LIST_FROM_ARG not in counts
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -7,7 +7,8 @@
 from pypy.interpreter.miscutils import ThreadLocals
 from pypy.tool.cache import Cache
 from pypy.tool.uid import HUGEVAL_BYTES
-from pypy.rlib.objectmodel import we_are_translated, newlist, compute_unique_id
+from pypy.rlib.objectmodel import we_are_translated, newlist_hint,\
+     compute_unique_id
 from pypy.rlib.debug import make_sure_not_resized
 from pypy.rlib.timer import DummyTimer, Timer
 from pypy.rlib.rarithmetic import r_uint
@@ -295,6 +296,7 @@
         self.check_signal_action = None   # changed by the signal module
         self.user_del_action = UserDelAction(self)
         self.frame_trace_action = FrameTraceAction(self)
+        self._code_of_sys_exc_info = None
 
         from pypy.interpreter.pycode import cpython_magic, default_magic
         self.our_magic = default_magic
@@ -466,9 +468,9 @@
                 if name not in modules:
                     modules.append(name)
 
-        # a bit of custom logic: time2 or rctime take precedence over time
+        # a bit of custom logic: rctime takes precedence over time
         # XXX this could probably be done as a "requires" in the config
-        if ('time2' in modules or 'rctime' in modules) and 'time' in modules:
+        if 'rctime' in modules and 'time' in modules:
             modules.remove('time')
 
         if not self.config.objspace.nofaking:
@@ -833,7 +835,7 @@
             items = []
         else:
             try:
-                items = newlist(lgt_estimate)
+                items = newlist_hint(lgt_estimate)
             except MemoryError:
                 items = [] # it might have lied
         #
@@ -1335,7 +1337,7 @@
         if not self.is_true(self.isinstance(w_obj, self.w_str)):
             raise OperationError(self.w_TypeError,
                                  self.wrap('argument must be a string'))
-        return self.str_w(w_obj)
+        return self.str_w(w_obj)            
 
     def unicode_w(self, w_obj):
         return w_obj.unicode_w(self)
diff --git a/pypy/interpreter/buffer.py b/pypy/interpreter/buffer.py
--- a/pypy/interpreter/buffer.py
+++ b/pypy/interpreter/buffer.py
@@ -20,6 +20,7 @@
 from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.interpreter.error import OperationError
 from pypy.rlib.objectmodel import compute_hash
+from pypy.rlib.rstring import StringBuilder
 
 
 class Buffer(Wrappable):
@@ -152,12 +153,13 @@
     if space.isinstance_w(w_object, space.w_unicode):
         # unicode objects support the old buffer interface
         # but not the new buffer interface (change in python  2.7)
-        from pypy.rlib.rstruct.unichar import pack_unichar
-        charlist = []
-        for unich in space.unicode_w(w_object):
-            pack_unichar(unich, charlist)
+        from pypy.rlib.rstruct.unichar import pack_unichar, UNICODE_SIZE
+        unistr = space.unicode_w(w_object)
+        builder = StringBuilder(len(unistr) * UNICODE_SIZE)
+        for unich in unistr:
+            pack_unichar(unich, builder)
         from pypy.interpreter.buffer import StringBuffer
-        w_buffer = space.wrap(StringBuffer(''.join(charlist)))
+        w_buffer = space.wrap(StringBuffer(builder.build()))
     else:
         w_buffer = space.buffer(w_object)
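The buffer.py hunk above replaces a list of characters with an rlib
StringBuilder preallocated to the final length.  A minimal sketch of that
pattern (runnable inside a PyPy checkout; the sizes are only illustrative):

    from pypy.rlib.rstring import StringBuilder

    builder = StringBuilder(3 * 4)        # e.g. 3 unichars of 4 bytes each
    for piece in ['abcd', 'efgh', 'ijkl']:
        builder.append(piece)
    assert builder.build() == 'abcdefghijkl'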
 
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -47,6 +47,11 @@
 
     def async(self, space):
         "Check if this is an exception that should better not be caught."
+        if not space.full_exceptions:
+            # flow objspace does not support such exceptions and more
+            # importantly, raises KeyboardInterrupt if you try to access
+            # space.w_KeyboardInterrupt
+            return False
         return (self.match(space, space.w_SystemExit) or
                 self.match(space, space.w_KeyboardInterrupt))
 
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -154,6 +154,7 @@
         #operationerr.print_detailed_traceback(self.space)
 
     def _convert_exc(self, operr):
+        # Only for the flow object space
         return operr
 
     def sys_exc_info(self): # attn: the result is not the wrapped sys.exc_info() !!!
@@ -166,6 +167,11 @@
             frame = self.getnextframe_nohidden(frame)
         return None
 
+    def set_sys_exc_info(self, operror):
+        frame = self.gettopframe_nohidden()
+        if frame:     # else, the exception goes nowhere and is lost
+            frame.last_exception = operror
+
     def settrace(self, w_func):
         """Set the global trace function."""
         if self.space.is_w(w_func, self.space.w_None):
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -113,6 +113,12 @@
         from pypy.interpreter.pycode import PyCode
 
         code = self.getcode() # hook for the jit
+        #
+        if (jit.we_are_jitted() and code is self.space._code_of_sys_exc_info
+                                and nargs == 0):
+            from pypy.module.sys.vm import exc_info_direct
+            return exc_info_direct(self.space, frame)
+        #
         fast_natural_arity = code.fast_natural_arity
         if nargs == fast_natural_arity:
             if nargs == 0:
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -874,6 +874,12 @@
             fn.add_to_table()
         if gateway.as_classmethod:
             fn = ClassMethod(space.wrap(fn))
+        #
+        from pypy.module.sys.vm import exc_info
+        if code._bltin is exc_info:
+            assert space._code_of_sys_exc_info is None
+            space._code_of_sys_exc_info = code
+        #
         return fn
 
 
@@ -901,24 +907,20 @@
 
     def __init__(self, source, filename=None, modname='__builtin__'):
         # HAAACK (but a good one)
+        self.filename = filename
+        self.source = str(py.code.Source(source).deindent())
+        self.modname = modname
         if filename is None:
             f = sys._getframe(1)
             filename = '<%s:%d>' % (f.f_code.co_filename, f.f_lineno)
+        if not os.path.exists(filename):
+            # make source code available for tracebacks
+            lines = [x + "\n" for x in source.split("\n")]
+            py.std.linecache.cache[filename] = (1, None, lines, filename)
         self.filename = filename
-        self.source = str(py.code.Source(source).deindent())
-        self.modname = modname
-        # look at the first three lines for a NOT_RPYTHON tag
-        first = "\n".join(source.split("\n", 3)[:3])
-        if "NOT_RPYTHON" in first:
-            self.can_use_geninterp = False
-        else:
-            self.can_use_geninterp = True
-        # make source code available for tracebacks
-        lines = [x + "\n" for x in source.split("\n")]
-        py.std.linecache.cache[filename] = (1, None, lines, filename)
 
     def __repr__(self):
-        return "<ApplevelClass filename=%r can_use_geninterp=%r>" % (self.filename, self.can_use_geninterp)
+        return "<ApplevelClass filename=%r>" % (self.filename,)
 
     def getwdict(self, space):
         return space.fromcache(ApplevelCache).getorbuild(self)
@@ -979,10 +981,7 @@
 
     def build(self, app):
         "NOT_RPYTHON.  Called indirectly by Applevel.getwdict()."
-        if self.space.config.objspace.geninterp and app.can_use_geninterp:
-            return PyPyCacheDir.build_applevelinterp_dict(app, self.space)
-        else:
-            return build_applevel_dict(app, self.space)
+        return build_applevel_dict(app, self.space)
 
 
 # __________ pure applevel version __________
@@ -996,157 +995,6 @@
                 filename=self.filename)
     return w_glob
 
-# __________ geninterplevel version __________
-
-class PyPyCacheDir:
-    "NOT_RPYTHON"
-    # similar to applevel, but using translation to interp-level.
-    # This version maintains a cache folder with single files.
-
-    def build_applevelinterp_dict(cls, self, space):
-        "NOT_RPYTHON"
-        # N.B. 'self' is the ApplevelInterp; this is a class method,
-        # just so that we have a convenient place to store the global state.
-        if not cls._setup_done:
-            cls._setup()
-
-        from pypy.translator.geninterplevel import translate_as_module
-        import marshal
-        scramble = md5(cls.seed)
-        scramble.update(marshal.dumps(self.source))
-        key = scramble.hexdigest()
-        initfunc = cls.known_code.get(key)
-        if not initfunc:
-            # try to get it from file
-            name = key
-            if self.filename:
-                prename = os.path.splitext(os.path.basename(self.filename))[0]
-            else:
-                prename = 'zznoname'
-            name = "%s_%s" % (prename, name)
-            try:
-                __import__("pypy._cache."+name)
-            except ImportError, x:
-                # print x
-                pass
-            else:
-                initfunc = cls.known_code[key]
-        if not initfunc:
-            # build it and put it into a file
-            initfunc, newsrc = translate_as_module(
-                self.source, self.filename, self.modname)
-            fname = cls.cache_path.join(name+".py").strpath
-            f = file(get_tmp_file_name(fname), "w")
-            print >> f, """\
-# self-destruct on double-click:
-if __name__ == "__main__":
-    from pypy import _cache
-    import os
-    namestart = os.path.join(os.path.split(_cache.__file__)[0], '%s')
-    for ending in ('.py', '.pyc', '.pyo'):
-        try:
-            os.unlink(namestart+ending)
-        except os.error:
-            pass""" % name
-            print >> f
-            print >> f, newsrc
-            print >> f, "from pypy._cache import known_code"
-            print >> f, "known_code[%r] = %s" % (key, initfunc.__name__)
-            f.close()
-            rename_tmp_to_eventual_file_name(fname)
-        w_glob = initfunc(space)
-        return w_glob
-    build_applevelinterp_dict = classmethod(build_applevelinterp_dict)
-
-    _setup_done = False
-
-    def _setup(cls):
-        """NOT_RPYTHON"""
-        lp = py.path.local
-        import pypy, os
-        p = lp(pypy.__file__).new(basename='_cache').ensure(dir=1)
-        cls.cache_path = p
-        ini = p.join('__init__.py')
-        try:
-            if not ini.check():
-                raise ImportError  # don't import if only a .pyc file left!!!
-            from pypy._cache import known_code, \
-                 GI_VERSION_RENDERED
-        except ImportError:
-            GI_VERSION_RENDERED = 0
-        from pypy.translator.geninterplevel import GI_VERSION
-        cls.seed = md5(str(GI_VERSION)).digest()
-        if GI_VERSION != GI_VERSION_RENDERED or GI_VERSION is None:
-            for pth in p.listdir():
-                if pth.check(file=1):
-                    try:
-                        pth.remove()
-                    except: pass
-            f = file(get_tmp_file_name(str(ini)), "w")
-            f.write("""\
-# This folder acts as a cache for code snippets which have been
-# compiled by compile_as_module().
-# It will get a new entry for every piece of code that has
-# not been seen, yet.
-#
-# Caution! Only the code snippet is checked. If something
-# is imported, changes are not detected. Also, changes
-# to geninterplevel or gateway are also not checked.
-# Exception: There is a checked version number in geninterplevel.py
-#
-# If in doubt, remove this file from time to time.
-
-GI_VERSION_RENDERED = %r
-
-known_code = {}
-
-# self-destruct on double-click:
-def harakiri():
-    import pypy._cache as _c
-    import py
-    lp = py.path.local
-    for pth in lp(_c.__file__).dirpath().listdir():
-        try:
-            pth.remove()
-        except: pass
-
-if __name__ == "__main__":
-    harakiri()
-
-del harakiri
-""" % GI_VERSION)
-            f.close()
-            rename_tmp_to_eventual_file_name(str(ini))
-        import pypy._cache
-        cls.known_code = pypy._cache.known_code
-        cls._setup_done = True
-    _setup = classmethod(_setup)
-
-
-def gethostname(_cache=[]):
-    if not _cache:
-        try:
-            import socket
-            hostname = socket.gethostname()
-        except:
-            hostname = ''
-        _cache.append(hostname)
-    return _cache[0]
-
-def get_tmp_file_name(fname):
-    return '%s~%s~%d' % (fname, gethostname(), os.getpid())
-
-def rename_tmp_to_eventual_file_name(fname):
-    # generated files are first written to the host- and process-specific
-    # file 'tmpname', and then atomically moved to their final 'fname'
-    # to avoid problems if py.py is started several times in parallel
-    tmpname = get_tmp_file_name(fname)
-    try:
-        os.rename(tmpname, fname)
-    except (OSError, IOError):
-        os.unlink(fname)    # necessary on Windows
-        os.rename(tmpname, fname)
-
 # ____________________________________________________________
 
 def appdef(source, applevel=ApplevelClass, filename=None):
@@ -1184,11 +1032,6 @@
         return build_applevel_dict(self, space)
 
 
-class applevelinterp_temp(ApplevelClass):
-    hidden_applevel = False
-    def getwdict(self, space):   # no cache
-        return PyPyCacheDir.build_applevelinterp_dict(self, space)
-
 # app2interp_temp is used for testing mainly
 def app2interp_temp(func, applevel_temp=applevel_temp, filename=None):
     """ NOT_RPYTHON """
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -15,9 +15,8 @@
 from pypy.rlib.rarithmetic import r_uint, intmask
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.rlib.debug import check_nonneg
-from pypy.tool.stdlib_opcode import (bytecode_spec, host_bytecode_spec,
-                                     unrolling_all_opcode_descs, opmap,
-                                     host_opmap)
+from pypy.tool.stdlib_opcode import (bytecode_spec,
+                                     unrolling_all_opcode_descs)
 
 def unaryoperation(operationname):
     """NOT_RPYTHON"""
@@ -713,6 +712,19 @@
         w_list = self.space.newlist(items)
         self.pushvalue(w_list)
 
+    def BUILD_LIST_FROM_ARG(self, _, next_instr):
+        # this is a little dance, because the list has to end up below
+        # the value on the stack
+        last_val = self.popvalue()
+        try:
+            lgt = self.space.len_w(last_val)
+        except OperationError, e:
+            if e.async(self.space):
+                raise
+            lgt = 0 # oh well
+        self.pushvalue(self.space.newlist([], sizehint=lgt))
+        self.pushvalue(last_val)
+
     def LOAD_ATTR(self, nameindex, next_instr):
         "obj.attributename"
         w_obj = self.popvalue()
@@ -1419,11 +1431,9 @@
             if lastchar.isspace() and lastchar != ' ':
                 return
         file_softspace(stream, True)
-    print_item_to._annspecialcase_ = "specialize:argtype(0)"
 
     def print_item(x):
         print_item_to(x, sys_stdout())
-    print_item._annspecialcase_ = "flowspace:print_item"
 
     def print_newline_to(stream):
         stream.write("\n")
@@ -1431,7 +1441,6 @@
 
     def print_newline():
         print_newline_to(sys_stdout())
-    print_newline._annspecialcase_ = "flowspace:print_newline"
 
     def file_softspace(file, newflag):
         try:
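
The new BUILD_LIST_FROM_ARG opcode above pre-sizes the list from a cheap length query on its argument and has to shuffle the value stack so that the fresh list ends up underneath the iterable. A rough pure-Python model of that dance, with a plain list standing in for the value stack and len() standing in for space.len_w(); the function name and the ([], hint) placeholder are illustration only:

    def build_list_from_arg(stack):
        # pop the iterable, ask it for a length hint, then push the
        # (pre-sized) result list *below* the iterable again
        last_val = stack.pop()
        try:
            hint = len(last_val)
        except TypeError:
            hint = 0                    # no usable length: fall back to 0
        stack.append(([], hint))        # stands in for space.newlist([], sizehint=hint)
        stack.append(last_val)

    stack = [xrange(5)]
    build_list_from_arg(stack)
    assert stack[0] == ([], 5)                   # new list with a size hint of 5, below...
    assert list(stack[1]) == [0, 1, 2, 3, 4]     # ...the original iterable back on top
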
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -2,34 +2,39 @@
 from pypy.interpreter import unicodehelper
 from pypy.rlib.rstring import StringBuilder
 
-def parsestr(space, encoding, s, unicode_literals=False):
-    # compiler.transformer.Transformer.decode_literal depends on what 
-    # might seem like minor details of this function -- changes here 
-    # must be reflected there.
+def parsestr(space, encoding, s, unicode_literal=False):
+    """Parses a string or unicode literal, and return a wrapped value.
+
+    If encoding=iso8859-1, the source string is also in this encoding.
+    If encoding=None, the source string is ascii only.
+    In other cases, the source string is in utf-8 encoding.
+
+    When a bytes string is returned, it will be encoded with the
+    original encoding.
+
+    Yes, it's very inefficient.
+    Yes, CPython has very similar code.
+    """
 
     # we use ps as "pointer to s"
     # q is the virtual last char index of the string
     ps = 0
     quote = s[ps]
     rawmode = False
-    unicode = unicode_literals
 
     # string decoration handling
-    o = ord(quote)
-    isalpha = (o>=97 and o<=122) or (o>=65 and o<=90)
-    if isalpha or quote == '_':
-        if quote == 'b' or quote == 'B':
-            ps += 1
-            quote = s[ps]
-            unicode = False
-        elif quote == 'u' or quote == 'U':
-            ps += 1
-            quote = s[ps]
-            unicode = True
-        if quote == 'r' or quote == 'R':
-            ps += 1
-            quote = s[ps]
-            rawmode = True
+    if quote == 'b' or quote == 'B':
+        ps += 1
+        quote = s[ps]
+        unicode_literal = False
+    elif quote == 'u' or quote == 'U':
+        ps += 1
+        quote = s[ps]
+        unicode_literal = True
+    if quote == 'r' or quote == 'R':
+        ps += 1
+        quote = s[ps]
+        rawmode = True
     if quote != "'" and quote != '"':
         raise_app_valueerror(space,
                              'Internal error: parser passed unquoted literal')
@@ -46,21 +51,28 @@
                                         'unmatched triple quotes in literal')
         q -= 2
 
-    if unicode: # XXX Py_UnicodeFlag is ignored for now
+    if unicode_literal: # XXX Py_UnicodeFlag is ignored for now
         if encoding is None or encoding == "iso-8859-1":
+            # 'unicode_escape' expects latin-1 bytes, string is ready.
             buf = s
             bufp = ps
             bufq = q
             u = None
         else:
-            # "\XX" may become "\u005c\uHHLL" (12 bytes)
+            # String is utf8-encoded, but 'unicode_escape' expects
+            # latin-1, so multibyte sequences must be escaped.
             lis = [] # using a list to assemble the value
             end = q
+            # Worst case: "\XX" may become "\u005c\uHHLL" (12 bytes)
             while ps < end:
                 if s[ps] == '\\':
                     lis.append(s[ps])
                     ps += 1
                     if ord(s[ps]) & 0x80:
+                        # A multibyte sequence will follow; it will be
+                        # escaped like \u1234. To avoid confusion with
+                        # the backslash we just wrote, we emit "\u005c"
+                        # instead.
                         lis.append("u005c")
                 if ord(s[ps]) & 0x80: # XXX inefficient
                     w, ps = decode_utf8(space, s, ps, end, "utf-16-be")
@@ -86,13 +98,11 @@
 
     need_encoding = (encoding is not None and
                      encoding != "utf-8" and encoding != "iso-8859-1")
-    # XXX add strchr like interface to rtyper
     assert 0 <= ps <= q
     substr = s[ps : q]
     if rawmode or '\\' not in s[ps:]:
         if need_encoding:
             w_u = space.wrap(unicodehelper.PyUnicode_DecodeUTF8(space, substr))
-            #w_v = space.wrap(space.unwrap(w_u).encode(encoding)) this works
             w_v = unicodehelper.PyUnicode_AsEncodedString(space, w_u, space.wrap(encoding))
             return w_v
         else:
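
To illustrate the comments in the unicode_literal branch above: the 'unicode_escape' codec treats its input as latin-1, so feeding it raw UTF-8 would mangle any multibyte sequence, which is why the parser rewrites such sequences as ASCII \uXXXX escapes first. A simplified sketch of that preprocessing (the helper name is made up, and it ignores the "\u005c" corner case handled in the real code):

    def pre_escape_utf8(s):
        out = []
        i = 0
        while i < len(s):
            if ord(s[i]) & 0x80:
                # collect the whole multibyte run and re-emit it as \uXXXX escapes
                j = i
                while j < len(s) and ord(s[j]) & 0x80:
                    j += 1
                for ch in s[i:j].decode('utf-8'):
                    out.append('\\u%04x' % ord(ch))
                i = j
            else:
                out.append(s[i])
                i += 1
        return ''.join(out)

    src = '\xc3\xa9\\n'    # UTF-8 bytes for u'\xe9' followed by a literal \n escape
    assert src.decode('unicode_escape') == u'\xc3\xa9\n'               # mangled
    assert pre_escape_utf8(src).decode('unicode_escape') == u'\xe9\n'  # intended
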
diff --git a/pypy/interpreter/test/test_appinterp.py b/pypy/interpreter/test/test_appinterp.py
--- a/pypy/interpreter/test/test_appinterp.py
+++ b/pypy/interpreter/test/test_appinterp.py
@@ -1,6 +1,6 @@
 
 import py
-from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp, applevelinterp_temp
+from pypy.interpreter.gateway import appdef, ApplevelClass, applevel_temp
 from pypy.interpreter.error import OperationError
 
 def test_execwith_novars(space): 
@@ -82,9 +82,6 @@
     w_res = g(space, space.wrap(10), space.wrap(1))
     assert space.eq_w(w_res, space.wrap(-9))
 
-def test_applevelinterp_functions(space):
-    test_applevel_functions(space, applevel_temp = applevelinterp_temp)
-
 def test_applevel_class(space, applevel_temp = applevel_temp):
     app = applevel_temp('''
         class C(object):
@@ -99,9 +96,6 @@
     w_clsattr = space.getattr(c, space.wrap('attr'))
     assert space.eq_w(w_clsattr, space.wrap(17))
 
-def test_applevelinterp_class(space):
-    test_applevel_class(space, applevel_temp = applevelinterp_temp)
-
 def app_test_something_at_app_level(): 
     x = 2
     assert x/2 == 1
@@ -161,7 +155,7 @@
         w_str = space1.getattr(w_mymod1, space1.wrap("hi"))
         assert space1.str_w(w_str) == "hello"
 
-    def test_geninterp_can_unfreeze(self):
+    def test_random_stuff_can_unfreeze(self):
         # When a module contains an "import" statement in applevel code, the
         # imported module is initialized, possibly after it has been already
         # frozen.
diff --git a/pypy/interpreter/test/test_gateway.py b/pypy/interpreter/test/test_gateway.py
--- a/pypy/interpreter/test/test_gateway.py
+++ b/pypy/interpreter/test/test_gateway.py
@@ -101,14 +101,6 @@
         g3 = gateway.app2interp_temp(noapp_g3, gateway.applevel_temp)
         assert self.space.eq_w(g3(self.space, w('foo'), w('bar')), w('foobar'))
 
-    def test_app2interp2(self):
-        """same but using transformed code"""
-        w = self.space.wrap
-        def noapp_g3(a, b):
-            return a+b
-        g3 = gateway.app2interp_temp(noapp_g3, gateway.applevelinterp_temp)
-        assert self.space.eq_w(g3(self.space, w('foo'), w('bar')), w('foobar'))
-
     def test_app2interp_general_args(self):
         w = self.space.wrap
         def app_general(x, *args, **kwds):
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -312,8 +312,8 @@
             mods = space.get_builtinmodule_to_install()
             
             assert '__pypy__' in mods                # real builtin
-            assert 'array' not in mods               # in lib_pypy
-            assert 'faked+array' not in mods         # in lib_pypy
+            assert '_functools' not in mods               # in lib_pypy
+            assert 'faked+_functools' not in mods         # in lib_pypy
             assert 'this_doesnt_exist' not in mods   # not in lib_pypy
             assert 'faked+this_doesnt_exist' in mods # not in lib_pypy, but in
                                                      # ALL_BUILTIN_MODULES
diff --git a/pypy/interpreter/test/test_zzpickle_and_slow.py b/pypy/interpreter/test/test_zzpickle_and_slow.py
--- a/pypy/interpreter/test/test_zzpickle_and_slow.py
+++ b/pypy/interpreter/test/test_zzpickle_and_slow.py
@@ -75,6 +75,7 @@
 class AppTestInterpObjectPickling:
     pytestmark = py.test.mark.skipif("config.option.runappdirect")
     def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=['struct'])
         _attach_helpers(cls.space)
 
     def teardown_class(cls):
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -321,7 +321,7 @@
             
             def user_setup(self, space, w_subtype):
                 self.w__dict__ = space.newdict(
-                    instance=True, classofinstance=w_subtype)
+                    instance=True)
                 base_user_setup(self, space, w_subtype)
 
             def setclass(self, space, w_subtype):
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
--- a/pypy/jit/backend/llgraph/llimpl.py
+++ b/pypy/jit/backend/llgraph/llimpl.py
@@ -171,7 +171,7 @@
     'unicodesetitem'  : (('ref', 'int', 'int'), 'int'),
     'cast_ptr_to_int' : (('ref',), 'int'),
     'cast_int_to_ptr' : (('int',), 'ref'),
-    'debug_merge_point': (('ref', 'int'), None),
+    'debug_merge_point': (('ref', 'int', 'int'), None),
     'force_token'     : ((), 'int'),
     'call_may_force'  : (('int', 'varargs'), 'intorptr'),
     'guard_not_forced': ((), None),
@@ -1797,6 +1797,7 @@
         if specialize_as_constant:
             def specialize_call(self, hop):
                 llvalue = func(hop.args_s[0].const)
+                hop.exception_cannot_occur()
                 return hop.inputconst(lltype.typeOf(llvalue), llvalue)
         else:
             # specialize as direct_call
@@ -1813,6 +1814,7 @@
                     sm = ootype._static_meth(FUNCTYPE, _name=func.__name__, _callable=func)
                     cfunc = hop.inputconst(FUNCTYPE, sm)
                 args_v = hop.inputargs(*hop.args_r)
+                hop.exception_is_here()
                 return hop.genop('direct_call', [cfunc] + args_v, hop.r_result)
 
 
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
--- a/pypy/jit/backend/llsupport/gc.py
+++ b/pypy/jit/backend/llsupport/gc.py
@@ -208,6 +208,7 @@
     This is the class supporting --gcrootfinder=asmgcc.
     """
     is_shadow_stack = False
+    is_64_bit = (WORD == 8)
 
     LOC_REG       = 0
     LOC_ESP_PLUS  = 1
@@ -336,17 +337,17 @@
             self._gcmap_deadentries += 1
             item += asmgcroot.arrayitemsize
 
-    def get_basic_shape(self, is_64_bit=False):
+    def get_basic_shape(self):
         # XXX: Should this code even really know about stack frame layout of
         # the JIT?
-        if is_64_bit:
-            return [chr(self.LOC_EBP_PLUS  | 8),
-                    chr(self.LOC_EBP_MINUS | 8),
-                    chr(self.LOC_EBP_MINUS | 16),
-                    chr(self.LOC_EBP_MINUS | 24),
-                    chr(self.LOC_EBP_MINUS | 32),
-                    chr(self.LOC_EBP_MINUS | 40),
-                    chr(self.LOC_EBP_PLUS  | 0),
+        if self.is_64_bit:
+            return [chr(self.LOC_EBP_PLUS  | 4),    # return addr: at   8(%rbp)
+                    chr(self.LOC_EBP_MINUS | 4),    # saved %rbx:  at  -8(%rbp)
+                    chr(self.LOC_EBP_MINUS | 8),    # saved %r12:  at -16(%rbp)
+                    chr(self.LOC_EBP_MINUS | 12),   # saved %r13:  at -24(%rbp)
+                    chr(self.LOC_EBP_MINUS | 16),   # saved %r14:  at -32(%rbp)
+                    chr(self.LOC_EBP_MINUS | 20),   # saved %r15:  at -40(%rbp)
+                    chr(self.LOC_EBP_PLUS  | 0),    # saved %rbp:  at    (%rbp)
                     chr(0)]
         else:
             return [chr(self.LOC_EBP_PLUS  | 4),    # return addr: at   4(%ebp)
@@ -366,7 +367,11 @@
         shape.append(chr(number | flag))
 
     def add_frame_offset(self, shape, offset):
-        assert (offset & 3) == 0
+        if self.is_64_bit:
+            assert (offset & 7) == 0
+            offset >>= 1
+        else:
+            assert (offset & 3) == 0
         if offset >= 0:
             num = self.LOC_EBP_PLUS | offset
         else:
@@ -518,7 +523,7 @@
     def initialize(self):
         pass
 
-    def get_basic_shape(self, is_64_bit=False):
+    def get_basic_shape(self):
         return []
 
     def add_frame_offset(self, shape, offset):
@@ -594,7 +599,7 @@
         # if convenient for the backend, we compute the info about
         # the flag as (byte-offset, single-byte-flag).
         import struct
-        value = struct.pack("l", flag_word)
+        value = struct.pack(lltype.SignedFmt, flag_word)
         assert value.count('\x00') == len(value) - 1    # only one byte is != 0
         i = 0
         while value[i] == '\x00': i += 1
diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
--- a/pypy/jit/backend/llsupport/regalloc.py
+++ b/pypy/jit/backend/llsupport/regalloc.py
@@ -321,7 +321,7 @@
         except KeyError:
             pass   # 'var' is already not in a register
 
-    def loc(self, box):
+    def loc(self, box, must_exist=False):
         """ Return the location of 'box'.
         """
         self._check_type(box)
@@ -332,6 +332,8 @@
         except KeyError:
             if box in self.bindings_to_frame_reg:
                 return self.frame_reg
+            if must_exist:
+                return self.frame_manager.bindings[box]
             return self.frame_manager.loc(box)
 
     def return_constant(self, v, forbidden_vars=[], selected_reg=None):
@@ -360,7 +362,7 @@
         self._check_type(v)
         if isinstance(v, Const):
             return self.return_constant(v, forbidden_vars, selected_reg)
-        prev_loc = self.loc(v)
+        prev_loc = self.loc(v, must_exist=True)
         if prev_loc is self.frame_reg and selected_reg is None:
             return prev_loc
         loc = self.force_allocate_reg(v, forbidden_vars, selected_reg,
diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py
--- a/pypy/jit/backend/llsupport/test/test_descr.py
+++ b/pypy/jit/backend/llsupport/test/test_descr.py
@@ -148,7 +148,7 @@
     #
     def get_alignment(code):
         # Retrieve default alignment for the compiler/platform
-        return struct.calcsize('l' + code) - struct.calcsize(code)
+        return struct.calcsize(lltype.SignedFmt + code) - struct.calcsize(code)
     assert descr1.basesize == get_alignment('c')
     assert descr2.basesize == get_alignment('p')
     assert descr3.basesize == get_alignment('p')
diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py
--- a/pypy/jit/backend/llsupport/test/test_ffisupport.py
+++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py
@@ -2,6 +2,7 @@
 from pypy.jit.codewriter.longlong import is_64_bit
 from pypy.jit.backend.llsupport.descr import *
 from pypy.jit.backend.llsupport.ffisupport import *
+from pypy.rlib.rarithmetic import is_emulated_long
 
 
 class FakeCPU:
@@ -43,7 +44,7 @@
     assert descr.result_flag == FLAG_UNSIGNED
     assert descr.is_result_signed() == False
 
-    if not is_64_bit:
+    if not is_64_bit or is_emulated_long:
         descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong,
                                        None, 42)
         assert descr is None   # missing longlongs
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py
--- a/pypy/jit/backend/llsupport/test/test_gc.py
+++ b/pypy/jit/backend/llsupport/test/test_gc.py
@@ -11,6 +11,7 @@
 from pypy.jit.tool.oparser import parse
 from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE
 from pypy.jit.metainterp.optimizeopt.util import equaloplists
+from pypy.rlib.rarithmetic import is_valid_int
 
 def test_boehm():
     gc_ll_descr = GcLLDescr_boehm(None, None, None)
@@ -57,6 +58,7 @@
         def frame_pos(n):
             return -4*(4+n)
         gcrootmap = GcRootMap_asmgcc()
+        gcrootmap.is_64_bit = False
         num1 = frame_pos(-5)
         num1a = num1|2
         num2 = frame_pos(55)
@@ -102,7 +104,7 @@
         gcrootmap.put(retaddr, shapeaddr)
         assert gcrootmap._gcmap[0] == retaddr
         assert gcrootmap._gcmap[1] == shapeaddr
-        p = rffi.cast(rffi.LONGP, gcrootmap.gcmapstart())
+        p = rffi.cast(rffi.SIGNEDP, gcrootmap.gcmapstart())
         assert p[0] == retaddr
         assert (gcrootmap.gcmapend() ==
                 gcrootmap.gcmapstart() + rffi.sizeof(lltype.Signed) * 2)
@@ -418,9 +420,9 @@
         assert newops[0].getarg(1) == v_value
         assert newops[0].result is None
         wbdescr = newops[0].getdescr()
-        assert isinstance(wbdescr.jit_wb_if_flag, int)
-        assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int)
-        assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int)
+        assert is_valid_int(wbdescr.jit_wb_if_flag)
+        assert is_valid_int(wbdescr.jit_wb_if_flag_byteofs)
+        assert is_valid_int(wbdescr.jit_wb_if_flag_singlebyte)
 
     def test_get_rid_of_debug_merge_point(self):
         operations = [
diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py
--- a/pypy/jit/backend/llsupport/test/test_regalloc.py
+++ b/pypy/jit/backend/llsupport/test/test_regalloc.py
@@ -1,4 +1,4 @@
-
+import py
 from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT
 from pypy.jit.backend.llsupport.regalloc import FrameManager
 from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan
@@ -236,6 +236,16 @@
         assert isinstance(loc, FakeFramePos)
         assert len(asm.moves) == 1
 
+    def test_bogus_make_sure_var_in_reg(self):
+        b0, = newboxes(0)
+        longevity = {b0: (0, 1)}
+        fm = TFrameManager()
+        asm = MockAsm()
+        rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
+        rm.next_instruction()
+        # invalid call to make_sure_var_in_reg(): box unknown so far
+        py.test.raises(KeyError, rm.make_sure_var_in_reg, b0)
+
     def test_return_constant(self):
         asm = MockAsm()
         boxes, longevity = boxes_and_longevity(5)
diff --git a/pypy/jit/backend/test/runner_test.py b/pypy/jit/backend/test/runner_test.py
--- a/pypy/jit/backend/test/runner_test.py
+++ b/pypy/jit/backend/test/runner_test.py
@@ -16,15 +16,23 @@
 from pypy.rpython.annlowlevel import llhelper
 from pypy.rpython.llinterp import LLException
 from pypy.jit.codewriter import heaptracker, longlong
-from pypy.rlib.rarithmetic import intmask
+from pypy.rlib import longlong2float
+from pypy.rlib.rarithmetic import intmask, is_valid_int
 from pypy.jit.backend.detect_cpu import autodetect_main_model_and_size
 
+
 def boxfloat(x):
     return BoxFloat(longlong.getfloatstorage(x))
 
 def constfloat(x):
     return ConstFloat(longlong.getfloatstorage(x))
 
+def boxlonglong(ll):
+    if longlong.is_64_bit:
+        return BoxInt(ll)
+    else:
+        return BoxFloat(ll)
+
 
 class Runner(object):
 
@@ -493,7 +501,7 @@
         if cpu.supports_floats:
             def func(f, i):
                 assert isinstance(f, float)
-                assert isinstance(i, int)
+                assert is_valid_int(i)
                 return f - float(i)
             FPTR = self.Ptr(self.FuncType([lltype.Float, lltype.Signed],
                                           lltype.Float))
@@ -604,7 +612,7 @@
                                          [funcbox, BoxInt(arg1), BoxInt(arg2)],
                                          'int', descr=calldescr)
             assert res.getint() == f(arg1, arg2)
-        
+
     def test_call_stack_alignment(self):
         # test stack alignment issues, notably for Mac OS/X.
         # also test the ordering of the arguments.
@@ -1490,18 +1498,36 @@
     def test_noops(self):
         c_box = self.alloc_string("hi there").constbox()
         c_nest = ConstInt(0)
-        self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest], 'void')
+        c_id = ConstInt(0)
+        self.execute_operation(rop.DEBUG_MERGE_POINT, [c_box, c_nest, c_id], 'void')
         self.execute_operation(rop.JIT_DEBUG, [c_box, c_nest, c_nest,
                                                c_nest, c_nest], 'void')
 
     def test_read_timestamp(self):
+        if sys.platform == 'win32':
+            # Windows quite often is very inexact (like the old Intel 8259 PIC),
+            # so we stretch the time a little bit.
+            # On my virtual Parallels machine on a 2GHz Core i7 Mac Mini,
+            # the test starts working at delay == 21670 and stops at 20600000.
+            # We take the geometric mean value.
+            from math import log, exp
+            delay_min = 21670
+            delay_max = 20600000
+            delay = int(exp((log(delay_min)+log(delay_max))/2))
+            def wait_a_bit():
+                for i in xrange(delay): pass
+        else:
+            def wait_a_bit():
+                pass
         if longlong.is_64_bit:
             got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int')
+            wait_a_bit()
             got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'int')
             res1 = got1.getint()
             res2 = got2.getint()
         else:
             got1 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float')
+            wait_a_bit()
             got2 = self.execute_operation(rop.READ_TIMESTAMP, [], 'float')
             res1 = got1.getlonglong()
             res2 = got2.getlonglong()
@@ -1597,6 +1623,17 @@
                                      [BoxPtr(x)], 'int').value
         assert res == -19
 
+    def test_convert_float_bytes(self):
+        t = 'int' if longlong.is_64_bit else 'float'
+        res = self.execute_operation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG,
+                                     [boxfloat(2.5)], t).value
+        assert res == longlong2float.float2longlong(2.5)
+
+        bytes = longlong2float.float2longlong(2.5)
+        res = self.execute_operation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT,
+                                     [boxlonglong(res)], 'float').value
+        assert longlong.getrealfloat(res) == 2.5
+
     def test_ooops_non_gc(self):
         x = lltype.malloc(lltype.Struct('x'), flavor='raw')
         v = heaptracker.adr2int(llmemory.cast_ptr_to_adr(x))
@@ -3061,7 +3098,7 @@
             ResOperation(rop.JUMP, [i2], None, descr=targettoken2),
             ]
         self.cpu.compile_bridge(faildescr, inputargs, operations, looptoken)
-        
+
         fail = self.cpu.execute_token(looptoken, 2)
         assert fail.identifier == 3
         res = self.cpu.get_latest_value_int(0)
@@ -3106,7 +3143,7 @@
             assert len(mc) == len(ops)
             for i in range(len(mc)):
                 assert mc[i].split("\t")[-1].startswith(ops[i])
-            
+
         data = ctypes.string_at(info.asmaddr, info.asmlen)
         mc = list(machine_code_dump(data, info.asmaddr, cpuname))
         lines = [line for line in mc if line.count('\t') == 2]
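
The delay picked in test_read_timestamp above is the geometric mean of the two empirical bounds quoted in the comment, i.e. exp((log(a)+log(b))/2) == sqrt(a*b), which works out to roughly 668,000 iterations of the busy loop:

    from math import exp, log, sqrt
    delay_min, delay_max = 21670, 20600000
    delay = int(exp((log(delay_min) + log(delay_max)) / 2))
    # same value (up to rounding) as the plain geometric mean
    print delay, int(sqrt(delay_min * delay_max))    # both around 668000
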
diff --git a/pypy/jit/backend/test/support.py b/pypy/jit/backend/test/support.py
--- a/pypy/jit/backend/test/support.py
+++ b/pypy/jit/backend/test/support.py
@@ -3,6 +3,7 @@
 from pypy.rlib.debug import debug_print
 from pypy.translator.translator import TranslationContext, graphof
 from pypy.jit.metainterp.optimizeopt import ALL_OPTS_NAMES
+from pypy.rlib.rarithmetic import is_valid_int
 
 class BaseCompiledMixin(object):
 
@@ -24,7 +25,7 @@
         from pypy.annotation import model as annmodel
 
         for arg in args:
-            assert isinstance(arg, int)
+            assert is_valid_int(arg)
 
         self.pre_translation_hook()
         t = self._get_TranslationContext()
diff --git a/pypy/jit/backend/test/test_random.py b/pypy/jit/backend/test/test_random.py
--- a/pypy/jit/backend/test/test_random.py
+++ b/pypy/jit/backend/test/test_random.py
@@ -328,6 +328,15 @@
     def produce_into(self, builder, r):
         self.put(builder, [r.choice(builder.intvars)])
 
+class CastLongLongToFloatOperation(AbstractFloatOperation):
+    def produce_into(self, builder, r):
+        if longlong.is_64_bit:
+            self.put(builder, [r.choice(builder.intvars)])
+        else:
+            if not builder.floatvars:
+                raise CannotProduceOperation
+            self.put(builder, [r.choice(builder.floatvars)])
+
 class CastFloatToIntOperation(AbstractFloatOperation):
     def produce_into(self, builder, r):
         if not builder.floatvars:
@@ -449,6 +458,8 @@
 
 OPERATIONS.append(CastFloatToIntOperation(rop.CAST_FLOAT_TO_INT))
 OPERATIONS.append(CastIntToFloatOperation(rop.CAST_INT_TO_FLOAT))
+OPERATIONS.append(CastFloatToIntOperation(rop.CONVERT_FLOAT_BYTES_TO_LONGLONG))
+OPERATIONS.append(CastLongLongToFloatOperation(rop.CONVERT_LONGLONG_BYTES_TO_FLOAT))
 
 OperationBuilder.OPERATIONS = OPERATIONS
 
@@ -502,11 +513,11 @@
     else:
         assert 0, "unknown backend %r" % pytest.config.option.backend
 
-# ____________________________________________________________    
+# ____________________________________________________________
 
 class RandomLoop(object):
     dont_generate_more = False
-    
+
     def __init__(self, cpu, builder_factory, r, startvars=None):
         self.cpu = cpu
         if startvars is None:
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -88,7 +88,6 @@
         self._debug = False
         self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i')
         self.fail_boxes_count = 0
-        self._current_depths_cache = (0, 0)
         self.datablockwrapper = None
         self.stack_check_slowpath = 0
         self.propagate_exception_path = 0
@@ -442,10 +441,8 @@
         looppos = self.mc.get_relative_pos()
         looptoken._x86_loop_code = looppos
         clt.frame_depth = -1     # temporarily
-        clt.param_depth = -1     # temporarily
-        frame_depth, param_depth = self._assemble(regalloc, operations)
+        frame_depth = self._assemble(regalloc, operations)
         clt.frame_depth = frame_depth
-        clt.param_depth = param_depth
         #
         size_excluding_failure_stuff = self.mc.get_relative_pos()
         self.write_pending_failure_recoveries()
@@ -459,8 +456,7 @@
             rawstart + size_excluding_failure_stuff,
             rawstart))
         debug_stop("jit-backend-addr")
-        self._patch_stackadjust(rawstart + stackadjustpos,
-                                frame_depth + param_depth)
+        self._patch_stackadjust(rawstart + stackadjustpos, frame_depth)
         self.patch_pending_failure_recoveries(rawstart)
         #
         ops_offset = self.mc.ops_offset
@@ -500,14 +496,13 @@
             assert ([loc.assembler() for loc in arglocs] ==
                     [loc.assembler() for loc in faildescr._x86_debug_faillocs])
         regalloc = RegAlloc(self, self.cpu.translate_support_code)
-        fail_depths = faildescr._x86_current_depths
         startpos = self.mc.get_relative_pos()
-        operations = regalloc.prepare_bridge(fail_depths, inputargs, arglocs,
+        operations = regalloc.prepare_bridge(inputargs, arglocs,
                                              operations,
                                              self.current_clt.allgcrefs)
 
         stackadjustpos = self._patchable_stackadjust()
-        frame_depth, param_depth = self._assemble(regalloc, operations)
+        frame_depth = self._assemble(regalloc, operations)
         codeendpos = self.mc.get_relative_pos()
         self.write_pending_failure_recoveries()
         fullsize = self.mc.get_relative_pos()
@@ -517,19 +512,16 @@
         debug_print("bridge out of Guard %d has address %x to %x" %
                     (descr_number, rawstart, rawstart + codeendpos))
         debug_stop("jit-backend-addr")
-        self._patch_stackadjust(rawstart + stackadjustpos,
-                                frame_depth + param_depth)
+        self._patch_stackadjust(rawstart + stackadjustpos, frame_depth)
         self.patch_pending_failure_recoveries(rawstart)
         if not we_are_translated():
             # for the benefit of tests
             faildescr._x86_bridge_frame_depth = frame_depth
-            faildescr._x86_bridge_param_depth = param_depth
         # patch the jump from original guard
         self.patch_jump_for_descr(faildescr, rawstart)
         ops_offset = self.mc.ops_offset
         self.fixup_target_tokens(rawstart)
         self.current_clt.frame_depth = max(self.current_clt.frame_depth, frame_depth)
-        self.current_clt.param_depth = max(self.current_clt.param_depth, param_depth)
         self.teardown()
         # oprofile support
         if self.cpu.profile_agent is not None:
@@ -614,7 +606,7 @@
         else:
             assert token
             struct.number = compute_unique_id(token)
-        self.loop_run_counters.append(struct)            
+        self.loop_run_counters.append(struct)
         return struct
 
     def _find_failure_recovery_bytecode(self, faildescr):
@@ -673,7 +665,7 @@
                ResOperation(rop.SETFIELD_RAW, [c_adr, box2],
                             None, descr=self.debug_counter_descr)]
         operations.extend(ops)
-        
+
     @specialize.argtype(1)
     def _inject_debugging_code(self, looptoken, operations, tp, number):
         if self._debug:
@@ -700,15 +692,12 @@
         regalloc.walk_operations(operations)
         if we_are_translated() or self.cpu.dont_keepalive_stuff:
             self._regalloc = None   # else keep it around for debugging
-        frame_depth = regalloc.fm.get_frame_depth()
-        param_depth = regalloc.param_depth
+        frame_depth = regalloc.get_final_frame_depth()
         jump_target_descr = regalloc.jump_target_descr
         if jump_target_descr is not None:
             target_frame_depth = jump_target_descr._x86_clt.frame_depth
-            target_param_depth = jump_target_descr._x86_clt.param_depth
             frame_depth = max(frame_depth, target_frame_depth)
-            param_depth = max(param_depth, target_param_depth)
-        return frame_depth, param_depth
+        return frame_depth
 
     def _patchable_stackadjust(self):
         # stack adjustment LEA
@@ -847,8 +836,8 @@
             self.mc.MOVSD_sx(0, loc.value)
         elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8:
             # XXX evil trick
-            self.mc.PUSH_b(get_ebp_ofs(loc.position))
-            self.mc.PUSH_b(get_ebp_ofs(loc.position + 1))
+            self.mc.PUSH_b(loc.value + 4)
+            self.mc.PUSH_b(loc.value)
         else:
             self.mc.PUSH(loc)
 
@@ -858,8 +847,8 @@
             self.mc.ADD_ri(esp.value, 8)   # = size of doubles
         elif WORD == 4 and isinstance(loc, StackLoc) and loc.get_width() == 8:
             # XXX evil trick
-            self.mc.POP_b(get_ebp_ofs(loc.position + 1))
-            self.mc.POP_b(get_ebp_ofs(loc.position))
+            self.mc.POP_b(loc.value)
+            self.mc.POP_b(loc.value + 4)
         else:
             self.mc.POP(loc)
 
@@ -892,10 +881,9 @@
         genop_math_list[oopspecindex](self, op, arglocs, resloc)
 
     def regalloc_perform_with_guard(self, op, guard_op, faillocs,
-                                    arglocs, resloc, current_depths):
+                                    arglocs, resloc):
         faildescr = guard_op.getdescr()
         assert isinstance(faildescr, AbstractFailDescr)
-        faildescr._x86_current_depths = current_depths
         failargs = guard_op.getfailargs()
         guard_opnum = guard_op.getopnum()
         guard_token = self.implement_guard_recovery(guard_opnum,
@@ -911,10 +899,9 @@
             # must be added by the genop_guard_list[]()
             assert guard_token is self.pending_guard_tokens[-1]
 
-    def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc,
-                               current_depths):
+    def regalloc_perform_guard(self, guard_op, faillocs, arglocs, resloc):
         self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs,
-                                         resloc, current_depths)
+                                         resloc)
 
     def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm0):
         self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale))
@@ -1038,13 +1025,14 @@
                     self.mc.MOV(tmp, loc)
                     self.mc.MOV_sr(p, tmp.value)
             p += loc.get_width()
-        self._regalloc.reserve_param(p//WORD)
         # x is a location
         self.mc.CALL(x)
         self.mark_gc_roots(force_index)
         #
         if callconv != FFI_DEFAULT_ABI:
             self._fix_stdcall(callconv, p)
+        #
+        self._regalloc.needed_extra_stack_locations(p//WORD)
 
     def _fix_stdcall(self, callconv, p):
         from pypy.rlib.clibffi import FFI_STDCALL
@@ -1127,9 +1115,9 @@
             x = r10
         remap_frame_layout(self, src_locs, dst_locs, X86_64_SCRATCH_REG)
 
-        self._regalloc.reserve_param(len(pass_on_stack))
         self.mc.CALL(x)
         self.mark_gc_roots(force_index)
+        self._regalloc.needed_extra_stack_locations(len(pass_on_stack))
 
     def call(self, addr, args, res):
         force_index = self.write_new_force_index()
@@ -1254,6 +1242,24 @@
         self.mc.MOVD_xr(resloc.value, loc0.value)
         self.mc.CVTSS2SD_xx(resloc.value, resloc.value)
 
+    def genop_convert_float_bytes_to_longlong(self, op, arglocs, resloc):
+        loc0, = arglocs
+        if longlong.is_64_bit:
+            assert isinstance(resloc, RegLoc)
+            assert isinstance(loc0, RegLoc)
+            self.mc.MOVD(resloc, loc0)
+        else:
+            self.mov(loc0, resloc)
+
+    def genop_convert_longlong_bytes_to_float(self, op, arglocs, resloc):
+        loc0, = arglocs
+        if longlong.is_64_bit:
+            assert isinstance(resloc, RegLoc)
+            assert isinstance(loc0, RegLoc)
+            self.mc.MOVD(resloc, loc0)
+        else:
+            self.mov(loc0, resloc)
+
     def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc):
         guard_opnum = guard_op.getopnum()
         self.mc.CMP(arglocs[0], imm0)
@@ -1966,8 +1972,6 @@
             mc.PUSH_r(ebx.value)
         elif IS_X86_64:
             mc.MOV_rr(edi.value, ebx.value)
-            # XXX: Correct to only align the stack on 64-bit?
-            mc.AND_ri(esp.value, -16)
         else:
             raise AssertionError("Shouldn't happen")
 
@@ -2129,14 +2133,16 @@
         # First, we need to save away the registers listed in
         # 'save_registers' that are not callee-save.  XXX We assume that
         # the XMM registers won't be modified.  We store them in
-        # [ESP+4], [ESP+8], etc., leaving enough room in [ESP] for the
-        # single argument to closestack_addr below.
-        p = WORD
+        # [ESP+4], [ESP+8], etc.; on x86-32 we leave enough room in [ESP]
+        # for the single argument to closestack_addr below.
+        if IS_X86_32:
+            p = WORD
+        elif IS_X86_64:
+            p = 0
         for reg in self._regalloc.rm.save_around_call_regs:
             if reg in save_registers:
                 self.mc.MOV_sr(p, reg.value)
                 p += WORD
-        self._regalloc.reserve_param(p//WORD)
         #
         if gcrootmap.is_shadow_stack:
             args = []
@@ -2187,11 +2193,15 @@
         #
         self._emit_call(-1, imm(self.releasegil_addr), args)
         # Finally, restore the registers saved above.
-        p = WORD
+        if IS_X86_32:
+            p = WORD
+        elif IS_X86_64:
+            p = 0
         for reg in self._regalloc.rm.save_around_call_regs:
             if reg in save_registers:
                 self.mc.MOV_rs(reg.value, p)
                 p += WORD
+        self._regalloc.needed_extra_stack_locations(p//WORD)
 
     def call_reacquire_gil(self, gcrootmap, save_loc):
         # save the previous result (eax/xmm0) into the stack temporarily.
@@ -2199,7 +2209,6 @@
         # to save xmm0 in this case.
         if isinstance(save_loc, RegLoc) and not save_loc.is_xmm:
             self.mc.MOV_sr(WORD, save_loc.value)
-            self._regalloc.reserve_param(2)
         # call the reopenstack() function (also reacquiring the GIL)
         if gcrootmap.is_shadow_stack:
             args = []
@@ -2219,6 +2228,7 @@
         # restore the result from the stack
         if isinstance(save_loc, RegLoc) and not save_loc.is_xmm:
             self.mc.MOV_rs(save_loc.value, WORD)
+            self._regalloc.needed_extra_stack_locations(2)
 
     def genop_guard_call_assembler(self, op, guard_op, guard_token,
                                    arglocs, result_loc):
@@ -2495,11 +2505,6 @@
         # copy of heap(nursery_free_adr), so that the final MOV below is
         # a no-op.
 
-        # reserve room for the argument to the real malloc and the
-        # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1
-        # word)
-        self._regalloc.reserve_param(1+16)
-
         gcrootmap = self.cpu.gc_ll_descr.gcrootmap
         shadow_stack = (gcrootmap is not None and gcrootmap.is_shadow_stack)
         if not shadow_stack:
@@ -2510,6 +2515,11 @@
         slowpath_addr2 = self.malloc_slowpath2
         self.mc.CALL(imm(slowpath_addr2))
 
+        # reserve room for the argument to the real malloc and the
+        # saved XMM regs (on 32 bit: 8 * 2 words; on 64 bit: 16 * 1
+        # word)
+        self._regalloc.needed_extra_stack_locations(1+16)
+
         offset = self.mc.get_relative_pos() - jmp_adr
         assert 0 < offset <= 127
         self.mc.overwrite(jmp_adr-1, chr(offset))
diff --git a/pypy/jit/backend/x86/codebuf.py b/pypy/jit/backend/x86/codebuf.py
--- a/pypy/jit/backend/x86/codebuf.py
+++ b/pypy/jit/backend/x86/codebuf.py
@@ -19,8 +19,8 @@
 
 
 class MachineCodeBlockWrapper(BlockBuilderMixin,
-                              codebuilder_cls,
-                              LocationCodeBuilder):
+                              LocationCodeBuilder,
+                              codebuilder_cls):
     def __init__(self):
         self.init_block_builder()
         # a list of relative positions; for each position p, the bytes
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -168,7 +168,7 @@
 
     def _prepare(self, inputargs, operations, allgcrefs):
         self.fm = X86FrameManager()
-        self.param_depth = 0
+        self.min_frame_depth = 0
         cpu = self.assembler.cpu
         operations = cpu.gc_ll_descr.rewrite_assembler(cpu, operations,
                                                        allgcrefs)
@@ -193,11 +193,9 @@
             self.min_bytes_before_label = 13
         return operations
 
-    def prepare_bridge(self, prev_depths, inputargs, arglocs, operations,
-                       allgcrefs):
+    def prepare_bridge(self, inputargs, arglocs, operations, allgcrefs):
         operations = self._prepare(inputargs, operations, allgcrefs)
         self._update_bindings(arglocs, inputargs)
-        self.param_depth = prev_depths[1]
         self.min_bytes_before_label = 0
         return operations
 
@@ -205,8 +203,15 @@
         self.min_bytes_before_label = max(self.min_bytes_before_label,
                                           at_least_position)
 
-    def reserve_param(self, n):
-        self.param_depth = max(self.param_depth, n)
+    def needed_extra_stack_locations(self, n):
+        # call this *after* having used extra stack locations: (%esp), (%esp+4)...
+        min_frame_depth = self.fm.get_frame_depth() + n
+        if min_frame_depth > self.min_frame_depth:
+            self.min_frame_depth = min_frame_depth
+
+    def get_final_frame_depth(self):
+        self.needed_extra_stack_locations(0)  # update min_frame_depth
+        return self.min_frame_depth
 
     def _set_initial_bindings(self, inputargs):
         if IS_X86_64:
@@ -376,25 +381,12 @@
     def locs_for_fail(self, guard_op):
         return [self.loc(v) for v in guard_op.getfailargs()]
 
-    def get_current_depth(self):
-        # return (self.fm.frame_depth, self.param_depth), but trying to share
-        # the resulting tuple among several calls
-        arg0 = self.fm.get_frame_depth()
-        arg1 = self.param_depth
-        result = self.assembler._current_depths_cache
-        if result[0] != arg0 or result[1] != arg1:
-            result = (arg0, arg1)
-            self.assembler._current_depths_cache = result
-        return result
-
     def perform_with_guard(self, op, guard_op, arglocs, result_loc):
         faillocs = self.locs_for_fail(guard_op)
         self.rm.position += 1
         self.xrm.position += 1
-        current_depths = self.get_current_depth()
         self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs,
-                                                   arglocs, result_loc,
-                                                   current_depths)
+                                                   arglocs, result_loc)
         if op.result is not None:
             self.possibly_free_var(op.result)
         self.possibly_free_vars(guard_op.getfailargs())
@@ -407,10 +399,8 @@
                                                       arglocs))
             else:
                 self.assembler.dump('%s(%s)' % (guard_op, arglocs))
-        current_depths = self.get_current_depth()
         self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
-                                              result_loc,
-                                              current_depths)
+                                              result_loc)
         self.possibly_free_vars(guard_op.getfailargs())
 
     def PerformDiscard(self, op, arglocs):
@@ -776,6 +766,32 @@
 
     consider_cast_singlefloat_to_float = consider_cast_int_to_float
 
+    def consider_convert_float_bytes_to_longlong(self, op):
+        if longlong.is_64_bit:
+            loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0))
+            loc1 = self.rm.force_allocate_reg(op.result)
+            self.Perform(op, [loc0], loc1)
+            self.xrm.possibly_free_var(op.getarg(0))
+        else:
+            arg0 = op.getarg(0)
+            loc0 = self.xrm.loc(arg0)
+            loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0])
+            self.Perform(op, [loc0], loc1)
+            self.xrm.possibly_free_var(arg0)
+
+    def consider_convert_longlong_bytes_to_float(self, op):
+        if longlong.is_64_bit:
+            loc0 = self.rm.make_sure_var_in_reg(op.getarg(0))
+            loc1 = self.xrm.force_allocate_reg(op.result)
+            self.Perform(op, [loc0], loc1)
+            self.rm.possibly_free_var(op.getarg(0))
+        else:
+            arg0 = op.getarg(0)
+            loc0 = self.xrm.make_sure_var_in_reg(arg0)
+            loc1 = self.xrm.force_allocate_reg(op.result, forbidden_vars=[arg0])
+            self.Perform(op, [loc0], loc1)
+            self.xrm.possibly_free_var(arg0)
+
     def _consider_llong_binop_xx(self, op):
         # must force both arguments into xmm registers, because we don't
         # know if they will be suitably aligned.  Exception: if the second
@@ -1393,7 +1409,7 @@
         self.force_spill_var(op.getarg(0))
 
     def get_mark_gc_roots(self, gcrootmap, use_copy_area=False):
-        shape = gcrootmap.get_basic_shape(IS_X86_64)
+        shape = gcrootmap.get_basic_shape()
         for v, val in self.fm.bindings.items():
             if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)):
                 assert isinstance(val, StackLoc)
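
The param_depth bookkeeping removed in this file is folded into a single frame-depth figure: after any operation that uses scratch stack slots at (%esp), (%esp+4), ..., the allocator bumps a running minimum frame depth, and the final value is what gets patched into the stack adjustment. A toy model of that accounting (a sketch with a made-up class, not the backend code):

    class FrameDepthTracker(object):
        def __init__(self, frame_slots):
            self.frame_slots = frame_slots       # stands in for fm.get_frame_depth()
            self.min_frame_depth = 0

        def needed_extra_stack_locations(self, n):
            # call after having used n scratch locations at (%esp), (%esp+4), ...
            needed = self.frame_slots + n
            if needed > self.min_frame_depth:
                self.min_frame_depth = needed

        def get_final_frame_depth(self):
            self.needed_extra_stack_locations(0)   # account for the plain frame slots
            return self.min_frame_depth

    tracker = FrameDepthTracker(frame_slots=3)
    tracker.needed_extra_stack_locations(4)        # e.g. a call that spills 4 words
    assert tracker.get_final_frame_depth() == 7
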
diff --git a/pypy/jit/backend/x86/rx86.py b/pypy/jit/backend/x86/rx86.py
--- a/pypy/jit/backend/x86/rx86.py
+++ b/pypy/jit/backend/x86/rx86.py
@@ -601,9 +601,12 @@
     CVTSS2SD_xb = xmminsn('\xF3', rex_nw, '\x0F\x5A',
                           register(1, 8), stack_bp(2))
 
-    MOVD_rx = xmminsn('\x66', rex_nw, '\x0F\x7E', register(2, 8), register(1), '\xC0')
-    MOVD_xr = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), register(2), '\xC0')
-    MOVD_xb = xmminsn('\x66', rex_nw, '\x0F\x6E', register(1, 8), stack_bp(2))
+    # These work on machine-sized registers, so MOVD is actually MOVQ
+    # when running on 64 bits.  Note a bug in the Intel documentation:
+    # http://lists.gnu.org/archive/html/bug-binutils/2007-07/msg00095.html
+    MOVD_rx = xmminsn('\x66', rex_w, '\x0F\x7E', register(2, 8), register(1), '\xC0')
+    MOVD_xr = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), register(2), '\xC0')
+    MOVD_xb = xmminsn('\x66', rex_w, '\x0F\x6E', register(1, 8), stack_bp(2))
 
     PSRAD_xi = xmminsn('\x66', rex_nw, '\x0F\x72', register(1), '\xE0', immediate(2, 'b'))
 
diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py
--- a/pypy/jit/backend/x86/support.py
+++ b/pypy/jit/backend/x86/support.py
@@ -36,15 +36,15 @@
 
 # ____________________________________________________________
 
-if sys.platform == 'win32':
-    ensure_sse2_floats = lambda : None
-    # XXX check for SSE2 on win32 too
+if WORD == 4:
+    extra = ['-DPYPY_X86_CHECK_SSE2']
 else:
-    if WORD == 4:
-        extra = ['-DPYPY_X86_CHECK_SSE2']
-    else:
-        extra = []
-    ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo(
-        compile_extra = ['-msse2', '-mfpmath=sse',
-                         '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra,
-        ))
+    extra = []
+
+if sys.platform != 'win32':
+    extra = ['-msse2', '-mfpmath=sse',
+             '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra
+
+ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo(
+    compile_extra = extra,
+))
diff --git a/pypy/jit/backend/x86/test/conftest.py b/pypy/jit/backend/x86/test/conftest.py
--- a/pypy/jit/backend/x86/test/conftest.py
+++ b/pypy/jit/backend/x86/test/conftest.py
@@ -1,4 +1,4 @@
-import py
+import py, os
 from pypy.jit.backend import detect_cpu
 
 cpu = detect_cpu.autodetect()
@@ -6,5 +6,7 @@
     if cpu not in ('x86', 'x86_64'):
         py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,))
     if cpu == 'x86_64':
+        if os.name == "nt":
+            py.test.skip("Windows cannot allocate non-reserved memory")
         from pypy.rpython.lltypesystem import ll2ctypes
         ll2ctypes.do_allocation_in_far_regions()
diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py
--- a/pypy/jit/backend/x86/test/test_gc_integration.py
+++ b/pypy/jit/backend/x86/test/test_gc_integration.py
@@ -28,7 +28,7 @@
 
 class MockGcRootMap(object):
     is_shadow_stack = False
-    def get_basic_shape(self, is_64_bit):
+    def get_basic_shape(self):
         return ['shape']
     def add_frame_offset(self, shape, offset):
         shape.append(offset)
diff --git a/pypy/jit/backend/x86/test/test_recompilation.py b/pypy/jit/backend/x86/test/test_recompilation.py
--- a/pypy/jit/backend/x86/test/test_recompilation.py
+++ b/pypy/jit/backend/x86/test/test_recompilation.py
@@ -34,7 +34,6 @@
         '''
         loop = self.interpret(ops, [0])
         previous = loop._jitcelltoken.compiled_loop_token.frame_depth
-        assert loop._jitcelltoken.compiled_loop_token.param_depth == 0
         assert self.getint(0) == 20
         ops = '''
         [i1]
@@ -51,7 +50,6 @@
         bridge = self.attach_bridge(ops, loop, -2)
         descr = loop.operations[3].getdescr()
         new = descr._x86_bridge_frame_depth
-        assert descr._x86_bridge_param_depth == 0
         # the force_spill() forces the stack to grow
         assert new > previous
         fail = self.run(loop, 0)
@@ -116,10 +114,8 @@
         loop_frame_depth = loop._jitcelltoken.compiled_loop_token.frame_depth
         bridge = self.attach_bridge(ops, loop, 6)
         guard_op = loop.operations[6]
-        assert loop._jitcelltoken.compiled_loop_token.param_depth == 0
         # the force_spill() forces the stack to grow
         assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
-        assert guard_op.getdescr()._x86_bridge_param_depth == 0
         self.run(loop, 0, 0, 0, 0, 0, 0)
         assert self.getint(0) == 1
         assert self.getint(1) == 20
diff --git a/pypy/jit/backend/x86/test/test_regalloc.py b/pypy/jit/backend/x86/test/test_regalloc.py
--- a/pypy/jit/backend/x86/test/test_regalloc.py
+++ b/pypy/jit/backend/x86/test/test_regalloc.py
@@ -606,23 +606,37 @@
         assert self.getints(9) == [0, 1, 1, 1, 1, 1, 1, 1, 1]
 
 class TestRegAllocCallAndStackDepth(BaseTestRegalloc):
-    def expected_param_depth(self, num_args):
+    def expected_frame_depth(self, num_call_args, num_pushed_input_args=0):
         # Assumes the arguments are all non-float
         if IS_X86_32:
-            return num_args
+            extra_esp = num_call_args
+            return extra_esp
         elif IS_X86_64:
-            return max(num_args - 6, 0)
+            # 'num_pushed_input_args' is for X86_64 only
+            extra_esp = max(num_call_args - 6, 0)
+            return num_pushed_input_args + extra_esp
 
     def test_one_call(self):
         ops = '''
-        [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9]
+        [i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b]
         i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
-        finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9)
+        finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b)
         '''
-        loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9])
-        assert self.getints(10) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9]
+        loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9, 8])
+        assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8]
         clt = loop._jitcelltoken.compiled_loop_token
-        assert clt.param_depth == self.expected_param_depth(1)
+        assert clt.frame_depth == self.expected_frame_depth(1, 5)
+
+    def test_one_call_reverse(self):
+        ops = '''
+        [i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b, i0]
+        i10 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
+        finish(i10, i1, i2, i3, i4, i5, i6, i7, i8, i9, i9b)
+        '''
+        loop = self.interpret(ops, [7, 9, 9 ,9, 9, 9, 9, 9, 9, 8, 4])
+        assert self.getints(11) == [5, 7, 9, 9, 9, 9, 9, 9, 9, 9, 8]
+        clt = loop._jitcelltoken.compiled_loop_token
+        assert clt.frame_depth == self.expected_frame_depth(1, 6)
 
     def test_two_calls(self):
         ops = '''
@@ -634,7 +648,7 @@
         loop = self.interpret(ops, [4, 7, 9, 9 ,9, 9, 9, 9, 9, 9])
         assert self.getints(10) == [5*7, 7, 9, 9, 9, 9, 9, 9, 9, 9]
         clt = loop._jitcelltoken.compiled_loop_token
-        assert clt.param_depth == self.expected_param_depth(2)
+        assert clt.frame_depth == self.expected_frame_depth(2, 5)
 
     def test_call_many_arguments(self):
         # NB: The first and last arguments in the call are constants. This
@@ -648,25 +662,31 @@
         loop = self.interpret(ops, [2, 3, 4, 5, 6, 7, 8, 9])
         assert self.getint(0) == 55
         clt = loop._jitcelltoken.compiled_loop_token
-        assert clt.param_depth == self.expected_param_depth(10)
+        assert clt.frame_depth == self.expected_frame_depth(10)
 
     def test_bridge_calls_1(self):
         ops = '''
         [i0, i1]
         i2 = call(ConstClass(f1ptr), i0, descr=f1_calldescr)
-        guard_value(i2, 0, descr=fdescr1) [i2, i1]
+        guard_value(i2, 0, descr=fdescr1) [i2, i0, i1]
         finish(i1)
         '''
         loop = self.interpret(ops, [4, 7])
         assert self.getint(0) == 5
+        clt = loop._jitcelltoken.compiled_loop_token
+        orgdepth = clt.frame_depth
+        assert orgdepth == self.expected_frame_depth(1, 2)
+
         ops = '''
-        [i2, i1]
+        [i2, i0, i1]
         i3 = call(ConstClass(f2ptr), i2, i1, descr=f2_calldescr)        
-        finish(i3, descr=fdescr2)        
+        finish(i3, i0, descr=fdescr2)
         '''
         bridge = self.attach_bridge(ops, loop, -2)
 
-        assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
+        assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(2, 2))
+        assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \
+            self.expected_frame_depth(2, 2)
 
         self.run(loop, 4, 7)
         assert self.getint(0) == 5*7
@@ -676,10 +696,14 @@
         [i0, i1]
         i2 = call(ConstClass(f2ptr), i0, i1, descr=f2_calldescr)
         guard_value(i2, 0, descr=fdescr1) [i2]
-        finish(i1)
+        finish(i2)
         '''
         loop = self.interpret(ops, [4, 7])
         assert self.getint(0) == 4*7
+        clt = loop._jitcelltoken.compiled_loop_token
+        orgdepth = clt.frame_depth
+        assert orgdepth == self.expected_frame_depth(2)
+
         ops = '''
         [i2]
         i3 = call(ConstClass(f1ptr), i2, descr=f1_calldescr)        
@@ -687,7 +711,9 @@
         '''
         bridge = self.attach_bridge(ops, loop, -2)
 
-        assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
+        assert clt.frame_depth == max(orgdepth, self.expected_frame_depth(1))
+        assert loop.operations[-2].getdescr()._x86_bridge_frame_depth == \
+            self.expected_frame_depth(1)
 
         self.run(loop, 4, 7)
         assert self.getint(0) == 29
diff --git a/pypy/jit/backend/x86/test/test_runner.py b/pypy/jit/backend/x86/test/test_runner.py
--- a/pypy/jit/backend/x86/test/test_runner.py
+++ b/pypy/jit/backend/x86/test/test_runner.py
@@ -371,7 +371,7 @@
 
         operations = [
             ResOperation(rop.LABEL, [i0], None, descr=targettoken),
-            ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0], None),
+            ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("hello"), 0, 0], None),
             ResOperation(rop.INT_ADD, [i0, ConstInt(1)], i1),
             ResOperation(rop.INT_LE, [i1, ConstInt(9)], i2),
             ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1),
@@ -390,7 +390,7 @@
         bridge = [
             ResOperation(rop.INT_LE, [i1b, ConstInt(19)], i3),
             ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
-            ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0], None),
+            ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye"), 0, 0], None),
             ResOperation(rop.JUMP, [i1b], None, descr=targettoken),
         ]
         bridge[1].setfailargs([i1b])
@@ -531,12 +531,12 @@
         loop = """
         [i0]
         label(i0, descr=preambletoken)
-        debug_merge_point('xyz', 0)
+        debug_merge_point('xyz', 0, 0)
         i1 = int_add(i0, 1)
         i2 = int_ge(i1, 10)
         guard_false(i2) []
         label(i1, descr=targettoken)
-        debug_merge_point('xyz', 0)
+        debug_merge_point('xyz', 0, 0)
         i11 = int_add(i1, 1)
         i12 = int_ge(i11, 10)
         guard_false(i12) []
@@ -569,7 +569,7 @@
         loop = """
         [i0]
         label(i0, descr=targettoken)
-        debug_merge_point('xyz', 0)
+        debug_merge_point('xyz', 0, 0)
         i1 = int_add(i0, 1)
         i2 = int_ge(i1, 10)
         guard_false(i2) []
diff --git a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
--- a/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
+++ b/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
@@ -182,6 +182,12 @@
         filename  = str(testdir.join(FILENAME  % methname))
         g = open(inputname, 'w')
         g.write('\x09.string "%s"\n' % BEGIN_TAG)
+        #
+        if instrname == 'MOVD' and self.WORD == 8:
+            instrname = 'MOVQ'
+            if argmodes == 'xb':
+                py.test.skip('"as" uses an undocumented alternate encoding??')
+        #
         for args in args_lists:
             suffix = ""
     ##        all = instr.as_all_suffixes
@@ -229,9 +235,6 @@
                 # movq $xxx, %rax => movl $xxx, %eax
                 suffix = 'l'
                 ops[1] = reduce_to_32bit(ops[1])
-            if instrname.lower() == 'movd':
-                ops[0] = reduce_to_32bit(ops[0])
-                ops[1] = reduce_to_32bit(ops[1])
             #
             op = '\t%s%s %s%s' % (instrname.lower(), suffix,
                                   ', '.join(ops), following)
diff --git a/pypy/jit/backend/x86/test/test_zmath.py b/pypy/jit/backend/x86/test/test_zmath.py
--- a/pypy/jit/backend/x86/test/test_zmath.py
+++ b/pypy/jit/backend/x86/test/test_zmath.py
@@ -6,6 +6,8 @@
 from pypy.translator.c.test.test_genc import compile
 from pypy.jit.backend.x86.support import ensure_sse2_floats
 from pypy.rlib import rfloat
+from pypy.rlib.unroll import unrolling_iterable
+from pypy.rlib.debug import debug_print
 
 
 def get_test_case((fnname, args, expected)):
@@ -16,16 +18,32 @@
     expect_valueerror = (expected == ValueError)
     expect_overflowerror = (expected == OverflowError)
     check = test_direct.get_tester(expected)
+    unroll_args = unrolling_iterable(args)
     #
     def testfn():
+        debug_print('calling', fnname, 'with arguments:')
+        for arg in unroll_args:
+            debug_print('\t', arg)
         try:
             got = fn(*args)
         except ValueError:
-            return expect_valueerror
+            if expect_valueerror:
+                return True
+            else:
+                debug_print('unexpected ValueError!')
+                return False
         except OverflowError:
-            return expect_overflowerror
+            if expect_overflowerror:
+                return True
+            else:
+                debug_print('unexpected OverflowError!')
+                return False
         else:
-            return check(got)
+            if check(got):
+                return True
+            else:
+                debug_print('unexpected result:', got)
+                return False
     #
     testfn.func_name = 'test_' + fnname
     return testfn
diff --git a/pypy/jit/backend/x86/tool/viewcode.py b/pypy/jit/backend/x86/tool/viewcode.py
--- a/pypy/jit/backend/x86/tool/viewcode.py
+++ b/pypy/jit/backend/x86/tool/viewcode.py
@@ -34,7 +34,7 @@
 # I am porting it in a lazy fashion...  See py-utils/xam.py
 
 if sys.platform == "win32":
-    XXX   # lots more in Psyco
+    pass   # lots more in Psyco
 
 def machine_code_dump(data, originaddr, backend_name, label_list=None):
     objdump_backend_option = {
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py
--- a/pypy/jit/codewriter/jtransform.py
+++ b/pypy/jit/codewriter/jtransform.py
@@ -291,6 +291,12 @@
         op1 = SpaceOperation('-live-', [], None)
         return [op, op1]
 
+    def _noop_rewrite(self, op):
+        return op
+
+    rewrite_op_convert_float_bytes_to_longlong = _noop_rewrite
+    rewrite_op_convert_longlong_bytes_to_float = _noop_rewrite
+
     # ----------
     # Various kinds of calls
 
@@ -365,7 +371,7 @@
     def handle_builtin_call(self, op):
         oopspec_name, args = support.decode_builtin_call(op)
         # dispatch to various implementations depending on the oopspec_name
-        if oopspec_name.startswith('list.') or oopspec_name == 'newlist':
+        if oopspec_name.startswith('list.') or oopspec_name.startswith('newlist'):
             prepare = self._handle_list_call
         elif oopspec_name.startswith('stroruni.'):
             prepare = self._handle_stroruni_call
@@ -1494,6 +1500,14 @@
                                arraydescr, v_length],
                               op.result)
 
+    def do_resizable_newlist_hint(self, op, args, arraydescr, lengthdescr,
+                                  itemsdescr, structdescr):
+        v_hint = self._get_initial_newlist_length(op, args)
+        return SpaceOperation('newlist_hint',
+                              [structdescr, lengthdescr, itemsdescr,
+                               arraydescr, v_hint],
+                              op.result)
+
     def do_resizable_list_getitem(self, op, args, arraydescr, lengthdescr,
                                   itemsdescr, structdescr):
         v_index, extraop = self._prepare_list_getset(op, lengthdescr, args,
diff --git a/pypy/jit/codewriter/support.py b/pypy/jit/codewriter/support.py
--- a/pypy/jit/codewriter/support.py
+++ b/pypy/jit/codewriter/support.py
@@ -144,6 +144,10 @@
 _ll_1_newlist.need_result_type = True
 _ll_2_newlist.need_result_type = True
 
+def _ll_1_newlist_hint(LIST, hint):
+    return LIST.ll_newlist_hint(hint)
+_ll_1_newlist_hint.need_result_type = True
+
 def _ll_1_list_len(l):
     return l.ll_length()
 def _ll_2_list_getitem(l, index):
diff --git a/pypy/jit/codewriter/test/test_flatten.py b/pypy/jit/codewriter/test/test_flatten.py
--- a/pypy/jit/codewriter/test/test_flatten.py
+++ b/pypy/jit/codewriter/test/test_flatten.py
@@ -968,6 +968,23 @@
             int_return %i2
         """, transform=True)
 
+    def test_convert_float_bytes(self):
+        from pypy.rlib.longlong2float import float2longlong, longlong2float
+        def f(x):
+            ll = float2longlong(x)
+            return longlong2float(ll)
+        if longlong.is_64_bit:
+            tmp_var = "%i0"
+            result_var = "%f1"
+        else:
+            tmp_var = "%f1"
+            result_var = "%f2"
+        self.encoding_test(f, [25.0], """
+            convert_float_bytes_to_longlong %%f0 -> %(tmp_var)s
+            convert_longlong_bytes_to_float %(tmp_var)s -> %(result_var)s
+            float_return %(result_var)s
+        """ % {"result_var": result_var, "tmp_var": tmp_var}, transform=True)
+
 
 def check_force_cast(FROM, TO, operations, value):
     """Check that the test is correctly written..."""
diff --git a/pypy/jit/codewriter/test/test_longlong.py b/pypy/jit/codewriter/test/test_longlong.py
--- a/pypy/jit/codewriter/test/test_longlong.py
+++ b/pypy/jit/codewriter/test/test_longlong.py
@@ -1,6 +1,6 @@
 import py, sys
 
-from pypy.rlib.rarithmetic import r_longlong, intmask
+from pypy.rlib.rarithmetic import r_longlong, intmask, is_valid_int
 from pypy.objspace.flow.model import SpaceOperation, Variable, Constant
 from pypy.objspace.flow.model import Block, Link
 from pypy.translator.unsimplify import varoftype
@@ -32,7 +32,7 @@
 def test_functions():
     xll = longlong.getfloatstorage(3.5)
     assert longlong.getrealfloat(xll) == 3.5
-    assert isinstance(longlong.gethash(xll), int)
+    assert is_valid_int(longlong.gethash(xll))
 
 
 class TestLongLong:
diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py
--- a/pypy/jit/metainterp/blackhole.py
+++ b/pypy/jit/metainterp/blackhole.py
@@ -1,15 +1,16 @@
+from pypy.jit.codewriter import heaptracker, longlong
+from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr
+from pypy.jit.metainterp.compile import ResumeAtPositionDescr
+from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise
+from pypy.rlib import longlong2float
+from pypy.rlib.debug import debug_start, debug_stop, ll_assert, make_sure_not_resized
+from pypy.rlib.objectmodel import we_are_translated
+from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck
+from pypy.rlib.rtimer import read_timestamp
 from pypy.rlib.unroll import unrolling_iterable
-from pypy.rlib.rtimer import read_timestamp
-from pypy.rlib.rarithmetic import intmask, LONG_BIT, r_uint, ovfcheck
-from pypy.rlib.objectmodel import we_are_translated
-from pypy.rlib.debug import debug_start, debug_stop, ll_assert
-from pypy.rlib.debug import make_sure_not_resized
 from pypy.rpython.lltypesystem import lltype, llmemory, rclass
 from pypy.rpython.lltypesystem.lloperation import llop
-from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr
-from pypy.jit.codewriter import heaptracker, longlong
-from pypy.jit.metainterp.jitexc import JitException, get_llexception, reraise
-from pypy.jit.metainterp.compile import ResumeAtPositionDescr
+
 
 def arguments(*argtypes, **kwds):
     resulttype = kwds.pop('returns', None)
@@ -20,6 +21,9 @@
         return function
     return decorate
 
+LONGLONG_TYPECODE = 'i' if longlong.is_64_bit else 'f'
+
+
 class LeaveFrame(JitException):
     pass
 
@@ -663,6 +667,16 @@
         a = float(a)
         return longlong.getfloatstorage(a)
 
+    @arguments("f", returns=LONGLONG_TYPECODE)
+    def bhimpl_convert_float_bytes_to_longlong(a):
+        a = longlong.getrealfloat(a)
+        return longlong2float.float2longlong(a)
+
+    @arguments(LONGLONG_TYPECODE, returns="f")
+    def bhimpl_convert_longlong_bytes_to_float(a):
+        a = longlong2float.longlong2float(a)
+        return longlong.getfloatstorage(a)
+
     # ----------
     # control flow operations
 
@@ -982,6 +996,15 @@
         cpu.bh_setfield_gc_r(result, itemsdescr, items)
         return result
 
+    @arguments("cpu", "d", "d", "d", "d", "i", returns="r")
+    def bhimpl_newlist_hint(cpu, structdescr, lengthdescr, itemsdescr,
+                            arraydescr, lengthhint):
+        result = cpu.bh_new(structdescr)
+        cpu.bh_setfield_gc_i(result, lengthdescr, 0)
+        items = cpu.bh_new_array(arraydescr, lengthhint)
+        cpu.bh_setfield_gc_r(result, itemsdescr, items)
+        return result
+
     @arguments("cpu", "r", "d", "d", "i", returns="i")
     def bhimpl_getlistitem_gc_i(cpu, lst, itemsdescr, arraydescr, index):
         items = cpu.bh_getfield_gc_r(lst, itemsdescr)
@@ -1176,14 +1199,14 @@
     def bhimpl_getinteriorfield_gc_f(cpu, array, index, descr):
         return cpu.bh_getinteriorfield_gc_f(array, index, descr)
 
-    @arguments("cpu", "r", "i", "d", "i")
-    def bhimpl_setinteriorfield_gc_i(cpu, array, index, descr, value):
+    @arguments("cpu", "r", "i", "i", "d")
+    def bhimpl_setinteriorfield_gc_i(cpu, array, index, value, descr):
         cpu.bh_setinteriorfield_gc_i(array, index, descr, value)
-    @arguments("cpu", "r", "i", "d", "r")
-    def bhimpl_setinteriorfield_gc_r(cpu, array, index, descr, value):
+    @arguments("cpu", "r", "i", "r", "d")
+    def bhimpl_setinteriorfield_gc_r(cpu, array, index, value, descr):
         cpu.bh_setinteriorfield_gc_r(array, index, descr, value)
-    @arguments("cpu", "r", "i", "d", "f")
-    def bhimpl_setinteriorfield_gc_f(cpu, array, index, descr, value):
+    @arguments("cpu", "r", "i", "f", "d")
+    def bhimpl_setinteriorfield_gc_f(cpu, array, index, value, descr):
         cpu.bh_setinteriorfield_gc_f(array, index, descr, value)
 
     @arguments("cpu", "r", "d", returns="i")
@@ -1300,7 +1323,7 @@
     def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length):
         cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length)
 
-    @arguments(returns=(longlong.is_64_bit and "i" or "f"))
+    @arguments(returns=LONGLONG_TYPECODE)
     def bhimpl_ll_read_timestamp():
         return read_timestamp()
 
diff --git a/pypy/jit/metainterp/executor.py b/pypy/jit/metainterp/executor.py
--- a/pypy/jit/metainterp/executor.py
+++ b/pypy/jit/metainterp/executor.py
@@ -2,7 +2,7 @@
 """
 
 from pypy.rpython.lltypesystem import lltype, rstr
-from pypy.rlib.rarithmetic import ovfcheck, r_longlong
+from pypy.rlib.rarithmetic import ovfcheck, r_longlong, is_valid_int
 from pypy.rlib.rtimer import read_timestamp
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat, check_descr
@@ -248,7 +248,7 @@
 def do_read_timestamp(cpu, _):
     x = read_timestamp()
     if longlong.is_64_bit:
-        assert isinstance(x, int)         # 64-bit
+        assert is_valid_int(x)            # 64-bit
         return BoxInt(x)
     else:
         assert isinstance(x, r_longlong)  # 32-bit
diff --git a/pypy/jit/metainterp/graphpage.py b/pypy/jit/metainterp/graphpage.py
--- a/pypy/jit/metainterp/graphpage.py
+++ b/pypy/jit/metainterp/graphpage.py
@@ -169,9 +169,9 @@
             if op.getopnum() == rop.DEBUG_MERGE_POINT:
                 jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
                 if jd_sd._get_printable_location_ptr:
-                    s = jd_sd.warmstate.get_location_str(op.getarglist()[2:])
+                    s = jd_sd.warmstate.get_location_str(op.getarglist()[3:])
                     s = s.replace(',', '.') # we use comma for argument splitting
-                    op_repr = "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s)
+                    op_repr = "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s)
             lines.append(op_repr)
             if is_interesting_guard(op):
                 tgt = op.getdescr()._debug_suboperations[0]
diff --git a/pypy/jit/metainterp/history.py b/pypy/jit/metainterp/history.py
--- a/pypy/jit/metainterp/history.py
+++ b/pypy/jit/metainterp/history.py
@@ -4,7 +4,8 @@
 from pypy.rpython.ootypesystem import ootype
 from pypy.rlib.objectmodel import we_are_translated, Symbolic
 from pypy.rlib.objectmodel import compute_unique_id
-from pypy.rlib.rarithmetic import r_int64
+from pypy.rlib.rarithmetic import r_int64, is_valid_int
+
 from pypy.conftest import option
 
 from pypy.jit.metainterp.resoperation import ResOperation, rop
@@ -213,7 +214,7 @@
 
     def __init__(self, value):
         if not we_are_translated():
-            if isinstance(value, int):
+            if is_valid_int(value):
                 value = int(value)    # bool -> int
             else:
                 assert isinstance(value, Symbolic)
@@ -448,7 +449,7 @@
 
     def __init__(self, value=0):
         if not we_are_translated():
-            if isinstance(value, int):
+            if is_valid_int(value):
                 value = int(value)    # bool -> int
             else:
                 assert isinstance(value, Symbolic)
diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py
--- a/pypy/jit/metainterp/logger.py
+++ b/pypy/jit/metainterp/logger.py
@@ -110,9 +110,9 @@
     def repr_of_resop(self, op, ops_offset=None):
         if op.getopnum() == rop.DEBUG_MERGE_POINT:
             jd_sd = self.metainterp_sd.jitdrivers_sd[op.getarg(0).getint()]
-            s = jd_sd.warmstate.get_location_str(op.getarglist()[2:])
+            s = jd_sd.warmstate.get_location_str(op.getarglist()[3:])
             s = s.replace(',', '.') # we use comma for argument splitting
-            return "debug_merge_point(%d, '%s')" % (op.getarg(1).getint(), s)
+            return "debug_merge_point(%d, %d, '%s')" % (op.getarg(1).getint(), op.getarg(2).getint(), s)
         if ops_offset is None:
             offset = -1
         else:
@@ -149,7 +149,7 @@
             if target_token.exported_state:
                 for op in target_token.exported_state.inputarg_setup_ops:
                     debug_print('    ' + self.repr_of_resop(op))
-        
+
     def _log_operations(self, inputargs, operations, ops_offset):
         if not have_debug_prints():
             return
diff --git a/pypy/jit/metainterp/optimizeopt/__init__.py b/pypy/jit/metainterp/optimizeopt/__init__.py
--- a/pypy/jit/metainterp/optimizeopt/__init__.py
+++ b/pypy/jit/metainterp/optimizeopt/__init__.py
@@ -9,7 +9,7 @@
 from pypy.jit.metainterp.optimizeopt.simplify import OptSimplify
 from pypy.jit.metainterp.optimizeopt.pure import OptPure
 from pypy.jit.metainterp.optimizeopt.earlyforce import OptEarlyForce
-from pypy.rlib.jit import PARAMETERS
+from pypy.rlib.jit import PARAMETERS, ENABLE_ALL_OPTS
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
 
@@ -30,6 +30,9 @@
 ALL_OPTS_LIST = [name for name, _ in ALL_OPTS]
 ALL_OPTS_NAMES = ':'.join([name for name, _ in ALL_OPTS])
 
+assert ENABLE_ALL_OPTS == ALL_OPTS_NAMES, (
+    'please fix rlib/jit.py to say ENABLE_ALL_OPTS = %r' % (ALL_OPTS_NAMES,))
+
 def build_opt_chain(metainterp_sd, enable_opts):
     config = metainterp_sd.config
     optimizations = []
diff --git a/pypy/jit/metainterp/optimizeopt/intutils.py b/pypy/jit/metainterp/optimizeopt/intutils.py
--- a/pypy/jit/metainterp/optimizeopt/intutils.py
+++ b/pypy/jit/metainterp/optimizeopt/intutils.py
@@ -1,10 +1,9 @@
-from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT
+from pypy.rlib.rarithmetic import ovfcheck, LONG_BIT, maxint, is_valid_int
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.jit.metainterp.resoperation import rop, ResOperation
 from pypy.jit.metainterp.history import BoxInt, ConstInt
-import sys
-MAXINT = sys.maxint
-MININT = -sys.maxint - 1
+MAXINT = maxint
+MININT = -maxint - 1
 
 class IntBound(object):
     _attrs_ = ('has_upper', 'has_lower', 'upper', 'lower')
@@ -16,8 +15,8 @@
         self.lower = lower
         # check for unexpected overflows:
         if not we_are_translated():
-            assert type(upper) is not long
-            assert type(lower) is not long
+            assert type(upper) is not long or is_valid_int(upper)
+            assert type(lower) is not long or is_valid_int(lower)
 
     # Returns True if the bound was updated
     def make_le(self, other):
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -5031,6 +5031,42 @@
         """
         self.optimize_loop(ops, expected)
 
+    def test_str_copy_virtual(self):
+        ops = """
+        [i0]
+        p0 = newstr(8)
+        strsetitem(p0, 0, i0)
+        strsetitem(p0, 1, i0)
+        strsetitem(p0, 2, i0)
+        strsetitem(p0, 3, i0)
+        strsetitem(p0, 4, i0)
+        strsetitem(p0, 5, i0)
+        strsetitem(p0, 6, i0)
+        strsetitem(p0, 7, i0)
+        p1 = newstr(12)
+        copystrcontent(p0, p1, 0, 0, 8)
+        strsetitem(p1, 8, 3)
+        strsetitem(p1, 9, 0)
+        strsetitem(p1, 10, 0)
+        strsetitem(p1, 11, 0)
+        finish(p1)
+        """
+        expected = """
+        [i0]
+        p1 = newstr(12)
+        strsetitem(p1, 0, i0)
+        strsetitem(p1, 1, i0)
+        strsetitem(p1, 2, i0)
+        strsetitem(p1, 3, i0)
+        strsetitem(p1, 4, i0)
+        strsetitem(p1, 5, i0)
+        strsetitem(p1, 6, i0)
+        strsetitem(p1, 7, i0)
+        strsetitem(p1, 8, 3)
+        finish(p1)
+        """
+        self.optimize_strunicode_loop(ops, expected)
+
 
 class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
     pass
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -9,7 +9,6 @@
 from pypy.jit.metainterp.inliner import Inliner
 from pypy.jit.metainterp.resoperation import rop, ResOperation
 from pypy.jit.metainterp.resume import Snapshot
-from pypy.rlib.debug import debug_print
 import sys, os
 
 # FIXME: Introduce some VirtualOptimizer super class instead
@@ -121,9 +120,9 @@
                 limit = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.retrace_limit
                 if cell_token.retraced_count < limit:
                     cell_token.retraced_count += 1
-                    debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit))
+                    #debug_print('Retracing (%d/%d)' % (cell_token.retraced_count, limit))
                 else:
-                    debug_print("Retrace count reached, jumping to preamble")
+                    #debug_print("Retrace count reached, jumping to preamble")
                     assert cell_token.target_tokens[0].virtual_state is None
                     jumpop.setdescr(cell_token.target_tokens[0])
                     self.optimizer.send_extra_operation(jumpop)
@@ -273,9 +272,9 @@
                    not newvalue.is_constant():
                     op = ResOperation(rop.SAME_AS, [op.result], newresult)
                     self.optimizer._newoperations.append(op)
-                    if self.optimizer.loop.logops:
-                        debug_print('  Falling back to add extra: ' +
-                                    self.optimizer.loop.logops.repr_of_resop(op))
+                    #if self.optimizer.loop.logops:
+                    #    debug_print('  Falling back to add extra: ' +
+                    #                self.optimizer.loop.logops.repr_of_resop(op))
                     
         self.optimizer.flush()
         self.optimizer.emitting_dissabled = False
@@ -341,8 +340,8 @@
             if i == len(newoperations):
                 while j < len(jumpargs):
                     a = jumpargs[j]
-                    if self.optimizer.loop.logops:
-                        debug_print('J:  ' + self.optimizer.loop.logops.repr_of_arg(a))
+                    #if self.optimizer.loop.logops:
+                    #    debug_print('J:  ' + self.optimizer.loop.logops.repr_of_arg(a))
                     self.import_box(a, inputargs, short_jumpargs, jumpargs)
                     j += 1
             else:
@@ -353,11 +352,11 @@
                 if op.is_guard():
                     args = args + op.getfailargs()
 
-                if self.optimizer.loop.logops:
-                    debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op))
+                #if self.optimizer.loop.logops:
+                #    debug_print('OP: ' + self.optimizer.loop.logops.repr_of_resop(op))
                 for a in args:
-                    if self.optimizer.loop.logops:
-                        debug_print('A:  ' + self.optimizer.loop.logops.repr_of_arg(a))
+                    #if self.optimizer.loop.logops:
+                    #    debug_print('A:  ' + self.optimizer.loop.logops.repr_of_arg(a))
                     self.import_box(a, inputargs, short_jumpargs, jumpargs)
                 i += 1
             newoperations = self.optimizer.get_newoperations()
@@ -370,18 +369,18 @@
         # that is compatible with the virtual state at the start of the loop
         modifier = VirtualStateAdder(self.optimizer)
         final_virtual_state = modifier.get_virtual_state(original_jumpargs)
-        debug_start('jit-log-virtualstate')
-        virtual_state.debug_print('Closed loop with ')
+        #debug_start('jit-log-virtualstate')
+        #virtual_state.debug_print('Closed loop with ')
         bad = {}
         if not virtual_state.generalization_of(final_virtual_state, bad):
             # We ended up with a virtual state that is not compatible
             # and we are thus unable to jump to the start of the loop
-            final_virtual_state.debug_print("Bad virtual state at end of loop, ",
-                                            bad)
-            debug_stop('jit-log-virtualstate')
+            #final_virtual_state.debug_print("Bad virtual state at end of loop, ",
+            #                                bad)
+            #debug_stop('jit-log-virtualstate')
             raise InvalidLoop
             
-        debug_stop('jit-log-virtualstate')
+        #debug_stop('jit-log-virtualstate')
 
         maxguards = self.optimizer.metainterp_sd.warmrunnerdesc.memory_manager.max_retrace_guards
         if self.optimizer.emitted_guards > maxguards:
@@ -444,9 +443,9 @@
                 self.ensure_short_op_emitted(self.short_boxes.producer(a), optimizer,
                                              seen)
 
-        if self.optimizer.loop.logops:
-            debug_print('  Emitting short op: ' +
-                        self.optimizer.loop.logops.repr_of_resop(op))
+        #if self.optimizer.loop.logops:
+        #    debug_print('  Emitting short op: ' +
+        #                self.optimizer.loop.logops.repr_of_resop(op))
 
         optimizer.send_extra_operation(op)
         seen[op.result] = True
@@ -527,8 +526,8 @@
         args = jumpop.getarglist()
         modifier = VirtualStateAdder(self.optimizer)
         virtual_state = modifier.get_virtual_state(args)
-        debug_start('jit-log-virtualstate')
-        virtual_state.debug_print("Looking for ")
+        #debug_start('jit-log-virtualstate')
+        #virtual_state.debug_print("Looking for ")
 
         for target in cell_token.target_tokens:
             if not target.virtual_state:
@@ -537,10 +536,10 @@
             extra_guards = []
 
             bad = {}
-            debugmsg = 'Did not match '
+            #debugmsg = 'Did not match '
             if target.virtual_state.generalization_of(virtual_state, bad):
                 ok = True
-                debugmsg = 'Matched '
+                #debugmsg = 'Matched '
             else:
                 try:
                     cpu = self.optimizer.cpu
@@ -549,13 +548,13 @@
                                                          extra_guards)
 
                     ok = True
-                    debugmsg = 'Guarded to match '
+                    #debugmsg = 'Guarded to match '
                 except InvalidLoop:
                     pass
-            target.virtual_state.debug_print(debugmsg, bad)
+            #target.virtual_state.debug_print(debugmsg, bad)
 
             if ok:
-                debug_stop('jit-log-virtualstate')
+                #debug_stop('jit-log-virtualstate')
 
                 values = [self.getvalue(arg)
                           for arg in jumpop.getarglist()]
@@ -576,13 +575,13 @@
                         newop = inliner.inline_op(shop)
                         self.optimizer.send_extra_operation(newop)
                 except InvalidLoop:
-                    debug_print("Inlining failed unexpectedly",
-                                "jumping to preamble instead")
+                    #debug_print("Inlining failed unexpectedly",
+                    #            "jumping to preamble instead")
                     assert cell_token.target_tokens[0].virtual_state is None
                     jumpop.setdescr(cell_token.target_tokens[0])
                     self.optimizer.send_extra_operation(jumpop)
                 return True
-        debug_stop('jit-log-virtualstate')
+        #debug_stop('jit-log-virtualstate')
         return False
 
 class ValueImporter(object):
diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py
--- a/pypy/jit/metainterp/optimizeopt/virtualstate.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py
@@ -681,13 +681,14 @@
             self.synthetic[op] = True
 
     def debug_print(self, logops):
-        debug_start('jit-short-boxes')
-        for box, op in self.short_boxes.items():
-            if op:
-                debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op))
-            else:
-                debug_print(logops.repr_of_arg(box) + ': None')
-        debug_stop('jit-short-boxes')
+        if 0:
+            debug_start('jit-short-boxes')
+            for box, op in self.short_boxes.items():
+                if op:
+                    debug_print(logops.repr_of_arg(box) + ': ' + logops.repr_of_resop(op))
+                else:
+                    debug_print(logops.repr_of_arg(box) + ': None')
+            debug_stop('jit-short-boxes')
 
     def operations(self):
         if not we_are_translated(): # For tests
diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py
--- a/pypy/jit/metainterp/optimizeopt/vstring.py
+++ b/pypy/jit/metainterp/optimizeopt/vstring.py
@@ -10,6 +10,8 @@
 from pypy.rlib.unroll import unrolling_iterable
 from pypy.rpython import annlowlevel
 from pypy.rpython.lltypesystem import lltype, rstr
+from pypy.rlib.rarithmetic import is_valid_int
+
 
 
 class StrOrUnicode(object):
@@ -505,14 +507,23 @@
 
         if length.is_constant() and length.box.getint() == 0:
             return
-        copy_str_content(self,
-            src.force_box(self),
-            dst.force_box(self),
-            srcstart.force_box(self),
-            dststart.force_box(self),
-            length.force_box(self),
-            mode, need_next_offset=False
-        )
+        elif (src.is_virtual() and dst.is_virtual() and srcstart.is_constant() and
+            dststart.is_constant() and length.is_constant()):
+
+            src_start = srcstart.force_box(self).getint()
+            dst_start = dststart.force_box(self).getint()
+            for index in range(length.force_box(self).getint()):
+                vresult = self.strgetitem(src, optimizer.ConstantValue(ConstInt(index + src_start)), mode)
+                dst.setitem(index + dst_start, vresult)
+        else:
+            copy_str_content(self,
+                src.force_box(self),
+                dst.force_box(self),
+                srcstart.force_box(self),
+                dststart.force_box(self),
+                length.force_box(self),
+                mode, need_next_offset=False
+            )
 
     def optimize_CALL(self, op):
         # dispatch based on 'oopspecindex' to a method that handles
@@ -721,7 +732,7 @@
     for name in dir(OptString):
         if name.startswith(prefix):
             value = getattr(EffectInfo, 'OS_' + name[len(prefix):])
-            assert isinstance(value, int) and value != 0
+            assert is_valid_int(value) and value != 0
             result.append((value, getattr(OptString, name)))
     return unrolling_iterable(result)
 opt_call_oopspec_ops = _findall_call_oopspec()
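
The new branch in optimize_COPYSTRCONTENT above folds the copy away when both strings are virtual and the start offsets and length are constants, which is what the test_str_copy_virtual case earlier in this diff checks. A plain-Python sketch of the folding, using ordinary lists in place of virtual string values (illustration only, not the optimizer's actual data structures):

    def fold_copy(src_chars, dst_chars, src_start, dst_start, length):
        # forward each source character into the destination at
        # optimization time instead of emitting a residual copy call
        for index in range(length):
            dst_chars[dst_start + index] = src_chars[src_start + index]
        return dst_chars

    dst = fold_copy(list("aaaaaaaa"), [None] * 12, 0, 0, 8)
    assert dst[:8] == list("aaaaaaaa")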
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -223,6 +223,8 @@
                     'cast_float_to_singlefloat', 'cast_singlefloat_to_float',
                     'float_neg', 'float_abs',
                     'cast_ptr_to_int', 'cast_int_to_ptr',
+                    'convert_float_bytes_to_longlong',
+                    'convert_longlong_bytes_to_float',
                     ]:
         exec py.code.Source('''
             @arguments("box")
@@ -509,6 +511,15 @@
         self._opimpl_setfield_gc_any(sbox, itemsdescr, abox)
         return sbox
 
+    @arguments("descr", "descr", "descr", "descr", "box")
+    def opimpl_newlist_hint(self, structdescr, lengthdescr, itemsdescr,
+                            arraydescr, sizehintbox):
+        sbox = self.opimpl_new(structdescr)
+        self._opimpl_setfield_gc_any(sbox, lengthdescr, history.CONST_FALSE)
+        abox = self.opimpl_new_array(arraydescr, sizehintbox)
+        self._opimpl_setfield_gc_any(sbox, itemsdescr, abox)
+        return sbox
+
     @arguments("box", "descr", "descr", "box")
     def _opimpl_getlistitem_gc_any(self, listbox, itemsdescr, arraydescr,
                                    indexbox):
@@ -974,9 +985,11 @@
         any_operation = len(self.metainterp.history.operations) > 0
         jitdriver_sd = self.metainterp.staticdata.jitdrivers_sd[jdindex]
         self.verify_green_args(jitdriver_sd, greenboxes)
-        self.debug_merge_point(jitdriver_sd, jdindex, self.metainterp.portal_call_depth,
+        self.debug_merge_point(jitdriver_sd, jdindex,
+                               self.metainterp.portal_call_depth,
+                               self.metainterp.call_ids[-1],
                                greenboxes)
-        
+
         if self.metainterp.seen_loop_header_for_jdindex < 0:
             if not any_operation:
                 return
@@ -1028,11 +1041,11 @@
                                     assembler_call=True)
             raise ChangeFrame
 
-    def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, greenkey):
+    def debug_merge_point(self, jitdriver_sd, jd_index, portal_call_depth, current_call_id, greenkey):
         # debugging: produce a DEBUG_MERGE_POINT operation
         loc = jitdriver_sd.warmstate.get_location_str(greenkey)
         debug_print(loc)
-        args = [ConstInt(jd_index), ConstInt(portal_call_depth)] + greenkey
+        args = [ConstInt(jd_index), ConstInt(portal_call_depth), ConstInt(current_call_id)] + greenkey
         self.metainterp.history.record(rop.DEBUG_MERGE_POINT, args, None)
 
     @arguments("box", "label")
@@ -1574,11 +1587,14 @@
         self.call_pure_results = args_dict_box()
         self.heapcache = HeapCache()
 
+        self.call_ids = []
+        self.current_call_id = 0
+
     def retrace_needed(self, trace):
         self.partial_trace = trace
         self.retracing_from = len(self.history.operations) - 1
         self.heapcache.reset()
-        
+
 
     def perform_call(self, jitcode, boxes, greenkey=None):
         # causes the metainterp to enter the given subfunction
@@ -1592,6 +1608,8 @@
     def newframe(self, jitcode, greenkey=None):
         if jitcode.is_portal:
             self.portal_call_depth += 1
+            self.call_ids.append(self.current_call_id)
+            self.current_call_id += 1
         if greenkey is not None and self.is_main_jitcode(jitcode):
             self.portal_trace_positions.append(
                     (greenkey, len(self.history.operations)))
@@ -1608,6 +1626,7 @@
         jitcode = frame.jitcode
         if jitcode.is_portal:
             self.portal_call_depth -= 1
+            self.call_ids.pop()
         if frame.greenkey is not None and self.is_main_jitcode(jitcode):
             self.portal_trace_positions.append(
                     (None, len(self.history.operations)))
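
portal_call_depth already tracked how deep the tracer is inside portal frames; the new call_ids stack additionally gives every portal-level call its own id, and call_ids[-1] is what debug_merge_point now records as its extra integer argument (the one that logger.py and graphpage.py skip over with the [3:] slice). A standalone sketch of the bookkeeping added in newframe/popframe above, with a hypothetical helper name standing in for the MetaInterp fields:

    class CallIdTracker(object):
        # hypothetical helper; mirrors the call_ids / current_call_id fields
        def __init__(self):
            self.call_ids = []
            self.current_call_id = 0

        def enter_portal_frame(self):
            self.call_ids.append(self.current_call_id)
            self.current_call_id += 1

        def leave_portal_frame(self):
            self.call_ids.pop()

    t = CallIdTracker()
    t.enter_portal_frame()          # outermost portal call -> id 0
    t.enter_portal_frame()          # nested portal call    -> id 1
    assert t.call_ids == [0, 1]
    t.leave_portal_frame()
    assert t.call_ids[-1] == 0      # back in the outer call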
@@ -1976,7 +1995,7 @@
                 # Found!  Compile it as a loop.
                 # raises in case it works -- which is the common case
                 if self.partial_trace:
-                    if  start != self.retracing_from: 
+                    if  start != self.retracing_from:
                         raise SwitchToBlackhole(ABORT_BAD_LOOP) # For now
                 self.compile_loop(original_boxes, live_arg_boxes, start, resumedescr)
                 # creation of the loop was cancelled!
@@ -2085,7 +2104,7 @@
             if not token.target_tokens:
                 return None
         return token
-        
+
     def compile_loop(self, original_boxes, live_arg_boxes, start, resume_at_jump_descr):
         num_green_args = self.jitdriver_sd.num_green_args
         greenkey = original_boxes[:num_green_args]
diff --git a/pypy/jit/metainterp/resoperation.py b/pypy/jit/metainterp/resoperation.py
--- a/pypy/jit/metainterp/resoperation.py
+++ b/pypy/jit/metainterp/resoperation.py
@@ -419,6 +419,8 @@
     'CAST_INT_TO_FLOAT/1',          # need some messy code in the backend
     'CAST_FLOAT_TO_SINGLEFLOAT/1',
     'CAST_SINGLEFLOAT_TO_FLOAT/1',
+    'CONVERT_FLOAT_BYTES_TO_LONGLONG/1',
+    'CONVERT_LONGLONG_BYTES_TO_FLOAT/1',
     #
     'INT_LT/2b',
     'INT_LE/2b',
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -1,8 +1,10 @@
+import math
 import sys
 
 import py
 
 from pypy import conftest
+from pypy.jit.codewriter import longlong
 from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy
 from pypy.jit.metainterp import pyjitpl, history
 from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT
@@ -14,7 +16,8 @@
     loop_invariant, elidable, promote, jit_debug, assert_green,
     AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff,
     isconstant, isvirtual, promote_string, set_param, record_known_class)
-from pypy.rlib.rarithmetic import ovfcheck
+from pypy.rlib.longlong2float import float2longlong, longlong2float
+from pypy.rlib.rarithmetic import ovfcheck, is_valid_int
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rpython.ootypesystem import ootype
 
@@ -292,7 +295,7 @@
         assert res == f(6, sys.maxint, 32, 48)
         res = self.meta_interp(f, [sys.maxint, 6, 32, 48])
         assert res == f(sys.maxint, 6, 32, 48)
-        
+
 
     def test_loop_invariant_intbox(self):
         myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
@@ -953,7 +956,7 @@
         self.meta_interp(f, [20], repeat=7)
         # the loop and the entry path as a single trace
         self.check_jitcell_token_count(1)
-        
+
         # we get:
         #    ENTER             - compile the new loop and the entry bridge
         #    ENTER             - compile the leaving path
@@ -1470,7 +1473,7 @@
         assert res == f(299)
         self.check_resops(guard_class=0, guard_nonnull=4,
                           guard_nonnull_class=4, guard_isnull=2)
-        
+
 
     def test_merge_guardnonnull_guardvalue(self):
         from pypy.rlib.objectmodel import instantiate
@@ -1499,7 +1502,7 @@
         assert res == f(299)
         self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4,
                           guard_nonnull_class=0, guard_isnull=2)
-        
+
 
     def test_merge_guardnonnull_guardvalue_2(self):
         from pypy.rlib.objectmodel import instantiate
@@ -1528,7 +1531,7 @@
         assert res == f(299)
         self.check_resops(guard_value=4, guard_class=0, guard_nonnull=4,
                           guard_nonnull_class=0, guard_isnull=2)
-        
+
 
     def test_merge_guardnonnull_guardclass_guardvalue(self):
         from pypy.rlib.objectmodel import instantiate
@@ -2296,7 +2299,7 @@
             self.check_resops(int_rshift=3)
 
             bigval = 1
-            while (bigval << 3).__class__ is int:
+            while is_valid_int(bigval << 3):
                 bigval = bigval << 1
 
             assert self.meta_interp(f, [bigval, 5]) == 0
@@ -2341,7 +2344,7 @@
             self.check_resops(int_rshift=3)
 
             bigval = 1
-            while (bigval << 3).__class__ is int:
+            while is_valid_int(bigval << 3):
                 bigval = bigval << 1
 
             assert self.meta_interp(f, [bigval, 5]) == 0
@@ -2636,7 +2639,7 @@
             return sa
         assert self.meta_interp(f, [20]) == f(20)
         self.check_resops(int_lt=6, int_le=2, int_ge=4, int_gt=3)
-        
+
 
     def test_intbounds_not_generalized2(self):
         myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'node'])
@@ -2677,7 +2680,7 @@
         assert self.meta_interp(f, [20, 3]) == f(20, 3)
         self.check_jitcell_token_count(1)
         self.check_target_token_count(5)
-        
+
     def test_max_retrace_guards(self):
         myjitdriver = JitDriver(greens = [], reds = ['n', 'i', 'sa', 'a'])
 
@@ -2815,7 +2818,7 @@
         for cell in get_stats().get_all_jitcell_tokens():
             # Initial trace with two labels and 5 retraces
             assert len(cell.target_tokens) <= 7
-            
+
     def test_nested_retrace(self):
 
         myjitdriver = JitDriver(greens = ['pc'], reds = ['n', 'a', 'i', 'j', 'sa'])
@@ -3784,6 +3787,25 @@
         assert res == 11 * 12 * 13
         self.check_operations_history(int_add=3, int_mul=2)
 
+    def test_setinteriorfield(self):
+        A = lltype.GcArray(lltype.Struct('S', ('x', lltype.Signed)))
+        a = lltype.malloc(A, 5, immortal=True)
+        def g(n):
+            a[n].x = n + 2
+            return a[n].x
+        res = self.interp_operations(g, [1])
+        assert res == 3
+
+    def test_float_bytes(self):
+        def f(n):
+            ll = float2longlong(n)
+            return longlong2float(ll)
+
+        for x in [2.5, float("nan"), -2.5, float("inf")]:
+            # There are tests elsewhere to verify the correctness of this.
+            res = self.interp_operations(f, [x])
+            assert res == x or math.isnan(x) and math.isnan(res)
+
 
 class TestLLtype(BaseLLtypeTests, LLJitMixin):
     def test_tagged(self):
diff --git a/pypy/jit/metainterp/test/test_compile.py b/pypy/jit/metainterp/test/test_compile.py
--- a/pypy/jit/metainterp/test/test_compile.py
+++ b/pypy/jit/metainterp/test/test_compile.py
@@ -14,7 +14,7 @@
     ts = typesystem.llhelper
     def __init__(self):
         self.seen = []
-    def compile_loop(self, inputargs, operations, token, name=''):
+    def compile_loop(self, inputargs, operations, token, log=True, name=''):
         self.seen.append((inputargs, operations, token))
 
 class FakeLogger(object):
diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py
--- a/pypy/jit/metainterp/test/test_list.py
+++ b/pypy/jit/metainterp/test/test_list.py
@@ -1,4 +1,5 @@
 import py
+from pypy.rlib.objectmodel import newlist_hint
 from pypy.rlib.jit import JitDriver
 from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
 
@@ -228,6 +229,28 @@
         self.check_resops({'jump': 1, 'int_gt': 2, 'int_add': 2,
                            'guard_true': 2, 'int_sub': 2})
 
+    def test_newlist_hint(self):
+        def f(i):
+            l = newlist_hint(i)
+            l[0] = 55
+            return len(l)
+
+        r = self.interp_operations(f, [3])
+        assert r == 0
+
+    def test_newlist_hint_optimized(self):
+        driver = JitDriver(greens = [], reds = ['i'])
+
+        def f(i):
+            while i > 0:
+                driver.jit_merge_point(i=i)
+                l = newlist_hint(5)
+                l.append(1)
+                i -= l[0]
+
+        self.meta_interp(f, [10], listops=True)
+        self.check_resops(new_array=0, call=0)
+
 class TestOOtype(ListTests, OOJitMixin):
     pass
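
The newlist_hint tests above use pypy.rlib.objectmodel.newlist_hint(n), which returns a logically empty list whose storage is preallocated for roughly n items, so the JIT (through the newlist_hint support added to jtransform, pyjitpl and blackhole earlier in this diff) can allocate the backing array once and avoid resizes. A small usage sketch:

    from pypy.rlib.objectmodel import newlist_hint

    def build(n):
        l = newlist_hint(n)      # logically empty: len(l) == 0
        for i in range(n):
            l.append(i)          # appends up to n items without regrowing
        return l

    assert len(build(5)) == 5
    # untranslated this is just an ordinary empty list; the hint only
    # matters to the translated code and the JIT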
 
diff --git a/pypy/jit/metainterp/test/test_logger.py b/pypy/jit/metainterp/test/test_logger.py
--- a/pypy/jit/metainterp/test/test_logger.py
+++ b/pypy/jit/metainterp/test/test_logger.py
@@ -54,7 +54,7 @@
         class FakeJitDriver(object):
             class warmstate(object):
                 get_location_str = staticmethod(lambda args: "dupa")
-        
+
         class FakeMetaInterpSd:
             cpu = AbstractCPU()
             cpu.ts = self.ts
@@ -77,7 +77,7 @@
             equaloplists(loop.operations, oloop.operations)
             assert oloop.inputargs == loop.inputargs
         return logger, loop, oloop
-    
+
     def test_simple(self):
         inp = '''
         [i0, i1, i2, p3, p4, p5]
@@ -116,12 +116,13 @@
     def test_debug_merge_point(self):
         inp = '''
         []
-        debug_merge_point(0, 0)
+        debug_merge_point(0, 0, 0)
         '''
         _, loop, oloop = self.reparse(inp, check_equal=False)
         assert loop.operations[0].getarg(1).getint() == 0
-        assert oloop.operations[0].getarg(1)._get_str() == "dupa"
-        
+        assert loop.operations[0].getarg(2).getint() == 0
+        assert oloop.operations[0].getarg(2)._get_str() == "dupa"
+
     def test_floats(self):
         inp = '''
         [f0]
@@ -142,7 +143,7 @@
         output = logger.log_loop(loop)
         assert output.splitlines()[-1] == "jump(i0, descr=<Loop3>)"
         pure_parse(output)
-        
+
     def test_guard_descr(self):
         namespace = {'fdescr': BasicFailDescr()}
         inp = '''
@@ -154,7 +155,7 @@
         output = logger.log_loop(loop)
         assert output.splitlines()[-1] == "guard_true(i0, descr=<Guard0>) [i0]"
         pure_parse(output)
-        
+
         logger = Logger(self.make_metainterp_sd(), guard_number=False)
         output = logger.log_loop(loop)
         lastline = output.splitlines()[-1]
diff --git a/pypy/jit/metainterp/test/test_warmspot.py b/pypy/jit/metainterp/test/test_warmspot.py
--- a/pypy/jit/metainterp/test/test_warmspot.py
+++ b/pypy/jit/metainterp/test/test_warmspot.py
@@ -13,7 +13,7 @@
 
 
 class WarmspotTests(object):
-    
+
     def test_basic(self):
         mydriver = JitDriver(reds=['a'],
                              greens=['i'])
@@ -77,16 +77,16 @@
         self.meta_interp(f, [123, 10])
         assert len(get_stats().locations) >= 4
         for loc in get_stats().locations:
-            assert loc == (0, 123)
+            assert loc == (0, 0, 123)
 
     def test_set_param_enable_opts(self):
         from pypy.rpython.annlowlevel import llstr, hlstr
-        
+
         myjitdriver = JitDriver(greens = [], reds = ['n'])
         class A(object):
             def m(self, n):
                 return n-1
-            
+
         def g(n):
             while n > 0:
                 myjitdriver.can_enter_jit(n=n)
@@ -332,7 +332,7 @@
             ts = llhelper
             translate_support_code = False
             stats = "stats"
-            
+
             def get_fail_descr_number(self, d):
                 return -1
 
@@ -352,7 +352,7 @@
                 return "not callable"
 
         driver = JitDriver(reds = ['red'], greens = ['green'])
-        
+
         def f(green):
             red = 0
             while red < 10:
diff --git a/pypy/jit/tl/tlc.py b/pypy/jit/tl/tlc.py
--- a/pypy/jit/tl/tlc.py
+++ b/pypy/jit/tl/tlc.py
@@ -6,6 +6,8 @@
 from pypy.jit.tl.tlopcode import *
 from pypy.jit.tl import tlopcode
 from pypy.rlib.jit import JitDriver, elidable
+from pypy.rlib.rarithmetic import is_valid_int
+
 
 class Obj(object):
 
@@ -219,7 +221,7 @@
 class Frame(object):
 
     def __init__(self, args, pc):
-        assert isinstance(pc, int)
+        assert is_valid_int(pc)
         self.args  = args
         self.pc    = pc
         self.stack = []
@@ -239,7 +241,7 @@
         return interp_eval(code, pc, args, pool).int_o()
 
     def interp_eval(code, pc, args, pool):
-        assert isinstance(pc, int)
+        assert is_valid_int(pc)
         frame = Frame(args, pc)
         pc = frame.pc
 
diff --git a/pypy/jit/tool/test/test_oparser.py b/pypy/jit/tool/test/test_oparser.py
--- a/pypy/jit/tool/test/test_oparser.py
+++ b/pypy/jit/tool/test/test_oparser.py
@@ -146,16 +146,18 @@
     def test_debug_merge_point(self):
         x = '''
         []
-        debug_merge_point(0, "info")
-        debug_merge_point(0, 'info')
-        debug_merge_point(1, '<some ('other.')> info')
-        debug_merge_point(0, '(stuff) #1')
+        debug_merge_point(0, 0, "info")
+        debug_merge_point(0, 0, 'info')
+        debug_merge_point(1, 1, '<some ('other.')> info')
+        debug_merge_point(0, 0, '(stuff) #1')
         '''
         loop = self.parse(x)
-        assert loop.operations[0].getarg(1)._get_str() == 'info'
-        assert loop.operations[1].getarg(1)._get_str() == 'info'
-        assert loop.operations[2].getarg(1)._get_str() == "<some ('other.')> info"
-        assert loop.operations[3].getarg(1)._get_str() == "(stuff) #1"
+        assert loop.operations[0].getarg(2)._get_str() == 'info'
+        assert loop.operations[0].getarg(1).value == 0
+        assert loop.operations[1].getarg(2)._get_str() == 'info'
+        assert loop.operations[2].getarg(2)._get_str() == "<some ('other.')> info"
+        assert loop.operations[2].getarg(1).value == 1
+        assert loop.operations[3].getarg(2)._get_str() == "(stuff) #1"
 
 
     def test_descr_with_obj_print(self):
diff --git a/pypy/module/__builtin__/app_inspect.py b/pypy/module/__builtin__/app_inspect.py
--- a/pypy/module/__builtin__/app_inspect.py
+++ b/pypy/module/__builtin__/app_inspect.py
@@ -8,8 +8,6 @@
 from __pypy__ import lookup_special
 
 def _caller_locals(): 
-    # note: the reason why this is working is because the functions in here are
-    # compiled by geninterp, so they don't have a frame
     return sys._getframe(0).f_locals 
 
 def vars(*obj):
@@ -26,17 +24,6 @@
         except AttributeError:
             raise TypeError, "vars() argument must have __dict__ attribute"
 
-# Replaced by the interp-level helper space.callable(): 
-##def callable(ob):
-##    import __builtin__ # XXX this is insane but required for now for geninterp
-##    for c in type(ob).__mro__:
-##        if '__call__' in c.__dict__:
-##            if isinstance(ob, __builtin__._instance): # old style instance!
-##                return getattr(ob, '__call__', None) is not None
-##            return True
-##    else:
-##        return False
-
 def dir(*args):
     """dir([object]) -> list of strings
 
diff --git a/pypy/module/__builtin__/interp_memoryview.py b/pypy/module/__builtin__/interp_memoryview.py
--- a/pypy/module/__builtin__/interp_memoryview.py
+++ b/pypy/module/__builtin__/interp_memoryview.py
@@ -69,6 +69,10 @@
         return W_MemoryView(buf)
 
     def descr_buffer(self, space):
+        """Note that memoryview() objects in PyPy support buffer(), whereas
+        not in CPython; but CPython supports passing memoryview() to most
+        built-in functions that accept buffers, with the notable exception
+        of the buffer() built-in."""
         return space.wrap(self.buf)
 
     def descr_tobytes(self, space):
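
The docstring added to descr_buffer documents a deliberate divergence: PyPy lets the buffer() built-in accept a memoryview, while CPython 2.7 does not (even though CPython accepts memoryviews in most other buffer-taking built-ins). An illustration-only check of that difference:

    m = memoryview("abc")
    try:
        b = buffer(m)            # succeeds on PyPy, per the docstring above
        assert len(b) == 3
    except TypeError:
        # CPython 2.7: the buffer() built-in rejects memoryview objects
        pass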
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -1,5 +1,5 @@
+import sys
 
-# Package initialisation
 from pypy.interpreter.mixedmodule import MixedModule
 from pypy.module.imp.importing import get_pyc_magic
 
@@ -12,6 +12,21 @@
         "UnicodeBuilder": "interp_builders.W_UnicodeBuilder",
     }
 
+class TimeModule(MixedModule):
+    appleveldefs = {}
+    interpleveldefs = {}
+    if sys.platform.startswith("linux"):
+        from pypy.module.__pypy__ import interp_time
+        interpleveldefs["clock_gettime"] = "interp_time.clock_gettime"
+        interpleveldefs["clock_getres"] = "interp_time.clock_getres"
+        for name in [
+            "CLOCK_REALTIME", "CLOCK_MONOTONIC", "CLOCK_MONOTONIC_RAW",
+            "CLOCK_PROCESS_CPUTIME_ID", "CLOCK_THREAD_CPUTIME_ID"
+        ]:
+            if getattr(interp_time, name) is not None:
+                interpleveldefs[name] = "space.wrap(interp_time.%s)" % name
+
+
 class Module(MixedModule):
     appleveldefs = {
     }
@@ -32,6 +47,7 @@
 
     submodules = {
         "builders": BuildersModule,
+        "time": TimeModule,
     }
 
     def setup_after_space_initialization(self):
diff --git a/pypy/module/__pypy__/interp_time.py b/pypy/module/__pypy__/interp_time.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/interp_time.py
@@ -0,0 +1,65 @@
+from __future__ import with_statement
+import sys
+
+from pypy.interpreter.error import exception_from_errno
+from pypy.interpreter.gateway import unwrap_spec
+from pypy.rpython.lltypesystem import rffi, lltype
+from pypy.rpython.tool import rffi_platform
+from pypy.translator.tool.cbuild import ExternalCompilationInfo
+
+
+class CConfig:
+    _compilation_info_ = ExternalCompilationInfo(
+        includes=["time.h"],
+        libraries=["rt"],
+    )
+
+    HAS_CLOCK_GETTIME = rffi_platform.Has('clock_gettime')
+
+    CLOCK_REALTIME = rffi_platform.DefinedConstantInteger("CLOCK_REALTIME")
+    CLOCK_MONOTONIC = rffi_platform.DefinedConstantInteger("CLOCK_MONOTONIC")
+    CLOCK_MONOTONIC_RAW = rffi_platform.DefinedConstantInteger("CLOCK_MONOTONIC_RAW")
+    CLOCK_PROCESS_CPUTIME_ID = rffi_platform.DefinedConstantInteger("CLOCK_PROCESS_CPUTIME_ID")
+    CLOCK_THREAD_CPUTIME_ID = rffi_platform.DefinedConstantInteger("CLOCK_THREAD_CPUTIME_ID")
+
+    TIMESPEC = rffi_platform.Struct("struct timespec", [
+        ("tv_sec", rffi.TIME_T),
+        ("tv_nsec", rffi.LONG),
+    ])
+
+cconfig = rffi_platform.configure(CConfig)
+
+HAS_CLOCK_GETTIME = cconfig["HAS_CLOCK_GETTIME"]
+
+CLOCK_REALTIME = cconfig["CLOCK_REALTIME"]
+CLOCK_MONOTONIC = cconfig["CLOCK_MONOTONIC"]
+CLOCK_MONOTONIC_RAW = cconfig["CLOCK_MONOTONIC_RAW"]
+CLOCK_PROCESS_CPUTIME_ID = cconfig["CLOCK_PROCESS_CPUTIME_ID"]
+CLOCK_THREAD_CPUTIME_ID = cconfig["CLOCK_THREAD_CPUTIME_ID"]
+
+TIMESPEC = cconfig["TIMESPEC"]
+
+c_clock_gettime = rffi.llexternal("clock_gettime",
+    [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT,
+    compilation_info=CConfig._compilation_info_, threadsafe=False
+)
+c_clock_getres = rffi.llexternal("clock_getres",
+    [lltype.Signed, lltype.Ptr(TIMESPEC)], rffi.INT,
+    compilation_info=CConfig._compilation_info_, threadsafe=False
+)
+
+@unwrap_spec(clk_id="c_int")
+def clock_gettime(space, clk_id):
+    with lltype.scoped_alloc(TIMESPEC) as tp:
+        ret = c_clock_gettime(clk_id, tp)
+        if ret != 0:
+            raise exception_from_errno(space, space.w_IOError)
+        return space.wrap(tp.c_tv_sec + tp.c_tv_nsec * 1e-9)
+
+@unwrap_spec(clk_id="c_int")
+def clock_getres(space, clk_id):
+    with lltype.scoped_alloc(TIMESPEC) as tp:
+        ret = c_clock_getres(clk_id, tp)
+        if ret != 0:
+            raise exception_from_errno(space, space.w_IOError)
+        return space.wrap(tp.c_tv_sec + tp.c_tv_nsec * 1e-9)
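
Once translated on Linux, these functions are reachable at application level through the __pypy__.time submodule registered in __init__.py above. A hedged usage sketch (PyPy on Linux only, and only when clock_gettime was detected at build time):

    from __pypy__ import time as _time

    start = _time.clock_gettime(_time.CLOCK_MONOTONIC)
    # ... do some work ...
    elapsed = _time.clock_gettime(_time.CLOCK_MONOTONIC) - start   # seconds, float
    print "monotonic resolution:", _time.clock_getres(_time.CLOCK_MONOTONIC)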
diff --git a/pypy/module/__pypy__/test/test_time.py b/pypy/module/__pypy__/test/test_time.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/test/test_time.py
@@ -0,0 +1,26 @@
+import py
+
+from pypy.module.__pypy__.interp_time import HAS_CLOCK_GETTIME
+
+
+class AppTestTime(object):
+    def setup_class(cls):
+        if not HAS_CLOCK_GETTIME:
+            py.test.skip("need time.clock_gettime")
+
+    def test_clock_realtime(self):
+        from __pypy__ import time
+        res = time.clock_gettime(time.CLOCK_REALTIME)
+        assert isinstance(res, float)
+
+    def test_clock_monotonic(self):
+        from __pypy__ import time
+        a = time.clock_gettime(time.CLOCK_MONOTONIC)
+        b = time.clock_gettime(time.CLOCK_MONOTONIC)
+        assert a <= b
+
+    def test_clock_getres(self):
+        from __pypy__ import time
+        res = time.clock_getres(time.CLOCK_REALTIME)
+        assert res > 0.0
+        assert res <= 1.0
diff --git a/pypy/module/_ast/test/test_ast.py b/pypy/module/_ast/test/test_ast.py
--- a/pypy/module/_ast/test/test_ast.py
+++ b/pypy/module/_ast/test/test_ast.py
@@ -1,9 +1,10 @@
 import py
-
+from pypy.conftest import gettestobjspace
 
 class AppTestAST:
 
     def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=['struct'])
         cls.w_ast = cls.space.appexec([], """():
     import _ast
     return _ast""")
diff --git a/pypy/module/_codecs/test/test_codecs.py b/pypy/module/_codecs/test/test_codecs.py
--- a/pypy/module/_codecs/test/test_codecs.py
+++ b/pypy/module/_codecs/test/test_codecs.py
@@ -4,7 +4,7 @@
 
 class AppTestCodecs:
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('unicodedata',))
+        space = gettestobjspace(usemodules=('unicodedata', 'struct'))
         cls.space = space
 
     def test_register_noncallable(self):
diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py
--- a/pypy/module/_continuation/test/test_zpickle.py
+++ b/pypy/module/_continuation/test/test_zpickle.py
@@ -106,8 +106,9 @@
     version = 0
 
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=('_continuation',),
+        cls.space = gettestobjspace(usemodules=('_continuation', 'struct'),
                                     CALL_METHOD=True)
+        cls.space.config.translation.continuation = True
         cls.space.appexec([], """():
             global continulet, A, __name__
 
diff --git a/pypy/module/_ffi/test/test__ffi.py b/pypy/module/_ffi/test/test__ffi.py
--- a/pypy/module/_ffi/test/test__ffi.py
+++ b/pypy/module/_ffi/test/test__ffi.py
@@ -100,7 +100,10 @@
         from _ffi import CDLL, types
         libm = CDLL(self.libm_name)
         pow_addr = libm.getaddressindll('pow')
-        assert pow_addr == self.pow_addr & (sys.maxint*2-1)
+        fff = sys.maxint*2-1
+        if sys.platform == 'win32':
+            fff = sys.maxint*2+1
+        assert pow_addr == self.pow_addr & fff
 
     def test_func_fromaddr(self):
         import sys
diff --git a/pypy/module/_hashlib/test/test_hashlib.py b/pypy/module/_hashlib/test/test_hashlib.py
--- a/pypy/module/_hashlib/test/test_hashlib.py
+++ b/pypy/module/_hashlib/test/test_hashlib.py
@@ -3,7 +3,7 @@
 
 class AppTestHashlib:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['_hashlib'])
+        cls.space = gettestobjspace(usemodules=['_hashlib', 'array', 'struct'])
 
     def test_simple(self):
         import _hashlib
diff --git a/pypy/module/_io/test/test_io.py b/pypy/module/_io/test/test_io.py
--- a/pypy/module/_io/test/test_io.py
+++ b/pypy/module/_io/test/test_io.py
@@ -158,7 +158,7 @@
 
 class AppTestOpen:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['_io', '_locale'])
+        cls.space = gettestobjspace(usemodules=['_io', '_locale', 'array', 'struct'])
         tmpfile = udir.join('tmpfile').ensure()
         cls.w_tmpfile = cls.space.wrap(str(tmpfile))
 
diff --git a/pypy/module/_lsprof/interp_lsprof.py b/pypy/module/_lsprof/interp_lsprof.py
--- a/pypy/module/_lsprof/interp_lsprof.py
+++ b/pypy/module/_lsprof/interp_lsprof.py
@@ -22,7 +22,7 @@
 eci = ExternalCompilationInfo(
     separate_module_files=[srcdir.join('profiling.c')],
     export_symbols=['pypy_setup_profiling', 'pypy_teardown_profiling'])
-                                                     
+
 c_setup_profiling = rffi.llexternal('pypy_setup_profiling',
                                   [], lltype.Void,
                                   compilation_info = eci)
@@ -228,7 +228,7 @@
         if w_self.builtins:
             key = create_spec(space, w_arg)
             w_self._enter_builtin_call(key)
-    elif event == 'c_return':
+    elif event == 'c_return' or event == 'c_exception':
         if w_self.builtins:
             key = create_spec(space, w_arg)
             w_self._enter_builtin_return(key)
@@ -237,7 +237,7 @@
         pass
 
 class W_Profiler(Wrappable):
-    
+
     def __init__(self, space, w_callable, time_unit, subcalls, builtins):
         self.subcalls = subcalls
         self.builtins = builtins
diff --git a/pypy/module/_lsprof/test/test_cprofile.py b/pypy/module/_lsprof/test/test_cprofile.py
--- a/pypy/module/_lsprof/test/test_cprofile.py
+++ b/pypy/module/_lsprof/test/test_cprofile.py
@@ -117,6 +117,20 @@
             assert 0.9 < subentry.totaltime < 2.9
             #assert 0.9 < subentry.inlinetime < 2.9
 
+    def test_builtin_exception(self):
+        import math
+        import _lsprof
+
+        prof = _lsprof.Profiler()
+        prof.enable()
+        try:
+            math.sqrt("a")
+        except TypeError:
+            pass
+        prof.disable()
+        stats = prof.getstats()
+        assert len(stats) == 2
+
     def test_use_cprofile(self):
         import sys, os
         # XXX this is evil trickery to walk around the fact that we don't
diff --git a/pypy/module/_md5/test/test_md5.py b/pypy/module/_md5/test/test_md5.py
--- a/pypy/module/_md5/test/test_md5.py
+++ b/pypy/module/_md5/test/test_md5.py
@@ -28,7 +28,7 @@
         assert self.md5.digest_size == 16
         #assert self.md5.digestsize == 16        -- not on CPython
         assert self.md5.md5().digest_size == 16
-        if sys.version >= (2, 5):
+        if sys.version_info >= (2, 5):
             assert self.md5.blocksize == 1
             assert self.md5.md5().digestsize == 16
 
diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py
--- a/pypy/module/_multiprocessing/test/test_connection.py
+++ b/pypy/module/_multiprocessing/test/test_connection.py
@@ -92,7 +92,8 @@
 
 class AppTestSocketConnection(BaseConnectionTest):
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal'))
+        space = gettestobjspace(usemodules=('_multiprocessing', 'thread', 'signal',
+                                            'struct', 'array'))
         cls.space = space
         cls.w_connections = space.newlist([])
 
diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py
--- a/pypy/module/_multiprocessing/test/test_semaphore.py
+++ b/pypy/module/_multiprocessing/test/test_semaphore.py
@@ -2,6 +2,7 @@
 from pypy.module._multiprocessing.interp_semaphore import (
     RECURSIVE_MUTEX, SEMAPHORE)
 
+
 class AppTestSemaphore:
     def setup_class(cls):
         space = gettestobjspace(usemodules=('_multiprocessing', 'thread'))
diff --git a/pypy/module/_socket/test/test_sock_app.py b/pypy/module/_socket/test/test_sock_app.py
--- a/pypy/module/_socket/test/test_sock_app.py
+++ b/pypy/module/_socket/test/test_sock_app.py
@@ -6,7 +6,7 @@
 from pypy.rpython.lltypesystem import lltype, rffi
 
 def setup_module(mod):
-    mod.space = gettestobjspace(usemodules=['_socket', 'array'])
+    mod.space = gettestobjspace(usemodules=['_socket', 'array', 'struct'])
     global socket
     import socket
     mod.w_socket = space.appexec([], "(): import _socket as m; return m")
@@ -372,10 +372,9 @@
     def test_socket_connect(self):
         import _socket, os
         s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM, 0)
-        # XXX temporarily we use python.org to test, will have more robust tests
-        # in the absence of a network connection later when more parts of the
-        # socket API are implemented.  Currently skip the test if there is no
-        # connection.
+        # it would be nice to have a test which works even if there is no
+        # network connection. However, this one is "good enough" for now. Skip
+        # it if there is no connection.
         try:
             s.connect(("www.python.org", 80))
         except _socket.gaierror, ex:
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -432,7 +432,8 @@
                     raise _ssl_seterror(self.space, self, length)
                 try:
                     # this is actually an immutable bytes sequence
-                    return self.space.wrap(rffi.charp2str(buf_ptr[0]))
+                    return self.space.wrap(rffi.charpsize2str(buf_ptr[0],
+                                                              length))
                 finally:
                     libssl_OPENSSL_free(buf_ptr[0])
         else:
diff --git a/pypy/module/_ssl/test/test_ssl.py b/pypy/module/_ssl/test/test_ssl.py
--- a/pypy/module/_ssl/test/test_ssl.py
+++ b/pypy/module/_ssl/test/test_ssl.py
@@ -2,6 +2,7 @@
 import os
 import py
 
+
 class AppTestSSL:
     def setup_class(cls):
         space = gettestobjspace(usemodules=('_ssl', '_socket'))
@@ -29,7 +30,6 @@
         assert isinstance(_ssl.SSL_ERROR_EOF, int)
         assert isinstance(_ssl.SSL_ERROR_INVALID_ERROR_CODE, int)
 
-        assert isinstance(_ssl.OPENSSL_VERSION_NUMBER, (int, long))
         assert isinstance(_ssl.OPENSSL_VERSION_INFO, tuple)
         assert len(_ssl.OPENSSL_VERSION_INFO) == 5
         assert isinstance(_ssl.OPENSSL_VERSION, str)
@@ -90,7 +90,7 @@
 
 class AppTestConnectedSSL:
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('_ssl', '_socket'))
+        space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct'))
         cls.space = space
 
     def setup_method(self, method):
@@ -179,7 +179,7 @@
     # to exercise the poll() calls
 
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('_ssl', '_socket'))
+        space = gettestobjspace(usemodules=('_ssl', '_socket', 'struct'))
         cls.space = space
         cls.space.appexec([], """():
             import socket; socket.setdefaulttimeout(1)
diff --git a/pypy/module/array/interp_array.py b/pypy/module/array/interp_array.py
--- a/pypy/module/array/interp_array.py
+++ b/pypy/module/array/interp_array.py
@@ -11,6 +11,7 @@
 from pypy.objspace.std.register_all import register_all
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rlib.unroll import unrolling_iterable
+from pypy.rlib.objectmodel import specialize, keepalive_until_here
 from pypy.rpython.lltypesystem import lltype, rffi
 
 
@@ -144,28 +145,36 @@
 unroll_typecodes = unrolling_iterable(types.keys())
 
 class ArrayBuffer(RWBuffer):
-    def __init__(self, data, bytes):
-        self.data = data
-        self.len = bytes
+    def __init__(self, array):
+        self.array = array
 
     def getlength(self):
-        return self.len
+        return self.array.len * self.array.itemsize
 
     def getitem(self, index):
-        return self.data[index]
+        array = self.array
+        data = array._charbuf_start()
+        char = data[index]
+        array._charbuf_stop()
+        return char
 
     def setitem(self, index, char):
-        self.data[index] = char
+        array = self.array
+        data = array._charbuf_start()
+        data[index] = char
+        array._charbuf_stop()
 
 
 def make_array(mytype):
+    W_ArrayBase = globals()['W_ArrayBase']
+
     class W_Array(W_ArrayBase):
         itemsize = mytype.bytes
         typecode = mytype.typecode
 
         @staticmethod
         def register(typeorder):
-            typeorder[W_Array] = []
+            typeorder[W_Array] = [(W_ArrayBase, None)]
 
         def __init__(self, space):
             self.space = space
@@ -275,9 +284,10 @@
             oldlen = self.len
             new = len(s) / mytype.bytes
             self.setlen(oldlen + new)
-            cbuf = self.charbuf()
+            cbuf = self._charbuf_start()
             for i in range(len(s)):
                 cbuf[oldlen * mytype.bytes + i] = s[i]
+            self._charbuf_stop()
 
         def fromlist(self, w_lst):
             s = self.len
@@ -307,8 +317,11 @@
             else:
                 self.fromsequence(w_iterable)
 
-        def charbuf(self):
-            return  rffi.cast(rffi.CCHARP, self.buffer)
+        def _charbuf_start(self):
+            return rffi.cast(rffi.CCHARP, self.buffer)
+
+        def _charbuf_stop(self):
+            keepalive_until_here(self)
 
         def w_getitem(self, space, idx):
             item = self.buffer[idx]
@@ -527,8 +540,10 @@
         self.fromstring(space.str_w(w_s))
 
     def array_tostring__Array(space, self):
-        cbuf = self.charbuf()
-        return self.space.wrap(rffi.charpsize2str(cbuf, self.len * mytype.bytes))
+        cbuf = self._charbuf_start()
+        s = rffi.charpsize2str(cbuf, self.len * mytype.bytes)
+        self._charbuf_stop()
+        return self.space.wrap(s)
 
     def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n):
         if not isinstance(w_f, W_File):
@@ -583,37 +598,34 @@
             raise OperationError(space.w_ValueError, space.wrap(msg))
 
     # Compare methods
+    @specialize.arg(3)
     def _cmp_impl(space, self, other, space_fn):
-        if isinstance(other, W_ArrayBase):
-            w_lst1 = array_tolist__Array(space, self)
-            w_lst2 = space.call_method(other, 'tolist')
-            return space_fn(w_lst1, w_lst2)
-        else:
-            return space.w_NotImplemented
+        w_lst1 = array_tolist__Array(space, self)
+        w_lst2 = space.call_method(other, 'tolist')
+        return space_fn(w_lst1, w_lst2)
 
-    def eq__Array_ANY(space, self, other):
+    def eq__Array_ArrayBase(space, self, other):
         return _cmp_impl(space, self, other, space.eq)
 
-    def ne__Array_ANY(space, self, other):
+    def ne__Array_ArrayBase(space, self, other):
         return _cmp_impl(space, self, other, space.ne)
 
-    def lt__Array_ANY(space, self, other):
+    def lt__Array_ArrayBase(space, self, other):
         return _cmp_impl(space, self, other, space.lt)
 
-    def le__Array_ANY(space, self, other):
+    def le__Array_ArrayBase(space, self, other):
         return _cmp_impl(space, self, other, space.le)
 
-    def gt__Array_ANY(space, self, other):
+    def gt__Array_ArrayBase(space, self, other):
         return _cmp_impl(space, self, other, space.gt)
 
-    def ge__Array_ANY(space, self, other):
+    def ge__Array_ArrayBase(space, self, other):
         return _cmp_impl(space, self, other, space.ge)
 
     # Misc methods
 
     def buffer__Array(space, self):
-        b = ArrayBuffer(self.charbuf(), self.len * mytype.bytes)
-        return space.wrap(b)
+        return space.wrap(ArrayBuffer(self))
 
     def array_buffer_info__Array(space, self):
         w_ptr = space.wrap(rffi.cast(lltype.Unsigned, self.buffer))
@@ -648,7 +660,7 @@
             raise OperationError(space.w_RuntimeError, space.wrap(msg))
         if self.len == 0:
             return
-        bytes = self.charbuf()
+        bytes = self._charbuf_start()
         tmp = [bytes[0]] * mytype.bytes
         for start in range(0, self.len * mytype.bytes, mytype.bytes):
             stop = start + mytype.bytes - 1
@@ -656,6 +668,7 @@
                 tmp[i] = bytes[start + i]
             for i in range(mytype.bytes):
                 bytes[stop - i] = tmp[i]
+        self._charbuf_stop()
 
     def repr__Array(space, self):
         if self.len == 0:
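
The ArrayBuffer rewrite above stops caching a raw pointer at buffer-creation
time: every access now goes through _charbuf_start()/_charbuf_stop(), so the
buffer always sees the array's current storage and the array itself is kept
alive while the raw pointer is in use. The effect is visible from app level;
the sketch below mirrors the new test_buffer_keepalive test added further down
(PyPy-specific, since buffer(array) is read-only on CPython):

    import array

    a = array.array('c', 'foobarbaz')
    buf = buffer(a)
    a.fromstring('some extra text')       # may reallocate the array's storage
    assert buf[:] == 'foobarbazsome extra text'   # buf follows the array, no stale pointer
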
diff --git a/pypy/module/array/test/test_array.py b/pypy/module/array/test/test_array.py
--- a/pypy/module/array/test/test_array.py
+++ b/pypy/module/array/test/test_array.py
@@ -433,7 +433,25 @@
         a = self.array('h', 'Hi')
         buf = buffer(a)
         assert buf[1] == 'i'
-        #raises(TypeError, buf.__setitem__, 1, 'o')
+
+    def test_buffer_write(self):
+        a = self.array('c', 'hello')
+        buf = buffer(a)
+        print repr(buf)
+        try:
+            buf[3] = 'L'
+        except TypeError:
+            skip("buffer(array) returns a read-only buffer on CPython")
+        assert a.tostring() == 'helLo'
+
+    def test_buffer_keepalive(self):
+        buf = buffer(self.array('c', 'text'))
+        assert buf[2] == 'x'
+        #
+        a = self.array('c', 'foobarbaz')
+        buf = buffer(a)
+        a.fromstring('some extra text')
+        assert buf[:] == 'foobarbazsome extra text'
 
     def test_list_methods(self):
         assert repr(self.array('i')) == "array('i')"
@@ -845,8 +863,11 @@
         cls.maxint = sys.maxint
 
 class AppTestArray(BaseArrayTests):
+    OPTIONS = {}
+
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'))
+        cls.space = gettestobjspace(usemodules=('array', 'struct', '_rawffi'),
+                                    **cls.OPTIONS)
         cls.w_array = cls.space.appexec([], """():
             import array
             return array.array
@@ -868,3 +889,7 @@
         a = self.array('b', range(4))
         a[::-1] = a
         assert a == self.array('b', [3, 2, 1, 0])
+
+
+class AppTestArrayBuiltinShortcut(AppTestArray):
+    OPTIONS = {'objspace.std.builtinshortcut': True}
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -352,6 +352,9 @@
     'PyObject_AsReadBuffer', 'PyObject_AsWriteBuffer', 'PyObject_CheckReadBuffer',
 
     'PyOS_getsig', 'PyOS_setsig',
+    'PyThread_create_key', 'PyThread_delete_key', 'PyThread_set_key_value',
+    'PyThread_get_key_value', 'PyThread_delete_key_value',
+    'PyThread_ReInitTLS',
 
     'PyStructSequence_InitType', 'PyStructSequence_New',
 ]
@@ -617,6 +620,10 @@
         lambda space: init_pycobject(),
         lambda space: init_capsule(),
     ])
+    from pypy.module.posix.interp_posix import add_fork_hook
+    reinit_tls = rffi.llexternal('PyThread_ReInitTLS', [], lltype.Void,
+                                 compilation_info=eci)    
+    add_fork_hook('child', reinit_tls)
 
 def init_function(func):
     INIT_FUNCTIONS.append(func)
@@ -817,6 +824,8 @@
     pypy_decls.append("#ifdef __cplusplus")
     pypy_decls.append("extern \"C\" {")
     pypy_decls.append("#endif\n")
+    pypy_decls.append('#define Signed   long           /* xxx temporary fix */\n')
+    pypy_decls.append('#define Unsigned unsigned long  /* xxx temporary fix */\n')
 
     for decl in FORWARD_DECLS:
         pypy_decls.append("%s;" % (decl,))
@@ -848,6 +857,8 @@
             typ = 'PyObject*'
         pypy_decls.append('PyAPI_DATA(%s) %s;' % (typ, name))
 
+    pypy_decls.append('#undef Signed    /* xxx temporary fix */\n')
+    pypy_decls.append('#undef Unsigned  /* xxx temporary fix */\n')
     pypy_decls.append("#ifdef __cplusplus")
     pypy_decls.append("}")
     pypy_decls.append("#endif")
@@ -926,6 +937,7 @@
                                source_dir / "structseq.c",
                                source_dir / "capsule.c",
                                source_dir / "pysignals.c",
+                               source_dir / "thread.c",
                                ],
         separate_module_sources=separate_module_sources,
         export_symbols=export_symbols_eci,
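
The new init step above registers a fork hook so that the C-level thread-local
storage is reset in the child process. A rough sketch of the registration
pattern (illustrative Python with a hypothetical simplified registry; the real
add_fork_hook lives in pypy.module.posix.interp_posix):

    _fork_hooks = {'before': [], 'parent': [], 'child': []}   # hypothetical registry

    def add_fork_hook(where, hook):
        _fork_hooks[where].append(hook)

    def run_fork_hooks(where):
        # called around os.fork(); in the child this would invoke reinit_tls,
        # i.e. the C function PyThread_ReInitTLS()
        for hook in _fork_hooks[where]:
            hook()
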
diff --git a/pypy/module/cpyext/bufferobject.py b/pypy/module/cpyext/bufferobject.py
--- a/pypy/module/cpyext/bufferobject.py
+++ b/pypy/module/cpyext/bufferobject.py
@@ -2,8 +2,10 @@
 from pypy.module.cpyext.api import (
     cpython_api, Py_ssize_t, cpython_struct, bootstrap_function,
     PyObjectFields, PyObject)
-from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef
+from pypy.module.cpyext.pyobject import make_typedescr, Py_DecRef, make_ref
 from pypy.interpreter.buffer import Buffer, StringBuffer, SubBuffer
+from pypy.interpreter.error import OperationError
+from pypy.module.array.interp_array import ArrayBuffer
 
 
 PyBufferObjectStruct = lltype.ForwardReference()
@@ -41,26 +43,38 @@
         py_buf.c_b_offset = w_obj.offset
         w_obj = w_obj.buffer
 
+    # If w_obj already allocated a fixed buffer, use it, and keep a
+    # reference to w_obj.
+    # Otherwise, b_base stays NULL, and we own the b_ptr.
+
     if isinstance(w_obj, StringBuffer):
-        py_buf.c_b_base = rffi.cast(PyObject, 0) # space.wrap(w_obj.value)
-        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.as_str()))
+        py_buf.c_b_base = lltype.nullptr(PyObject.TO)
+        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, rffi.str2charp(w_obj.value))
+        py_buf.c_b_size = w_obj.getlength()
+    elif isinstance(w_obj, ArrayBuffer):
+        w_base = w_obj.array
+        py_buf.c_b_base = make_ref(space, w_base)
+        py_buf.c_b_ptr = rffi.cast(rffi.VOIDP, w_obj.array._charbuf_start())
         py_buf.c_b_size = w_obj.getlength()
     else:
-        raise Exception("Fail fail fail fail fail")
+        raise OperationError(space.w_NotImplementedError, space.wrap(
+            "buffer flavor not supported"))
 
 
 def buffer_realize(space, py_obj):
     """
     Creates the buffer in the PyPy interpreter from a cpyext representation.
     """
-    raise Exception("realize fail fail fail")
-
+    raise OperationError(space.w_NotImplementedError, space.wrap(
+        "Don't know how to realize a buffer"))
 
 
 @cpython_api([PyObject], lltype.Void, external=False)
 def buffer_dealloc(space, py_obj):
     py_buf = rffi.cast(PyBufferObject, py_obj)
-    Py_DecRef(space, py_buf.c_b_base)
-    rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr))
+    if py_buf.c_b_base:
+        Py_DecRef(space, py_buf.c_b_base)
+    else:
+        rffi.free_charp(rffi.cast(rffi.CCHARP, py_buf.c_b_ptr))
     from pypy.module.cpyext.object import PyObject_dealloc
     PyObject_dealloc(space, py_obj)
diff --git a/pypy/module/cpyext/include/patchlevel.h b/pypy/module/cpyext/include/patchlevel.h
--- a/pypy/module/cpyext/include/patchlevel.h
+++ b/pypy/module/cpyext/include/patchlevel.h
@@ -29,7 +29,7 @@
 #define PY_VERSION		"2.7.2"
 
 /* PyPy version as a string */
-#define PYPY_VERSION "1.8.1"
+#define PYPY_VERSION "1.9.1"
 
 /* Subversion Revision number of this file (not of the repository).
  * Empty since Mercurial migration. */
diff --git a/pypy/module/cpyext/include/pythread.h b/pypy/module/cpyext/include/pythread.h
--- a/pypy/module/cpyext/include/pythread.h
+++ b/pypy/module/cpyext/include/pythread.h
@@ -3,8 +3,26 @@
 
 #define WITH_THREAD
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 typedef void *PyThread_type_lock;
 #define WAIT_LOCK	1
 #define NOWAIT_LOCK	0
 
+/* Thread Local Storage (TLS) API */
+PyAPI_FUNC(int) PyThread_create_key(void);
+PyAPI_FUNC(void) PyThread_delete_key(int);
+PyAPI_FUNC(int) PyThread_set_key_value(int, void *);
+PyAPI_FUNC(void *) PyThread_get_key_value(int);
+PyAPI_FUNC(void) PyThread_delete_key_value(int key);
+
+/* Cleanup after a fork */
+PyAPI_FUNC(void) PyThread_ReInitTLS(void);
+
+#ifdef __cplusplus
+}
 #endif
+
+#endif
diff --git a/pypy/module/cpyext/pyerrors.py b/pypy/module/cpyext/pyerrors.py
--- a/pypy/module/cpyext/pyerrors.py
+++ b/pypy/module/cpyext/pyerrors.py
@@ -2,6 +2,7 @@
 
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.interpreter.error import OperationError
+from pypy.interpreter import pytraceback
 from pypy.module.cpyext.api import cpython_api, CANNOT_FAIL, CONST_STRING
 from pypy.module.exceptions.interp_exceptions import W_RuntimeWarning
 from pypy.module.cpyext.pyobject import (
@@ -315,3 +316,65 @@
     It may be called without holding the interpreter lock."""
     space.check_signal_action.set_interrupt()
 
+ at cpython_api([PyObjectP, PyObjectP, PyObjectP], lltype.Void)
+def PyErr_GetExcInfo(space, ptype, pvalue, ptraceback):
+    """---Cython extension---
+
+    Retrieve the exception info, as known from ``sys.exc_info()``.  This
+    refers to an exception that was already caught, not to an exception
+    that was freshly raised.  Returns new references for the three
+    objects, any of which may be *NULL*.  Does not modify the exception
+    info state.
+
+    .. note::
+
+       This function is not normally used by code that wants to handle
+       exceptions.  Rather, it can be used when code needs to save and
+       restore the exception state temporarily.  Use
+       :c:func:`PyErr_SetExcInfo` to restore or clear the exception
+       state.
+    """
+    ec = space.getexecutioncontext()
+    operror = ec.sys_exc_info()
+    if operror:
+        ptype[0] = make_ref(space, operror.w_type)
+        pvalue[0] = make_ref(space, operror.get_w_value(space))
+        ptraceback[0] = make_ref(space, space.wrap(operror.get_traceback()))
+    else:
+        ptype[0] = lltype.nullptr(PyObject.TO)
+        pvalue[0] = lltype.nullptr(PyObject.TO)
+        ptraceback[0] = lltype.nullptr(PyObject.TO)
+
+ at cpython_api([PyObject, PyObject, PyObject], lltype.Void)
+def PyErr_SetExcInfo(space, w_type, w_value, w_traceback):
+    """---Cython extension---
+
+    Set the exception info, as known from ``sys.exc_info()``.  This refers
+    to an exception that was already caught, not to an exception that was
+    freshly raised.  This function steals the references of the arguments.
+    To clear the exception state, pass *NULL* for all three arguments.
+    For general rules about the three arguments, see :c:func:`PyErr_Restore`.
+ 
+    .. note::
+ 
+       This function is not normally used by code that wants to handle
+       exceptions.  Rather, it can be used when code needs to save and
+       restore the exception state temporarily.  Use
+       :c:func:`PyErr_GetExcInfo` to read the exception state.
+    """
+    if w_value is None or space.is_w(w_value, space.w_None):
+        operror = None
+    else:
+        tb = None
+        if w_traceback is not None:
+            try:
+                tb = pytraceback.check_traceback(space, w_traceback, '?')
+            except OperationError:    # catch and ignore bogus objects
+                pass
+        operror = OperationError(w_type, w_value, tb)
+    #
+    ec = space.getexecutioncontext()
+    ec.set_sys_exc_info(operror)
+    Py_DecRef(space, w_type)
+    Py_DecRef(space, w_value)
+    Py_DecRef(space, w_traceback)
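
For orientation, the state these two functions read and write is the
"currently handled" exception, i.e. what sys.exc_info() reports at app level.
A small illustrative sketch (not from the patch; only the getter has a pure
Python counterpart, which is why Cython needs the C-level setter):

    import sys

    try:
        raise ValueError("caught and being handled")
    except ValueError:
        # PyErr_GetExcInfo returns this triple: the exception being *handled*,
        # not a freshly raised one (PyErr_Fetch covers that case).
        saved_type, saved_value, saved_tb = sys.exc_info()
        # ... run code that may itself raise and catch exceptions ...
        # PyErr_SetExcInfo(saved_type, saved_value, saved_tb) would restore the
        # saved triple afterwards; passing NULL for all three clears the state.
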
diff --git a/pypy/module/cpyext/pystate.py b/pypy/module/cpyext/pystate.py
--- a/pypy/module/cpyext/pystate.py
+++ b/pypy/module/cpyext/pystate.py
@@ -10,7 +10,7 @@
     [('next', PyInterpreterState)],
     PyInterpreterStateStruct)
 PyThreadState = lltype.Ptr(cpython_struct(
-    "PyThreadState", 
+    "PyThreadState",
     [('interp', PyInterpreterState),
      ('dict', PyObject),
      ]))
@@ -19,12 +19,15 @@
 def PyEval_SaveThread(space):
     """Release the global interpreter lock (if it has been created and thread
     support is enabled) and reset the thread state to NULL, returning the
-    previous thread state (which is not NULL except in PyPy).  If the lock has been created,
+    previous thread state.  If the lock has been created,
     the current thread must have acquired it.  (This function is available even
     when thread support is disabled at compile time.)"""
+    state = space.fromcache(InterpreterState)
     if rffi.aroundstate.before:
         rffi.aroundstate.before()
-    return lltype.nullptr(PyThreadState.TO)
+    tstate = state.swap_thread_state(
+        space, lltype.nullptr(PyThreadState.TO))
+    return tstate
 
 @cpython_api([PyThreadState], lltype.Void)
 def PyEval_RestoreThread(space, tstate):
@@ -35,6 +38,8 @@
     when thread support is disabled at compile time.)"""
     if rffi.aroundstate.after:
         rffi.aroundstate.after()
+    state = space.fromcache(InterpreterState)
+    state.swap_thread_state(space, tstate)
 
 @cpython_api([], lltype.Void)
 def PyEval_InitThreads(space):
@@ -67,28 +72,91 @@
                                   dealloc=ThreadState_dealloc)
 
 from pypy.interpreter.executioncontext import ExecutionContext
+
+# Keep track of the ThreadStateCapsule for a particular execution context.  The
+# default is for new execution contexts not to have one; it is allocated on the
+# first cpyext-based request for it.
 ExecutionContext.cpyext_threadstate = ThreadStateCapsule(None)
 
+# Also keep track of whether it has been initialized yet or not (None is a valid
+# PyThreadState for an execution context to have, when the GIL has been
+# released, so a check against that can't be used to determine the need for
+# initialization).
+ExecutionContext.cpyext_initialized_threadstate = False
+
+def cleanup_cpyext_state(self):
+    try:
+        del self.cpyext_threadstate
+    except AttributeError:
+        pass
+    self.cpyext_initialized_threadstate = False
+ExecutionContext.cleanup_cpyext_state = cleanup_cpyext_state
+
 class InterpreterState(object):
     def __init__(self, space):
         self.interpreter_state = lltype.malloc(
             PyInterpreterState.TO, flavor='raw', zero=True, immortal=True)
 
     def new_thread_state(self, space):
+        """
+        Create a new ThreadStateCapsule to hold the PyThreadState for a
+        particular execution context.
+
+        :param space: A space.
+
+        :returns: A new ThreadStateCapsule holding a newly allocated
+            PyThreadState and referring to this interpreter state.
+        """
         capsule = ThreadStateCapsule(space)
         ts = capsule.memory
         ts.c_interp = self.interpreter_state
         ts.c_dict = make_ref(space, space.newdict())
         return capsule
 
+
     def get_thread_state(self, space):
+        """
+        Get the current PyThreadState for the current execution context.
+
+        :param space: A space.
+
+        :returns: The current PyThreadState for the current execution context,
+            or None if it does not have one.
+        """
         ec = space.getexecutioncontext()
         return self._get_thread_state(space, ec).memory
 
+
+    def swap_thread_state(self, space, tstate):
+        """
+        Replace the current thread state of the current execution context with a
+        new thread state.
+
+        :param space: The space.
+
+        :param tstate: The new PyThreadState for the current execution context.
+
+        :returns: The old thread state for the current execution context, either
+            None or a PyThreadState.
+        """
+        ec = space.getexecutioncontext()
+        capsule = self._get_thread_state(space, ec)
+        old_tstate = capsule.memory
+        capsule.memory = tstate
+        return old_tstate
+
     def _get_thread_state(self, space, ec):
-        if ec.cpyext_threadstate.memory == lltype.nullptr(PyThreadState.TO):
+        """
+        Get the ThreadStateCapsule for the given execution context, possibly
+        creating a new one if it does not already have one.
+
+        :param space: The space.
+        :param ec: The ExecutionContext of which to get the thread state.
+        :returns: The ThreadStateCapsule for the given execution context.
+        """
+        if not ec.cpyext_initialized_threadstate:
             ec.cpyext_threadstate = self.new_thread_state(space)
-
+            ec.cpyext_initialized_threadstate = True
         return ec.cpyext_threadstate
 
 @cpython_api([], PyThreadState, error=CANNOT_FAIL)
@@ -105,13 +173,8 @@
 def PyThreadState_Swap(space, tstate):
     """Swap the current thread state with the thread state given by the argument
     tstate, which may be NULL.  The global interpreter lock must be held."""
-    # All cpyext calls release and acquire the GIL, so this function has no
-    # side-effects
-    if tstate:
-        return lltype.nullptr(PyThreadState.TO)
-    else:
-        state = space.fromcache(InterpreterState)
-        return state.get_thread_state(space)
+    state = space.fromcache(InterpreterState)
+    return state.swap_thread_state(space, tstate)
 
 @cpython_api([PyThreadState], lltype.Void)
 def PyEval_AcquireThread(space, tstate):
diff --git a/pypy/module/cpyext/slotdefs.py b/pypy/module/cpyext/slotdefs.py
--- a/pypy/module/cpyext/slotdefs.py
+++ b/pypy/module/cpyext/slotdefs.py
@@ -167,14 +167,16 @@
     if rffi.cast(lltype.Signed, res) == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
 
+# Warning, confusing function name (like CPython).  Used only for sq_contains.
 def wrap_objobjproc(space, w_self, w_args, func):
     func_target = rffi.cast(objobjproc, func)
     check_num_args(space, w_args, 1)
     w_value, = space.fixedview(w_args)
     res = generic_cpy_call(space, func_target, w_self, w_value)
-    if rffi.cast(lltype.Signed, res) == -1:
+    res = rffi.cast(lltype.Signed, res)
+    if res == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
-    return space.wrap(res)
+    return space.wrap(bool(res))
 
 def wrap_objobjargproc(space, w_self, w_args, func):
     func_target = rffi.cast(objobjargproc, func)
@@ -183,7 +185,7 @@
     res = generic_cpy_call(space, func_target, w_self, w_key, w_value)
     if rffi.cast(lltype.Signed, res) == -1:
         space.fromcache(State).check_and_raise_exception(always=True)
-    return space.wrap(res)
+    return space.w_None
 
 def wrap_delitem(space, w_self, w_args, func):
     func_target = rffi.cast(objobjargproc, func)
diff --git a/pypy/module/cpyext/src/getargs.c b/pypy/module/cpyext/src/getargs.c
--- a/pypy/module/cpyext/src/getargs.c
+++ b/pypy/module/cpyext/src/getargs.c
@@ -23,16 +23,33 @@
 #define FLAG_COMPAT 1
 #define FLAG_SIZE_T 2
 
+typedef int (*destr_t)(PyObject *, void *);
+
+
+/* Keep track of "objects" that have been allocated or initialized and
+   which will need to be deallocated or cleaned up somehow if overall
+   parsing fails.
+*/
+typedef struct {
+  void *item;
+  destr_t destructor;
+} freelistentry_t;
+
+typedef struct {
+  int first_available;
+  freelistentry_t *entries;
+} freelist_t;
+
 
 /* Forward */
 static int vgetargs1(PyObject *, const char *, va_list *, int);
 static void seterror(int, const char *, int *, const char *, const char *);
 static char *convertitem(PyObject *, const char **, va_list *, int, int *, 
-                         char *, size_t, PyObject **);
+                         char *, size_t, freelist_t *);
 static char *converttuple(PyObject *, const char **, va_list *, int,
-			  int *, char *, size_t, int, PyObject **);
+			  int *, char *, size_t, int, freelist_t *);
 static char *convertsimple(PyObject *, const char **, va_list *, int, char *,
-			   size_t, PyObject **);
+			   size_t, freelist_t *);
 static Py_ssize_t convertbuffer(PyObject *, void **p, char **);
 static int getbuffer(PyObject *, Py_buffer *, char**);
 
@@ -129,57 +146,56 @@
 
 /* Handle cleanup of allocated memory in case of exception */
 
-static void
-cleanup_ptr(void *ptr)
+static int
+cleanup_ptr(PyObject *self, void *ptr)
 {
-	PyMem_FREE(ptr);
-}
-
-static void
-cleanup_buffer(void *ptr)
-{
-	PyBuffer_Release((Py_buffer *) ptr);
+    if (ptr) {
+        PyMem_FREE(ptr);
+    }
+    return 0;
 }
 
 static int
-addcleanup(void *ptr, PyObject **freelist, void (*destr)(void *))
+cleanup_buffer(PyObject *self, void *ptr)
 {
-	PyObject *cobj;
-	if (!*freelist) {
-		*freelist = PyList_New(0);
-		if (!*freelist) {
-			destr(ptr);
-			return -1;
-		}
-	}
-	cobj = PyCObject_FromVoidPtr(ptr, destr);
-	if (!cobj) {
-		destr(ptr);
-		return -1;
-	}
-	if (PyList_Append(*freelist, cobj)) {
-		Py_DECREF(cobj);
-		return -1;
-	}
-        Py_DECREF(cobj);
-	return 0;
+    Py_buffer *buf = (Py_buffer *)ptr;
+    if (buf) {
+        PyBuffer_Release(buf);
+    }
+    return 0;
 }
 
 static int
-cleanreturn(int retval, PyObject *freelist)
+addcleanup(void *ptr, freelist_t *freelist, destr_t destructor)
 {
-	if (freelist && retval != 0) {
-		/* We were successful, reset the destructors so that they
-		   don't get called. */
-		Py_ssize_t len = PyList_GET_SIZE(freelist), i;
-		for (i = 0; i < len; i++)
-			((PyCObject *) PyList_GET_ITEM(freelist, i))
-				->destructor = NULL;
-	}
-	Py_XDECREF(freelist);
-	return retval;
+    int index;
+
+    index = freelist->first_available;
+    freelist->first_available += 1;
+
+    freelist->entries[index].item = ptr;
+    freelist->entries[index].destructor = destructor;
+
+    return 0;
 }
 
+static int
+cleanreturn(int retval, freelist_t *freelist)
+{
+    int index;
+
+    if (retval == 0) {
+      /* A failure occurred, therefore execute all of the cleanup
+	 functions.
+      */
+      for (index = 0; index < freelist->first_available; ++index) {
+          freelist->entries[index].destructor(NULL,
+                                              freelist->entries[index].item);
+      }
+    }
+    PyMem_Free(freelist->entries);
+    return retval;
+}
 
 static int
 vgetargs1(PyObject *args, const char *format, va_list *p_va, int flags)
@@ -195,7 +211,7 @@
 	const char *formatsave = format;
 	Py_ssize_t i, len;
 	char *msg;
-	PyObject *freelist = NULL;
+	freelist_t freelist = {0, NULL};
 	int compat = flags & FLAG_COMPAT;
 
 	assert(compat || (args != (PyObject*)NULL));
@@ -251,16 +267,18 @@
 	
 	format = formatsave;
 	
+	freelist.entries = PyMem_New(freelistentry_t, max);
+
 	if (compat) {
 		if (max == 0) {
 			if (args == NULL)
-				return 1;
+			    return cleanreturn(1, &freelist);
 			PyOS_snprintf(msgbuf, sizeof(msgbuf),
 				      "%.200s%s takes no arguments",
 				      fname==NULL ? "function" : fname,
 				      fname==NULL ? "" : "()");
 			PyErr_SetString(PyExc_TypeError, msgbuf);
-			return 0;
+			return cleanreturn(0, &freelist);
 		}
 		else if (min == 1 && max == 1) {
 			if (args == NULL) {
@@ -269,26 +287,26 @@
 					      fname==NULL ? "function" : fname,
 					      fname==NULL ? "" : "()");
 				PyErr_SetString(PyExc_TypeError, msgbuf);
-				return 0;
+				return cleanreturn(0, &freelist);
 			}
 			msg = convertitem(args, &format, p_va, flags, levels, 
 					  msgbuf, sizeof(msgbuf), &freelist);
 			if (msg == NULL)
-				return cleanreturn(1, freelist);
+				return cleanreturn(1, &freelist);
 			seterror(levels[0], msg, levels+1, fname, message);
-			return cleanreturn(0, freelist);
+			return cleanreturn(0, &freelist);
 		}
 		else {
 			PyErr_SetString(PyExc_SystemError,
 			    "old style getargs format uses new features");
-			return 0;
+			return cleanreturn(0, &freelist);
 		}
 	}
 	
 	if (!PyTuple_Check(args)) {
 		PyErr_SetString(PyExc_SystemError,
 		    "new style getargs format but argument is not a tuple");
-		return 0;
+		return cleanreturn(0, &freelist);
 	}
 	
 	len = PyTuple_GET_SIZE(args);
@@ -308,7 +326,7 @@
 			message = msgbuf;
 		}
 		PyErr_SetString(PyExc_TypeError, message);
-		return 0;
+		return cleanreturn(0, &freelist);
 	}
 	
 	for (i = 0; i < len; i++) {
@@ -319,7 +337,7 @@
 				  sizeof(msgbuf), &freelist);
 		if (msg) {
 			seterror(i+1, msg, levels, fname, message);
-			return cleanreturn(0, freelist);
+			return cleanreturn(0, &freelist);
 		}
 	}
 
@@ -328,10 +346,10 @@
 	    *format != '|' && *format != ':' && *format != ';') {
 		PyErr_Format(PyExc_SystemError,
 			     "bad format string: %.200s", formatsave);
-		return cleanreturn(0, freelist);
+		return cleanreturn(0, &freelist);
 	}
 	
-	return cleanreturn(1, freelist);
+	return cleanreturn(1, &freelist);
 }
 
 
@@ -395,7 +413,7 @@
 static char *
 converttuple(PyObject *arg, const char **p_format, va_list *p_va, int flags,
              int *levels, char *msgbuf, size_t bufsize, int toplevel, 
-             PyObject **freelist)
+             freelist_t *freelist)
 {
 	int level = 0;
 	int n = 0;
@@ -472,7 +490,7 @@
 
 static char *
 convertitem(PyObject *arg, const char **p_format, va_list *p_va, int flags,
-            int *levels, char *msgbuf, size_t bufsize, PyObject **freelist)
+            int *levels, char *msgbuf, size_t bufsize, freelist_t *freelist)
 {
 	char *msg;
 	const char *format = *p_format;
@@ -539,7 +557,7 @@
 
 static char *
 convertsimple(PyObject *arg, const char **p_format, va_list *p_va, int flags,
-              char *msgbuf, size_t bufsize, PyObject **freelist)
+              char *msgbuf, size_t bufsize, freelist_t *freelist)
 {
 	/* For # codes */
 #define FETCH_SIZE	int *q=NULL;Py_ssize_t *q2=NULL;\
@@ -1501,7 +1519,9 @@
 	const char *fname, *msg, *custom_msg, *keyword;
 	int min = INT_MAX;
 	int i, len, nargs, nkeywords;
-	PyObject *freelist = NULL, *current_arg;
+	PyObject *current_arg;
+	freelist_t freelist = {0, NULL};
+
 
 	assert(args != NULL && PyTuple_Check(args));
 	assert(keywords == NULL || PyDict_Check(keywords));
@@ -1525,6 +1545,8 @@
 	for (len=0; kwlist[len]; len++)
 		continue;
 
+	freelist.entries = PyMem_New(freelistentry_t, len);
+
 	nargs = PyTuple_GET_SIZE(args);
 	nkeywords = (keywords == NULL) ? 0 : PyDict_Size(keywords);
 	if (nargs + nkeywords > len) {
@@ -1535,7 +1557,7 @@
 			     len,
 			     (len == 1) ? "" : "s",
 			     nargs + nkeywords);
-		return 0;
+		return cleanreturn(0, &freelist);
 	}
 
 	/* convert tuple args and keyword args in same loop, using kwlist to drive process */
@@ -1549,7 +1571,7 @@
 			PyErr_Format(PyExc_RuntimeError,
 				     "More keyword list entries (%d) than "
 				     "format specifiers (%d)", len, i);
-			return cleanreturn(0, freelist);
+			return cleanreturn(0, &freelist);
 		}
 		current_arg = NULL;
 		if (nkeywords) {
@@ -1563,11 +1585,11 @@
 					     "Argument given by name ('%s') "
 					     "and position (%d)",
 					     keyword, i+1);
-				return cleanreturn(0, freelist);
+				return cleanreturn(0, &freelist);
 			}
 		}
 		else if (nkeywords && PyErr_Occurred())
-			return cleanreturn(0, freelist);
+			return cleanreturn(0, &freelist);
 		else if (i < nargs)
 			current_arg = PyTuple_GET_ITEM(args, i);
 			
@@ -1576,7 +1598,7 @@
 				levels, msgbuf, sizeof(msgbuf), &freelist);
 			if (msg) {
 				seterror(i+1, msg, levels, fname, custom_msg);
-				return cleanreturn(0, freelist);
+				return cleanreturn(0, &freelist);
 			}
 			continue;
 		}
@@ -1585,14 +1607,14 @@
 			PyErr_Format(PyExc_TypeError, "Required argument "
 				     "'%s' (pos %d) not found",
 				     keyword, i+1);
-			return cleanreturn(0, freelist);
+			return cleanreturn(0, &freelist);
 		}
 		/* current code reports success when all required args
 		 * fulfilled and no keyword args left, with no further
 		 * validation. XXX Maybe skip this in debug build ?
 		 */
 		if (!nkeywords)
-			return cleanreturn(1, freelist);
+			return cleanreturn(1, &freelist);
 
 		/* We are into optional args, skip thru to any remaining
 		 * keyword args */
@@ -1600,7 +1622,7 @@
 		if (msg) {
 			PyErr_Format(PyExc_RuntimeError, "%s: '%s'", msg,
 				     format);
-			return cleanreturn(0, freelist);
+			return cleanreturn(0, &freelist);
 		}
 	}
 
@@ -1608,7 +1630,7 @@
 		PyErr_Format(PyExc_RuntimeError,
 			"more argument specifiers than keyword list entries "
 			"(remaining format:'%s')", format);
-		return cleanreturn(0, freelist);
+		return cleanreturn(0, &freelist);
 	}
 
 	/* make sure there are no extraneous keyword arguments */
@@ -1621,7 +1643,7 @@
 			if (!PyString_Check(key)) {
                             PyErr_SetString(PyExc_TypeError, 
 					        "keywords must be strings");
-				return cleanreturn(0, freelist);
+				return cleanreturn(0, &freelist);
 			}
 			ks = PyString_AsString(key);
 			for (i = 0; i < len; i++) {
@@ -1635,12 +1657,12 @@
 					     "'%s' is an invalid keyword "
 					     "argument for this function",
 					     ks);
-				return cleanreturn(0, freelist);
+				return cleanreturn(0, &freelist);
 			}
 		}
 	}
 
-	return cleanreturn(1, freelist);
+	return cleanreturn(1, &freelist);
 }
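
The rework above replaces the old freelist (a PyList of PyCObjects whose
destructors were defused on success) with a plain array of (item, destructor)
entries that is sized up front and only walked when parsing fails. A compact
Python model of that control flow (illustrative only, not part of the patch;
the real destructors also receive a PyObject* self argument):

    class Freelist(object):
        def __init__(self):
            self.entries = []                   # (item, destructor) pairs

        def addcleanup(self, item, destructor):
            self.entries.append((item, destructor))

        def cleanreturn(self, retval):
            if retval == 0:                     # parsing failed: undo every allocation
                for item, destructor in self.entries:
                    destructor(item)
            return retval                       # on success the items are kept for the caller
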
 
 
diff --git a/pypy/module/cpyext/src/thread.c b/pypy/module/cpyext/src/thread.c
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/src/thread.c
@@ -0,0 +1,313 @@
+#include <Python.h>
+#include "pythread.h"
+
+/* ------------------------------------------------------------------------
+Per-thread data ("key") support.
+
+Use PyThread_create_key() to create a new key.  This is typically shared
+across threads.
+
+Use PyThread_set_key_value(thekey, value) to associate void* value with
+thekey in the current thread.  Each thread has a distinct mapping of thekey
+to a void* value.  Caution:  if the current thread already has a mapping
+for thekey, value is ignored.
+
+Use PyThread_get_key_value(thekey) to retrieve the void* value associated
+with thekey in the current thread.  This returns NULL if no value is
+associated with thekey in the current thread.
+
+Use PyThread_delete_key_value(thekey) to forget the current thread's associated
+value for thekey.  PyThread_delete_key(thekey) forgets the values associated
+with thekey across *all* threads.
+
+While some of these functions have error-return values, none set any
+Python exception.
+
+None of the functions does memory management on behalf of the void* values.
+You need to allocate and deallocate them yourself.  If the void* values
+happen to be PyObject*, these functions don't do refcount operations on
+them either.
+
+The GIL does not need to be held when calling these functions; they supply
+their own locking.  This isn't true of PyThread_create_key(), though (see
+next paragraph).
+
+There's a hidden assumption that PyThread_create_key() will be called before
+any of the other functions are called.  There's also a hidden assumption
+that calls to PyThread_create_key() are serialized externally.
+------------------------------------------------------------------------ */
+
+#ifdef MS_WINDOWS
+#include <windows.h>
+
+/* use native Windows TLS functions */
+#define Py_HAVE_NATIVE_TLS
+
+int
+PyThread_create_key(void)
+{
+    return (int) TlsAlloc();
+}
+
+void
+PyThread_delete_key(int key)
+{
+    TlsFree(key);
+}
+
+/* We must be careful to emulate the strange semantics implemented in thread.c,
+ * where the value is only set if it hasn't been set before.
+ */
+int
+PyThread_set_key_value(int key, void *value)
+{
+    BOOL ok;
+    void *oldvalue;
+
+    assert(value != NULL);
+    oldvalue = TlsGetValue(key);
+    if (oldvalue != NULL)
+        /* ignore value if already set */
+        return 0;
+    ok = TlsSetValue(key, value);
+    if (!ok)
+        return -1;
+    return 0;
+}
+
+void *
+PyThread_get_key_value(int key)
+{
+    /* because TLS is used in the Py_END_ALLOW_THREADS macro,
+     * it is necessary to preserve the windows error state, because
+     * it is assumed to be preserved across the call to the macro.
+     * Ideally, the macro should be fixed, but it is simpler to
+     * do it here.
+     */
+    DWORD error = GetLastError();
+    void *result = TlsGetValue(key);
+    SetLastError(error);
+    return result;
+}
+
+void
+PyThread_delete_key_value(int key)
+{
+    /* NULL is used as "key missing", and it is also the default
+     * given by TlsGetValue() if nothing has been set yet.
+     */
+    TlsSetValue(key, NULL);
+}
+
+/* reinitialization of TLS is not necessary after fork when using
+ * the native TLS functions.  And forking isn't supported on Windows either.
+ */
+void
+PyThread_ReInitTLS(void)
+{}
+
+#else  /* MS_WINDOWS */
+
+/* A singly-linked list of struct key objects remembers all the key->value
+ * associations.  File static keyhead heads the list.  keymutex is used
+ * to enforce exclusion internally.
+ */
+struct key {
+    /* Next record in the list, or NULL if this is the last record. */
+    struct key *next;
+
+    /* The thread id, according to PyThread_get_thread_ident(). */
+    long id;
+
+    /* The key and its associated value. */
+    int key;
+    void *value;
+};
+
+static struct key *keyhead = NULL;
+static PyThread_type_lock keymutex = NULL;
+static int nkeys = 0;  /* PyThread_create_key() hands out nkeys+1 next */
+
+/* Internal helper.
+ * If the current thread has a mapping for key, the appropriate struct key*
+ * is returned.  NB:  value is ignored in this case!
+ * If there is no mapping for key in the current thread, then:
+ *     If value is NULL, NULL is returned.
+ *     Else a mapping of key to value is created for the current thread,
+ *     and a pointer to a new struct key* is returned; except that if
+ *     malloc() can't find room for a new struct key*, NULL is returned.
+ * So when value==NULL, this acts like a pure lookup routine, and when
+ * value!=NULL, this acts like dict.setdefault(), returning an existing
+ * mapping if one exists, else creating a new mapping.
+ *
+ * Caution:  this used to be too clever, trying to hold keymutex only
+ * around the "p->next = keyhead; keyhead = p" pair.  That allowed
+ * another thread to mutate the list, via key deletion, concurrent with
+ * find_key() crawling over the list.  Hilarity ensued.  For example, when
+ * the for-loop here does "p = p->next", p could end up pointing at a
+ * record that PyThread_delete_key_value() was concurrently free()'ing.
+ * That could lead to anything, from failing to find a key that exists, to
+ * segfaults.  Now we lock the whole routine.
+ */
+static struct key *
+find_key(int key, void *value)
+{
+    struct key *p, *prev_p;
+    long id = PyThread_get_thread_ident();
+
+    if (!keymutex)
+        return NULL;
+    PyThread_acquire_lock(keymutex, 1);
+    prev_p = NULL;
+    for (p = keyhead; p != NULL; p = p->next) {
+        if (p->id == id && p->key == key)
+            goto Done;
+        /* Sanity check.  These states should never happen but if
+         * they do we must abort.  Otherwise we'll end up spinning
+         * in a tight loop with the lock held.  A similar check is done
+         * in pystate.c tstate_delete_common().  */
+        if (p == prev_p)
+            Py_FatalError("tls find_key: small circular list(!)");
+        prev_p = p;
+        if (p->next == keyhead)
+            Py_FatalError("tls find_key: circular list(!)");
+    }
+    if (value == NULL) {
+        assert(p == NULL);
+        goto Done;
+    }
+    p = (struct key *)malloc(sizeof(struct key));
+    if (p != NULL) {
+        p->id = id;
+        p->key = key;
+        p->value = value;
+        p->next = keyhead;
+        keyhead = p;
+    }
+ Done:
+    PyThread_release_lock(keymutex);
+    return p;
+}
+
+/* Return a new key.  This must be called before any other functions in
+ * this family, and callers must arrange to serialize calls to this
+ * function.  No violations are detected.
+ */
+int
+PyThread_create_key(void)
+{
+    /* All parts of this function are wrong if it's called by multiple
+     * threads simultaneously.
+     */
+    if (keymutex == NULL)
+        keymutex = PyThread_allocate_lock();
+    return ++nkeys;
+}
+
+/* Forget the associations for key across *all* threads. */
+void
+PyThread_delete_key(int key)
+{
+    struct key *p, **q;
+
+    PyThread_acquire_lock(keymutex, 1);
+    q = &keyhead;
+    while ((p = *q) != NULL) {
+        if (p->key == key) {
+            *q = p->next;
+            free((void *)p);
+            /* NB This does *not* free p->value! */
+        }
+        else
+            q = &p->next;
+    }
+    PyThread_release_lock(keymutex);
+}
+
+/* Confusing:  If the current thread has an association for key,
+ * value is ignored, and 0 is returned.  Else an attempt is made to create
+ * an association of key to value for the current thread.  0 is returned
+ * if that succeeds, but -1 is returned if there's not enough memory
+ * to create the association.  value must not be NULL.
+ */
+int
+PyThread_set_key_value(int key, void *value)
+{
+    struct key *p;
+
+    assert(value != NULL);
+    p = find_key(key, value);
+    if (p == NULL)
+        return -1;
+    else
+        return 0;
+}
+
+/* Retrieve the value associated with key in the current thread, or NULL
+ * if the current thread doesn't have an association for key.
+ */
+void *
+PyThread_get_key_value(int key)
+{
+    struct key *p = find_key(key, NULL);
+
+    if (p == NULL)
+        return NULL;
+    else
+        return p->value;
+}
+
+/* Forget the current thread's association for key, if any. */
+void
+PyThread_delete_key_value(int key)
+{
+    long id = PyThread_get_thread_ident();
+    struct key *p, **q;
+
+    PyThread_acquire_lock(keymutex, 1);
+    q = &keyhead;
+    while ((p = *q) != NULL) {
+        if (p->key == key && p->id == id) {
+            *q = p->next;
+            free((void *)p);
+            /* NB This does *not* free p->value! */
+            break;
+        }
+        else
+            q = &p->next;
+    }
+    PyThread_release_lock(keymutex);
+}
+
+/* Forget everything not associated with the current thread id.
+ * This function is called from PyOS_AfterFork().  It is necessary
+ * because other thread ids which were in use at the time of the fork
+ * may be reused for new threads created in the forked process.
+ */
+void
+PyThread_ReInitTLS(void)
+{
+    long id = PyThread_get_thread_ident();
+    struct key *p, **q;
+
+    if (!keymutex)
+        return;
+
+    /* As with interpreter_lock in PyEval_ReInitThreads()
+       we just create a new lock without freeing the old one */
+    keymutex = PyThread_allocate_lock();
+
+    /* Delete all keys which do not match the current thread id */
+    q = &keyhead;
+    while ((p = *q) != NULL) {
+        if (p->id != id) {
+            *q = p->next;
+            free((void *)p);
+            /* NB This does *not* free p->value! */
+        }
+        else
+            q = &p->next;
+    }
+}
+
+#endif  /* !MS_WINDOWS */
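
The comment block at the top of this file spells out the semantics; in the
non-Windows implementation, find_key() behaves (as its own comment notes) like
dict.setdefault() over a linked list guarded by keymutex. A compact Python
model of the per-thread key semantics (illustrative only, not part of the
patch; the real code must also supply its own locking because these calls may
run without the GIL):

    import thread

    _tls = {}                                    # (thread_id, key) -> value

    def set_key_value(key, value):               # like PyThread_set_key_value:
        _tls.setdefault((thread.get_ident(), key), value)   # ignored if already set
        return 0

    def get_key_value(key):                      # like PyThread_get_key_value
        return _tls.get((thread.get_ident(), key))

    def delete_key_value(key):                   # like PyThread_delete_key_value
        _tls.pop((thread.get_ident(), key), None)

    def delete_key(key):                         # like PyThread_delete_key: all threads
        for k in [k for k in _tls.keys() if k[1] == key]:
            del _tls[k]
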
diff --git a/pypy/module/cpyext/stringobject.py b/pypy/module/cpyext/stringobject.py
--- a/pypy/module/cpyext/stringobject.py
+++ b/pypy/module/cpyext/stringobject.py
@@ -130,6 +130,11 @@
 
 @cpython_api([PyObject], rffi.CCHARP, error=0)
 def PyString_AsString(space, ref):
+    if from_ref(space, rffi.cast(PyObject, ref.c_ob_type)) is space.w_str:
+        pass    # typecheck returned "ok" without forcing 'ref' at all
+    elif not PyString_Check(space, ref):   # otherwise, use the alternate way
+        raise OperationError(space.w_TypeError, space.wrap(
+            "PyString_AsString only support strings"))
     ref_str = rffi.cast(PyStringObject, ref)
     if not ref_str.c_buffer:
         # copy string buffer
diff --git a/pypy/module/cpyext/test/conftest.py b/pypy/module/cpyext/test/conftest.py
--- a/pypy/module/cpyext/test/conftest.py
+++ b/pypy/module/cpyext/test/conftest.py
@@ -10,7 +10,7 @@
     return False
 
 def pytest_funcarg__space(request):
-    return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+    return gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
 
 def pytest_funcarg__api(request):
     return request.cls.api
diff --git a/pypy/module/cpyext/test/foo.c b/pypy/module/cpyext/test/foo.c
--- a/pypy/module/cpyext/test/foo.c
+++ b/pypy/module/cpyext/test/foo.c
@@ -176,6 +176,8 @@
     {NULL}  /* Sentinel */
 };
 
+PyDoc_STRVAR(foo_doc, "foo is for testing.");
+
 static PyTypeObject footype = {
     PyVarObject_HEAD_INIT(NULL, 0)
     "foo.foo",               /*tp_name*/
@@ -198,7 +200,7 @@
     (setattrofunc)foo_setattro, /*tp_setattro*/
     0,                       /*tp_as_buffer*/
     Py_TPFLAGS_DEFAULT,      /*tp_flags*/
-    0,                       /*tp_doc*/
+    foo_doc,                 /*tp_doc*/
     0,                       /*tp_traverse*/
     0,                       /*tp_clear*/
     0,                       /*tp_richcompare*/
diff --git a/pypy/module/cpyext/test/test_api.py b/pypy/module/cpyext/test/test_api.py
--- a/pypy/module/cpyext/test/test_api.py
+++ b/pypy/module/cpyext/test/test_api.py
@@ -19,7 +19,8 @@
 
 class BaseApiTest(LeakCheckingTest):
     def setup_class(cls):
-        cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi',
+                                                        'array'])
 
         # warm up reference counts:
         # - the posix module allocates a HCRYPTPROV on Windows
diff --git a/pypy/module/cpyext/test/test_arraymodule.py b/pypy/module/cpyext/test/test_arraymodule.py
--- a/pypy/module/cpyext/test/test_arraymodule.py
+++ b/pypy/module/cpyext/test/test_arraymodule.py
@@ -1,3 +1,4 @@
+from pypy.conftest import gettestobjspace
 from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
 
 import py
diff --git a/pypy/module/cpyext/test/test_bufferobject.py b/pypy/module/cpyext/test/test_bufferobject.py
--- a/pypy/module/cpyext/test/test_bufferobject.py
+++ b/pypy/module/cpyext/test/test_bufferobject.py
@@ -48,3 +48,17 @@
             ])
         b = module.buffer_new()
         raises(AttributeError, getattr, b, 'x')
+
+    def test_array_buffer(self):
+        module = self.import_extension('foo', [
+            ("roundtrip", "METH_O",
+             """
+                 PyBufferObject *buf = (PyBufferObject *)args;
+                 return PyString_FromStringAndSize(buf->b_ptr, buf->b_size);
+             """),
+            ])
+        import array
+        a = array.array('c', 'text')
+        b = buffer(a)
+        assert module.roundtrip(b) == 'text'
+
diff --git a/pypy/module/cpyext/test/test_cpyext.py b/pypy/module/cpyext/test/test_cpyext.py
--- a/pypy/module/cpyext/test/test_cpyext.py
+++ b/pypy/module/cpyext/test/test_cpyext.py
@@ -35,7 +35,7 @@
 
 class AppTestApi:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
         from pypy.rlib.libffi import get_libc_name
         cls.w_libc = cls.space.wrap(get_libc_name())
 
@@ -106,10 +106,7 @@
             del obj
         import gc; gc.collect()
 
-        try:
-            del space.getexecutioncontext().cpyext_threadstate
-        except AttributeError:
-            pass
+        space.getexecutioncontext().cleanup_cpyext_state()
 
         for w_obj in state.non_heaptypes_w:
             Py_DecRef(space, w_obj)
@@ -168,8 +165,9 @@
         return leaking
 
 class AppTestCpythonExtensionBase(LeakCheckingTest):
+    
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi'])
+        cls.space = gettestobjspace(usemodules=['cpyext', 'thread', '_rawffi', 'array'])
         cls.space.getbuiltinmodule("cpyext")
         from pypy.module.imp.importing import importhook
         importhook(cls.space, "os") # warm up reference counts
diff --git a/pypy/module/cpyext/test/test_import.py b/pypy/module/cpyext/test/test_import.py
--- a/pypy/module/cpyext/test/test_import.py
+++ b/pypy/module/cpyext/test/test_import.py
@@ -19,7 +19,7 @@
                                          space.wrap('__name__'))) == 'foobar'
 
     def test_getmoduledict(self, space, api):
-        testmod = "binascii"
+        testmod = "_functools"
         w_pre_dict = api.PyImport_GetModuleDict()
         assert not space.is_true(space.contains(w_pre_dict, space.wrap(testmod)))
 
diff --git a/pypy/module/cpyext/test/test_longobject.py b/pypy/module/cpyext/test/test_longobject.py
--- a/pypy/module/cpyext/test/test_longobject.py
+++ b/pypy/module/cpyext/test/test_longobject.py
@@ -101,9 +101,9 @@
                                   space.wrap((2, 7)))):
             py.test.skip("unsupported before Python 2.7")
 
-        assert api._PyLong_Sign(space.wrap(0L)) == 0
-        assert api._PyLong_Sign(space.wrap(2L)) == 1
-        assert api._PyLong_Sign(space.wrap(-2L)) == -1
+        assert api._PyLong_Sign(space.wraplong(0L)) == 0
+        assert api._PyLong_Sign(space.wraplong(2L)) == 1
+        assert api._PyLong_Sign(space.wraplong(-2L)) == -1
 
         assert api._PyLong_NumBits(space.wrap(0)) == 0
         assert api._PyLong_NumBits(space.wrap(1)) == 1
diff --git a/pypy/module/cpyext/test/test_number.py b/pypy/module/cpyext/test/test_number.py
--- a/pypy/module/cpyext/test/test_number.py
+++ b/pypy/module/cpyext/test/test_number.py
@@ -6,12 +6,12 @@
 class TestIterator(BaseApiTest):
     def test_check(self, space, api):
         assert api.PyIndex_Check(space.wrap(12))
-        assert api.PyIndex_Check(space.wrap(-12L))
+        assert api.PyIndex_Check(space.wraplong(-12L))
         assert not api.PyIndex_Check(space.wrap(12.1))
         assert not api.PyIndex_Check(space.wrap('12'))
 
         assert api.PyNumber_Check(space.wrap(12))
-        assert api.PyNumber_Check(space.wrap(-12L))
+        assert api.PyNumber_Check(space.wraplong(-12L))
         assert api.PyNumber_Check(space.wrap(12.1))
         assert not api.PyNumber_Check(space.wrap('12'))
         assert not api.PyNumber_Check(space.wrap(1+3j))
@@ -21,7 +21,7 @@
         assert api.PyLong_CheckExact(w_l)
 
     def test_number_int(self, space, api):
-        w_l = api.PyNumber_Int(space.wrap(123L))
+        w_l = api.PyNumber_Int(space.wraplong(123L))
         assert api.PyInt_CheckExact(w_l)
         w_l = api.PyNumber_Int(space.wrap(2 << 65))
         assert api.PyLong_CheckExact(w_l)
@@ -29,7 +29,7 @@
         assert api.PyInt_CheckExact(w_l)
 
     def test_number_index(self, space, api):
-        w_l = api.PyNumber_Index(space.wrap(123L))
+        w_l = api.PyNumber_Index(space.wraplong(123L))
         assert api.PyLong_CheckExact(w_l)
         w_l = api.PyNumber_Index(space.wrap(42.3))
         assert w_l is None
diff --git a/pypy/module/cpyext/test/test_pyerrors.py b/pypy/module/cpyext/test/test_pyerrors.py
--- a/pypy/module/cpyext/test/test_pyerrors.py
+++ b/pypy/module/cpyext/test/test_pyerrors.py
@@ -218,3 +218,51 @@
             assert e.filename == "blyf"
             assert e.errno == errno.EBADF
             assert e.strerror == os.strerror(errno.EBADF)
+
+    def test_GetSetExcInfo(self):
+        import sys
+        module = self.import_extension('foo', [
+            ("getset_exc_info", "METH_VARARGS",
+             r'''
+             PyObject *type, *val, *tb;
+             PyObject *new_type, *new_val, *new_tb;
+             PyObject *result;
+
+             if (!PyArg_ParseTuple(args, "OOO", &new_type, &new_val, &new_tb))
+                 return NULL;
+
+             PyErr_GetExcInfo(&type, &val, &tb);
+
+             Py_INCREF(new_type);
+             Py_INCREF(new_val);
+             Py_INCREF(new_tb);
+             PyErr_SetExcInfo(new_type, new_val, new_tb);
+
+             result = Py_BuildValue("OOO",
+                                    type ? type : Py_None,
+                                    val  ? val  : Py_None,
+                                    tb   ? tb   : Py_None);
+             Py_XDECREF(type);
+             Py_XDECREF(val);
+             Py_XDECREF(tb);
+             return result;
+             '''
+             ),
+            ])
+        try:
+            raise ValueError(5)
+        except ValueError, old_exc:
+            new_exc = TypeError("TEST")
+            orig_sys_exc_info = sys.exc_info()
+            orig_exc_info = module.getset_exc_info(new_exc.__class__,
+                                                   new_exc, None)
+            new_sys_exc_info = sys.exc_info()
+            new_exc_info = module.getset_exc_info(*orig_exc_info)
+            reset_sys_exc_info = sys.exc_info()
+
+            assert orig_exc_info[0] is old_exc.__class__
+            assert orig_exc_info[1] is old_exc
+            assert orig_exc_info == orig_sys_exc_info
+            assert orig_exc_info == reset_sys_exc_info
+            assert new_exc_info == (new_exc.__class__, new_exc, None)
+            assert new_exc_info == new_sys_exc_info
diff --git a/pypy/module/cpyext/test/test_pystate.py b/pypy/module/cpyext/test/test_pystate.py
--- a/pypy/module/cpyext/test/test_pystate.py
+++ b/pypy/module/cpyext/test/test_pystate.py
@@ -3,6 +3,10 @@
 from pypy.rpython.lltypesystem.lltype import nullptr
 from pypy.module.cpyext.pystate import PyInterpreterState, PyThreadState
 from pypy.module.cpyext.pyobject import from_ref
+from pypy.rpython.lltypesystem import lltype
+from pypy.module.cpyext.test.test_cpyext import LeakCheckingTest, freeze_refcnts
+from pypy.module.cpyext.pystate import PyThreadState_Get, PyInterpreterState_Head
+from pypy.tool import leakfinder
 
 class AppTestThreads(AppTestCpythonExtensionBase):
     def test_allow_threads(self):
@@ -21,6 +25,93 @@
         # Should compile at least
         module.test()
 
+
+    def test_thread_state_get(self):
+        module = self.import_extension('foo', [
+                ("get", "METH_NOARGS",
+                 """
+                     PyThreadState *tstate = PyThreadState_Get();
+                     if (tstate == NULL) {
+                         return PyLong_FromLong(0);
+                     }
+                     if (tstate->interp != PyInterpreterState_Head()) {
+                         return PyLong_FromLong(1);
+                     }
+                     if (tstate->interp->next != NULL) {
+                         return PyLong_FromLong(2);
+                     }
+                     return PyLong_FromLong(3);
+                 """),
+                ])
+        assert module.get() == 3
+
+    def test_basic_threadstate_dance(self):
+        module = self.import_extension('foo', [
+                ("dance", "METH_NOARGS",
+                 """
+                     PyThreadState *old_tstate, *new_tstate;
+
+                     old_tstate = PyThreadState_Swap(NULL);
+                     if (old_tstate == NULL) {
+                         return PyLong_FromLong(0);
+                     }
+
+                     new_tstate = PyThreadState_Get();
+                     if (new_tstate != NULL) {
+                         return PyLong_FromLong(1);
+                     }
+
+                     new_tstate = PyThreadState_Swap(old_tstate);
+                     if (new_tstate != NULL) {
+                         return PyLong_FromLong(2);
+                     }
+
+                     new_tstate = PyThreadState_Get();
+                     if (new_tstate != old_tstate) {
+                         return PyLong_FromLong(3);
+                     }
+
+                     return PyLong_FromLong(4);
+                 """),
+                ])
+        assert module.dance() == 4
+
+    def test_threadstate_dict(self):
+        module = self.import_extension('foo', [
+                ("getdict", "METH_NOARGS",
+                 """
+                 PyObject *dict = PyThreadState_GetDict();
+                 Py_INCREF(dict);
+                 return dict;
+                 """),
+                ])
+        assert isinstance(module.getdict(), dict)
+
+    def test_savethread(self):
+        module = self.import_extension('foo', [
+                ("bounce", "METH_NOARGS",
+                 """
+                 PyThreadState *tstate = PyEval_SaveThread();
+                 if (tstate == NULL) {
+                     return PyLong_FromLong(0);
+                 }
+
+                 if (PyThreadState_Get() != NULL) {
+                     return PyLong_FromLong(1);
+                 }
+
+                 PyEval_RestoreThread(tstate);
+
+                 if (PyThreadState_Get() != tstate) {
+                     return PyLong_FromLong(2);
+                 }
+
+                 return PyLong_FromLong(3);
+                                  """),
+                ])
+        assert module.bounce() == 3    # 3 = the full save/restore dance succeeded
+
+
+
 class TestInterpreterState(BaseApiTest):
     def test_interpreter_head(self, space, api):
         state = api.PyInterpreterState_Head()
@@ -29,31 +120,3 @@
     def test_interpreter_next(self, space, api):
         state = api.PyInterpreterState_Head()
         assert nullptr(PyInterpreterState.TO) == api.PyInterpreterState_Next(state)
-
-class TestThreadState(BaseApiTest):
-    def test_thread_state_get(self, space, api):
-        ts = api.PyThreadState_Get()
-        assert ts != nullptr(PyThreadState.TO)
-
-    def test_thread_state_interp(self, space, api):
-        ts = api.PyThreadState_Get()
-        assert ts.c_interp == api.PyInterpreterState_Head()
-        assert ts.c_interp.c_next == nullptr(PyInterpreterState.TO)
-
-    def test_basic_threadstate_dance(self, space, api):
-        # Let extension modules call these functions,
-        # Not sure of the semantics in pypy though.
-        # (cpyext always acquires and releases the GIL around calls)
-        tstate = api.PyThreadState_Swap(None)
-        assert tstate is not None
-        assert not api.PyThreadState_Swap(tstate)
-
-        api.PyEval_AcquireThread(tstate)
-        api.PyEval_ReleaseThread(tstate)
-
-    def test_threadstate_dict(self, space, api):
-        ts = api.PyThreadState_Get()
-        ref = ts.c_dict
-        assert ref == api.PyThreadState_GetDict()
-        w_obj = from_ref(space, ref)
-        assert space.isinstance_w(w_obj, space.w_dict)
diff --git a/pypy/module/cpyext/test/test_stringobject.py b/pypy/module/cpyext/test/test_stringobject.py
--- a/pypy/module/cpyext/test/test_stringobject.py
+++ b/pypy/module/cpyext/test/test_stringobject.py
@@ -105,6 +105,15 @@
             )])
         assert module.string_as_string("huheduwe") == "huhe"
 
+    def test_py_string_as_string_None(self):
+        module = self.import_extension('foo', [
+            ("string_None", "METH_VARARGS",
+             '''
+             return PyString_AsString(Py_None);
+             '''
+            )])
+        raises(TypeError, module.string_None)
+
     def test_AsStringAndSize(self):
         module = self.import_extension('foo', [
             ("getstring", "METH_NOARGS",
diff --git a/pypy/module/cpyext/test/test_thread.py b/pypy/module/cpyext/test/test_thread.py
--- a/pypy/module/cpyext/test/test_thread.py
+++ b/pypy/module/cpyext/test/test_thread.py
@@ -5,6 +5,7 @@
 
 from pypy.module.thread.ll_thread import allocate_ll_lock
 from pypy.module.cpyext.test.test_api import BaseApiTest
+from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
 
 
 class TestPyThread(BaseApiTest):
@@ -38,3 +39,51 @@
         api.PyThread_release_lock(lock)
         assert api.PyThread_acquire_lock(lock, 0) == 1
         api.PyThread_free_lock(lock)
+
+
+class AppTestThread(AppTestCpythonExtensionBase):
+    def test_tls(self):
+        module = self.import_extension('foo', [
+            ("create_key", "METH_NOARGS",
+             """
+                 return PyInt_FromLong(PyThread_create_key());
+             """),
+            ("test_key", "METH_O",
+             """
+                 int key = PyInt_AsLong(args);
+                 if (PyThread_get_key_value(key) != NULL) {
+                     PyErr_SetNone(PyExc_ValueError);
+                     return NULL;
+                 }
+                 if (PyThread_set_key_value(key, (void*)123) < 0) {
+                     PyErr_SetNone(PyExc_ValueError);
+                     return NULL;
+                 }
+                 if (PyThread_get_key_value(key) != (void*)123) {
+                     PyErr_SetNone(PyExc_ValueError);
+                     return NULL;
+                 }
+                 Py_RETURN_NONE;
+             """),
+            ])
+        key = module.create_key()
+        assert key > 0
+        # Test value in main thread.
+        module.test_key(key)
+        raises(ValueError, module.test_key, key)
+        # Same test, in another thread.
+        result = []
+        import thread, time
+        def in_thread():
+            try:
+                module.test_key(key)
+                raises(ValueError, module.test_key, key)
+            except Exception, e:
+                result.append(e)
+            else:
+                result.append(True)
+        thread.start_new_thread(in_thread, ())
+        while not result:
+            print "."
+            time.sleep(.5)
+        assert result == [True]
diff --git a/pypy/module/cpyext/test/test_typeobject.py b/pypy/module/cpyext/test/test_typeobject.py
--- a/pypy/module/cpyext/test/test_typeobject.py
+++ b/pypy/module/cpyext/test/test_typeobject.py
@@ -20,6 +20,7 @@
         assert type(obj) is module.fooType
         print "type of obj has type", type(type(obj))
         print "type of type of obj has type", type(type(type(obj)))
+        assert module.fooType.__doc__ == "foo is for testing."
 
     def test_typeobject_method_descriptor(self):
         module = self.import_module(name='foo')
@@ -414,8 +415,11 @@
             static int
             mp_ass_subscript(PyObject *self, PyObject *key, PyObject *value)
             {
-                PyErr_SetNone(PyExc_ZeroDivisionError);
-                return -1;
+                if (PyInt_Check(key)) {
+                    PyErr_SetNone(PyExc_ZeroDivisionError);
+                    return -1;
+                }
+                return 0;
             }
             PyMappingMethods tp_as_mapping;
             static PyTypeObject Foo_Type = {
@@ -425,6 +429,36 @@
             ''')
         obj = module.new_obj()
         raises(ZeroDivisionError, obj.__setitem__, 5, None)
+        res = obj.__setitem__('foo', None)
+        assert res is None
+
+    def test_sq_contains(self):
+        module = self.import_extension('foo', [
+           ("new_obj", "METH_NOARGS",
+            '''
+                PyObject *obj;
+                Foo_Type.tp_as_sequence = &tp_as_sequence;
+                tp_as_sequence.sq_contains = sq_contains;
+                if (PyType_Ready(&Foo_Type) < 0) return NULL;
+                obj = PyObject_New(PyObject, &Foo_Type);
+                return obj;
+            '''
+            )],
+            '''
+            static int
+            sq_contains(PyObject *self, PyObject *value)
+            {
+                return 42;
+            }
+            PySequenceMethods tp_as_sequence;
+            static PyTypeObject Foo_Type = {
+                PyVarObject_HEAD_INIT(NULL, 0)
+                "foo.foo",
+            };
+            ''')
+        obj = module.new_obj()
+        res = "foo" in obj
+        assert res is True
 
     def test_tp_iter(self):
         module = self.import_extension('foo', [
diff --git a/pypy/module/cpyext/typeobject.py b/pypy/module/cpyext/typeobject.py
--- a/pypy/module/cpyext/typeobject.py
+++ b/pypy/module/cpyext/typeobject.py
@@ -307,6 +307,8 @@
         if not space.is_true(space.issubtype(self, space.w_type)):
             self.flag_cpytype = True
         self.flag_heaptype = False
+        if pto.c_tp_doc:
+            self.w_doc = space.wrap(rffi.charp2str(pto.c_tp_doc))
 
 @bootstrap_function
 def init_typeobject(space):
@@ -624,7 +626,6 @@
     Creates an interpreter type from a PyTypeObject structure.
     """
     # missing:
-    # setting __doc__ if not defined and tp_doc defined
     # inheriting tp_as_* slots
     # unsupported:
     # tp_mro, tp_subclasses
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -178,7 +178,7 @@
 @cpython_api([], Py_UNICODE, error=CANNOT_FAIL)
 def PyUnicode_GetMax(space):
     """Get the maximum ordinal for a Unicode character."""
-    return unichr(runicode.MAXUNICODE)
+    return runicode.UNICHR(runicode.MAXUNICODE)
 
 @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL)
 def PyUnicode_AS_DATA(space, ref):
diff --git a/pypy/module/fcntl/test/test_fcntl.py b/pypy/module/fcntl/test/test_fcntl.py
--- a/pypy/module/fcntl/test/test_fcntl.py
+++ b/pypy/module/fcntl/test/test_fcntl.py
@@ -13,7 +13,7 @@
 
 class AppTestFcntl:
     def setup_class(cls):
-        space = gettestobjspace(usemodules=('fcntl', 'array'))
+        space = gettestobjspace(usemodules=('fcntl', 'array', 'struct'))
         cls.space = space
         tmpprefix = str(udir.ensure('test_fcntl', dir=1).join('tmp_'))
         cls.w_tmp = space.wrap(tmpprefix)
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -987,6 +987,10 @@
             os.environ['LANG'] = oldlang
 
 class AppTestImportHooks(object):
+
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=('struct',))
+    
     def test_meta_path(self):
         tried_imports = []
         class Importer(object):
diff --git a/pypy/module/itertools/test/test_itertools.py b/pypy/module/itertools/test/test_itertools.py
--- a/pypy/module/itertools/test/test_itertools.py
+++ b/pypy/module/itertools/test/test_itertools.py
@@ -891,7 +891,7 @@
 
 class AppTestItertools27:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['itertools'])
+        cls.space = gettestobjspace(usemodules=['itertools', 'struct'])
         if cls.space.is_true(cls.space.appexec([], """():
             import sys; return sys.version_info < (2, 7)
             """)):
diff --git a/pypy/module/marshal/interp_marshal.py b/pypy/module/marshal/interp_marshal.py
--- a/pypy/module/marshal/interp_marshal.py
+++ b/pypy/module/marshal/interp_marshal.py
@@ -327,8 +327,10 @@
     # %r not supported in rpython
     #u.raise_exc('invalid typecode in unmarshal: %r' % tc)
     c = ord(tc)
-    if c < 32 or c > 126:
-        s = '\\x' + hex(c)
+    if c < 16:
+        s = '\\x0%x' % c
+    elif c < 32 or c > 126:
+        s = '\\x%x' % c
     elif tc == '\\':
         s = r'\\'
     else:
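
The old escape was built with hex(c), which already carries a "0x" prefix, so
an invalid typecode such as chr(1) was reported as "\x0x1"; a bare "%x" would
also drop the leading zero for codes below 16.  The two branches above produce
the canonical two-digit escapes that the new test_bad_typecode test in
test_marshal.py looks for.  Quick illustration in ordinary Python (not
RPython):

    for c in (0x01, 0x1f, 0xff):
        old = '\\x' + hex(c)                                # '\x0x1', '\x0x1f', '\x0xff'
        new = ('\\x0%x' % c) if c < 16 else ('\\x%x' % c)   # '\x01',  '\x1f',  '\xff'
        print '%3d  old=%-7s  new=%s' % (c, old, new)
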
diff --git a/pypy/module/marshal/test/make_test_marshal.py b/pypy/module/marshal/test/make_test_marshal.py
deleted file mode 100644
--- a/pypy/module/marshal/test/make_test_marshal.py
+++ /dev/null
@@ -1,78 +0,0 @@
-
-TESTCASES = """\
-    None
-    False
-    True
-    StopIteration
-    Ellipsis
-    42
-    -17
-    sys.maxint
-    -1.25
-    -1.25 #2
-    2+5j
-    2+5j #2
-    42L
-    -1234567890123456789012345678901234567890L
-    hello   # not interned
-    "hello"
-    ()
-    (1, 2)
-    []
-    [3, 4]
-    {}
-    {5: 6, 7: 8}
-    func.func_code
-    scopefunc.func_code
-    u'hello'
-    set()
-    set([1, 2])
-    frozenset()
-    frozenset([3, 4])
-""".strip().split('\n')
-
-def readable(s):
-    for c, repl in (
-        ("'", '_quote_'), ('"', '_Quote_'), (':', '_colon_'), ('.', '_dot_'),
-        ('[', '_list_'), (']', '_tsil_'), ('{', '_dict_'), ('}', '_tcid_'),
-        ('-', '_minus_'), ('+', '_plus_'),
-        (',', '_comma_'), ('(', '_brace_'), (')', '_ecarb_') ):
-        s = s.replace(c, repl)
-    lis = list(s)
-    for i, c in enumerate(lis):
-        if c.isalnum() or c == '_':
-            continue
-        lis[i] = '_'
-    return ''.join(lis)
-
-print """class AppTestMarshal:
-"""
-for line in TESTCASES:
-    line = line.strip()
-    name = readable(line)
-    version = ''
-    extra = ''
-    if line.endswith('#2'):
-        version = ', 2'
-        extra = '; assert len(s) in (9, 17)'
-    src = '''\
-    def test_%(name)s(self):
-        import sys
-        hello = "he"
-        hello += "llo"
-        def func(x):
-            return lambda y: x+y
-        scopefunc = func(42)
-        import marshal, StringIO
-        case = %(line)s
-        print "case: %%-30s   func=%(name)s" %% (case, )
-        s = marshal.dumps(case%(version)s)%(extra)s
-        x = marshal.loads(s)
-        assert x == case
-        f = StringIO.StringIO()
-        marshal.dump(case, f)
-        f.seek(0)
-        x = marshal.load(f)
-        assert x == case
-''' % {'name': name, 'line': line, 'version' : version, 'extra': extra}
-    print src
diff --git a/pypy/module/marshal/test/test_marshal.py b/pypy/module/marshal/test/test_marshal.py
--- a/pypy/module/marshal/test/test_marshal.py
+++ b/pypy/module/marshal/test/test_marshal.py
@@ -174,6 +174,11 @@
                 pass
             raises(ValueError, marshal.dumps, subtype)
 
+    def test_bad_typecode(self):
+        import marshal
+        exc = raises(ValueError, marshal.loads, chr(1))
+        assert r"'\x01'" in exc.value.message
+
 
 class AppTestRope(AppTestMarshal):
     def setup_class(cls):
diff --git a/pypy/module/math/test/test_direct.py b/pypy/module/math/test/test_direct.py
--- a/pypy/module/math/test/test_direct.py
+++ b/pypy/module/math/test/test_direct.py
@@ -55,6 +55,15 @@
         ('frexp', (-1.25,), lambda x: x == (-0.625, 1)),
         ('modf',  (4.25,), lambda x: x == (0.25, 4.0)),
         ('modf',  (-4.25,), lambda x: x == (-0.25, -4.0)),
+        ('copysign', (1.5, 0.0), 1.5),
+        ('copysign', (1.5, -0.0), -1.5),
+        ('copysign', (1.5, INFINITY), 1.5),
+        ('copysign', (1.5, -INFINITY), -1.5),
+        ]
+    if sys.platform != 'win32':    # all NaNs seem to be negative there...?
+        IRREGCASES += [
+        ('copysign', (1.5, NAN), 1.5),
+        ('copysign', (1.75, -NAN), -1.75),      # special case for -NAN here
         ]
 
     OVFCASES = [
diff --git a/pypy/module/math/test/test_math.py b/pypy/module/math/test/test_math.py
--- a/pypy/module/math/test/test_math.py
+++ b/pypy/module/math/test/test_math.py
@@ -1,3 +1,4 @@
+from __future__ import with_statement
 import sys
 from pypy.conftest import gettestobjspace
 from pypy.module.math.test import test_direct
@@ -5,7 +6,7 @@
 
 class AppTestMath:
     def setup_class(cls):
-        cls.space = gettestobjspace(usemodules=['math'])
+        cls.space = gettestobjspace(usemodules=['math', 'struct'])
         cls.w_cases = cls.space.wrap(test_direct.MathTests.TESTCASES)
         cls.w_consistent_host = cls.space.wrap(test_direct.consistent_host)
 
@@ -268,3 +269,7 @@
             def __trunc__(self):
                 return "truncated"
         assert math.trunc(foo()) == "truncated"
+
+    def test_copysign_nan(self):
+        import math
+        assert math.copysign(1.0, float('-nan')) == -1.0
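
math.copysign returns the magnitude of its first argument with the sign bit of
its second, and NaNs carry a sign bit too, so copysign(1.0, float('-nan')) is
-1.0 wherever the parsed NaN keeps its sign (the matching cases in
test_direct.py are skipped on win32 for exactly that reason).  Small
demonstration; the struct-based sign_bit helper is only for illustration:

    import math, struct

    def sign_bit(x):
        # high bit of the first byte of the big-endian IEEE-754 encoding
        return (ord(struct.pack('>d', x)[0]) & 0x80) != 0

    print sign_bit(float('nan')), sign_bit(float('-nan'))   # typically: False True
    print math.copysign(1.0, float('-nan'))                 # -1.0 when the sign bit is set
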
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -29,6 +29,7 @@
         'flatiter': 'interp_numarray.W_FlatIterator',
         'isna': 'interp_numarray.isna',
         'concatenate': 'interp_numarray.concatenate',
+        'repeat': 'interp_numarray.repeat',
 
         'set_string_function': 'appbridge.set_string_function',
 
@@ -37,26 +38,44 @@
         'True_': 'types.Bool.True',
         'False_': 'types.Bool.False',
 
+        'typeinfo': 'interp_dtype.get_dtype_cache(space).w_typeinfo',
+
         'generic': 'interp_boxes.W_GenericBox',
         'number': 'interp_boxes.W_NumberBox',
         'integer': 'interp_boxes.W_IntegerBox',
         'signedinteger': 'interp_boxes.W_SignedIntegerBox',
         'unsignedinteger': 'interp_boxes.W_UnsignedIntegerBox',
         'bool_': 'interp_boxes.W_BoolBox',
+        'bool8': 'interp_boxes.W_BoolBox',
         'int8': 'interp_boxes.W_Int8Box',
+        'byte': 'interp_boxes.W_Int8Box',
         'uint8': 'interp_boxes.W_UInt8Box',
+        'ubyte': 'interp_boxes.W_UInt8Box',
         'int16': 'interp_boxes.W_Int16Box',
+        'short': 'interp_boxes.W_Int16Box',
         'uint16': 'interp_boxes.W_UInt16Box',
+        'ushort': 'interp_boxes.W_UInt16Box',
         'int32': 'interp_boxes.W_Int32Box',
+        'intc': 'interp_boxes.W_Int32Box',
         'uint32': 'interp_boxes.W_UInt32Box',
+        'uintc': 'interp_boxes.W_UInt32Box',
         'int64': 'interp_boxes.W_Int64Box',
         'uint64': 'interp_boxes.W_UInt64Box',
+        'longlong': 'interp_boxes.W_LongLongBox',
+        'ulonglong': 'interp_boxes.W_ULongLongBox',
         'int_': 'interp_boxes.W_LongBox',
         'inexact': 'interp_boxes.W_InexactBox',
         'floating': 'interp_boxes.W_FloatingBox',
         'float_': 'interp_boxes.W_Float64Box',
         'float32': 'interp_boxes.W_Float32Box',
         'float64': 'interp_boxes.W_Float64Box',
+        'intp': 'types.IntP.BoxType',
+        'uintp': 'types.UIntP.BoxType',
+        'flexible': 'interp_boxes.W_FlexibleBox',
+        'character': 'interp_boxes.W_CharacterBox',
+        'str_': 'interp_boxes.W_StringBox',
+        'unicode_': 'interp_boxes.W_UnicodeBox',
+        'void': 'interp_boxes.W_VoidBox',
     }
 
     # ufuncs
@@ -67,6 +86,7 @@
         ("arccos", "arccos"),
         ("arcsin", "arcsin"),
         ("arctan", "arctan"),
+        ("arctan2", "arctan2"),
         ("arccosh", "arccosh"),
         ("arcsinh", "arcsinh"),
         ("arctanh", "arctanh"),
@@ -77,9 +97,15 @@
         ("true_divide", "true_divide"),
         ("equal", "equal"),
         ("exp", "exp"),
+        ("exp2", "exp2"),
+        ("expm1", "expm1"),
         ("fabs", "fabs"),
+        ("fmax", "fmax"),
+        ("fmin", "fmin"),
+        ("fmod", "fmod"),
         ("floor", "floor"),
         ("ceil", "ceil"),
+        ("trunc", "trunc"),
         ("greater", "greater"),
         ("greater_equal", "greater_equal"),
         ("less", "less"),
@@ -89,24 +115,44 @@
         ("multiply", "multiply"),
         ("negative", "negative"),
         ("not_equal", "not_equal"),
+        ("radians", "radians"),
+        ("degrees", "degrees"),
+        ("deg2rad", "radians"),
+        ("rad2deg", "degrees"),
         ("reciprocal", "reciprocal"),
         ("sign", "sign"),
+        ("signbit", "signbit"),
         ("sin", "sin"),
         ("sinh", "sinh"),
         ("subtract", "subtract"),
         ('sqrt', 'sqrt'),
+        ('square', 'square'),
         ("tan", "tan"),
         ("tanh", "tanh"),
         ('bitwise_and', 'bitwise_and'),
         ('bitwise_or', 'bitwise_or'),
         ('bitwise_xor', 'bitwise_xor'),
         ('bitwise_not', 'invert'),
+        ('left_shift', 'left_shift'),
+        ('right_shift', 'right_shift'),
+        ('invert', 'invert'),
         ('isnan', 'isnan'),
         ('isinf', 'isinf'),
+        ('isneginf', 'isneginf'),
+        ('isposinf', 'isposinf'),
+        ('isfinite', 'isfinite'),
         ('logical_and', 'logical_and'),
         ('logical_xor', 'logical_xor'),
         ('logical_not', 'logical_not'),
         ('logical_or', 'logical_or'),
+        ('log', 'log'),
+        ('log2', 'log2'),
+        ('log10', 'log10'),
+        ('log1p', 'log1p'),
+        ('power', 'power'),
+        ('floor_divide', 'floor_divide'),
+        ('logaddexp', 'logaddexp'),
+        ('logaddexp2', 'logaddexp2'),
     ]:
         interpleveldefs[exposed] = "interp_ufuncs.get(space).%s" % impl
 
diff --git a/pypy/module/micronumpy/app_numpy.py b/pypy/module/micronumpy/app_numpy.py
--- a/pypy/module/micronumpy/app_numpy.py
+++ b/pypy/module/micronumpy/app_numpy.py
@@ -16,7 +16,7 @@
         a[i][i] = 1
     return a
 
-def sum(a,axis=None):
+def sum(a,axis=None, out=None):
     '''sum(a, axis=None)
     Sum of array elements over a given axis.
 
@@ -43,17 +43,17 @@
     # TODO: add to doc (once it's implemented): cumsum : Cumulative sum of array elements.
     if not hasattr(a, "sum"):
         a = _numpypy.array(a)
-    return a.sum(axis)
+    return a.sum(axis=axis, out=out)
 

