[pypy-commit] pypy pytest: merge default

RonnyPfannschmidt noreply at buildbot.pypy.org
Thu Mar 1 08:51:57 CET 2012


Author: Ronny Pfannschmidt <Ronny.Pfannschmidt at gmx.de>
Branch: pytest
Changeset: r53035:e6568e325b8e
Date: 2012-03-01 08:51 +0100
http://bitbucket.org/pypy/pypy/changeset/e6568e325b8e/

Log:	merge default

diff --git a/lib-python/modified-2.7/ctypes/test/test_arrays.py b/lib-python/modified-2.7/ctypes/test/test_arrays.py
--- a/lib-python/modified-2.7/ctypes/test/test_arrays.py
+++ b/lib-python/modified-2.7/ctypes/test/test_arrays.py
@@ -1,12 +1,23 @@
 import unittest
 from ctypes import *
+from test.test_support import impl_detail
 
 formats = "bBhHiIlLqQfd"
 
+# c_longdouble commented out for PyPy, see the comment in test_longdouble
 formats = c_byte, c_ubyte, c_short, c_ushort, c_int, c_uint, \
-          c_long, c_ulonglong, c_float, c_double, c_longdouble
+          c_long, c_ulonglong, c_float, c_double #, c_longdouble
 
 class ArrayTestCase(unittest.TestCase):
+
+    @impl_detail('long double not supported by PyPy', pypy=False)
+    def test_longdouble(self):
+        """
+        This test is empty. It is only here as a reminder that we commented out
+        c_longdouble in "formats". If PyPy ever supports c_longdouble, we
+        should remove this test and uncomment c_longdouble in formats.
+        """
+
     def test_simple(self):
         # create classes holding simple numeric types, and check
         # various properties.
diff --git a/lib_pypy/_ctypes/array.py b/lib_pypy/_ctypes/array.py
--- a/lib_pypy/_ctypes/array.py
+++ b/lib_pypy/_ctypes/array.py
@@ -1,9 +1,9 @@
-
+import _ffi
 import _rawffi
 
 from _ctypes.basics import _CData, cdata_from_address, _CDataMeta, sizeof
 from _ctypes.basics import keepalive_key, store_reference, ensure_objects
-from _ctypes.basics import CArgObject
+from _ctypes.basics import CArgObject, as_ffi_pointer
 
 class ArrayMeta(_CDataMeta):
     def __new__(self, name, cls, typedict):
@@ -211,6 +211,9 @@
     def _to_ffi_param(self):
         return self._get_buffer_value()
 
+    def _as_ffi_pointer_(self, ffitype):
+        return as_ffi_pointer(self, ffitype)
+
 ARRAY_CACHE = {}
 
 def create_array_type(base, length):
@@ -228,5 +231,6 @@
             _type_ = base
         )
         cls = ArrayMeta(name, (Array,), tpdict)
+        cls._ffiargtype = _ffi.types.Pointer(base.get_ffi_argtype())
         ARRAY_CACHE[key] = cls
         return cls
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -230,5 +230,16 @@
     }
 
 
+# called from primitive.py, pointer.py, array.py
+def as_ffi_pointer(value, ffitype):
+    my_ffitype = type(value).get_ffi_argtype()
+    # for now, we always allow types.pointer, else a lot of tests
+    # break. We need to rethink how pointers are represented, though
+    if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p:
+        raise ArgumentError("expected %s instance, got %s" % (type(value),
+                                                              ffitype))
+    return value._get_buffer_value()
+
+
 # used by "byref"
 from _ctypes.pointer import pointer
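
A minimal ctypes-level sketch of the calling pattern that _as_ffi_pointer_ and
as_ffi_pointer support: an Array instance passed where the foreign function
expects a void pointer.  The libc handle and the memset signature below are
assumptions for illustration only, not part of this changeset.

    import ctypes

    libc = ctypes.CDLL(None)   # assumption: a POSIX libc reachable this way
    libc.memset.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_size_t]
    libc.memset.restype = ctypes.c_void_p

    buf = (ctypes.c_char * 16)()      # an Array instance
    libc.memset(buf, 0x41, len(buf))  # the array is lowered to a raw void * here
    assert buf.raw == 'A' * 16
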
diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py
--- a/lib_pypy/_ctypes/pointer.py
+++ b/lib_pypy/_ctypes/pointer.py
@@ -3,7 +3,7 @@
 import _ffi
 from _ctypes.basics import _CData, _CDataMeta, cdata_from_address, ArgumentError
 from _ctypes.basics import keepalive_key, store_reference, ensure_objects
-from _ctypes.basics import sizeof, byref
+from _ctypes.basics import sizeof, byref, as_ffi_pointer
 from _ctypes.array import Array, array_get_slice_params, array_slice_getitem,\
      array_slice_setitem
 
@@ -119,14 +119,6 @@
     def _as_ffi_pointer_(self, ffitype):
         return as_ffi_pointer(self, ffitype)
 
-def as_ffi_pointer(value, ffitype):
-    my_ffitype = type(value).get_ffi_argtype()
-    # for now, we always allow types.pointer, else a lot of tests
-    # break. We need to rethink how pointers are represented, though
-    if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p:
-        raise ArgumentError("expected %s instance, got %s" % (type(value),
-                                                              ffitype))
-    return value._get_buffer_value()
 
 def _cast_addr(obj, _, tp):
     if not (isinstance(tp, _CDataMeta) and tp._is_pointer_like()):
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -13,7 +13,7 @@
                and not p.basename.startswith('test')]
 
 essential_modules = dict.fromkeys(
-    ["exceptions", "_file", "sys", "__builtin__", "posix"]
+    ["exceptions", "_file", "sys", "__builtin__", "posix", "_warnings"]
 )
 
 default_modules = essential_modules.copy()
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -313,5 +313,10 @@
   implementation detail that shows up because of internal C-level slots
   that PyPy does not have.
 
+* the ``__dict__`` attribute of new-style classes returns a normal dict, as
+  opposed to a dict proxy like in CPython. Mutating the dict will change the
+  type and vice versa. For builtin types, a dictionary will be returned that
+  cannot be changed (but still looks and behaves like a normal dictionary).
+
 
 .. include:: _ref.txt
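
A short sketch of the behaviour documented above; it is expected to work on
PyPy only, since on CPython __dict__ is a dictproxy and the assignment raises
TypeError:

    class A(object):
        pass

    d = A.__dict__       # a plain dict on PyPy, a dictproxy on CPython
    d['attr'] = 42       # mutating the dict is reflected on the type (PyPy only)
    assert A.attr == 42
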
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -328,7 +328,7 @@
                 raise
             modname = self.str_w(w_modname)
             mod = self.interpclass_w(w_mod)
-            if isinstance(mod, Module):
+            if isinstance(mod, Module) and not mod.startup_called:
                 self.timer.start("startup " + modname)
                 mod.init(self)
                 self.timer.stop("startup " + modname)
@@ -1471,8 +1471,8 @@
 
     def warn(self, msg, w_warningcls):
         self.appexec([self.wrap(msg), w_warningcls], """(msg, warningcls):
-            import warnings
-            warnings.warn(msg, warningcls, stacklevel=2)
+            import _warnings
+            _warnings.warn(msg, warningcls, stacklevel=2)
         """)
 
     def resolve_target(self, w_obj):
diff --git a/pypy/interpreter/pyparser/parsestring.py b/pypy/interpreter/pyparser/parsestring.py
--- a/pypy/interpreter/pyparser/parsestring.py
+++ b/pypy/interpreter/pyparser/parsestring.py
@@ -1,5 +1,6 @@
 from pypy.interpreter.error import OperationError
 from pypy.interpreter import unicodehelper
+from pypy.rlib.rstring import StringBuilder
 
 def parsestr(space, encoding, s, unicode_literals=False):
     # compiler.transformer.Transformer.decode_literal depends on what 
@@ -115,21 +116,23 @@
     the string is UTF-8 encoded and should be re-encoded in the
     specified encoding.
     """
-    lis = []
+    builder = StringBuilder(len(s))
     ps = 0
     end = len(s)
-    while ps < end:
-        if s[ps] != '\\':
-            # note that the C code has a label here.
-            # the logic is the same.
+    while 1:
+        ps2 = ps
+        while ps < end and s[ps] != '\\':
             if recode_encoding and ord(s[ps]) & 0x80:
                 w, ps = decode_utf8(space, s, ps, end, recode_encoding)
-                # Append bytes to output buffer.
-                lis.append(w)
+                builder.append(w)
+                ps2 = ps
             else:
-                lis.append(s[ps])
                 ps += 1
-            continue
+        if ps > ps2:
+            builder.append_slice(s, ps2, ps)
+        if ps == end:
+            break
+
         ps += 1
         if ps == end:
             raise_app_valueerror(space, 'Trailing \\ in string')
@@ -140,25 +143,25 @@
         if ch == '\n':
             pass
         elif ch == '\\':
-            lis.append('\\')
+            builder.append('\\')
         elif ch == "'":
-            lis.append("'")
+            builder.append("'")
         elif ch == '"':
-            lis.append('"')
+            builder.append('"')
         elif ch == 'b':
-            lis.append("\010")
+            builder.append("\010")
         elif ch == 'f':
-            lis.append('\014') # FF
+            builder.append('\014') # FF
         elif ch == 't':
-            lis.append('\t')
+            builder.append('\t')
         elif ch == 'n':
-            lis.append('\n')
+            builder.append('\n')
         elif ch == 'r':
-            lis.append('\r')
+            builder.append('\r')
         elif ch == 'v':
-            lis.append('\013') # VT
+            builder.append('\013') # VT
         elif ch == 'a':
-            lis.append('\007') # BEL, not classic C
+            builder.append('\007') # BEL, not classic C
         elif ch in '01234567':
             # Look for up to two more octal digits
             span = ps
@@ -168,13 +171,13 @@
             # emulate a strange wrap-around behavior of CPython:
             # \400 is the same as \000 because 0400 == 256
             num = int(octal, 8) & 0xFF
-            lis.append(chr(num))
+            builder.append(chr(num))
             ps = span
         elif ch == 'x':
             if ps+2 <= end and isxdigit(s[ps]) and isxdigit(s[ps + 1]):
                 hexa = s[ps : ps + 2]
                 num = int(hexa, 16)
-                lis.append(chr(num))
+                builder.append(chr(num))
                 ps += 2
             else:
                 raise_app_valueerror(space, 'invalid \\x escape')
@@ -184,13 +187,13 @@
             # this was not an escape, so the backslash
             # has to be added, and we start over in
             # non-escape mode.
-            lis.append('\\')
+            builder.append('\\')
             ps -= 1
             assert ps >= 0
             continue
             # an arbitrary number of unescaped UTF-8 bytes may follow.
 
-    buf = ''.join(lis)
+    buf = builder.build()
     return buf
 
 
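The same StringBuilder pattern in isolation: plain runs are copied with
append_slice(), single characters with append(), and the result is
materialized once with build().  The helper name below is illustrative only.

    from pypy.rlib.rstring import StringBuilder

    def double_backslashes(s):
        builder = StringBuilder(len(s))   # size hint, as in the code above
        start = 0
        for i in range(len(s)):
            if s[i] == '\\':
                builder.append_slice(s, start, i)  # copy the unescaped run
                builder.append('\\\\')             # emit two backslashes
                start = i + 1
        builder.append_slice(s, start, len(s))
        return builder.build()
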
diff --git a/pypy/interpreter/streamutil.py b/pypy/interpreter/streamutil.py
new file mode 100644
--- /dev/null
+++ b/pypy/interpreter/streamutil.py
@@ -0,0 +1,17 @@
+from pypy.rlib.streamio import StreamError
+from pypy.interpreter.error import OperationError, wrap_oserror2
+
+def wrap_streamerror(space, e, w_filename=None):
+    if isinstance(e, StreamError):
+        return OperationError(space.w_ValueError,
+                              space.wrap(e.message))
+    elif isinstance(e, OSError):
+        return wrap_oserror_as_ioerror(space, e, w_filename)
+    else:
+        # should not happen: wrap_streamerror() is only called when one of
+        # StreamErrors = (OSError, StreamError) has been raised
+        return OperationError(space.w_IOError, space.w_None)
+
+def wrap_oserror_as_ioerror(space, e, w_filename=None):
+    return wrap_oserror2(space, e, w_filename,
+                         w_exception_class=space.w_IOError)
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -322,3 +322,14 @@
             space.ALL_BUILTIN_MODULES.pop()
             del space._builtinmodule_list
             mods = space.get_builtinmodule_to_install()
+
+    def test_dont_reload_builtin_mods_on_startup(self):
+        from pypy.tool.option import make_config, make_objspace
+        config = make_config(None)
+        space = make_objspace(config)
+        w_executable = space.wrap('executable')
+        assert space.str_w(space.getattr(space.sys, w_executable)) == 'py.py'
+        space.setattr(space.sys, w_executable, space.wrap('foobar'))
+        assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar'
+        space.startup()
+        assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar'
diff --git a/pypy/interpreter/test/test_zpy.py b/pypy/interpreter/test/test_zpy.py
--- a/pypy/interpreter/test/test_zpy.py
+++ b/pypy/interpreter/test/test_zpy.py
@@ -17,14 +17,14 @@
 def test_executable():
     """Ensures sys.executable points to the py.py script"""
     # TODO : watch out for spaces/special chars in pypypath
-    output = run(sys.executable, pypypath,
+    output = run(sys.executable, pypypath, '-S',
                  "-c", "import sys;print sys.executable")
     assert output.splitlines()[-1] == pypypath
 
 def test_special_names():
     """Test the __name__ and __file__ special global names"""
     cmd = "print __name__; print '__file__' in globals()"
-    output = run(sys.executable, pypypath, '-c', cmd)
+    output = run(sys.executable, pypypath, '-S', '-c', cmd)
     assert output.splitlines()[-2] == '__main__'
     assert output.splitlines()[-1] == 'False'
 
@@ -33,24 +33,24 @@
     tmpfile.write("print __name__; print __file__\n")
     tmpfile.close()
 
-    output = run(sys.executable, pypypath, tmpfilepath)
+    output = run(sys.executable, pypypath, '-S', tmpfilepath)
     assert output.splitlines()[-2] == '__main__'
     assert output.splitlines()[-1] == str(tmpfilepath)
 
 def test_argv_command():
     """Some tests on argv"""
     # test 1 : no arguments
-    output = run(sys.executable, pypypath,
+    output = run(sys.executable, pypypath, '-S',
                  "-c", "import sys;print sys.argv")
     assert output.splitlines()[-1] == str(['-c'])
 
     # test 2 : some arguments after
-    output = run(sys.executable, pypypath,
+    output = run(sys.executable, pypypath, '-S',
                  "-c", "import sys;print sys.argv", "hello")
     assert output.splitlines()[-1] == str(['-c','hello'])
     
     # test 3 : additional pypy parameters
-    output = run(sys.executable, pypypath,
+    output = run(sys.executable, pypypath, '-S',
                  "-O", "-c", "import sys;print sys.argv", "hello")
     assert output.splitlines()[-1] == str(['-c','hello'])
 
@@ -65,15 +65,15 @@
     tmpfile.close()
 
     # test 1 : no arguments
-    output = run(sys.executable, pypypath, tmpfilepath)
+    output = run(sys.executable, pypypath, '-S', tmpfilepath)
     assert output.splitlines()[-1] == str([tmpfilepath])
     
     # test 2 : some arguments after
-    output = run(sys.executable, pypypath, tmpfilepath, "hello")
+    output = run(sys.executable, pypypath, '-S', tmpfilepath, "hello")
     assert output.splitlines()[-1] == str([tmpfilepath,'hello'])
     
     # test 3 : additional pypy parameters
-    output = run(sys.executable, pypypath, "-O", tmpfilepath, "hello")
+    output = run(sys.executable, pypypath, '-S', "-O", tmpfilepath, "hello")
     assert output.splitlines()[-1] == str([tmpfilepath,'hello'])
     
 
@@ -95,7 +95,7 @@
     tmpfile.write(TB_NORMALIZATION_CHK)
     tmpfile.close()
 
-    popen = subprocess.Popen([sys.executable, str(pypypath), tmpfilepath],
+    popen = subprocess.Popen([sys.executable, str(pypypath), '-S', tmpfilepath],
                              stderr=subprocess.PIPE)
     _, stderr = popen.communicate()
     assert stderr.endswith('KeyError: <normalized>\n')
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
--- a/pypy/jit/backend/llsupport/gc.py
+++ b/pypy/jit/backend/llsupport/gc.py
@@ -1,7 +1,6 @@
 import os
 from pypy.rlib import rgc
 from pypy.rlib.objectmodel import we_are_translated, specialize
-from pypy.rlib.debug import fatalerror
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr
 from pypy.rpython.lltypesystem import llgroup
@@ -770,11 +769,19 @@
         self.generate_function('malloc_unicode', malloc_unicode,
                                [lltype.Signed])
 
-        # Rarely called: allocate a fixed-size amount of bytes, but
-        # not in the nursery, because it is too big.  Implemented like
-        # malloc_nursery_slowpath() above.
-        self.generate_function('malloc_fixedsize', malloc_nursery_slowpath,
-                               [lltype.Signed])
+        # Never called as far as I can tell, but there for completeness:
+        # allocate a fixed-size object, but not in the nursery, because
+        # it is too big.
+        def malloc_big_fixedsize(size, tid):
+            if self.DEBUG:
+                self._random_usage_of_xmm_registers()
+            type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
+            check_typeid(type_id)
+            return llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
+                                                   type_id, size,
+                                                   False, False, False)
+        self.generate_function('malloc_big_fixedsize', malloc_big_fixedsize,
+                               [lltype.Signed] * 2)
 
     def _bh_malloc(self, sizedescr):
         from pypy.rpython.memory.gctypelayout import check_typeid
diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py
--- a/pypy/jit/backend/llsupport/rewrite.py
+++ b/pypy/jit/backend/llsupport/rewrite.py
@@ -96,8 +96,10 @@
     def handle_new_fixedsize(self, descr, op):
         assert isinstance(descr, SizeDescr)
         size = descr.size
-        self.gen_malloc_nursery(size, op.result)
-        self.gen_initialize_tid(op.result, descr.tid)
+        if self.gen_malloc_nursery(size, op.result):
+            self.gen_initialize_tid(op.result, descr.tid)
+        else:
+            self.gen_malloc_fixedsize(size, descr.tid, op.result)
 
     def handle_new_array(self, arraydescr, op):
         v_length = op.getarg(0)
@@ -112,8 +114,8 @@
                 pass    # total_size is still -1
         elif arraydescr.itemsize == 0:
             total_size = arraydescr.basesize
-        if 0 <= total_size <= 0xffffff:     # up to 16MB, arbitrarily
-            self.gen_malloc_nursery(total_size, op.result)
+        if (total_size >= 0 and
+                self.gen_malloc_nursery(total_size, op.result)):
             self.gen_initialize_tid(op.result, arraydescr.tid)
             self.gen_initialize_len(op.result, v_length, arraydescr.lendescr)
         elif self.gc_ll_descr.kind == 'boehm':
@@ -147,13 +149,22 @@
         # mark 'v_result' as freshly malloced
         self.recent_mallocs[v_result] = None
 
-    def gen_malloc_fixedsize(self, size, v_result):
-        """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)).
-        Note that with the framework GC, this should be called very rarely.
+    def gen_malloc_fixedsize(self, size, typeid, v_result):
+        """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, ...).
+        Used on Boehm, and on the framework GC for large fixed-size
+        mallocs.  (For all I know this latter case never occurs in
+        practice, but better safe than sorry.)
         """
-        addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize')
-        self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result,
-                                 self.gc_ll_descr.malloc_fixedsize_descr)
+        if self.gc_ll_descr.fielddescr_tid is not None:  # framework GC
+            assert (size & (WORD-1)) == 0, "size not aligned?"
+            addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_big_fixedsize')
+            args = [ConstInt(addr), ConstInt(size), ConstInt(typeid)]
+            descr = self.gc_ll_descr.malloc_big_fixedsize_descr
+        else:                                            # Boehm
+            addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize')
+            args = [ConstInt(addr), ConstInt(size)]
+            descr = self.gc_ll_descr.malloc_fixedsize_descr
+        self._gen_call_malloc_gc(args, v_result, descr)
 
     def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result):
         """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm."""
@@ -211,8 +222,7 @@
         """
         size = self.round_up_for_allocation(size)
         if not self.gc_ll_descr.can_use_nursery_malloc(size):
-            self.gen_malloc_fixedsize(size, v_result)
-            return
+            return False
         #
         op = None
         if self._op_malloc_nursery is not None:
@@ -238,6 +248,7 @@
         self._previous_size = size
         self._v_last_malloced_nursery = v_result
         self.recent_mallocs[v_result] = None
+        return True
 
     def gen_initialize_tid(self, v_newgcobj, tid):
         if self.gc_ll_descr.fielddescr_tid is not None:
diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py
--- a/pypy/jit/backend/llsupport/test/test_rewrite.py
+++ b/pypy/jit/backend/llsupport/test/test_rewrite.py
@@ -119,12 +119,19 @@
             jump()
         """, """
             []
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
-                                %(adescr.basesize + 10 * adescr.itemsize)d, \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 10, descr=alendescr)
+            p0 = call_malloc_gc(ConstClass(malloc_array),   \
+                                %(adescr.basesize)d,        \
+                                10,                         \
+                                %(adescr.itemsize)d,        \
+                                %(adescr.lendescr.offset)d, \
+                                descr=malloc_array_descr)
             jump()
         """)
+##      should ideally be:
+##            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
+##                                %(adescr.basesize + 10 * adescr.itemsize)d, \
+##                                descr=malloc_fixedsize_descr)
+##            setfield_gc(p0, 10, descr=alendescr)
 
     def test_new_array_variable(self):
         self.check_rewrite("""
@@ -178,13 +185,20 @@
             jump()
         """, """
             [i1]
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize),   \
-                                %(unicodedescr.basesize +       \
-                                  10 * unicodedescr.itemsize)d, \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 10, descr=unicodelendescr)
+            p0 = call_malloc_gc(ConstClass(malloc_array),   \
+                                %(unicodedescr.basesize)d,  \
+                                10,                         \
+                                %(unicodedescr.itemsize)d,  \
+                                %(unicodelendescr.offset)d, \
+                                descr=malloc_array_descr)
             jump()
         """)
+##      should ideally be:
+##            p0 = call_malloc_gc(ConstClass(malloc_fixedsize),   \
+##                                %(unicodedescr.basesize +       \
+##                                  10 * unicodedescr.itemsize)d, \
+##                                descr=malloc_fixedsize_descr)
+##            setfield_gc(p0, 10, descr=unicodelendescr)
 
 
 class TestFramework(RewriteTests):
@@ -203,7 +217,7 @@
         #
         class FakeCPU(object):
             def sizeof(self, STRUCT):
-                descr = SizeDescrWithVTable(102)
+                descr = SizeDescrWithVTable(104)
                 descr.tid = 9315
                 return descr
         self.cpu = FakeCPU()
@@ -368,11 +382,9 @@
             jump()
         """, """
             []
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
-                                %(bdescr.basesize + 104)d,    \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 8765, descr=tiddescr)
-            setfield_gc(p0, 103, descr=blendescr)
+            p0 = call_malloc_gc(ConstClass(malloc_array), 1,  \
+                                %(bdescr.tid)d, 103,          \
+                                descr=malloc_array_descr)
             jump()
         """)
 
@@ -435,9 +447,8 @@
             jump()
         """, """
             [p1]
-            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 104, \
-                                descr=malloc_fixedsize_descr)
-            setfield_gc(p0, 9315, descr=tiddescr)
+            p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \
+                                descr=malloc_big_fixedsize_descr)
             setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr)
             jump()
         """)
diff --git a/pypy/jit/backend/x86/support.py b/pypy/jit/backend/x86/support.py
--- a/pypy/jit/backend/x86/support.py
+++ b/pypy/jit/backend/x86/support.py
@@ -1,6 +1,7 @@
 import sys
 from pypy.rpython.lltypesystem import lltype, rffi, llmemory
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
+from pypy.jit.backend.x86.arch import WORD
 
 
 def values_array(TP, size):
@@ -37,8 +38,13 @@
 
 if sys.platform == 'win32':
     ensure_sse2_floats = lambda : None
+    # XXX check for SSE2 on win32 too
 else:
+    if WORD == 4:
+        extra = ['-DPYPY_X86_CHECK_SSE2']
+    else:
+        extra = []
     ensure_sse2_floats = rffi.llexternal_use_eci(ExternalCompilationInfo(
         compile_extra = ['-msse2', '-mfpmath=sse',
-                         '-DPYPY_CPU_HAS_STANDARD_PRECISION'],
+                         '-DPYPY_CPU_HAS_STANDARD_PRECISION'] + extra,
         ))
diff --git a/pypy/jit/backend/x86/test/test_gc_integration.py b/pypy/jit/backend/x86/test/test_gc_integration.py
--- a/pypy/jit/backend/x86/test/test_gc_integration.py
+++ b/pypy/jit/backend/x86/test/test_gc_integration.py
@@ -184,6 +184,8 @@
         self.addrs[1] = self.addrs[0] + 64
         self.calls = []
         def malloc_slowpath(size):
+            if self.gcrootmap is not None:   # hook
+                self.gcrootmap.hook_malloc_slowpath()
             self.calls.append(size)
             # reset the nursery
             nadr = rffi.cast(lltype.Signed, self.nursery)
@@ -257,3 +259,218 @@
         assert gc_ll_descr.addrs[0] == nurs_adr + 24
         # this should call slow path once
         assert gc_ll_descr.calls == [24]
+
+    def test_save_regs_around_malloc(self):
+        S1 = lltype.GcStruct('S1')
+        S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)),
+                                   ('s1', lltype.Ptr(S1)),
+                                   ('s2', lltype.Ptr(S1)),
+                                   ('s3', lltype.Ptr(S1)),
+                                   ('s4', lltype.Ptr(S1)),
+                                   ('s5', lltype.Ptr(S1)),
+                                   ('s6', lltype.Ptr(S1)),
+                                   ('s7', lltype.Ptr(S1)),
+                                   ('s8', lltype.Ptr(S1)),
+                                   ('s9', lltype.Ptr(S1)),
+                                   ('s10', lltype.Ptr(S1)),
+                                   ('s11', lltype.Ptr(S1)),
+                                   ('s12', lltype.Ptr(S1)),
+                                   ('s13', lltype.Ptr(S1)),
+                                   ('s14', lltype.Ptr(S1)),
+                                   ('s15', lltype.Ptr(S1)))
+        cpu = self.cpu
+        self.namespace = self.namespace.copy()
+        for i in range(16):
+            self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i)
+        ops = '''
+        [p0]
+        p1 = getfield_gc(p0, descr=ds0)
+        p2 = getfield_gc(p0, descr=ds1)
+        p3 = getfield_gc(p0, descr=ds2)
+        p4 = getfield_gc(p0, descr=ds3)
+        p5 = getfield_gc(p0, descr=ds4)
+        p6 = getfield_gc(p0, descr=ds5)
+        p7 = getfield_gc(p0, descr=ds6)
+        p8 = getfield_gc(p0, descr=ds7)
+        p9 = getfield_gc(p0, descr=ds8)
+        p10 = getfield_gc(p0, descr=ds9)
+        p11 = getfield_gc(p0, descr=ds10)
+        p12 = getfield_gc(p0, descr=ds11)
+        p13 = getfield_gc(p0, descr=ds12)
+        p14 = getfield_gc(p0, descr=ds13)
+        p15 = getfield_gc(p0, descr=ds14)
+        p16 = getfield_gc(p0, descr=ds15)
+        #
+        # now all registers are in use
+        p17 = call_malloc_nursery(40)
+        p18 = call_malloc_nursery(40)     # overflow
+        #
+        finish(p1, p2, p3, p4, p5, p6, p7, p8,         \
+               p9, p10, p11, p12, p13, p14, p15, p16)
+        '''
+        s2 = lltype.malloc(S2)
+        for i in range(16):
+            setattr(s2, 's%d' % i, lltype.malloc(S1))
+        s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2)
+        #
+        self.interpret(ops, [s2ref])
+        gc_ll_descr = cpu.gc_ll_descr
+        gc_ll_descr.check_nothing_in_nursery()
+        assert gc_ll_descr.calls == [40]
+        # check the returned pointers
+        for i in range(16):
+            s1ref = self.cpu.get_latest_value_ref(i)
+            s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref)
+            assert s1 == getattr(s2, 's%d' % i)
+
+
+class MockShadowStackRootMap(MockGcRootMap):
+    is_shadow_stack = True
+    MARKER_FRAME = 88       # this marker follows the frame addr
+    S1 = lltype.GcStruct('S1')
+
+    def __init__(self):
+        self.addrs = lltype.malloc(rffi.CArray(lltype.Signed), 20,
+                                   flavor='raw')
+        # root_stack_top
+        self.addrs[0] = rffi.cast(lltype.Signed, self.addrs) + 3*WORD
+        # random stuff
+        self.addrs[1] = 123456
+        self.addrs[2] = 654321
+        self.check_initial_and_final_state()
+        self.callshapes = {}
+        self.should_see = []
+
+    def check_initial_and_final_state(self):
+        assert self.addrs[0] == rffi.cast(lltype.Signed, self.addrs) + 3*WORD
+        assert self.addrs[1] == 123456
+        assert self.addrs[2] == 654321
+
+    def get_root_stack_top_addr(self):
+        return rffi.cast(lltype.Signed, self.addrs)
+
+    def compress_callshape(self, shape, datablockwrapper):
+        assert shape[0] == 'shape'
+        return ['compressed'] + shape[1:]
+
+    def write_callshape(self, mark, force_index):
+        assert mark[0] == 'compressed'
+        assert force_index not in self.callshapes
+        assert force_index == 42 + len(self.callshapes)
+        self.callshapes[force_index] = mark
+
+    def hook_malloc_slowpath(self):
+        num_entries = self.addrs[0] - rffi.cast(lltype.Signed, self.addrs)
+        assert num_entries == 5*WORD    # 3 initially, plus 2 by the asm frame
+        assert self.addrs[1] == 123456  # unchanged
+        assert self.addrs[2] == 654321  # unchanged
+        frame_addr = self.addrs[3]                   # pushed by the asm frame
+        assert self.addrs[4] == self.MARKER_FRAME    # pushed by the asm frame
+        #
+        from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS
+        addr = rffi.cast(rffi.CArrayPtr(lltype.Signed),
+                         frame_addr + FORCE_INDEX_OFS)
+        force_index = addr[0]
+        assert force_index == 43    # in this test: the 2nd call_malloc_nursery
+        #
+        # The callshapes[43] saved above should list addresses both in the
+        # COPY_AREA and in the "normal" stack, where all the 16 values p1-p16
+        # of test_save_regs_at_correct_place should have been stored.  Here
+        # we replace them with new addresses, to emulate a moving GC.
+        shape = self.callshapes[force_index]
+        assert len(shape[1:]) == len(self.should_see)
+        new_objects = [None] * len(self.should_see)
+        for ofs in shape[1:]:
+            assert isinstance(ofs, int)    # not a register at all here
+            addr = rffi.cast(rffi.CArrayPtr(lltype.Signed), frame_addr + ofs)
+            contains = addr[0]
+            for j in range(len(self.should_see)):
+                obj = self.should_see[j]
+                if contains == rffi.cast(lltype.Signed, obj):
+                    assert new_objects[j] is None   # duplicate?
+                    break
+            else:
+                assert 0   # the value read from the stack looks random?
+            new_objects[j] = lltype.malloc(self.S1)
+            addr[0] = rffi.cast(lltype.Signed, new_objects[j])
+        self.should_see[:] = new_objects
+
+
+class TestMallocShadowStack(BaseTestRegalloc):
+
+    def setup_method(self, method):
+        cpu = CPU(None, None)
+        cpu.gc_ll_descr = GCDescrFastpathMalloc()
+        cpu.gc_ll_descr.gcrootmap = MockShadowStackRootMap()
+        cpu.setup_once()
+        for i in range(42):
+            cpu.reserve_some_free_fail_descr_number()
+        self.cpu = cpu
+
+    def test_save_regs_at_correct_place(self):
+        cpu = self.cpu
+        gc_ll_descr = cpu.gc_ll_descr
+        S1 = gc_ll_descr.gcrootmap.S1
+        S2 = lltype.GcStruct('S2', ('s0', lltype.Ptr(S1)),
+                                   ('s1', lltype.Ptr(S1)),
+                                   ('s2', lltype.Ptr(S1)),
+                                   ('s3', lltype.Ptr(S1)),
+                                   ('s4', lltype.Ptr(S1)),
+                                   ('s5', lltype.Ptr(S1)),
+                                   ('s6', lltype.Ptr(S1)),
+                                   ('s7', lltype.Ptr(S1)),
+                                   ('s8', lltype.Ptr(S1)),
+                                   ('s9', lltype.Ptr(S1)),
+                                   ('s10', lltype.Ptr(S1)),
+                                   ('s11', lltype.Ptr(S1)),
+                                   ('s12', lltype.Ptr(S1)),
+                                   ('s13', lltype.Ptr(S1)),
+                                   ('s14', lltype.Ptr(S1)),
+                                   ('s15', lltype.Ptr(S1)))
+        self.namespace = self.namespace.copy()
+        for i in range(16):
+            self.namespace['ds%i' % i] = cpu.fielddescrof(S2, 's%d' % i)
+        ops = '''
+        [p0]
+        p1 = getfield_gc(p0, descr=ds0)
+        p2 = getfield_gc(p0, descr=ds1)
+        p3 = getfield_gc(p0, descr=ds2)
+        p4 = getfield_gc(p0, descr=ds3)
+        p5 = getfield_gc(p0, descr=ds4)
+        p6 = getfield_gc(p0, descr=ds5)
+        p7 = getfield_gc(p0, descr=ds6)
+        p8 = getfield_gc(p0, descr=ds7)
+        p9 = getfield_gc(p0, descr=ds8)
+        p10 = getfield_gc(p0, descr=ds9)
+        p11 = getfield_gc(p0, descr=ds10)
+        p12 = getfield_gc(p0, descr=ds11)
+        p13 = getfield_gc(p0, descr=ds12)
+        p14 = getfield_gc(p0, descr=ds13)
+        p15 = getfield_gc(p0, descr=ds14)
+        p16 = getfield_gc(p0, descr=ds15)
+        #
+        # now all registers are in use
+        p17 = call_malloc_nursery(40)
+        p18 = call_malloc_nursery(40)     # overflow
+        #
+        finish(p1, p2, p3, p4, p5, p6, p7, p8,         \
+               p9, p10, p11, p12, p13, p14, p15, p16)
+        '''
+        s2 = lltype.malloc(S2)
+        for i in range(16):
+            s1 = lltype.malloc(S1)
+            setattr(s2, 's%d' % i, s1)
+            gc_ll_descr.gcrootmap.should_see.append(s1)
+        s2ref = lltype.cast_opaque_ptr(llmemory.GCREF, s2)
+        #
+        self.interpret(ops, [s2ref])
+        gc_ll_descr.check_nothing_in_nursery()
+        assert gc_ll_descr.calls == [40]
+        gc_ll_descr.gcrootmap.check_initial_and_final_state()
+        # check the returned pointers
+        for i in range(16):
+            s1ref = self.cpu.get_latest_value_ref(i)
+            s1 = lltype.cast_opaque_ptr(lltype.Ptr(S1), s1ref)
+            for j in range(16):
+                assert s1 != getattr(s2, 's%d' % j)
+            assert s1 == gc_ll_descr.gcrootmap.should_see[i]
diff --git a/pypy/jit/backend/x86/test/test_ztranslation.py b/pypy/jit/backend/x86/test/test_ztranslation.py
--- a/pypy/jit/backend/x86/test/test_ztranslation.py
+++ b/pypy/jit/backend/x86/test/test_ztranslation.py
@@ -52,6 +52,7 @@
             set_param(jitdriver, "trace_eagerness", 2)
             total = 0
             frame = Frame(i)
+            j = float(j)
             while frame.i > 3:
                 jitdriver.can_enter_jit(frame=frame, total=total, j=j)
                 jitdriver.jit_merge_point(frame=frame, total=total, j=j)
diff --git a/pypy/jit/metainterp/compile.py b/pypy/jit/metainterp/compile.py
--- a/pypy/jit/metainterp/compile.py
+++ b/pypy/jit/metainterp/compile.py
@@ -289,8 +289,21 @@
             assert isinstance(token, TargetToken)
             assert token.original_jitcell_token is None
             token.original_jitcell_token = trace.original_jitcell_token
-            
-    
+
+
+def do_compile_loop(metainterp_sd, inputargs, operations, looptoken,
+                    log=True, name=''):
+    metainterp_sd.logger_ops.log_loop(inputargs, operations, -2,
+                                      'compiling', name=name)
+    return metainterp_sd.cpu.compile_loop(inputargs, operations, looptoken,
+                                          log=log, name=name)
+
+def do_compile_bridge(metainterp_sd, faildescr, inputargs, operations,
+                      original_loop_token, log=True):
+    metainterp_sd.logger_ops.log_bridge(inputargs, operations, -2)
+    return metainterp_sd.cpu.compile_bridge(faildescr, inputargs, operations,
+                                            original_loop_token, log=log)
+
 def send_loop_to_backend(greenkey, jitdriver_sd, metainterp_sd, loop, type):
     vinfo = jitdriver_sd.virtualizable_info
     if vinfo is not None:
@@ -319,9 +332,9 @@
     metainterp_sd.profiler.start_backend()
     debug_start("jit-backend")
     try:
-        asminfo = metainterp_sd.cpu.compile_loop(loop.inputargs, operations,
-                                                  original_jitcell_token,
-                                                  name=loopname)
+        asminfo = do_compile_loop(metainterp_sd, loop.inputargs,
+                                  operations, original_jitcell_token,
+                                  name=loopname)
     finally:
         debug_stop("jit-backend")
     metainterp_sd.profiler.end_backend()
@@ -333,7 +346,6 @@
         metainterp_sd.stats.compiled()
     metainterp_sd.log("compiled new " + type)
     #
-    loopname = jitdriver_sd.warmstate.get_location_str(greenkey)
     if asminfo is not None:
         ops_offset = asminfo.ops_offset
     else:
@@ -365,9 +377,9 @@
     metainterp_sd.profiler.start_backend()
     debug_start("jit-backend")
     try:
-        asminfo = metainterp_sd.cpu.compile_bridge(faildescr, inputargs,
-                                                   operations,
-                                                   original_loop_token)
+        asminfo = do_compile_bridge(metainterp_sd, faildescr, inputargs,
+                                    operations,
+                                    original_loop_token)
     finally:
         debug_stop("jit-backend")
     metainterp_sd.profiler.end_backend()
diff --git a/pypy/jit/metainterp/logger.py b/pypy/jit/metainterp/logger.py
--- a/pypy/jit/metainterp/logger.py
+++ b/pypy/jit/metainterp/logger.py
@@ -18,6 +18,10 @@
             debug_start("jit-log-noopt-loop")
             logops = self._log_operations(inputargs, operations, ops_offset)
             debug_stop("jit-log-noopt-loop")
+        elif number == -2:
+            debug_start("jit-log-compiling-loop")
+            logops = self._log_operations(inputargs, operations, ops_offset)
+            debug_stop("jit-log-compiling-loop")
         else:
             debug_start("jit-log-opt-loop")
             debug_print("# Loop", number, '(%s)' % name , ":", type,
@@ -31,6 +35,10 @@
             debug_start("jit-log-noopt-bridge")
             logops = self._log_operations(inputargs, operations, ops_offset)
             debug_stop("jit-log-noopt-bridge")
+        elif number == -2:
+            debug_start("jit-log-compiling-bridge")
+            logops = self._log_operations(inputargs, operations, ops_offset)
+            debug_stop("jit-log-compiling-bridge")
         else:
             debug_start("jit-log-opt-bridge")
             debug_print("# bridge out of Guard", number,
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -567,7 +567,7 @@
         assert isinstance(descr, compile.ResumeGuardDescr)
         modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo)
         try:
-            newboxes = modifier.finish(self.values, self.pendingfields)
+            newboxes = modifier.finish(self, self.pendingfields)
             if len(newboxes) > self.metainterp_sd.options.failargs_limit:
                 raise resume.TagOverflow
         except resume.TagOverflow:
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_multilabel.py
@@ -398,6 +398,40 @@
         with raises(InvalidLoop):
             self.optimize_loop(ops, ops)
 
+    def test_issue1045(self):
+        ops = """
+        [i55]
+        i73 = int_mod(i55, 2)
+        i75 = int_rshift(i73, 63)
+        i76 = int_and(2, i75)
+        i77 = int_add(i73, i76)
+        i81 = int_eq(i77, 1)
+        i0 = int_ge(i55, 1)
+        guard_true(i0) []
+        label(i55)
+        i3 = int_mod(i55, 2)
+        i5 = int_rshift(i3, 63)
+        i6 = int_and(2, i5)
+        i7 = int_add(i3, i6)
+        i8 = int_eq(i7, 1)
+        escape(i8)
+        jump(i55)
+        """
+        expected = """
+        [i55]
+        i73 = int_mod(i55, 2)
+        i75 = int_rshift(i73, 63)
+        i76 = int_and(2, i75)
+        i77 = int_add(i73, i76)
+        i81 = int_eq(i77, 1)
+        i0 = int_ge(i55, 1)
+        guard_true(i0) []
+        label(i55, i81)
+        escape(i81)
+        jump(i55, i81)
+        """
+        self.optimize_loop(ops, expected)
+        
 class OptRenameStrlen(Optimization):
     def propagate_forward(self, op):
         dispatch_opt(self, op)
@@ -423,7 +457,7 @@
         metainterp_sd = FakeMetaInterpStaticData(self.cpu)
         optimize_unroll(metainterp_sd, loop, [OptRenameStrlen(), OptPure()], True)
 
-    def test_optimizer_renaming_boxes(self):
+    def test_optimizer_renaming_boxes1(self):
         ops = """
         [p1]
         i1 = strlen(p1)
@@ -457,7 +491,6 @@
         jump(p1, i11)
         """
         self.optimize_loop(ops, expected)
-
         
 
 class TestLLtype(OptimizeoptTestMultiLabel, LLtypeMixin):
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -7796,6 +7796,23 @@
         """
         self.optimize_loop(ops, expected)
 
+    def test_issue1048_ok(self):
+        ops = """
+        [p1, i2, i3]
+        p16 = getfield_gc(p1, descr=nextdescr)
+        call(p16, descr=nonwritedescr)
+        guard_true(i2) [p16]
+        setfield_gc(p1, ConstPtr(myptr), descr=nextdescr)
+        jump(p1, i3, i2)
+        """
+        expected = """
+        [p1, i3]
+        call(ConstPtr(myptr), descr=nonwritedescr)
+        guard_true(i3) []
+        jump(p1, 1)
+        """
+        self.optimize_loop(ops, expected)
+
 class TestLLtype(OptimizeOptTest, LLtypeMixin):
     pass
 
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -260,7 +260,7 @@
             if op and op.result:
                 preamble_value = exported_state.exported_values[op.result]
                 value = self.optimizer.getvalue(op.result)
-                if not value.is_virtual():
+                if not value.is_virtual() and not value.is_constant():
                     imp = ValueImporter(self, preamble_value, op)
                     self.optimizer.importable_values[value] = imp
                 newvalue = self.optimizer.getvalue(op.result)
@@ -268,7 +268,9 @@
                 # note that emitting here SAME_AS should not happen, but
                 # in case it does, we would prefer to be suboptimal in asm
                 # to a fatal RPython exception.
-                if newresult is not op.result and not newvalue.is_constant():
+                if newresult is not op.result and \
+                   not self.short_boxes.has_producer(newresult) and \
+                   not newvalue.is_constant():
                     op = ResOperation(rop.SAME_AS, [op.result], newresult)
                     self.optimizer._newoperations.append(op)
                     if self.optimizer.loop.logops:
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -2349,7 +2349,7 @@
             # warmstate.py.
             virtualizable_box = self.virtualizable_boxes[-1]
             virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box)
-            assert not vinfo.gettoken(virtualizable)
+            assert not vinfo.is_token_nonnull_gcref(virtualizable)
             # fill the virtualizable with the local boxes
             self.synchronize_virtualizable()
         #
diff --git a/pypy/jit/metainterp/resume.py b/pypy/jit/metainterp/resume.py
--- a/pypy/jit/metainterp/resume.py
+++ b/pypy/jit/metainterp/resume.py
@@ -182,23 +182,22 @@
 
     # env numbering
 
-    def number(self, values, snapshot):
+    def number(self, optimizer, snapshot):
         if snapshot is None:
             return lltype.nullptr(NUMBERING), {}, 0
         if snapshot in self.numberings:
              numb, liveboxes, v = self.numberings[snapshot]
              return numb, liveboxes.copy(), v
 
-        numb1, liveboxes, v = self.number(values, snapshot.prev)
+        numb1, liveboxes, v = self.number(optimizer, snapshot.prev)
         n = len(liveboxes)-v
         boxes = snapshot.boxes
         length = len(boxes)
         numb = lltype.malloc(NUMBERING, length)
         for i in range(length):
             box = boxes[i]
-            value = values.get(box, None)
-            if value is not None:
-                box = value.get_key_box()
+            value = optimizer.getvalue(box)
+            box = value.get_key_box()
 
             if isinstance(box, Const):
                 tagged = self.getconst(box)
@@ -318,14 +317,14 @@
         _, tagbits = untag(tagged)
         return tagbits == TAGVIRTUAL
 
-    def finish(self, values, pending_setfields=[]):
+    def finish(self, optimizer, pending_setfields=[]):
         # compute the numbering
         storage = self.storage
         # make sure that nobody attached resume data to this guard yet
         assert not storage.rd_numb
         snapshot = storage.rd_snapshot
         assert snapshot is not None # is that true?
-        numb, liveboxes_from_env, v = self.memo.number(values, snapshot)
+        numb, liveboxes_from_env, v = self.memo.number(optimizer, snapshot)
         self.liveboxes_from_env = liveboxes_from_env
         self.liveboxes = {}
         storage.rd_numb = numb
@@ -341,23 +340,23 @@
                 liveboxes[i] = box
             else:
                 assert tagbits == TAGVIRTUAL
-                value = values[box]
+                value = optimizer.getvalue(box)
                 value.get_args_for_fail(self)
 
         for _, box, fieldbox, _ in pending_setfields:
             self.register_box(box)
             self.register_box(fieldbox)
-            value = values[fieldbox]
+            value = optimizer.getvalue(fieldbox)
             value.get_args_for_fail(self)
 
-        self._number_virtuals(liveboxes, values, v)
+        self._number_virtuals(liveboxes, optimizer, v)
         self._add_pending_fields(pending_setfields)
 
         storage.rd_consts = self.memo.consts
         dump_storage(storage, liveboxes)
         return liveboxes[:]
 
-    def _number_virtuals(self, liveboxes, values, num_env_virtuals):
+    def _number_virtuals(self, liveboxes, optimizer, num_env_virtuals):
         # !! 'liveboxes' is a list that is extend()ed in-place !!
         memo = self.memo
         new_liveboxes = [None] * memo.num_cached_boxes()
@@ -397,7 +396,7 @@
             memo.nvholes += length - len(vfieldboxes)
             for virtualbox, fieldboxes in vfieldboxes.iteritems():
                 num, _ = untag(self.liveboxes[virtualbox])
-                value = values[virtualbox]
+                value = optimizer.getvalue(virtualbox)
                 fieldnums = [self._gettagged(box)
                              for box in fieldboxes]
                 vinfo = value.make_virtual_info(self, fieldnums)
@@ -1102,14 +1101,14 @@
         virtualizable = self.decode_ref(numb.nums[index])
         if self.resume_after_guard_not_forced == 1:
             # in the middle of handle_async_forcing()
-            assert vinfo.gettoken(virtualizable)
-            vinfo.settoken(virtualizable, vinfo.TOKEN_NONE)
+            assert vinfo.is_token_nonnull_gcref(virtualizable)
+            vinfo.reset_token_gcref(virtualizable)
         else:
             # just jumped away from assembler (case 4 in the comment in
             # virtualizable.py) into tracing (case 2); check that vable_token
             # is and stays 0.  Note the call to reset_vable_token() in
             # warmstate.py.
-            assert not vinfo.gettoken(virtualizable)
+            assert not vinfo.is_token_nonnull_gcref(virtualizable)
         return vinfo.write_from_resume_data_partial(virtualizable, self, numb)
 
     def load_value_of_type(self, TYPE, tagged):
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -144,7 +144,7 @@
                            'int_mul': 1, 'guard_true': 2, 'int_sub': 2})
 
 
-    def test_loop_invariant_mul_ovf(self):
+    def test_loop_invariant_mul_ovf1(self):
         myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
         def f(x, y):
             res = 0
@@ -235,6 +235,65 @@
                            'guard_true': 4, 'int_sub': 4, 'jump': 3,
                            'int_mul': 3, 'int_add': 4})
 
+    def test_loop_invariant_mul_ovf2(self):
+        myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
+        def f(x, y):
+            res = 0
+            while y > 0:
+                myjitdriver.can_enter_jit(x=x, y=y, res=res)
+                myjitdriver.jit_merge_point(x=x, y=y, res=res)
+                b = y * 2
+                try:
+                    res += ovfcheck(x * x) + b
+                except OverflowError:
+                    res += 1
+                y -= 1
+            return res
+        res = self.meta_interp(f, [sys.maxint, 7])
+        assert res == f(sys.maxint, 7)
+        self.check_trace_count(1)
+        res = self.meta_interp(f, [6, 7])
+        assert res == 308
+
+    def test_loop_invariant_mul_bridge_ovf1(self):
+        myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2'])
+        def f(x1, x2, y):
+            res = 0
+            while y > 0:
+                myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res)
+                myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res)
+                try:
+                    res += ovfcheck(x1 * x1)
+                except OverflowError:
+                    res += 1
+                if y<32 and (y>>2)&1==0:
+                    x1, x2 = x2, x1
+                y -= 1
+            return res
+        res = self.meta_interp(f, [6, sys.maxint, 48])
+        assert res == f(6, sys.maxint, 48)
+
+    def test_loop_invariant_mul_bridge_ovf2(self):
+        myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x1', 'x2', 'n'])
+        def f(x1, x2, n, y):
+            res = 0
+            while y > 0:
+                myjitdriver.can_enter_jit(x1=x1, x2=x2, y=y, res=res, n=n)
+                myjitdriver.jit_merge_point(x1=x1, x2=x2, y=y, res=res, n=n)
+                try:
+                    res += ovfcheck(x1 * x1)
+                except OverflowError:
+                    res += 1
+                y -= 1
+                if y&4 == 0:
+                    x1, x2 = x2, x1
+            return res
+        res = self.meta_interp(f, [sys.maxint, 6, 32, 48])
+        assert res == f(sys.maxint, 6, 32, 48)
+        res = self.meta_interp(f, [6, sys.maxint, 32, 48])
+        assert res == f(6, sys.maxint, 32, 48)
+        
+
     def test_loop_invariant_intbox(self):
         myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
         class I:
@@ -2943,11 +3002,18 @@
         self.check_resops(arraylen_gc=3)
 
     def test_ulonglong_mod(self):
-        myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'i'])
+        myjitdriver = JitDriver(greens = [], reds = ['n', 'a'])
+        class A:
+            pass
         def f(n):
             sa = i = rffi.cast(rffi.ULONGLONG, 1)
+            a = A()
             while i < rffi.cast(rffi.ULONGLONG, n):
-                myjitdriver.jit_merge_point(sa=sa, n=n, i=i)
+                a.sa = sa
+                a.i = i
+                myjitdriver.jit_merge_point(n=n, a=a)
+                sa = a.sa
+                i = a.i
                 sa += sa % i
                 i += 1
         res = self.meta_interp(f, [32])
diff --git a/pypy/jit/metainterp/test/test_quasiimmut.py b/pypy/jit/metainterp/test/test_quasiimmut.py
--- a/pypy/jit/metainterp/test/test_quasiimmut.py
+++ b/pypy/jit/metainterp/test/test_quasiimmut.py
@@ -8,7 +8,7 @@
 from pypy.jit.metainterp.quasiimmut import get_current_qmut_instance
 from pypy.jit.metainterp.test.support import LLJitMixin
 from pypy.jit.codewriter.policy import StopAtXPolicy
-from pypy.rlib.jit import JitDriver, dont_look_inside
+from pypy.rlib.jit import JitDriver, dont_look_inside, unroll_safe
 
 
 def test_get_current_qmut_instance():
@@ -480,6 +480,32 @@
         assert res == 1
         self.check_jitcell_token_count(2)
 
+    def test_for_loop_array(self):
+        myjitdriver = JitDriver(greens=[], reds=["n", "i"])
+        class Foo(object):
+            _immutable_fields_ = ["x?[*]"]
+            def __init__(self, x):
+                self.x = x
+        f = Foo([1, 3, 5, 6])
+        @unroll_safe
+        def g(v):
+            for x in f.x:
+                if x & 1 == 0:
+                    v += 1
+            return v
+        def main(n):
+            i = 0
+            while i < n:
+                myjitdriver.jit_merge_point(n=n, i=i)
+                i = g(i)
+            return i
+        res = self.meta_interp(main, [10])
+        assert res == 10
+        self.check_resops({
+            "int_add": 2, "int_lt": 2, "jump": 1, "guard_true": 2,
+            "guard_not_invalidated": 2
+        })
+
 
 class TestLLtypeGreenFieldsTests(QuasiImmutTests, LLJitMixin):
     pass
diff --git a/pypy/jit/metainterp/test/test_resume.py b/pypy/jit/metainterp/test/test_resume.py
--- a/pypy/jit/metainterp/test/test_resume.py
+++ b/pypy/jit/metainterp/test/test_resume.py
@@ -18,6 +18,19 @@
     rd_virtuals = None
     rd_pendingfields = None
 
+
+class FakeOptimizer(object):
+    def __init__(self, values):
+        self.values = values
+        
+    def getvalue(self, box):
+        try:
+            value = self.values[box]
+        except KeyError:
+            value = self.values[box] = OptValue(box)
+        return value
+        
+
 def test_tag():
     assert tag(3, 1) == rffi.r_short(3<<2|1)
     assert tag(-3, 2) == rffi.r_short(-3<<2|2)
@@ -500,7 +513,7 @@
     capture_resumedata(fs, None, [], storage)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     metainterp = MyMetaInterp()
 
     b1t, b2t, b3t = [BoxInt(), BoxPtr(), BoxInt()]
@@ -524,7 +537,7 @@
     capture_resumedata(fs, [b4], [], storage)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     metainterp = MyMetaInterp()
 
     b1t, b2t, b3t, b4t = [BoxInt(), BoxPtr(), BoxInt(), BoxPtr()]
@@ -553,10 +566,10 @@
     
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
 
     modifier = ResumeDataVirtualAdder(storage2, memo)
-    liveboxes2 = modifier.finish({})
+    liveboxes2 = modifier.finish(FakeOptimizer({}))
 
     metainterp = MyMetaInterp()
 
@@ -617,7 +630,7 @@
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     values = {b2: virtual_value(b2, b5, c4)}
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert len(storage.rd_virtuals) == 1
     assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX),
                                                 tag(0, TAGCONST)]
@@ -628,7 +641,7 @@
     values = {b2: virtual_value(b2, b4, v6), b6: v6}
     memo.clear_box_virtual_numbers()
     modifier = ResumeDataVirtualAdder(storage2, memo)
-    liveboxes2 = modifier.finish(values)
+    liveboxes2 = modifier.finish(FakeOptimizer(values))
     assert len(storage2.rd_virtuals) == 2    
     assert storage2.rd_virtuals[0].fieldnums == [tag(len(liveboxes2)-1, TAGBOX),
                                                  tag(-1, TAGVIRTUAL)]
@@ -674,7 +687,7 @@
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     values = {b2: virtual_value(b2, b5, c4)}
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert len(storage.rd_virtuals) == 1
     assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX),
                                                 tag(0, TAGCONST)]
@@ -684,7 +697,7 @@
     capture_resumedata(fs, None, [], storage2)
     values[b4] = virtual_value(b4, b6, c4)
     modifier = ResumeDataVirtualAdder(storage2, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert len(storage2.rd_virtuals) == 2
     assert storage2.rd_virtuals[1].fieldnums == storage.rd_virtuals[0].fieldnums
     assert storage2.rd_virtuals[1] is storage.rd_virtuals[0]
@@ -703,7 +716,7 @@
     v1.setfield(LLtypeMixin.nextdescr, v2)
     values = {b1: v1, b2: v2}
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert liveboxes == [b3]
     assert len(storage.rd_virtuals) == 2
     assert storage.rd_virtuals[0].fieldnums == [tag(-1, TAGBOX),
@@ -776,7 +789,7 @@
 
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
 
-    numb, liveboxes, v = memo.number({}, snap1)
+    numb, liveboxes, v = memo.number(FakeOptimizer({}), snap1)
     assert v == 0
 
     assert liveboxes == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
@@ -788,7 +801,7 @@
                                     tag(0, TAGBOX), tag(2, TAGINT)]
     assert not numb.prev.prev
 
-    numb2, liveboxes2, v = memo.number({}, snap2)
+    numb2, liveboxes2, v = memo.number(FakeOptimizer({}), snap2)
     assert v == 0
     
     assert liveboxes2 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
@@ -813,7 +826,8 @@
             return self.virt
 
     # renamed
-    numb3, liveboxes3, v = memo.number({b3: FakeValue(False, c4)}, snap3)
+    numb3, liveboxes3, v = memo.number(FakeOptimizer({b3: FakeValue(False, c4)}),
+                                       snap3)
     assert v == 0
     
     assert liveboxes3 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX)}
@@ -825,7 +839,8 @@
     env4 = [c3, b4, b1, c3]
     snap4 = Snapshot(snap, env4)    
 
-    numb4, liveboxes4, v = memo.number({b4: FakeValue(True, b4)}, snap4)
+    numb4, liveboxes4, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4)}),
+                                       snap4)
     assert v == 1
     
     assert liveboxes4 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
@@ -837,8 +852,9 @@
     env5 = [b1, b4, b5]
     snap5 = Snapshot(snap4, env5)    
 
-    numb5, liveboxes5, v = memo.number({b4: FakeValue(True, b4),
-                                        b5: FakeValue(True, b5)}, snap5)
+    numb5, liveboxes5, v = memo.number(FakeOptimizer({b4: FakeValue(True, b4),
+                                                      b5: FakeValue(True, b5)}),
+                                       snap5)
     assert v == 2
     
     assert liveboxes5 == {b1: tag(0, TAGBOX), b2: tag(1, TAGBOX),
@@ -940,7 +956,7 @@
     storage = make_storage(b1s, b2s, b3s)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())    
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     assert storage.rd_snapshot is None
     cpu = MyCPU([])
     reader = ResumeDataDirectReader(MyMetaInterp(cpu), storage)
@@ -954,14 +970,14 @@
     storage = make_storage(b1s, b2s, b3s)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())
     modifier = ResumeDataVirtualAdder(storage, memo)
-    modifier.finish({})
+    modifier.finish(FakeOptimizer({}))
     assert len(memo.consts) == 2
     assert storage.rd_consts is memo.consts
 
     b1s, b2s, b3s = [ConstInt(sys.maxint), ConstInt(2**17), ConstInt(-65)]
     storage2 = make_storage(b1s, b2s, b3s)
     modifier2 = ResumeDataVirtualAdder(storage2, memo)
-    modifier2.finish({})
+    modifier2.finish(FakeOptimizer({}))
     assert len(memo.consts) == 3    
     assert storage2.rd_consts is memo.consts
 
@@ -1022,7 +1038,7 @@
 
     val = FakeValue()
     values = {b1s: val, b2s: val}  
-    liveboxes = modifier.finish(values)
+    liveboxes = modifier.finish(FakeOptimizer(values))
     assert storage.rd_snapshot is None
     b1t, b3t = [BoxInt(11), BoxInt(33)]
     newboxes = _resume_remap(liveboxes, [b1_2, b3s], b1t, b3t)
@@ -1043,7 +1059,7 @@
     storage = make_storage(b1s, b2s, b3s)
     memo = ResumeDataLoopMemo(FakeMetaInterpStaticData())        
     modifier = ResumeDataVirtualAdder(storage, memo)
-    liveboxes = modifier.finish({})
+    liveboxes = modifier.finish(FakeOptimizer({}))
     b2t, b3t = [BoxPtr(demo55o), BoxInt(33)]
     newboxes = _resume_remap(liveboxes, [b2s, b3s], b2t, b3t)
     metainterp = MyMetaInterp()
@@ -1086,7 +1102,7 @@
     values = {b2s: v2, b4s: v4}
 
     liveboxes = []
-    modifier._number_virtuals(liveboxes, values, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0)
     storage.rd_consts = memo.consts[:]
     storage.rd_numb = None
     # resume
@@ -1156,7 +1172,7 @@
     modifier.register_virtual_fields(b2s, [b4s, c1s])
     liveboxes = []
     values = {b2s: v2}
-    modifier._number_virtuals(liveboxes, values, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0)
     dump_storage(storage, liveboxes)
     storage.rd_consts = memo.consts[:]
     storage.rd_numb = None
@@ -1203,7 +1219,7 @@
     v2.setfield(LLtypeMixin.bdescr, OptValue(b4s))
     modifier.register_virtual_fields(b2s, [c1s, b4s])
     liveboxes = []
-    modifier._number_virtuals(liveboxes, {b2s: v2}, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer({b2s: v2}), 0)
     dump_storage(storage, liveboxes)
     storage.rd_consts = memo.consts[:]
     storage.rd_numb = None
@@ -1249,7 +1265,7 @@
 
     values = {b4s: v4, b2s: v2}
     liveboxes = []
-    modifier._number_virtuals(liveboxes, values, 0)
+    modifier._number_virtuals(liveboxes, FakeOptimizer(values), 0)
     assert liveboxes == [b2s, b4s] or liveboxes == [b4s, b2s]
     modifier._add_pending_fields([(LLtypeMixin.nextdescr, b2s, b4s, -1)])
     storage.rd_consts = memo.consts[:]
diff --git a/pypy/jit/metainterp/virtualizable.py b/pypy/jit/metainterp/virtualizable.py
--- a/pypy/jit/metainterp/virtualizable.py
+++ b/pypy/jit/metainterp/virtualizable.py
@@ -262,15 +262,15 @@
         force_now._dont_inline_ = True
         self.force_now = force_now
 
-        def gettoken(virtualizable):
+        def is_token_nonnull_gcref(virtualizable):
             virtualizable = cast_gcref_to_vtype(virtualizable)
-            return virtualizable.vable_token
-        self.gettoken = gettoken
+            return bool(virtualizable.vable_token)
+        self.is_token_nonnull_gcref = is_token_nonnull_gcref
 
-        def settoken(virtualizable, token):
+        def reset_token_gcref(virtualizable):
             virtualizable = cast_gcref_to_vtype(virtualizable)
-            virtualizable.vable_token = token
-        self.settoken = settoken
+            virtualizable.vable_token = VirtualizableInfo.TOKEN_NONE
+        self.reset_token_gcref = reset_token_gcref
 
     def _freeze_(self):
         return True
diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py
--- a/pypy/jit/metainterp/warmspot.py
+++ b/pypy/jit/metainterp/warmspot.py
@@ -453,7 +453,7 @@
                     if sys.stdout == sys.__stdout__:
                         import pdb; pdb.post_mortem(tb)
                     raise e.__class__, e, tb
-                fatalerror('~~~ Crash in JIT! %s' % (e,), traceback=True)
+                fatalerror('~~~ Crash in JIT! %s' % (e,))
         crash_in_jit._dont_inline_ = True
 
         if self.translator.rtyper.type_system.name == 'lltypesystem':
diff --git a/pypy/jit/tl/tinyframe/tinyframe.py b/pypy/jit/tl/tinyframe/tinyframe.py
--- a/pypy/jit/tl/tinyframe/tinyframe.py
+++ b/pypy/jit/tl/tinyframe/tinyframe.py
@@ -210,7 +210,7 @@
     def repr(self):
         return "<function %s(%s)>" % (self.outer.repr(), self.inner.repr())
 
-driver = JitDriver(greens = ['code', 'i'], reds = ['self'],
+driver = JitDriver(greens = ['i', 'code'], reds = ['self'],
                    virtualizables = ['self'])
 
 class Frame(object):
diff --git a/pypy/module/_demo/test/test_sieve.py b/pypy/module/_demo/test/test_sieve.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_demo/test/test_sieve.py
@@ -0,0 +1,12 @@
+from pypy.conftest import gettestobjspace
+
+
+class AppTestSieve:
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=('_demo',))
+
+    def test_sieve(self):
+        import _demo
+        lst = _demo.sieve(100)
+        assert lst == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+                       43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]
diff --git a/pypy/module/_file/interp_file.py b/pypy/module/_file/interp_file.py
--- a/pypy/module/_file/interp_file.py
+++ b/pypy/module/_file/interp_file.py
@@ -5,14 +5,13 @@
 from pypy.rlib import streamio
 from pypy.rlib.rarithmetic import r_longlong
 from pypy.rlib.rstring import StringBuilder
-from pypy.module._file.interp_stream import (W_AbstractStream, StreamErrors,
-    wrap_streamerror, wrap_oserror_as_ioerror)
+from pypy.module._file.interp_stream import W_AbstractStream, StreamErrors
 from pypy.module.posix.interp_posix import dispatch_filename
 from pypy.interpreter.error import OperationError, operationerrfmt
 from pypy.interpreter.typedef import (TypeDef, GetSetProperty,
     interp_attrproperty, make_weakref_descr, interp_attrproperty_w)
 from pypy.interpreter.gateway import interp2app, unwrap_spec
-
+from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror
 
 class W_File(W_AbstractStream):
     """An interp-level file object.  This implements the same interface than
diff --git a/pypy/module/_file/interp_stream.py b/pypy/module/_file/interp_stream.py
--- a/pypy/module/_file/interp_stream.py
+++ b/pypy/module/_file/interp_stream.py
@@ -2,27 +2,13 @@
 from pypy.rlib import streamio
 from pypy.rlib.streamio import StreamErrors
 
-from pypy.interpreter.error import OperationError, wrap_oserror2
+from pypy.interpreter.error import OperationError
 from pypy.interpreter.baseobjspace import ObjSpace, Wrappable
 from pypy.interpreter.typedef import TypeDef
 from pypy.interpreter.gateway import interp2app
+from pypy.interpreter.streamutil import wrap_streamerror, wrap_oserror_as_ioerror
 
 
-def wrap_streamerror(space, e, w_filename=None):
-    if isinstance(e, streamio.StreamError):
-        return OperationError(space.w_ValueError,
-                              space.wrap(e.message))
-    elif isinstance(e, OSError):
-        return wrap_oserror_as_ioerror(space, e, w_filename)
-    else:
-        # should not happen: wrap_streamerror() is only called when
-        # StreamErrors = (OSError, StreamError) are raised
-        return OperationError(space.w_IOError, space.w_None)
-
-def wrap_oserror_as_ioerror(space, e, w_filename=None):
-    return wrap_oserror2(space, e, w_filename,
-                         w_exception_class=space.w_IOError)
-
 class W_AbstractStream(Wrappable):
     """Base class for interp-level objects that expose streams to app-level"""
     slock = None
diff --git a/pypy/module/_io/__init__.py b/pypy/module/_io/__init__.py
--- a/pypy/module/_io/__init__.py
+++ b/pypy/module/_io/__init__.py
@@ -28,6 +28,7 @@
         }
 
     def init(self, space):
+        MixedModule.init(self, space)
         w_UnsupportedOperation = space.call_function(
             space.w_type,
             space.wrap('UnsupportedOperation'),
@@ -35,3 +36,9 @@
             space.newdict())
         space.setattr(self, space.wrap('UnsupportedOperation'),
                       w_UnsupportedOperation)
+
+    def shutdown(self, space):
+        # at shutdown, flush all open streams.  Ignore I/O errors.
+        from pypy.module._io.interp_iobase import get_autoflusher
+        get_autoflusher(space).flush_all(space)
+
diff --git a/pypy/module/_io/interp_iobase.py b/pypy/module/_io/interp_iobase.py
--- a/pypy/module/_io/interp_iobase.py
+++ b/pypy/module/_io/interp_iobase.py
@@ -5,6 +5,8 @@
 from pypy.interpreter.gateway import interp2app
 from pypy.interpreter.error import OperationError, operationerrfmt
 from pypy.rlib.rstring import StringBuilder
+from pypy.rlib import rweakref
+
 
 DEFAULT_BUFFER_SIZE = 8192
 
@@ -43,6 +45,8 @@
         self.space = space
         self.w_dict = space.newdict()
         self.__IOBase_closed = False
+        self.streamholder = None # needed by AutoFlusher
+        get_autoflusher(space).add(self)
 
     def getdict(self, space):
         return self.w_dict
@@ -98,6 +102,7 @@
             space.call_method(self, "flush")
         finally:
             self.__IOBase_closed = True
+            get_autoflusher(space).remove(self)
 
     def flush_w(self, space):
         if self._CLOSED():
@@ -303,3 +308,60 @@
     read = interp2app(W_RawIOBase.read_w),
     readall = interp2app(W_RawIOBase.readall_w),
 )
+
+
+# ------------------------------------------------------------
+# functions to make sure that all streams are flushed on exit
+# ------------------------------------------------------------
+
+class StreamHolder(object):
+
+    def __init__(self, w_iobase):
+        self.w_iobase_ref = rweakref.ref(w_iobase)
+        w_iobase.autoflusher = self
+
+    def autoflush(self, space):
+        w_iobase = self.w_iobase_ref()
+        if w_iobase is not None:
+            try:
+                space.call_method(w_iobase, 'flush')
+            except OperationError, e:
+                # if it's an IOError or ValueError, ignore it (ValueError is
+                # raised if by chance we are trying to flush a file which has
+                # already been closed)
+                if not (e.match(space, space.w_IOError) or
+                        e.match(space, space.w_ValueError)):
+                    raise
+        
+
+class AutoFlusher(object):
+    
+    def __init__(self, space):
+        self.streams = {}
+
+    def add(self, w_iobase):
+        assert w_iobase.streamholder is None
+        holder = StreamHolder(w_iobase)
+        w_iobase.streamholder = holder
+        self.streams[holder] = None
+
+    def remove(self, w_iobase):
+        holder = w_iobase.streamholder
+        if holder is not None:
+            del self.streams[holder]
+
+    def flush_all(self, space):
+        while self.streams:
+            for streamholder in self.streams.keys():
+                try:
+                    del self.streams[streamholder]
+                except KeyError:
+                    pass    # key was removed in the meantime
+                else:
+                    streamholder.autoflush(space)
+
+
+def get_autoflusher(space):
+    return space.fromcache(AutoFlusher)
+
+
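
The StreamHolder/AutoFlusher machinery added above keeps a weak reference to every open _io stream and, at space shutdown, flushes whichever streams are still alive while swallowing IOError and ValueError from files that were already closed. A minimal plain-Python sketch of the same idea (standalone and illustrative only, not the interp-level classes above):

    import weakref

    class AutoFlusherSketch(object):
        def __init__(self):
            self._streams = {}                 # weakref -> None, used as a set

        def add(self, stream):
            self._streams[weakref.ref(stream)] = None

        def flush_all(self):
            # iterate over a snapshot: flushing may unregister or close streams
            for ref in list(self._streams):
                self._streams.pop(ref, None)
                stream = ref()
                if stream is None:
                    continue                   # stream was garbage collected
                try:
                    stream.flush()
                except (IOError, ValueError):
                    pass                       # already closed or broken: ignore

The interp-level version additionally stores the holder on the stream itself so that close() can unregister it eagerly instead of waiting for shutdown.
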
diff --git a/pypy/module/_io/test/test_fileio.py b/pypy/module/_io/test/test_fileio.py
--- a/pypy/module/_io/test/test_fileio.py
+++ b/pypy/module/_io/test/test_fileio.py
@@ -160,3 +160,42 @@
         f.close()
         assert repr(f) == "<_io.FileIO [closed]>"
 
+def test_flush_at_exit():
+    from pypy import conftest
+    from pypy.tool.option import make_config, make_objspace
+    from pypy.tool.udir import udir
+
+    tmpfile = udir.join('test_flush_at_exit')
+    config = make_config(conftest.option)
+    space = make_objspace(config)
+    space.appexec([space.wrap(str(tmpfile))], """(tmpfile):
+        import io
+        f = io.open(tmpfile, 'w', encoding='ascii')
+        f.write('42')
+        # no flush() and no close()
+        import sys; sys._keepalivesomewhereobscure = f
+    """)
+    space.finish()
+    assert tmpfile.read() == '42'
+
+def test_flush_at_exit_IOError_and_ValueError():
+    from pypy import conftest
+    from pypy.tool.option import make_config, make_objspace
+
+    config = make_config(conftest.option)
+    space = make_objspace(config)
+    space.appexec([], """():
+        import io
+        class MyStream(io.IOBase):
+            def flush(self):
+                raise IOError
+
+        class MyStream2(io.IOBase):
+            def flush(self):
+                raise ValueError
+
+        s = MyStream()
+        s2 = MyStream2()
+        import sys; sys._keepalivesomewhereobscure = s
+    """)
+    space.finish() # the IOError has been ignored
diff --git a/pypy/module/cpyext/api.py b/pypy/module/cpyext/api.py
--- a/pypy/module/cpyext/api.py
+++ b/pypy/module/cpyext/api.py
@@ -385,6 +385,7 @@
         "Tuple": "space.w_tuple",
         "List": "space.w_list",
         "Set": "space.w_set",
+        "FrozenSet": "space.w_frozenset",
         "Int": "space.w_int",
         "Bool": "space.w_bool",
         "Float": "space.w_float",
@@ -406,7 +407,7 @@
         }.items():
         GLOBALS['Py%s_Type#' % (cpyname, )] = ('PyTypeObject*', pypyexpr)
 
-    for cpyname in 'Method List Int Long Dict Tuple Class'.split():
+    for cpyname in 'Method List Long Dict Tuple Class'.split():
         FORWARD_DECLS.append('typedef struct { PyObject_HEAD } '
                              'Py%sObject' % (cpyname, ))
 build_exported_objects()
diff --git a/pypy/module/cpyext/dictobject.py b/pypy/module/cpyext/dictobject.py
--- a/pypy/module/cpyext/dictobject.py
+++ b/pypy/module/cpyext/dictobject.py
@@ -184,8 +184,10 @@
         w_item = space.call_method(w_iter, "next")
         w_key, w_value = space.fixedview(w_item, 2)
         state = space.fromcache(RefcountState)
-        pkey[0]   = state.make_borrowed(w_dict, w_key)
-        pvalue[0] = state.make_borrowed(w_dict, w_value)
+        if pkey:
+            pkey[0]   = state.make_borrowed(w_dict, w_key)
+        if pvalue:
+            pvalue[0] = state.make_borrowed(w_dict, w_value)
         ppos[0] += 1
     except OperationError, e:
         if not e.match(space, space.w_StopIteration):
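
With the change above, PyDict_Next tolerates a NULL pkey or pvalue, so extension code can walk only the keys or only the values of a dict (exercised by test_iterkeys later in this diff). The plain-Python equivalent of the two iteration patterns, for illustration:

    d = {'a': 1, 'b': 2}
    keys = list(d)                 # PyDict_Next(w_dict, &pos, &key, NULL)
    values = [d[k] for k in d]     # PyDict_Next(w_dict, &pos, NULL, &value)
    assert sorted(keys) == ['a', 'b']
    assert sorted(values) == [1, 2]
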
diff --git a/pypy/module/cpyext/eval.py b/pypy/module/cpyext/eval.py
--- a/pypy/module/cpyext/eval.py
+++ b/pypy/module/cpyext/eval.py
@@ -1,16 +1,24 @@
 from pypy.interpreter.error import OperationError
+from pypy.interpreter.astcompiler import consts
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.module.cpyext.api import (
     cpython_api, CANNOT_FAIL, CONST_STRING, FILEP, fread, feof, Py_ssize_tP,
     cpython_struct)
 from pypy.module.cpyext.pyobject import PyObject, borrow_from
 from pypy.module.cpyext.pyerrors import PyErr_SetFromErrno
+from pypy.module.cpyext.funcobject import PyCodeObject
 from pypy.module.__builtin__ import compiling
 
 PyCompilerFlags = cpython_struct(
-    "PyCompilerFlags", ())
+    "PyCompilerFlags", (("cf_flags", rffi.INT),))
 PyCompilerFlagsPtr = lltype.Ptr(PyCompilerFlags)
 
+PyCF_MASK = (consts.CO_FUTURE_DIVISION | 
+             consts.CO_FUTURE_ABSOLUTE_IMPORT |
+             consts.CO_FUTURE_WITH_STATEMENT |
+             consts.CO_FUTURE_PRINT_FUNCTION |
+             consts.CO_FUTURE_UNICODE_LITERALS)
+
 @cpython_api([PyObject, PyObject, PyObject], PyObject)
 def PyEval_CallObjectWithKeywords(space, w_obj, w_arg, w_kwds):
     return space.call(w_obj, w_arg, w_kwds)
@@ -48,6 +56,17 @@
         return None
     return borrow_from(None, caller.w_globals)
 
+@cpython_api([PyCodeObject, PyObject, PyObject], PyObject)
+def PyEval_EvalCode(space, w_code, w_globals, w_locals):
+    """This is a simplified interface to PyEval_EvalCodeEx(), with just
+    the code object, and the dictionaries of global and local variables.
+    The other arguments are set to NULL."""
+    if w_globals is None:
+        w_globals = space.w_None
+    if w_locals is None:
+        w_locals = space.w_None
+    return compiling.eval(space, w_code, w_globals, w_locals)
+
 @cpython_api([PyObject, PyObject], PyObject)
 def PyObject_CallObject(space, w_obj, w_arg):
     """
@@ -74,7 +93,7 @@
 Py_file_input = 257
 Py_eval_input = 258
 
-def compile_string(space, source, filename, start):
+def compile_string(space, source, filename, start, flags=0):
     w_source = space.wrap(source)
     start = rffi.cast(lltype.Signed, start)
     if start == Py_file_input:
@@ -86,7 +105,7 @@
     else:
         raise OperationError(space.w_ValueError, space.wrap(
             "invalid mode parameter for compilation"))
-    return compiling.compile(space, w_source, filename, mode)
+    return compiling.compile(space, w_source, filename, mode, flags)
 
 def run_string(space, source, filename, start, w_globals, w_locals):
     w_code = compile_string(space, source, filename, start)
@@ -109,6 +128,24 @@
     filename = "<string>"
     return run_string(space, source, filename, start, w_globals, w_locals)
 
+@cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject,
+              PyCompilerFlagsPtr], PyObject)
+def PyRun_StringFlags(space, source, start, w_globals, w_locals, flagsptr):
+    """Execute Python source code from str in the context specified by the
+    dictionaries globals and locals with the compiler flags specified by
+    flags.  The parameter start specifies the start token that should be used to
+    parse the source code.
+
+    Returns the result of executing the code as a Python object, or NULL if an
+    exception was raised."""
+    source = rffi.charp2str(source)
+    if flagsptr:
+        flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags)
+    else:
+        flags = 0
+    w_code = compile_string(space, source, "<string>", start, flags)
+    return compiling.eval(space, w_code, w_globals, w_locals)
+
 @cpython_api([FILEP, CONST_STRING, rffi.INT_real, PyObject, PyObject], PyObject)
 def PyRun_File(space, fp, filename, start, w_globals, w_locals):
     """This is a simplified interface to PyRun_FileExFlags() below, leaving
@@ -150,7 +187,7 @@
 
 @cpython_api([rffi.CCHARP, rffi.CCHARP, rffi.INT_real, PyCompilerFlagsPtr],
              PyObject)
-def Py_CompileStringFlags(space, source, filename, start, flags):
+def Py_CompileStringFlags(space, source, filename, start, flagsptr):
     """Parse and compile the Python source code in str, returning the
     resulting code object.  The start token is given by start; this
     can be used to constrain the code which can be compiled and should
@@ -160,7 +197,30 @@
     returns NULL if the code cannot be parsed or compiled."""
     source = rffi.charp2str(source)
     filename = rffi.charp2str(filename)
-    if flags:
-        raise OperationError(space.w_NotImplementedError, space.wrap(
-                "cpyext Py_CompileStringFlags does not accept flags"))
-    return compile_string(space, source, filename, start)
+    if flagsptr:
+        flags = rffi.cast(lltype.Signed, flagsptr.c_cf_flags)
+    else:
+        flags = 0
+    return compile_string(space, source, filename, start, flags)
+
+@cpython_api([PyCompilerFlagsPtr], rffi.INT_real, error=CANNOT_FAIL)
+def PyEval_MergeCompilerFlags(space, cf):
+    """This function changes the flags of the current evaluation
+    frame, and returns true on success, false on failure."""
+    flags = rffi.cast(lltype.Signed, cf.c_cf_flags)
+    result = flags != 0
+    current_frame = space.getexecutioncontext().gettopframe_nohidden()
+    if current_frame:
+        codeflags = current_frame.pycode.co_flags
+        compilerflags = codeflags & PyCF_MASK
+        if compilerflags:
+            result = 1
+            flags |= compilerflags
+        # No future keyword at the moment
+        # if codeflags & CO_GENERATOR_ALLOWED:
+        #     result = 1
+        #     flags |= CO_GENERATOR_ALLOWED
+    cf.c_cf_flags = rffi.cast(rffi.INT, flags)
+    return result
+
+        
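
The eval.py changes give PyCompilerFlags a real cf_flags field and thread it through compile_string(), PyRun_StringFlags(), Py_CompileStringFlags() and the new PyEval_MergeCompilerFlags(), so that __future__ bits (masked by PyCF_MASK) from the calling frame reach the compiler; PyEval_EvalCode is essentially eval(code, globals, locals) on the object space. A small stdlib-only sketch of the flag effect on CPython 2.7 (illustrative, not PyPy internals):

    import __future__

    # __future__.division.compiler_flag is 0x2000 on CPython 2.7, the same bit
    # as CO_FUTURE_DIVISION in the code.h hunk below.
    flags = __future__.division.compiler_flag
    code = compile("result = 1 / 2", "<string>", "exec", flags)
    ns = {}
    exec(code, ns)
    assert ns["result"] == 0.5     # true division was compiled in via the flag

    # PyEval_EvalCode(code, globals, locals) corresponds roughly to:
    assert eval(compile("1 + 2", "<string>", "eval"), {}, {}) == 3
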
diff --git a/pypy/module/cpyext/funcobject.py b/pypy/module/cpyext/funcobject.py
--- a/pypy/module/cpyext/funcobject.py
+++ b/pypy/module/cpyext/funcobject.py
@@ -1,6 +1,6 @@
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.module.cpyext.api import (
-    PyObjectFields, generic_cpy_call, CONST_STRING,
+    PyObjectFields, generic_cpy_call, CONST_STRING, CANNOT_FAIL,
     cpython_api, bootstrap_function, cpython_struct, build_type_checkers)
 from pypy.module.cpyext.pyobject import (
     PyObject, make_ref, from_ref, Py_DecRef, make_typedescr, borrow_from)
@@ -48,6 +48,7 @@
 
 PyFunction_Check, PyFunction_CheckExact = build_type_checkers("Function", Function)
 PyMethod_Check, PyMethod_CheckExact = build_type_checkers("Method", Method)
+PyCode_Check, PyCode_CheckExact = build_type_checkers("Code", PyCode)
 
 def function_attach(space, py_obj, w_obj):
     py_func = rffi.cast(PyFunctionObject, py_obj)
@@ -167,3 +168,9 @@
                              freevars=[],
                              cellvars=[]))
 
+@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
+def PyCode_GetNumFree(space, w_co):
+    """Return the number of free variables in co."""
+    co = space.interp_w(PyCode, w_co)
+    return len(co.co_freevars)
+
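
PyCode_GetNumFree moves out of the stubs and simply reports len(co.co_freevars), i.e. the number of closure variables of a code object. The pure-Python equivalent, mirroring the new test_getnumfree later in this diff:

    def make_method():
        a = 5
        def method(x):
            return a, x            # 'a' is a free (closure) variable of 'method'
        return method

    method = make_method()
    code = method.__code__          # same object as method.func_code on Python 2
    assert code.co_freevars == ('a',)
    assert len(code.co_freevars) == 1   # what PyCode_GetNumFree returns
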
diff --git a/pypy/module/cpyext/include/Python.h b/pypy/module/cpyext/include/Python.h
--- a/pypy/module/cpyext/include/Python.h
+++ b/pypy/module/cpyext/include/Python.h
@@ -113,6 +113,7 @@
 #include "compile.h"
 #include "frameobject.h"
 #include "eval.h"
+#include "pymath.h"
 #include "pymem.h"
 #include "pycobject.h"
 #include "pycapsule.h"
diff --git a/pypy/module/cpyext/include/code.h b/pypy/module/cpyext/include/code.h
--- a/pypy/module/cpyext/include/code.h
+++ b/pypy/module/cpyext/include/code.h
@@ -13,13 +13,19 @@
 
 /* Masks for co_flags above */
 /* These values are also in funcobject.py */
-#define CO_OPTIMIZED	0x0001
-#define CO_NEWLOCALS	0x0002
-#define CO_VARARGS	0x0004
-#define CO_VARKEYWORDS	0x0008
+#define CO_OPTIMIZED    0x0001
+#define CO_NEWLOCALS    0x0002
+#define CO_VARARGS      0x0004
+#define CO_VARKEYWORDS  0x0008
 #define CO_NESTED       0x0010
 #define CO_GENERATOR    0x0020
 
+#define CO_FUTURE_DIVISION         0x02000
+#define CO_FUTURE_ABSOLUTE_IMPORT  0x04000
+#define CO_FUTURE_WITH_STATEMENT   0x08000
+#define CO_FUTURE_PRINT_FUNCTION   0x10000
+#define CO_FUTURE_UNICODE_LITERALS 0x20000
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/pypy/module/cpyext/include/intobject.h b/pypy/module/cpyext/include/intobject.h
--- a/pypy/module/cpyext/include/intobject.h
+++ b/pypy/module/cpyext/include/intobject.h
@@ -7,6 +7,11 @@
 extern "C" {
 #endif
 
+typedef struct {
+    PyObject_HEAD
+    long ob_ival;
+} PyIntObject;
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/pypy/module/cpyext/include/object.h b/pypy/module/cpyext/include/object.h
--- a/pypy/module/cpyext/include/object.h
+++ b/pypy/module/cpyext/include/object.h
@@ -56,6 +56,8 @@
 #define Py_TYPE(ob)		(((PyObject*)(ob))->ob_type)
 #define Py_SIZE(ob)		(((PyVarObject*)(ob))->ob_size)
 
+#define _Py_ForgetReference(ob) /* nothing */
+
 #define Py_None (&_Py_NoneStruct)
 
 /*
diff --git a/pypy/module/cpyext/include/pymath.h b/pypy/module/cpyext/include/pymath.h
new file mode 100644
--- /dev/null
+++ b/pypy/module/cpyext/include/pymath.h
@@ -0,0 +1,20 @@
+#ifndef Py_PYMATH_H
+#define Py_PYMATH_H
+
+/**************************************************************************
+Symbols and macros to supply platform-independent interfaces to mathematical
+functions and constants
+**************************************************************************/
+
+/* HUGE_VAL is supposed to expand to a positive double infinity.  Python
+ * uses Py_HUGE_VAL instead because some platforms are broken in this
+ * respect.  We used to embed code in pyport.h to try to worm around that,
+ * but different platforms are broken in conflicting ways.  If you're on
+ * a platform where HUGE_VAL is defined incorrectly, fiddle your Python
+ * config to #define Py_HUGE_VAL to something that works on your platform.
+ */
+#ifndef Py_HUGE_VAL
+#define Py_HUGE_VAL HUGE_VAL
+#endif
+
+#endif /* Py_PYMATH_H */
diff --git a/pypy/module/cpyext/include/pythonrun.h b/pypy/module/cpyext/include/pythonrun.h
--- a/pypy/module/cpyext/include/pythonrun.h
+++ b/pypy/module/cpyext/include/pythonrun.h
@@ -19,6 +19,14 @@
     int cf_flags;  /* bitmask of CO_xxx flags relevant to future */
 } PyCompilerFlags;
 
+#define PyCF_MASK (CO_FUTURE_DIVISION | CO_FUTURE_ABSOLUTE_IMPORT | \
+                   CO_FUTURE_WITH_STATEMENT | CO_FUTURE_PRINT_FUNCTION | \
+                   CO_FUTURE_UNICODE_LITERALS)
+#define PyCF_MASK_OBSOLETE (CO_NESTED)
+#define PyCF_SOURCE_IS_UTF8  0x0100
+#define PyCF_DONT_IMPLY_DEDENT 0x0200
+#define PyCF_ONLY_AST 0x0400
+
 #define Py_CompileString(str, filename, start) Py_CompileStringFlags(str, filename, start, NULL)
 
 #ifdef __cplusplus
diff --git a/pypy/module/cpyext/intobject.py b/pypy/module/cpyext/intobject.py
--- a/pypy/module/cpyext/intobject.py
+++ b/pypy/module/cpyext/intobject.py
@@ -2,11 +2,37 @@
 from pypy.rpython.lltypesystem import rffi, lltype
 from pypy.interpreter.error import OperationError
 from pypy.module.cpyext.api import (
-    cpython_api, build_type_checkers, PyObject,
-    CONST_STRING, CANNOT_FAIL, Py_ssize_t)
+    cpython_api, cpython_struct, build_type_checkers, bootstrap_function,
+    PyObject, PyObjectFields, CONST_STRING, CANNOT_FAIL, Py_ssize_t)
+from pypy.module.cpyext.pyobject import (
+    make_typedescr, track_reference, RefcountState, from_ref)
 from pypy.rlib.rarithmetic import r_uint, intmask, LONG_TEST
+from pypy.objspace.std.intobject import W_IntObject
 import sys
 
+PyIntObjectStruct = lltype.ForwardReference()
+PyIntObject = lltype.Ptr(PyIntObjectStruct)
+PyIntObjectFields = PyObjectFields + \
+    (("ob_ival", rffi.LONG),)
+cpython_struct("PyIntObject", PyIntObjectFields, PyIntObjectStruct)
+
+@bootstrap_function
+def init_intobject(space):
+    "Type description of PyIntObject"
+    make_typedescr(space.w_int.instancetypedef,
+                   basestruct=PyIntObject.TO,
+                   realize=int_realize)
+
+def int_realize(space, obj):
+    intval = rffi.cast(lltype.Signed, rffi.cast(PyIntObject, obj).c_ob_ival)
+    w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
+    w_obj = space.allocate_instance(W_IntObject, w_type)
+    w_obj.__init__(intval)
+    track_reference(space, obj, w_obj)
+    state = space.fromcache(RefcountState)
+    state.set_lifeline(w_obj, obj)
+    return w_obj
+
 PyInt_Check, PyInt_CheckExact = build_type_checkers("Int")
 
 @cpython_api([], lltype.Signed, error=CANNOT_FAIL)
diff --git a/pypy/module/cpyext/object.py b/pypy/module/cpyext/object.py
--- a/pypy/module/cpyext/object.py
+++ b/pypy/module/cpyext/object.py
@@ -193,7 +193,7 @@
     if not obj:
         PyErr_NoMemory(space)
     obj.c_ob_type = type
-    _Py_NewReference(space, obj)
+    obj.c_ob_refcnt = 1
     return obj
 
 @cpython_api([PyVarObject, PyTypeObjectPtr, Py_ssize_t], PyObject)
diff --git a/pypy/module/cpyext/pyobject.py b/pypy/module/cpyext/pyobject.py
--- a/pypy/module/cpyext/pyobject.py
+++ b/pypy/module/cpyext/pyobject.py
@@ -17,6 +17,7 @@
 
 class BaseCpyTypedescr(object):
     basestruct = PyObject.TO
+    W_BaseObject = W_ObjectObject
 
     def get_dealloc(self, space):
         from pypy.module.cpyext.typeobject import subtype_dealloc
@@ -51,10 +52,14 @@
     def attach(self, space, pyobj, w_obj):
         pass
 
-    def realize(self, space, ref):
-        # For most types, a reference cannot exist without
-        # a real interpreter object
-        raise InvalidPointerException(str(ref))
+    def realize(self, space, obj):
+        w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
+        w_obj = space.allocate_instance(self.W_BaseObject, w_type)
+        track_reference(space, obj, w_obj)
+        if w_type is not space.gettypefor(self.W_BaseObject):
+            state = space.fromcache(RefcountState)
+            state.set_lifeline(w_obj, obj)
+        return w_obj
 
 typedescr_cache = {}
 
@@ -369,13 +374,7 @@
     obj.c_ob_refcnt = 1
     w_type = from_ref(space, rffi.cast(PyObject, obj.c_ob_type))
     assert isinstance(w_type, W_TypeObject)
-    if w_type.is_cpytype():
-        w_obj = space.allocate_instance(W_ObjectObject, w_type)
-        track_reference(space, obj, w_obj)
-        state = space.fromcache(RefcountState)
-        state.set_lifeline(w_obj, obj)
-    else:
-        assert False, "Please add more cases in _Py_NewReference()"
+    get_typedescr(w_type.instancetypedef).realize(space, obj)
 
 def _Py_Dealloc(space, obj):
     from pypy.module.cpyext.api import generic_cpy_call_dont_decref
diff --git a/pypy/module/cpyext/stubs.py b/pypy/module/cpyext/stubs.py
--- a/pypy/module/cpyext/stubs.py
+++ b/pypy/module/cpyext/stubs.py
@@ -182,16 +182,6 @@
     used as the positional and keyword parameters to the object's constructor."""
     raise NotImplementedError
 
-@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
-def PyCode_Check(space, co):
-    """Return true if co is a code object"""
-    raise NotImplementedError
-
-@cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
-def PyCode_GetNumFree(space, co):
-    """Return the number of free variables in co."""
-    raise NotImplementedError
-
 @cpython_api([PyObject], rffi.INT_real, error=-1)
 def PyCodec_Register(space, search_function):
     """Register a new codec search function.
@@ -1293,28 +1283,6 @@
     that haven't been explicitly destroyed at that point."""
     raise NotImplementedError
 
-@cpython_api([rffi.VOIDP], lltype.Void)
-def Py_AddPendingCall(space, func):
-    """Post a notification to the Python main thread.  If successful, func will
-    be called with the argument arg at the earliest convenience.  func will be
-    called having the global interpreter lock held and can thus use the full
-    Python API and can take any action such as setting object attributes to
-    signal IO completion.  It must return 0 on success, or -1 signalling an
-    exception.  The notification function won't be interrupted to perform another
-    asynchronous notification recursively, but it can still be interrupted to
-    switch threads if the global interpreter lock is released, for example, if it
-    calls back into Python code.
-
-    This function returns 0 on success in which case the notification has been
-    scheduled.  Otherwise, for example if the notification buffer is full, it
-    returns -1 without setting any exception.
-
-    This function can be called on any thread, be it a Python thread or some
-    other system thread.  If it is a Python thread, it doesn't matter if it holds
-    the global interpreter lock or not.
-    """
-    raise NotImplementedError
-
 @cpython_api([Py_tracefunc, PyObject], lltype.Void)
 def PyEval_SetProfile(space, func, obj):
     """Set the profiler function to func.  The obj parameter is passed to the
@@ -1875,26 +1843,6 @@
     """
     raise NotImplementedError
 
-@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
-def Py_UNICODE_ISTITLE(space, ch):
-    """Return 1 or 0 depending on whether ch is a titlecase character."""
-    raise NotImplementedError
-
-@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
-def Py_UNICODE_ISDIGIT(space, ch):
-    """Return 1 or 0 depending on whether ch is a digit character."""
-    raise NotImplementedError
-
-@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
-def Py_UNICODE_ISNUMERIC(space, ch):
-    """Return 1 or 0 depending on whether ch is a numeric character."""
-    raise NotImplementedError
-
-@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
-def Py_UNICODE_ISALPHA(space, ch):
-    """Return 1 or 0 depending on whether ch is an alphabetic character."""
-    raise NotImplementedError
-
 @cpython_api([rffi.CCHARP], PyObject)
 def PyUnicode_FromFormat(space, format):
     """Take a C printf()-style format string and a variable number of
@@ -2339,17 +2287,6 @@
     use the default error handling."""
     raise NotImplementedError
 
-@cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], rffi.INT_real, error=-1)
-def PyUnicode_Tailmatch(space, str, substr, start, end, direction):
-    """Return 1 if substr matches str*[*start:end] at the given tail end
-    (direction == -1 means to do a prefix match, direction == 1 a suffix match),
-    0 otherwise. Return -1 if an error occurred.
-
-    This function used an int type for start and end. This
-    might require changes in your code for properly supporting 64-bit
-    systems."""
-    raise NotImplementedError
-
 @cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real], Py_ssize_t, error=-2)
 def PyUnicode_Find(space, str, substr, start, end, direction):
     """Return the first position of substr in str*[*start:end] using the given
@@ -2373,16 +2310,6 @@
     properly supporting 64-bit systems."""
     raise NotImplementedError
 
-@cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject)
-def PyUnicode_Replace(space, str, substr, replstr, maxcount):
-    """Replace at most maxcount occurrences of substr in str with replstr and
-    return the resulting Unicode object. maxcount == -1 means replace all
-    occurrences.
-
-    This function used an int type for maxcount. This might
-    require changes in your code for properly supporting 64-bit systems."""
-    raise NotImplementedError
-
 @cpython_api([PyObject, PyObject, rffi.INT_real], PyObject)
 def PyUnicode_RichCompare(space, left, right, op):
     """Rich compare two unicode strings and return one of the following:
@@ -2556,17 +2483,6 @@
     source code is read from fp instead of an in-memory string."""
     raise NotImplementedError
 
-@cpython_api([rffi.CCHARP, rffi.INT_real, PyObject, PyObject, PyCompilerFlags], PyObject)
-def PyRun_StringFlags(space, str, start, globals, locals, flags):
-    """Execute Python source code from str in the context specified by the
-    dictionaries globals and locals with the compiler flags specified by
-    flags.  The parameter start specifies the start token that should be used to
-    parse the source code.
-
-    Returns the result of executing the code as a Python object, or NULL if an
-    exception was raised."""
-    raise NotImplementedError
-
 @cpython_api([FILE, rffi.CCHARP, rffi.INT_real, PyObject, PyObject, rffi.INT_real], PyObject)
 def PyRun_FileEx(space, fp, filename, start, globals, locals, closeit):
     """This is a simplified interface to PyRun_FileExFlags() below, leaving
@@ -2587,13 +2503,6 @@
     returns."""
     raise NotImplementedError
 
-@cpython_api([PyCodeObject, PyObject, PyObject], PyObject)
-def PyEval_EvalCode(space, co, globals, locals):
-    """This is a simplified interface to PyEval_EvalCodeEx(), with just
-    the code object, and the dictionaries of global and local variables.
-    The other arguments are set to NULL."""
-    raise NotImplementedError
-
 @cpython_api([PyCodeObject, PyObject, PyObject, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObjectP, rffi.INT_real, PyObject], PyObject)
 def PyEval_EvalCodeEx(space, co, globals, locals, args, argcount, kws, kwcount, defs, defcount, closure):
     """Evaluate a precompiled code object, given a particular environment for its
@@ -2618,12 +2527,6 @@
     throw() methods of generator objects."""
     raise NotImplementedError
 
-@cpython_api([PyCompilerFlags], rffi.INT_real, error=CANNOT_FAIL)
-def PyEval_MergeCompilerFlags(space, cf):
-    """This function changes the flags of the current evaluation frame, and returns
-    true on success, false on failure."""
-    raise NotImplementedError
-
 @cpython_api([PyObject], rffi.INT_real, error=CANNOT_FAIL)
 def PyWeakref_Check(space, ob):
     """Return true if ob is either a reference or proxy object.
diff --git a/pypy/module/cpyext/stubsactive.py b/pypy/module/cpyext/stubsactive.py
--- a/pypy/module/cpyext/stubsactive.py
+++ b/pypy/module/cpyext/stubsactive.py
@@ -38,3 +38,31 @@
 def Py_MakePendingCalls(space):
     return 0
 
+pending_call = lltype.Ptr(lltype.FuncType([rffi.VOIDP], rffi.INT_real))
+@cpython_api([pending_call, rffi.VOIDP], rffi.INT_real, error=-1)
+def Py_AddPendingCall(space, func, arg):
+    """Post a notification to the Python main thread.  If successful,
+    func will be called with the argument arg at the earliest
+    convenience.  func will be called having the global interpreter
+    lock held and can thus use the full Python API and can take any
+    action such as setting object attributes to signal IO completion.
+    It must return 0 on success, or -1 signalling an exception.  The
+    notification function won't be interrupted to perform another
+    asynchronous notification recursively, but it can still be
+    interrupted to switch threads if the global interpreter lock is
+    released, for example, if it calls back into Python code.
+
+    This function returns 0 on success in which case the notification
+    has been scheduled.  Otherwise, for example if the notification
+    buffer is full, it returns -1 without setting any exception.
+
+    This function can be called on any thread, be it a Python thread
+    or some other system thread.  If it is a Python thread, it doesn't
+    matter if it holds the global interpreter lock or not.
+    """
+    return -1
+
+thread_func = lltype.Ptr(lltype.FuncType([rffi.VOIDP], lltype.Void))
+@cpython_api([thread_func, rffi.VOIDP], rffi.INT_real, error=-1)
+def PyThread_start_new_thread(space, func, arg):
+    return -1
diff --git a/pypy/module/cpyext/test/test_dictobject.py b/pypy/module/cpyext/test/test_dictobject.py
--- a/pypy/module/cpyext/test/test_dictobject.py
+++ b/pypy/module/cpyext/test/test_dictobject.py
@@ -112,6 +112,37 @@
         assert space.eq_w(space.len(w_copy), space.len(w_dict))
         assert space.eq_w(w_copy, w_dict)
 
+    def test_iterkeys(self, space, api):
+        w_dict = space.sys.getdict(space)
+        py_dict = make_ref(space, w_dict)
+
+        ppos = lltype.malloc(Py_ssize_tP.TO, 1, flavor='raw')
+        pkey = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
+        pvalue = lltype.malloc(PyObjectP.TO, 1, flavor='raw')
+
+        keys_w = []
+        values_w = []
+        try:
+            ppos[0] = 0
+            while api.PyDict_Next(w_dict, ppos, pkey, None):
+                w_key = from_ref(space, pkey[0])
+                keys_w.append(w_key)
+            ppos[0] = 0
+            while api.PyDict_Next(w_dict, ppos, None, pvalue):
+                w_value = from_ref(space, pvalue[0])
+                values_w.append(w_value)
+        finally:
+            lltype.free(ppos, flavor='raw')
+            lltype.free(pkey, flavor='raw')
+            lltype.free(pvalue, flavor='raw')
+
+        api.Py_DecRef(py_dict) # release borrowed references
+
+        assert space.eq_w(space.newlist(keys_w),
+                          space.call_method(w_dict, "keys"))
+        assert space.eq_w(space.newlist(values_w),
+                          space.call_method(w_dict, "values"))
+
     def test_dictproxy(self, space, api):
         w_dict = space.sys.get('modules')
         w_proxy = api.PyDictProxy_New(w_dict)
diff --git a/pypy/module/cpyext/test/test_eval.py b/pypy/module/cpyext/test/test_eval.py
--- a/pypy/module/cpyext/test/test_eval.py
+++ b/pypy/module/cpyext/test/test_eval.py
@@ -2,9 +2,10 @@
 from pypy.module.cpyext.test.test_cpyext import AppTestCpythonExtensionBase
 from pypy.module.cpyext.test.test_api import BaseApiTest
 from pypy.module.cpyext.eval import (
-    Py_single_input, Py_file_input, Py_eval_input)
+    Py_single_input, Py_file_input, Py_eval_input, PyCompilerFlags)
 from pypy.module.cpyext.api import fopen, fclose, fileno, Py_ssize_tP
 from pypy.interpreter.gateway import interp2app
+from pypy.interpreter.astcompiler import consts
 from pypy.tool.udir import udir
 import sys, os
 
@@ -63,6 +64,22 @@
 
         assert space.int_w(w_res) == 10
 
+    def test_evalcode(self, space, api):
+        w_f = space.appexec([], """():
+            def f(*args):
+                assert isinstance(args, tuple)
+                return len(args) + 8
+            return f
+            """)
+
+        w_t = space.newtuple([space.wrap(1), space.wrap(2)])
+        w_globals = space.newdict()
+        w_locals = space.newdict()
+        space.setitem(w_locals, space.wrap("args"), w_t)
+        w_res = api.PyEval_EvalCode(w_f.code, w_globals, w_locals)
+
+        assert space.int_w(w_res) == 10
+
     def test_run_simple_string(self, space, api):
         def run(code):
             buf = rffi.str2charp(code)
@@ -96,6 +113,20 @@
         assert 42 * 43 == space.unwrap(
             api.PyObject_GetItem(w_globals, space.wrap("a")))
 
+    def test_run_string_flags(self, space, api):
+        flags = lltype.malloc(PyCompilerFlags, flavor='raw')
+        flags.c_cf_flags = rffi.cast(rffi.INT, consts.PyCF_SOURCE_IS_UTF8)
+        w_globals = space.newdict()
+        buf = rffi.str2charp("a = u'caf\xc3\xa9'")
+        try:
+            api.PyRun_StringFlags(buf, Py_single_input,
+                                  w_globals, w_globals, flags)
+        finally:
+            rffi.free_charp(buf)
+        w_a = space.getitem(w_globals, space.wrap("a"))
+        assert space.unwrap(w_a) == u'caf\xe9'
+        lltype.free(flags, flavor='raw')
+
     def test_run_file(self, space, api):
         filepath = udir / "cpyext_test_runfile.py"
         filepath.write("raise ZeroDivisionError")
@@ -256,3 +287,21 @@
         print dir(mod)
         print mod.__dict__
         assert mod.f(42) == 47
+
+    def test_merge_compiler_flags(self):
+        module = self.import_extension('foo', [
+            ("get_flags", "METH_NOARGS",
+             """
+                PyCompilerFlags flags;
+                flags.cf_flags = 0;
+                int result = PyEval_MergeCompilerFlags(&flags);
+                return Py_BuildValue("ii", result, flags.cf_flags);
+             """),
+            ])
+        assert module.get_flags() == (0, 0)
+
+        ns = {'module':module}
+        exec """from __future__ import division    \nif 1:
+                def nested_flags():
+                    return module.get_flags()""" in ns
+        assert ns['nested_flags']() == (1, 0x2000)  # CO_FUTURE_DIVISION
diff --git a/pypy/module/cpyext/test/test_funcobject.py b/pypy/module/cpyext/test/test_funcobject.py
--- a/pypy/module/cpyext/test/test_funcobject.py
+++ b/pypy/module/cpyext/test/test_funcobject.py
@@ -81,6 +81,14 @@
         rffi.free_charp(filename)
         rffi.free_charp(funcname)
 
+    def test_getnumfree(self, space, api):
+        w_function = space.appexec([], """():
+            a = 5
+            def method(x): return a, x
+            return method
+        """)
+        assert api.PyCode_GetNumFree(w_function.code) == 1
+
     def test_classmethod(self, space, api):
         w_function = space.appexec([], """():
             def method(x): return x
diff --git a/pypy/module/cpyext/test/test_intobject.py b/pypy/module/cpyext/test/test_intobject.py
--- a/pypy/module/cpyext/test/test_intobject.py
+++ b/pypy/module/cpyext/test/test_intobject.py
@@ -65,4 +65,97 @@
         values = module.values()
         types = [type(x) for x in values]
         assert types == [int, long, int, int]
-        
+
+    def test_int_subtype(self):
+        module = self.import_extension(
+            'foo', [
+            ("newEnum", "METH_VARARGS",
+             """
+                EnumObject *enumObj;
+                long intval;
+                PyObject *name;
+
+                if (!PyArg_ParseTuple(args, "Oi", &name, &intval))
+                    return NULL;
+
+                PyType_Ready(&Enum_Type);
+                enumObj = PyObject_New(EnumObject, &Enum_Type);
+                if (!enumObj) {
+                    return NULL;
+                }
+
+                enumObj->ob_ival = intval;
+                Py_INCREF(name);
+                enumObj->ob_name = name;
+
+                return (PyObject *)enumObj;
+             """),
+            ], 
+            prologue="""
+            typedef struct
+            {
+                PyObject_HEAD
+                long ob_ival;
+                PyObject* ob_name;
+            } EnumObject;
+
+            static void
+            enum_dealloc(EnumObject *op)
+            {
+                    Py_DECREF(op->ob_name);
+                    Py_TYPE(op)->tp_free((PyObject *)op);
+            }
+
+            static PyMemberDef enum_members[] = {
+                {"name", T_OBJECT, offsetof(EnumObject, ob_name), 0, NULL},
+                {NULL}  /* Sentinel */
+            };
+
+            PyTypeObject Enum_Type = {
+                PyObject_HEAD_INIT(0)
+                /*ob_size*/             0,
+                /*tp_name*/             "Enum",
+                /*tp_basicsize*/        sizeof(EnumObject),
+                /*tp_itemsize*/         0,
+                /*tp_dealloc*/          enum_dealloc,
+                /*tp_print*/            0,
+                /*tp_getattr*/          0,
+                /*tp_setattr*/          0,
+                /*tp_compare*/          0,
+                /*tp_repr*/             0,
+                /*tp_as_number*/        0,
+                /*tp_as_sequence*/      0,
+                /*tp_as_mapping*/       0,
+                /*tp_hash*/             0,
+                /*tp_call*/             0,
+                /*tp_str*/              0,
+                /*tp_getattro*/         0,
+                /*tp_setattro*/         0,
+                /*tp_as_buffer*/        0,
+                /*tp_flags*/            Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE,
+                /*tp_doc*/              0,
+                /*tp_traverse*/         0,
+                /*tp_clear*/            0,
+                /*tp_richcompare*/      0,
+                /*tp_weaklistoffset*/   0,
+                /*tp_iter*/             0,
+                /*tp_iternext*/         0,
+                /*tp_methods*/          0,
+                /*tp_members*/          enum_members,
+                /*tp_getset*/           0,
+                /*tp_base*/             &PyInt_Type,
+                /*tp_dict*/             0,
+                /*tp_descr_get*/        0,
+                /*tp_descr_set*/        0,
+                /*tp_dictoffset*/       0,
+                /*tp_init*/             0,
+                /*tp_alloc*/            0,
+                /*tp_new*/              0
+            };
+            """)
+
+        a = module.newEnum("ULTIMATE_ANSWER", 42)
+        assert type(a).__name__ == "Enum"
+        assert isinstance(a, int)
+        assert a == int(a) == 42
+        assert a.name == "ULTIMATE_ANSWER"
diff --git a/pypy/module/cpyext/test/test_unicodeobject.py b/pypy/module/cpyext/test/test_unicodeobject.py
--- a/pypy/module/cpyext/test/test_unicodeobject.py
+++ b/pypy/module/cpyext/test/test_unicodeobject.py
@@ -204,8 +204,18 @@
             assert api.Py_UNICODE_ISSPACE(unichr(char))
         assert not api.Py_UNICODE_ISSPACE(u'a')
 
+        assert api.Py_UNICODE_ISALPHA(u'a')
+        assert not api.Py_UNICODE_ISALPHA(u'0')
+        assert api.Py_UNICODE_ISALNUM(u'a')
+        assert api.Py_UNICODE_ISALNUM(u'0')
+        assert not api.Py_UNICODE_ISALNUM(u'+')
+
         assert api.Py_UNICODE_ISDECIMAL(u'\u0660')
         assert not api.Py_UNICODE_ISDECIMAL(u'a')
+        assert api.Py_UNICODE_ISDIGIT(u'9')
+        assert not api.Py_UNICODE_ISDIGIT(u'@')
+        assert api.Py_UNICODE_ISNUMERIC(u'9')
+        assert not api.Py_UNICODE_ISNUMERIC(u'@')
 
         for char in [0x0a, 0x0d, 0x1c, 0x1d, 0x1e, 0x85, 0x2028, 0x2029]:
             assert api.Py_UNICODE_ISLINEBREAK(unichr(char))
@@ -216,6 +226,9 @@
         assert not api.Py_UNICODE_ISUPPER(u'a')
         assert not api.Py_UNICODE_ISLOWER(u'Ä')
         assert api.Py_UNICODE_ISUPPER(u'Ä')
+        assert not api.Py_UNICODE_ISTITLE(u'A')
+        assert api.Py_UNICODE_ISTITLE(
+            u'\N{LATIN CAPITAL LETTER L WITH SMALL LETTER J}')
 
     def test_TOLOWER(self, space, api):
         assert api.Py_UNICODE_TOLOWER(u'ä') == u'ä'
@@ -429,3 +442,18 @@
         w_char = api.PyUnicode_FromOrdinal(0xFFFF)
         assert space.unwrap(w_char) == u'\uFFFF'
 
+    def test_replace(self, space, api):
+        w_str = space.wrap(u"abababab")
+        w_substr = space.wrap(u"a")
+        w_replstr = space.wrap(u"z")
+        assert u"zbzbabab" == space.unwrap(
+            api.PyUnicode_Replace(w_str, w_substr, w_replstr, 2))
+        assert u"zbzbzbzb" == space.unwrap(
+            api.PyUnicode_Replace(w_str, w_substr, w_replstr, -1))
+
+    def test_tailmatch(self, space, api):
+        w_str = space.wrap(u"abcdef")
+        assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 2, 10, 1) == 1
+        assert api.PyUnicode_Tailmatch(w_str, space.wrap("cde"), 1, 5, -1) == 1
+        self.raises(space, api, TypeError,
+                    api.PyUnicode_Tailmatch, w_str, space.wrap(3), 2, 10, 1)
diff --git a/pypy/module/cpyext/unicodeobject.py b/pypy/module/cpyext/unicodeobject.py
--- a/pypy/module/cpyext/unicodeobject.py
+++ b/pypy/module/cpyext/unicodeobject.py
@@ -12,7 +12,7 @@
     make_typedescr, get_typedescr)
 from pypy.module.cpyext.stringobject import PyString_Check
 from pypy.module.sys.interp_encoding import setdefaultencoding
-from pypy.objspace.std import unicodeobject, unicodetype
+from pypy.objspace.std import unicodeobject, unicodetype, stringtype
 from pypy.rlib import runicode
 from pypy.tool.sourcetools import func_renamer
 import sys
@@ -89,6 +89,11 @@
     return unicodedb.isspace(ord(ch))
 
 @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
+def Py_UNICODE_ISALPHA(space, ch):
+    """Return 1 or 0 depending on whether ch is an alphabetic character."""
+    return unicodedb.isalpha(ord(ch))
+
+@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
 def Py_UNICODE_ISALNUM(space, ch):
     """Return 1 or 0 depending on whether ch is an alphanumeric character."""
     return unicodedb.isalnum(ord(ch))
@@ -104,6 +109,16 @@
     return unicodedb.isdecimal(ord(ch))
 
 @cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
+def Py_UNICODE_ISDIGIT(space, ch):
+    """Return 1 or 0 depending on whether ch is a digit character."""
+    return unicodedb.isdigit(ord(ch))
+
+@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
+def Py_UNICODE_ISNUMERIC(space, ch):
+    """Return 1 or 0 depending on whether ch is a numeric character."""
+    return unicodedb.isnumeric(ord(ch))
+
+ at cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
 def Py_UNICODE_ISLOWER(space, ch):
     """Return 1 or 0 depending on whether ch is a lowercase character."""
     return unicodedb.islower(ord(ch))
@@ -113,6 +128,11 @@
     """Return 1 or 0 depending on whether ch is an uppercase character."""
     return unicodedb.isupper(ord(ch))
 
+@cpython_api([Py_UNICODE], rffi.INT_real, error=CANNOT_FAIL)
+def Py_UNICODE_ISTITLE(space, ch):
+    """Return 1 or 0 depending on whether ch is a titlecase character."""
+    return unicodedb.istitle(ord(ch))
+
 @cpython_api([Py_UNICODE], Py_UNICODE, error=CANNOT_FAIL)
 def Py_UNICODE_TOLOWER(space, ch):
     """Return the character ch converted to lower case."""
@@ -155,6 +175,11 @@
     except KeyError:
         return -1.0
 
+@cpython_api([], Py_UNICODE, error=CANNOT_FAIL)
+def PyUnicode_GetMax(space):
+    """Get the maximum ordinal for a Unicode character."""
+    return unichr(runicode.MAXUNICODE)
+
 @cpython_api([PyObject], rffi.CCHARP, error=CANNOT_FAIL)
 def PyUnicode_AS_DATA(space, ref):
     """Return a pointer to the internal buffer of the object. o has to be a
@@ -548,6 +573,28 @@
 
 @cpython_api([PyObject, PyObject], PyObject)
 def PyUnicode_Join(space, w_sep, w_seq):
-    """Join a sequence of strings using the given separator and return the resulting
-    Unicode string."""
+    """Join a sequence of strings using the given separator and return
+    the resulting Unicode string."""
     return space.call_method(w_sep, 'join', w_seq)
+
+@cpython_api([PyObject, PyObject, PyObject, Py_ssize_t], PyObject)
+def PyUnicode_Replace(space, w_str, w_substr, w_replstr, maxcount):
+    """Replace at most maxcount occurrences of substr in str with replstr and
+    return the resulting Unicode object. maxcount == -1 means replace all
+    occurrences."""
+    return space.call_method(w_str, "replace", w_substr, w_replstr,
+                             space.wrap(maxcount))
+
+@cpython_api([PyObject, PyObject, Py_ssize_t, Py_ssize_t, rffi.INT_real],
+             rffi.INT_real, error=-1)
+def PyUnicode_Tailmatch(space, w_str, w_substr, start, end, direction):
+    """Return 1 if substr matches str[start:end] at the given tail end
+    (direction == -1 means to do a prefix match, direction == 1 a
+    suffix match), 0 otherwise. Return -1 if an error occurred."""
+    str = space.unicode_w(w_str)
+    substr = space.unicode_w(w_substr)
+    if rffi.cast(lltype.Signed, direction) >= 0:
+        return stringtype.stringstartswith(str, substr, start, end)
+    else:
+        return stringtype.stringendswith(str, substr, start, end)
+
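The two helpers above just dispatch to app-level string methods. As a rough,
pure-Python sketch of the semantics they expose (plain CPython code, no object
space involved; the function names here are illustrative only):

    def unicode_replace(s, substr, replstr, maxcount):
        # maxcount == -1 means "replace all occurrences", like str.replace()
        return s.replace(substr, replstr, maxcount)

    def unicode_tailmatch(s, substr, start, end, direction):
        # mirrors the dispatch in the patch: direction >= 0 goes through
        # startswith(), anything negative through endswith()
        if direction >= 0:
            return s.startswith(substr, start, end)
        return s.endswith(substr, start, end)

    assert unicode_replace(u"abababab", u"ab", u"zb", 2) == u"zbzbabab"
    assert unicode_replace(u"abababab", u"ab", u"zb", -1) == u"zbzbzbzb"
    assert unicode_tailmatch(u"abcdef", u"cde", 2, 10, 1)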
diff --git a/pypy/module/imp/interp_imp.py b/pypy/module/imp/interp_imp.py
--- a/pypy/module/imp/interp_imp.py
+++ b/pypy/module/imp/interp_imp.py
@@ -1,10 +1,11 @@
 from pypy.module.imp import importing
 from pypy.module._file.interp_file import W_File
 from pypy.rlib import streamio
+from pypy.rlib.streamio import StreamErrors
 from pypy.interpreter.error import OperationError, operationerrfmt
 from pypy.interpreter.module import Module
 from pypy.interpreter.gateway import unwrap_spec
-from pypy.module._file.interp_stream import StreamErrors, wrap_streamerror
+from pypy.interpreter.streamutil import wrap_streamerror
 
 
 def get_suffixes(space):
diff --git a/pypy/module/imp/test/test_import.py b/pypy/module/imp/test/test_import.py
--- a/pypy/module/imp/test/test_import.py
+++ b/pypy/module/imp/test/test_import.py
@@ -357,7 +357,7 @@
 
     def test_cannot_write_pyc(self):
         import sys, os
-        p = os.path.join(sys.path[-1], 'readonly')
+        p = os.path.join(sys.path[0], 'readonly')
         try:
             os.chmod(p, 0555)
         except:
diff --git a/pypy/module/micronumpy/__init__.py b/pypy/module/micronumpy/__init__.py
--- a/pypy/module/micronumpy/__init__.py
+++ b/pypy/module/micronumpy/__init__.py
@@ -67,10 +67,12 @@
         ("arccos", "arccos"),
         ("arcsin", "arcsin"),
         ("arctan", "arctan"),
+        ("arccosh", "arccosh"),
         ("arcsinh", "arcsinh"),
         ("arctanh", "arctanh"),
         ("copysign", "copysign"),
         ("cos", "cos"),
+        ("cosh", "cosh"),
         ("divide", "divide"),
         ("true_divide", "true_divide"),
         ("equal", "equal"),
@@ -90,9 +92,11 @@
         ("reciprocal", "reciprocal"),
         ("sign", "sign"),
         ("sin", "sin"),
+        ("sinh", "sinh"),
         ("subtract", "subtract"),
         ('sqrt', 'sqrt'),
         ("tan", "tan"),
+        ("tanh", "tanh"),
         ('bitwise_and', 'bitwise_and'),
         ('bitwise_or', 'bitwise_or'),
         ('bitwise_xor', 'bitwise_xor'),
diff --git a/pypy/module/micronumpy/interp_boxes.py b/pypy/module/micronumpy/interp_boxes.py
--- a/pypy/module/micronumpy/interp_boxes.py
+++ b/pypy/module/micronumpy/interp_boxes.py
@@ -1,6 +1,6 @@
 from pypy.interpreter.baseobjspace import Wrappable
 from pypy.interpreter.error import operationerrfmt
-from pypy.interpreter.gateway import interp2app
+from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.interpreter.typedef import TypeDef
 from pypy.objspace.std.floattype import float_typedef
 from pypy.objspace.std.inttype import int_typedef
@@ -29,7 +29,6 @@
     def convert_to(self, dtype):
         return dtype.box(self.value)
 
-
 class W_GenericBox(Wrappable):
     _attrs_ = ()
 
@@ -39,10 +38,10 @@
         )
 
     def descr_str(self, space):
-        return self.descr_repr(space)
+        return space.wrap(self.get_dtype(space).itemtype.str_format(self))
 
-    def descr_repr(self, space):
-        return space.wrap(self.get_dtype(space).itemtype.str_format(self))
+    def descr_format(self, space, w_spec):
+        return space.format(self.item(space), w_spec)
 
     def descr_int(self, space):
         box = self.convert_to(W_LongBox.get_dtype(space))
@@ -187,6 +186,10 @@
     descr__new__, get_dtype = new_dtype_getter("float64")
 
 
+@unwrap_spec(self=W_GenericBox)
+def descr_index(space, self):
+    return space.index(self.item(space))
+
 
 W_GenericBox.typedef = TypeDef("generic",
     __module__ = "numpypy",
@@ -194,7 +197,8 @@
     __new__ = interp2app(W_GenericBox.descr__new__.im_func),
 
     __str__ = interp2app(W_GenericBox.descr_str),
-    __repr__ = interp2app(W_GenericBox.descr_repr),
+    __repr__ = interp2app(W_GenericBox.descr_str),
+    __format__ = interp2app(W_GenericBox.descr_format),
     __int__ = interp2app(W_GenericBox.descr_int),
     __float__ = interp2app(W_GenericBox.descr_float),
     __nonzero__ = interp2app(W_GenericBox.descr_nonzero),
@@ -245,6 +249,8 @@
 W_BoolBox.typedef = TypeDef("bool_", W_GenericBox.typedef,
     __module__ = "numpypy",
     __new__ = interp2app(W_BoolBox.descr__new__.im_func),
+
+    __index__ = interp2app(descr_index),
 )
 
 W_NumberBox.typedef = TypeDef("number", W_GenericBox.typedef,
@@ -266,36 +272,43 @@
 W_Int8Box.typedef = TypeDef("int8", W_SignedIntegerBox.typedef,
     __module__ = "numpypy",
     __new__ = interp2app(W_Int8Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 W_UInt8Box.typedef = TypeDef("uint8", W_UnsignedIntegerBox.typedef,
     __module__ = "numpypy",
     __new__ = interp2app(W_UInt8Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 W_Int16Box.typedef = TypeDef("int16", W_SignedIntegerBox.typedef,
     __module__ = "numpypy",
     __new__ = interp2app(W_Int16Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 W_UInt16Box.typedef = TypeDef("uint16", W_UnsignedIntegerBox.typedef,
     __module__ = "numpypy",
     __new__ = interp2app(W_UInt16Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 W_Int32Box.typedef = TypeDef("int32", (W_SignedIntegerBox.typedef,) + MIXIN_32,
     __module__ = "numpypy",
     __new__ = interp2app(W_Int32Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 W_UInt32Box.typedef = TypeDef("uint32", W_UnsignedIntegerBox.typedef,
     __module__ = "numpypy",
     __new__ = interp2app(W_UInt32Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 W_Int64Box.typedef = TypeDef("int64", (W_SignedIntegerBox.typedef,) + MIXIN_64,
     __module__ = "numpypy",
     __new__ = interp2app(W_Int64Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 if LONG_BIT == 32:
@@ -308,6 +321,7 @@
 W_UInt64Box.typedef = TypeDef("uint64", W_UnsignedIntegerBox.typedef,
     __module__ = "numpypy",
     __new__ = interp2app(W_UInt64Box.descr__new__.im_func),
+    __index__ = interp2app(descr_index),
 )
 
 W_InexactBox.typedef = TypeDef("inexact", W_NumberBox.typedef,
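Adding __index__ (via descr_index) to the integer box types is what makes them
usable as sequence indices at app level. A plain-Python illustration of the
difference, with hypothetical box classes standing in for the numpypy ones:

    class IntBox(object):
        def __init__(self, value):
            self.value = value
        def __index__(self):
            # space.index(self.item(space)) in the patch plays this role
            return self.value

    class FloatBox(object):
        def __init__(self, value):
            self.value = value
        # no __index__, so indexing with it raises TypeError

    assert (1, 2, 3)[IntBox(1)] == 2
    try:
        (1, 2, 3)[FloatBox(1.0)]
    except TypeError:
        pass
    else:
        raise AssertionError("expected TypeError")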
diff --git a/pypy/module/micronumpy/interp_numarray.py b/pypy/module/micronumpy/interp_numarray.py
--- a/pypy/module/micronumpy/interp_numarray.py
+++ b/pypy/module/micronumpy/interp_numarray.py
@@ -779,8 +779,6 @@
     """
     Intermediate class for performing binary operations.
     """
-    _immutable_fields_ = ['left', 'right']
-
     def __init__(self, ufunc, name, shape, calc_dtype, res_dtype, left, right):
         VirtualArray.__init__(self, name, shape, res_dtype)
         self.ufunc = ufunc
@@ -856,8 +854,6 @@
                                          self.right.create_sig(), done_func)
 
 class AxisReduce(Call2):
-    _immutable_fields_ = ['left', 'right']
-
     def __init__(self, ufunc, name, identity, shape, dtype, left, right, dim):
         Call2.__init__(self, ufunc, name, shape, dtype, dtype,
                        left, right)
diff --git a/pypy/module/micronumpy/interp_support.py b/pypy/module/micronumpy/interp_support.py
--- a/pypy/module/micronumpy/interp_support.py
+++ b/pypy/module/micronumpy/interp_support.py
@@ -3,7 +3,7 @@
 from pypy.rpython.lltypesystem import lltype, rffi
 from pypy.module.micronumpy import interp_dtype
 from pypy.objspace.std.strutil import strip_spaces
-
+from pypy.rlib import jit
 
 FLOAT_SIZE = rffi.sizeof(lltype.Float)
 
@@ -72,11 +72,20 @@
             "string is smaller than requested size"))
         
     a = W_NDimArray(count, [count], dtype=dtype)
-    for i in range(count):
+    fromstring_loop(a, count, dtype, itemsize, s)
+    return space.wrap(a)
+
+fromstring_driver = jit.JitDriver(greens=[], reds=['count', 'i', 'itemsize',
+                                                   'dtype', 's', 'a'])
+
+def fromstring_loop(a, count, dtype, itemsize, s):
+    i = 0
+    while i < count:
+        fromstring_driver.jit_merge_point(a=a, count=count, dtype=dtype,
+                                          itemsize=itemsize, s=s, i=i)
         val = dtype.itemtype.runpack_str(s[i*itemsize:i*itemsize + itemsize])
         a.dtype.setitem(a.storage, i, val)
-        
-    return space.wrap(a)
+        i += 1
 
 @unwrap_spec(s=str, count=int, sep=str)
 def fromstring(space, s, w_dtype=None, count=-1, sep=''):
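The loop is pulled out of fromstring() into fromstring_loop() so the new
JitDriver can mark it as a tracing entry point. A rough, non-RPython sketch of
the same per-item unpacking, using struct instead of the dtype machinery (the
format string and helper name are made up for illustration):

    import struct

    def fromstring_loop_sketch(s, count, fmt="d"):
        # unpack 'count' fixed-size items from the byte string 's',
        # one slice of 'itemsize' bytes per iteration
        itemsize = struct.calcsize(fmt)
        items = []
        i = 0
        while i < count:
            chunk = s[i * itemsize:i * itemsize + itemsize]
            items.append(struct.unpack(fmt, chunk)[0])
            i += 1
        return items

    data = struct.pack("ddd", 1.0, 2.0, 3.0)
    assert fromstring_loop_sketch(data, 3) == [1.0, 2.0, 3.0]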
diff --git a/pypy/module/micronumpy/interp_ufuncs.py b/pypy/module/micronumpy/interp_ufuncs.py
--- a/pypy/module/micronumpy/interp_ufuncs.py
+++ b/pypy/module/micronumpy/interp_ufuncs.py
@@ -435,7 +435,11 @@
             ("arcsin", "arcsin", 1, {"promote_to_float": True}),
             ("arccos", "arccos", 1, {"promote_to_float": True}),
             ("arctan", "arctan", 1, {"promote_to_float": True}),
+            ("sinh", "sinh", 1, {"promote_to_float": True}),
+            ("cosh", "cosh", 1, {"promote_to_float": True}),
+            ("tanh", "tanh", 1, {"promote_to_float": True}),
             ("arcsinh", "arcsinh", 1, {"promote_to_float": True}),
+            ("arccosh", "arccosh", 1, {"promote_to_float": True}),
             ("arctanh", "arctanh", 1, {"promote_to_float": True}),
         ]:
             self.add_ufunc(space, *ufunc_def)
diff --git a/pypy/module/micronumpy/test/test_dtypes.py b/pypy/module/micronumpy/test/test_dtypes.py
--- a/pypy/module/micronumpy/test/test_dtypes.py
+++ b/pypy/module/micronumpy/test/test_dtypes.py
@@ -371,6 +371,8 @@
         assert type(a[1]) is numpy.float64
         assert numpy.dtype(float).type is numpy.float64
 
+        assert "{:3f}".format(numpy.float64(3)) == "3.000000"
+
         assert numpy.float64(2.0) == 2.0
         assert numpy.float64('23.4') == numpy.float64(23.4)
         raises(ValueError, numpy.float64, '23.2df')
@@ -387,9 +389,9 @@
         assert b.m() == 12
 
     def test_long_as_index(self):
-        skip("waiting for removal of multimethods of __index__")
-        from _numpypy import int_
+        from _numpypy import int_, float64
         assert (1, 2, 3)[int_(1)] == 2
+        raises(TypeError, lambda: (1, 2, 3)[float64(1)])
 
     def test_int(self):
         import sys
diff --git a/pypy/module/micronumpy/test/test_ufuncs.py b/pypy/module/micronumpy/test/test_ufuncs.py
--- a/pypy/module/micronumpy/test/test_ufuncs.py
+++ b/pypy/module/micronumpy/test/test_ufuncs.py
@@ -310,6 +310,33 @@
         b = arctan(a)
         assert math.isnan(b[0])
 
+    def test_sinh(self):
+        import math
+        from _numpypy import array, sinh
+
+        a = array([-1, 0, 1, float('inf'), float('-inf')])
+        b = sinh(a)
+        for i in range(len(a)):
+            assert b[i] == math.sinh(a[i])
+
+    def test_cosh(self):
+        import math
+        from _numpypy import array, cosh
+
+        a = array([-1, 0, 1, float('inf'), float('-inf')])
+        b = cosh(a)
+        for i in range(len(a)):
+            assert b[i] == math.cosh(a[i])
+
+    def test_tanh(self):
+        import math
+        from _numpypy import array, tanh
+
+        a = array([-1, 0, 1, float('inf'), float('-inf')])
+        b = tanh(a)
+        for i in range(len(a)):
+            assert b[i] == math.tanh(a[i])
+
     def test_arcsinh(self):
         import math
         from _numpypy import arcsinh
@@ -318,6 +345,15 @@
             assert math.asinh(v) == arcsinh(v)
         assert math.isnan(arcsinh(float("nan")))
 
+    def test_arccosh(self):
+        import math
+        from _numpypy import arccosh
+
+        for v in [1.0, 1.1, 2]:
+            assert math.acosh(v) == arccosh(v)
+        for v in [-1.0, 0, .99]:
+            assert math.isnan(arccosh(v))
+
     def test_arctanh(self):
         import math
         from _numpypy import arctanh
diff --git a/pypy/module/micronumpy/test/test_zjit.py b/pypy/module/micronumpy/test/test_zjit.py
--- a/pypy/module/micronumpy/test/test_zjit.py
+++ b/pypy/module/micronumpy/test/test_zjit.py
@@ -479,38 +479,3 @@
                                 'int_sub': 3,
                                 'jump': 1,
                                 'setinteriorfield_raw': 1})
-
-
-class TestNumpyOld(LLJitMixin):
-    def setup_class(cls):
-        py.test.skip("old")
-        from pypy.module.micronumpy.compile import FakeSpace
-        from pypy.module.micronumpy.interp_dtype import get_dtype_cache
-
-        cls.space = FakeSpace()
-        cls.float64_dtype = get_dtype_cache(cls.space).w_float64dtype
-
-    def test_int32_sum(self):
-        py.test.skip("pypy/jit/backend/llimpl.py needs to be changed to "
-                     "deal correctly with int dtypes for this test to "
-                     "work. skip for now until someone feels up to the task")
-        space = self.space
-        float64_dtype = self.float64_dtype
-        int32_dtype = self.int32_dtype
-
-        def f(n):
-            if NonConstant(False):
-                dtype = float64_dtype
-            else:
-                dtype = int32_dtype
-            ar = W_NDimArray(n, [n], dtype=dtype)
-            i = 0
-            while i < n:
-                ar.get_concrete().setitem(i, int32_dtype.box(7))
-                i += 1
-            v = ar.descr_add(space, ar).descr_sum(space)
-            assert isinstance(v, IntObject)
-            return v.intval
-
-        result = self.meta_interp(f, [5], listops=True, backendopt=True)
-        assert result == f(5)
diff --git a/pypy/module/micronumpy/types.py b/pypy/module/micronumpy/types.py
--- a/pypy/module/micronumpy/types.py
+++ b/pypy/module/micronumpy/types.py
@@ -489,10 +489,28 @@
         return math.atan(v)
 
     @simple_unary_op
+    def sinh(self, v):
+        return math.sinh(v)
+
+    @simple_unary_op
+    def cosh(self, v):
+        return math.cosh(v)
+
+    @simple_unary_op
+    def tanh(self, v):
+        return math.tanh(v)
+
+    @simple_unary_op
     def arcsinh(self, v):
         return math.asinh(v)
 
     @simple_unary_op
+    def arccosh(self, v):
+        if v < 1.0:
+            return rfloat.NAN
+        return math.acosh(v)
+
+    @simple_unary_op
     def arctanh(self, v):
         if v == 1.0 or v == -1.0:
             return math.copysign(rfloat.INFINITY, v)
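The arccosh guard above exists because math.acosh() raises ValueError outside
its domain, whereas the ufunc is expected to return NaN there (which is what
test_arccosh checks). A minimal stand-alone sketch of the same guard:

    import math

    def arccosh_sketch(v):
        # below the domain boundary, return NaN instead of letting
        # math.acosh raise ValueError
        if v < 1.0:
            return float("nan")
        return math.acosh(v)

    assert arccosh_sketch(1.0) == 0.0
    assert math.isnan(arccosh_sketch(0.5))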
diff --git a/pypy/module/pypyjit/test_pypy_c/test_00_model.py b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
--- a/pypy/module/pypyjit/test_pypy_c/test_00_model.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_00_model.py
@@ -60,6 +60,9 @@
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
         stdout, stderr = pipe.communicate()
+        if getattr(pipe, 'returncode', 0) < 0:
+            raise IOError("subprocess was killed by signal %d" % (
+                pipe.returncode,))
         if stderr.startswith('SKIP:'):
             py.test.skip(stderr)
         if stderr.startswith('debug_alloc.h:'):   # lldebug builds
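The returncode check above relies on the subprocess convention that a negative
return code means the child was killed by that signal. A small POSIX-only
demonstration (assumes a Unix-like host):

    import signal
    import subprocess
    import sys

    child = subprocess.Popen([sys.executable, "-c",
        "import os, signal; os.kill(os.getpid(), signal.SIGKILL)"])
    child.communicate()
    # killed by SIGKILL => returncode is -SIGKILL
    assert child.returncode == -signal.SIGKILL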
diff --git a/pypy/module/pypyjit/test_pypy_c/test_alloc.py b/pypy/module/pypyjit/test_pypy_c/test_alloc.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/pypyjit/test_pypy_c/test_alloc.py
@@ -0,0 +1,26 @@
+import py, sys
+from pypy.module.pypyjit.test_pypy_c.test_00_model import BaseTestPyPyC
+
+class TestAlloc(BaseTestPyPyC):
+
+    SIZES = dict.fromkeys([2 ** n for n in range(26)] +     # up to 32MB
+                          [2 ** n - 1 for n in range(26)])
+
+    def test_newstr_constant_size(self):
+        for size in TestAlloc.SIZES:
+            yield self.newstr_constant_size, size
+
+    def newstr_constant_size(self, size):
+        src = """if 1:
+                    N = %(size)d
+                    part_a = 'a' * N
+                    part_b = 'b' * N
+                    for i in xrange(20):
+                        ao = '%%s%%s' %% (part_a, part_b)
+                    def main():
+                        return 42
+""" % {'size': size}
+        log = self.run(src, [], threshold=10)
+        assert log.result == 42
+        loop, = log.loops_by_filename(self.filepath)
+        # assert did not crash
diff --git a/pypy/module/pypyjit/test_pypy_c/test_instance.py b/pypy/module/pypyjit/test_pypy_c/test_instance.py
--- a/pypy/module/pypyjit/test_pypy_c/test_instance.py
+++ b/pypy/module/pypyjit/test_pypy_c/test_instance.py
@@ -201,3 +201,28 @@
         loop, = log.loops_by_filename(self.filepath)
         assert loop.match_by_id("compare", "") # optimized away
 
+    def test_super(self):
+        def main():
+            class A(object):
+                def m(self, x):
+                    return x + 1
+            class B(A):
+                def m(self, x):
+                    return super(B, self).m(x)
+            i = 0
+            while i < 300:
+                i = B().m(i)
+            return i
+
+        log = self.run(main, [])
+        loop, = log.loops_by_filename(self.filepath)
+        assert loop.match("""
+            i78 = int_lt(i72, 300)
+            guard_true(i78, descr=...)
+            guard_not_invalidated(descr=...)
+            i79 = force_token()
+            i80 = force_token()
+            i81 = int_add(i72, 1)
+            --TICK--
+            jump(..., descr=...)
+        """)
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_fastpath.py
@@ -97,6 +97,16 @@
         tf_b.errcheck = errcheck
         assert tf_b(-126) == 'hello'
 
+    def test_array_to_ptr(self):
+        ARRAY = c_int * 8
+        func = dll._testfunc_ai8
+        func.restype = POINTER(c_int)
+        func.argtypes = [ARRAY]
+        array = ARRAY(1, 2, 3, 4, 5, 6, 7, 8)
+        ptr = func(array)
+        assert ptr[0] == 1
+        assert ptr[7] == 8
+
 
 class TestFallbackToSlowpath(BaseCTypesTestChecker):
 
diff --git a/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py b/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py
--- a/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py
+++ b/pypy/module/test_lib_pypy/ctypes_tests/test_prototypes.py
@@ -246,6 +246,14 @@
         def func(): pass
         CFUNCTYPE(None, c_int * 3)(func)
 
+    def test_array_to_ptr_wrongtype(self):
+        ARRAY = c_byte * 8
+        func = testdll._testfunc_ai8
+        func.restype = POINTER(c_int)
+        func.argtypes = [c_int * 8]
+        array = ARRAY(1, 2, 3, 4, 5, 6, 7, 8)
+        py.test.raises(ArgumentError, "func(array)")
+
 ################################################################
 
 if __name__ == '__main__':
diff --git a/pypy/module/test_lib_pypy/test_collections.py b/pypy/module/test_lib_pypy/test_collections.py
--- a/pypy/module/test_lib_pypy/test_collections.py
+++ b/pypy/module/test_lib_pypy/test_collections.py
@@ -6,7 +6,7 @@
 
 from pypy.conftest import gettestobjspace
 
-class AppTestcStringIO:
+class AppTestCollections:
     def test_copy(self):
         import _collections
         def f():
diff --git a/pypy/module/test_lib_pypy/test_datetime.py b/pypy/module/test_lib_pypy/test_datetime.py
--- a/pypy/module/test_lib_pypy/test_datetime.py
+++ b/pypy/module/test_lib_pypy/test_datetime.py
@@ -3,7 +3,7 @@
 import py
 
 import time
-import datetime
+from lib_pypy import datetime
 import copy
 import os
 
@@ -43,4 +43,4 @@
     dt = datetime.datetime.utcnow()
     assert type(dt.microsecond) is int
 
-    copy.copy(dt)
\ No newline at end of file
+    copy.copy(dt)
diff --git a/pypy/objspace/std/dictmultiobject.py b/pypy/objspace/std/dictmultiobject.py
--- a/pypy/objspace/std/dictmultiobject.py
+++ b/pypy/objspace/std/dictmultiobject.py
@@ -142,6 +142,17 @@
             else:
                 return result
 
+    def popitem(self, w_dict):
+        # this is a bad implementation: if we call popitem() repeatedly,
+        # it ends up taking n**2 time, because the next() calls below
+        # will take longer and longer.  But all interesting strategies
+        # provide a better one.
+        space = self.space
+        iterator = self.iter(w_dict)
+        w_key, w_value = iterator.next()
+        self.delitem(w_dict, w_key)
+        return (w_key, w_value)
+
     def clear(self, w_dict):
         strategy = self.space.fromcache(EmptyDictStrategy)
         storage = strategy.get_empty_storage()
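A tiny dict-based sketch of the fallback popitem() added above, with the same
caveat: take whatever the iterator yields first and delete it, which is
O(n**2) when called repeatedly, so concrete strategies are expected to
override it (plain dicts here, not the strategy API):

    def popitem_fallback(d):
        # grab the first key the iterator yields, then delete it; we return
        # immediately, so the iterator is never advanced past the deletion
        for key in d:
            value = d[key]
            del d[key]
            return (key, value)
        raise KeyError("popitem(): dictionary is empty")

    d = {"a": 1, "b": 4}
    key, value = popitem_fallback(d)
    assert (key, value) in [("a", 1), ("b", 4)]
    assert len(d) == 1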
diff --git a/pypy/objspace/std/dictproxyobject.py b/pypy/objspace/std/dictproxyobject.py
--- a/pypy/objspace/std/dictproxyobject.py
+++ b/pypy/objspace/std/dictproxyobject.py
@@ -3,7 +3,7 @@
 from pypy.objspace.std.dictmultiobject import W_DictMultiObject, IteratorImplementation
 from pypy.objspace.std.dictmultiobject import DictStrategy
 from pypy.objspace.std.typeobject import unwrap_cell
-from pypy.interpreter.error import OperationError
+from pypy.interpreter.error import OperationError, operationerrfmt
 
 from pypy.rlib import rerased
 
@@ -44,7 +44,8 @@
                 raise
             if not w_type.is_cpytype():
                 raise
-            # xxx obscure workaround: allow cpyext to write to type->tp_dict.
+            # xxx obscure workaround: allow cpyext to write to type->tp_dict
+            # xxx even in the case of a builtin type.
             # xxx like CPython, we assume that this is only done early after
             # xxx the type is created, and we don't invalidate any cache.
             w_type.dict_w[key] = w_value
@@ -86,8 +87,14 @@
                     for (key, w_value) in self.unerase(w_dict.dstorage).dict_w.iteritems()]
 
     def clear(self, w_dict):
-        self.unerase(w_dict.dstorage).dict_w.clear()
-        self.unerase(w_dict.dstorage).mutated(None)
+        space = self.space
+        w_type = self.unerase(w_dict.dstorage)
+        if (not space.config.objspace.std.mutable_builtintypes
+                and not w_type.is_heaptype()):
+            msg = "can't clear dictionary of type '%s'"
+            raise operationerrfmt(space.w_TypeError, msg, w_type.name)
+        w_type.dict_w.clear()
+        w_type.mutated(None)
 
 class DictProxyIteratorImplementation(IteratorImplementation):
     def __init__(self, space, strategy, dictimplementation):
diff --git a/pypy/objspace/std/test/test_dictproxy.py b/pypy/objspace/std/test/test_dictproxy.py
--- a/pypy/objspace/std/test/test_dictproxy.py
+++ b/pypy/objspace/std/test/test_dictproxy.py
@@ -22,6 +22,9 @@
         assert NotEmpty.string == 1
         raises(TypeError, 'NotEmpty.__dict__.setdefault(15, 1)')
 
+        key, value = NotEmpty.__dict__.popitem()
+        assert (key == 'a' and value == 1) or (key == 'b' and value == 4)
+
     def test_dictproxyeq(self):
         class a(object):
             pass
@@ -43,6 +46,11 @@
         assert s1 == s2
         assert s1.startswith('{') and s1.endswith('}')
 
+    def test_immutable_dict_on_builtin_type(self):
+        raises(TypeError, "int.__dict__['a'] = 1")
+        raises(TypeError, int.__dict__.popitem)
+        raises(TypeError, int.__dict__.clear)
+
 class AppTestUserObjectMethodCache(AppTestUserObject):
     def setup_class(cls):
         cls.space = gettestobjspace(
diff --git a/pypy/objspace/std/test/test_typeobject.py b/pypy/objspace/std/test/test_typeobject.py
--- a/pypy/objspace/std/test/test_typeobject.py
+++ b/pypy/objspace/std/test/test_typeobject.py
@@ -993,7 +993,9 @@
         raises(TypeError, setattr, list, 'append', 42)
         raises(TypeError, setattr, list, 'foobar', 42)
         raises(TypeError, delattr, dict, 'keys')
-        
+        raises(TypeError, 'int.__dict__["a"] = 1')
+        raises(TypeError, 'int.__dict__.clear()')
+
     def test_nontype_in_mro(self):
         class OldStyle:
             pass
diff --git a/pypy/objspace/std/typeobject.py b/pypy/objspace/std/typeobject.py
--- a/pypy/objspace/std/typeobject.py
+++ b/pypy/objspace/std/typeobject.py
@@ -103,6 +103,7 @@
                           'terminator',
                           '_version_tag?',
                           'name?',
+                          'mro_w?[*]',
                           ]
 
     # for config.objspace.std.getattributeshortcut
@@ -345,9 +346,9 @@
 
         return w_self._lookup_where(name)
 
+    @unroll_safe
     def lookup_starting_at(w_self, w_starttype, name):
         space = w_self.space
-        # XXX Optimize this with method cache
         look = False
         for w_class in w_self.mro_w:
             if w_class is w_starttype:
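lookup_starting_at() walks the MRO and only starts looking once it has passed
w_starttype, which is essentially the lookup super() needs. A rough
plain-Python analogue using ordinary classes:

    def lookup_starting_at(cls, starttype, name):
        # scan the MRO; skip everything up to and including 'starttype',
        # then return the first definition of 'name' found after it
        look = False
        for klass in cls.__mro__:
            if klass is starttype:
                look = True
            elif look and name in klass.__dict__:
                return klass.__dict__[name]
        return None

    class A(object):
        def m(self, x):
            return x + 1

    class B(A):
        pass

    assert lookup_starting_at(B, B, "m") is A.__dict__["m"]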
diff --git a/pypy/rlib/debug.py b/pypy/rlib/debug.py
--- a/pypy/rlib/debug.py
+++ b/pypy/rlib/debug.py
@@ -19,14 +19,24 @@
         hop.exception_cannot_occur()
         hop.genop('debug_assert', vlist)
 
-def fatalerror(msg, traceback=False):
+def fatalerror(msg):
+    # print the RPython traceback and abort with a fatal error
     from pypy.rpython.lltypesystem import lltype
     from pypy.rpython.lltypesystem.lloperation import llop
-    if traceback:
-        llop.debug_print_traceback(lltype.Void)
+    llop.debug_print_traceback(lltype.Void)
     llop.debug_fatalerror(lltype.Void, msg)
 fatalerror._dont_inline_ = True
-fatalerror._annspecialcase_ = 'specialize:arg(1)'
+fatalerror._jit_look_inside_ = False
+fatalerror._annenforceargs_ = [str]
+
+def fatalerror_notb(msg):
+    # a variant of fatalerror() that doesn't print the RPython traceback
+    from pypy.rpython.lltypesystem import lltype
+    from pypy.rpython.lltypesystem.lloperation import llop
+    llop.debug_fatalerror(lltype.Void, msg)
+fatalerror_notb._dont_inline_ = True
+fatalerror_notb._jit_look_inside_ = False
+fatalerror_notb._annenforceargs_ = [str]
 
 
 class DebugLog(list):
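The change above splits the old fatalerror(msg, traceback=False) into two
RPython helpers: fatalerror(), which prints the RPython traceback before
aborting, and fatalerror_notb(), which aborts without it. A rough plain-Python
stand-in for the difference (the real versions go through llop and terminate
the translated process):

    import sys
    import traceback

    def fatalerror_sketch(msg):
        # analogous to debug_print_traceback followed by debug_fatalerror
        traceback.print_stack()
        sys.stderr.write("Fatal RPython error: %s\n" % (msg,))
        sys.exit(1)

    def fatalerror_notb_sketch(msg):
        # same, minus the traceback
        sys.stderr.write("Fatal RPython error: %s\n" % (msg,))
        sys.exit(1)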
diff --git a/pypy/rlib/jit.py b/pypy/rlib/jit.py
--- a/pypy/rlib/jit.py
+++ b/pypy/rlib/jit.py
@@ -450,6 +450,7 @@
             assert v in self.reds
         self._alllivevars = dict.fromkeys(
             [name for name in self.greens + self.reds if '.' not in name])
+        self._heuristic_order = {}   # check if 'reds' and 'greens' are ordered
         self._make_extregistryentries()
         self.get_jitcell_at = get_jitcell_at
         self.set_jitcell_at = set_jitcell_at
@@ -461,13 +462,61 @@
     def _freeze_(self):
         return True
 
+    def _check_arguments(self, livevars):
+        assert dict.fromkeys(livevars) == self._alllivevars
+        # check heuristically that 'reds' and 'greens' are ordered as
+        # the JIT will need them to be: first INTs, then REFs, then
+        # FLOATs.
+        if len(self._heuristic_order) < len(livevars):
+            from pypy.rlib.rarithmetic import (r_singlefloat, r_longlong,
+                                               r_ulonglong, r_uint)
+            added = False
+            for var, value in livevars.items():
+                if var not in self._heuristic_order:
+                    if (r_ulonglong is not r_uint and
+                            isinstance(value, (r_longlong, r_ulonglong))):
+                        assert 0, ("should not pass a r_longlong argument for "
+                                   "now, because on 32-bit machines it needs "
+                                   "to be ordered as a FLOAT but on 64-bit "
+                                   "machines as an INT")
+                    elif isinstance(value, (int, long, r_singlefloat)):
+                        kind = '1:INT'
+                    elif isinstance(value, float):
+                        kind = '3:FLOAT'
+                    elif isinstance(value, (str, unicode)) and len(value) != 1:
+                        kind = '2:REF'
+                    elif isinstance(value, (list, dict)):
+                        kind = '2:REF'
+                    elif (hasattr(value, '__class__')
+                          and value.__class__.__module__ != '__builtin__'):
+                        if hasattr(value, '_freeze_'):
+                            continue   # value._freeze_() is better not called
+                        elif getattr(value, '_alloc_flavor_', 'gc') == 'gc':
+                            kind = '2:REF'
+                        else:
+                            kind = '1:INT'
+                    else:
+                        continue
+                    self._heuristic_order[var] = kind
+                    added = True
+            if added:
+                for color in ('reds', 'greens'):
+                    lst = getattr(self, color)
+                    allkinds = [self._heuristic_order.get(name, '?')
+                                for name in lst]
+                    kinds = [k for k in allkinds if k != '?']
+                    assert kinds == sorted(kinds), (
+                        "bad order of %s variables in the jitdriver: "
+                        "must be INTs, REFs, FLOATs; got %r" %
+                        (color, allkinds))
+
     def jit_merge_point(_self, **livevars):
         # special-cased by ExtRegistryEntry
-        assert dict.fromkeys(livevars) == _self._alllivevars
+        _self._check_arguments(livevars)
 
     def can_enter_jit(_self, **livevars):
         # special-cased by ExtRegistryEntry
-        assert dict.fromkeys(livevars) == _self._alllivevars
+        _self._check_arguments(livevars)
 
     def loop_header(self):
         # special-cased by ExtRegistryEntry
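The new _check_arguments() heuristic only wants to enforce that greens and
reds are declared INTs first, then REFs, then FLOATs, because that is the
order the JIT needs. A stand-alone sketch of the same idea, with a much cruder
classifier (the real one also handles r_singlefloat, r_ulonglong, _freeze_ and
_alloc_flavor_):

    def classify(value):
        # the kind prefixes sort in the required declaration order
        if isinstance(value, (int, long)):
            return "1:INT"
        if isinstance(value, float):
            return "3:FLOAT"
        return "2:REF"

    def check_order(names, livevars):
        kinds = [classify(livevars[name]) for name in names]
        assert kinds == sorted(kinds), "bad order of variables: %r" % (kinds,)

    check_order(["i1", "r1", "f1"], {"i1": 42, "r1": object(), "f1": 3.5})
    try:
        check_order(["r1", "i1", "f1"], {"i1": 42, "r1": object(), "f1": 3.5})
    except AssertionError:
        pass  # expected: a REF declared before an INT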
diff --git a/pypy/rlib/objectmodel.py b/pypy/rlib/objectmodel.py
--- a/pypy/rlib/objectmodel.py
+++ b/pypy/rlib/objectmodel.py
@@ -23,9 +23,11 @@
 
 class _Specialize(object):
     def memo(self):
-        """ Specialize functions based on argument values. All arguments has
-        to be constant at the compile time. The whole function call is replaced
-        by a call result then.
+        """ Specialize the function based on argument values.  All arguments
+        have to be either constants or PBCs (i.e. instances of classes with a
+        _freeze_ method returning True).  The function call is replaced by
+        just its result, or in case several PBCs are used, by some fast
+        look-up of the result.
         """
         def decorated_func(func):
             func._annspecialcase_ = 'specialize:memo'
@@ -33,8 +35,8 @@
         return decorated_func
 
     def arg(self, *args):
-        """ Specialize function based on values of given positions of arguments.
-        They must be compile-time constants in order to work.
+        """ Specialize the function based on the values of given positions
+        of arguments.  They must be compile-time constants in order to work.
 
         There will be a copy of provided function for each combination
         of given arguments on positions in args (that can lead to
@@ -82,8 +84,7 @@
         return decorated_func
 
     def ll_and_arg(self, *args):
-        """ This is like ll(), but instead of specializing on all arguments,
-        specializes on only the arguments at the given positions
+        """ This is like ll(), and additionally like arg(...).
         """
         def decorated_func(func):
             func._annspecialcase_ = 'specialize:ll_and_arg' + self._wrap(args)
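For readers of the reworded docstrings: specialize.memo() means the decorated
function is evaluated during translation for each combination of constant /
PBC arguments, and its call sites are replaced by the precomputed results. A
hedged usage sketch (assumes a PyPy source checkout on the path; the class and
function names are made up):

    from pypy.rlib.objectmodel import specialize

    class Color(object):
        def __init__(self, name):
            self.name = name
        def _freeze_(self):
            return True     # prebuilt instances become PBCs

    RED = Color("red")
    GREEN = Color("green")

    @specialize.memo()
    def ansi_code(color):
        # runs at translation time only, once per prebuilt Color;
        # arbitrary Python is fine inside a memo function
        return {"red": 31, "green": 32}[color.name]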
diff --git a/pypy/rlib/test/test_jit.py b/pypy/rlib/test/test_jit.py
--- a/pypy/rlib/test/test_jit.py
+++ b/pypy/rlib/test/test_jit.py
@@ -2,6 +2,7 @@
 from pypy.conftest import option
 from pypy.rlib.jit import hint, we_are_jitted, JitDriver, elidable_promote
 from pypy.rlib.jit import JitHintError, oopspec, isconstant
+from pypy.rlib.rarithmetic import r_uint
 from pypy.translator.translator import TranslationContext, graphof
 from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin
 from pypy.rpython.lltypesystem import lltype
@@ -146,6 +147,43 @@
         res = self.interpret(f, [-234])
         assert res == 1
 
+    def test_argument_order_ok(self):
+        myjitdriver = JitDriver(greens=['i1', 'r1', 'f1'], reds=[])
+        class A(object):
+            pass
+        myjitdriver.jit_merge_point(i1=42, r1=A(), f1=3.5)
+        # assert did not raise
+
+    def test_argument_order_wrong(self):
+        myjitdriver = JitDriver(greens=['r1', 'i1', 'f1'], reds=[])
+        class A(object):
+            pass
+        e = raises(AssertionError,
+                   myjitdriver.jit_merge_point, i1=42, r1=A(), f1=3.5)
+
+    def test_argument_order_more_precision_later(self):
+        myjitdriver = JitDriver(greens=['r1', 'i1', 'r2', 'f1'], reds=[])
+        class A(object):
+            pass
+        myjitdriver.jit_merge_point(i1=42, r1=None, r2=None, f1=3.5)
+        e = raises(AssertionError,
+                   myjitdriver.jit_merge_point, i1=42, r1=A(), r2=None, f1=3.5)
+        assert "got ['2:REF', '1:INT', '?', '3:FLOAT']" in repr(e.value)
+
+    def test_argument_order_more_precision_later_2(self):
+        myjitdriver = JitDriver(greens=['r1', 'i1', 'r2', 'f1'], reds=[])
+        class A(object):
+            pass
+        myjitdriver.jit_merge_point(i1=42, r1=None, r2=A(), f1=3.5)
+        e = raises(AssertionError,
+                   myjitdriver.jit_merge_point, i1=42, r1=A(), r2=None, f1=3.5)
+        assert "got ['2:REF', '1:INT', '2:REF', '3:FLOAT']" in repr(e.value)
+
+    def test_argument_order_accept_r_uint(self):
+        # this used to fail on 64-bit, because r_uint == r_ulonglong
+        myjitdriver = JitDriver(greens=['i1'], reds=[])
+        myjitdriver.jit_merge_point(i1=r_uint(42))
+
 
 class TestJITLLtype(BaseTestJIT, LLRtypeMixin):
     pass
diff --git a/pypy/rpython/lltypesystem/rlist.py b/pypy/rpython/lltypesystem/rlist.py
--- a/pypy/rpython/lltypesystem/rlist.py
+++ b/pypy/rpython/lltypesystem/rlist.py
@@ -392,7 +392,11 @@
                                          ('list', r_list.lowleveltype),
                                          ('index', Signed)))
         self.ll_listiter = ll_listiter
-        self.ll_listnext = ll_listnext
+        if (isinstance(r_list, FixedSizeListRepr)
+                and not r_list.listitem.mutated):
+            self.ll_listnext = ll_listnext_foldable
+        else:
+            self.ll_listnext = ll_listnext
         self.ll_getnextindex = ll_getnextindex
 
 def ll_listiter(ITERPTR, lst):
@@ -409,5 +413,14 @@
     iter.index = index + 1      # cannot overflow because index < l.length
     return l.ll_getitem_fast(index)
 
+def ll_listnext_foldable(iter):
+    from pypy.rpython.rlist import ll_getitem_foldable_nonneg
+    l = iter.list
+    index = iter.index
+    if index >= l.ll_length():
+        raise StopIteration
+    iter.index = index + 1      # cannot overflow because index < l.length
+    return ll_getitem_foldable_nonneg(l, index)
+
 def ll_getnextindex(iter):
     return iter.index
diff --git a/pypy/rpython/memory/gc/generation.py b/pypy/rpython/memory/gc/generation.py
--- a/pypy/rpython/memory/gc/generation.py
+++ b/pypy/rpython/memory/gc/generation.py
@@ -41,8 +41,8 @@
 
     # the following values override the default arguments of __init__ when
     # translating to a real backend.
-    TRANSLATION_PARAMS = {'space_size': 8*1024*1024, # XXX adjust
-                          'nursery_size': 896*1024,
+    TRANSLATION_PARAMS = {'space_size': 8*1024*1024,     # 8 MB
+                          'nursery_size': 3*1024*1024,   # 3 MB
                           'min_nursery_size': 48*1024,
                           'auto_nursery_size': True}
 
@@ -92,8 +92,9 @@
         # the GC is fully setup now.  The rest can make use of it.
         if self.auto_nursery_size:
             newsize = nursery_size_from_env()
-            if newsize <= 0:
-                newsize = env.estimate_best_nursery_size()
+            #if newsize <= 0:
+            #    ---disabled--- just use the default value.
+            #    newsize = env.estimate_best_nursery_size()
             if newsize > 0:
                 self.set_nursery_size(newsize)
 
diff --git a/pypy/rpython/memory/gc/minimark.py b/pypy/rpython/memory/gc/minimark.py
--- a/pypy/rpython/memory/gc/minimark.py
+++ b/pypy/rpython/memory/gc/minimark.py
@@ -608,6 +608,11 @@
         specified as 0 if the object is not varsized.  The returned
         object is fully initialized and zero-filled."""
         #
+        # Here we really need a valid 'typeid', not 0 (as the JIT might
+        # try to send us if there is still a bug).
+        ll_assert(bool(self.combine(typeid, 0)),
+                  "external_malloc: typeid == 0")
+        #
         # Compute the total size, carefully checking for overflows.
         size_gc_header = self.gcheaderbuilder.size_gc_header
         nonvarsize = size_gc_header + self.fixed_size(typeid)
diff --git a/pypy/rpython/test/test_rlist.py b/pypy/rpython/test/test_rlist.py
--- a/pypy/rpython/test/test_rlist.py
+++ b/pypy/rpython/test/test_rlist.py
@@ -8,6 +8,7 @@
 from pypy.rpython.rlist import *
 from pypy.rpython.lltypesystem.rlist import ListRepr, FixedSizeListRepr, ll_newlist, ll_fixed_newlist
 from pypy.rpython.lltypesystem import rlist as ll_rlist
+from pypy.rpython.llinterp import LLException
 from pypy.rpython.ootypesystem import rlist as oo_rlist
 from pypy.rpython.rint import signed_repr
 from pypy.objspace.flow.model import Constant, Variable
@@ -1477,6 +1478,80 @@
         assert func1.oopspec == 'list.getitem_foldable(l, index)'
         assert not hasattr(func2, 'oopspec')
 
+    def test_iterate_over_immutable_list(self):
+        from pypy.rpython import rlist
+        class MyException(Exception):
+            pass
+        lst = list('abcdef')
+        def dummyfn():
+            total = 0
+            for c in lst:
+                total += ord(c)
+            return total
+        #
+        prev = rlist.ll_getitem_foldable_nonneg
+        try:
+            def seen_ok(l, index):
+                if index == 5:
+                    raise KeyError     # expected case
+                return prev(l, index)
+            rlist.ll_getitem_foldable_nonneg = seen_ok
+            e = raises(LLException, self.interpret, dummyfn, [])
+            assert 'KeyError' in str(e.value)
+        finally:
+            rlist.ll_getitem_foldable_nonneg = prev
+
+    def test_iterate_over_immutable_list_quasiimmut_attr(self):
+        from pypy.rpython import rlist
+        class MyException(Exception):
+            pass
+        class Foo:
+            _immutable_fields_ = ['lst?[*]']
+            lst = list('abcdef')
+        foo = Foo()
+        def dummyfn():
+            total = 0
+            for c in foo.lst:
+                total += ord(c)
+            return total
+        #
+        prev = rlist.ll_getitem_foldable_nonneg
+        try:
+            def seen_ok(l, index):
+                if index == 5:
+                    raise KeyError     # expected case
+                return prev(l, index)
+            rlist.ll_getitem_foldable_nonneg = seen_ok
+            e = raises(LLException, self.interpret, dummyfn, [])
+            assert 'KeyError' in str(e.value)
+        finally:
+            rlist.ll_getitem_foldable_nonneg = prev
+
+    def test_iterate_over_mutable_list(self):
+        from pypy.rpython import rlist
+        class MyException(Exception):
+            pass
+        lst = list('abcdef')
+        def dummyfn():
+            total = 0
+            for c in lst:
+                total += ord(c)
+            lst[0] = 'x'
+            return total
+        #
+        prev = rlist.ll_getitem_foldable_nonneg
+        try:
+            def seen_ok(l, index):
+                if index == 5:
+                    raise KeyError     # expected case
+                return prev(l, index)
+            rlist.ll_getitem_foldable_nonneg = seen_ok
+            res = self.interpret(dummyfn, [])
+            assert res == sum(map(ord, 'abcdef'))
+        finally:
+            rlist.ll_getitem_foldable_nonneg = prev
+
+
 class TestOOtype(BaseTestRlist, OORtypeMixin):
     rlist = oo_rlist
     type_system = 'ootype'
diff --git a/pypy/tool/jitlogparser/parser.py b/pypy/tool/jitlogparser/parser.py
--- a/pypy/tool/jitlogparser/parser.py
+++ b/pypy/tool/jitlogparser/parser.py
@@ -387,7 +387,7 @@
             m = re.search('guard \d+', comm)
             name = m.group(0)
         else:
-            name = comm[2:comm.find(':')-1]
+            name = " ".join(comm[2:].split(" ", 2)[:2])
         if name in dumps:
             bname, start_ofs, dump = dumps[name]
             loop.force_asm = (lambda dump=dump, start_ofs=start_ofs,
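The new expression keeps only the first two words of the comment (e.g.
"Loop 4") instead of everything up to the first ':', which depends on where
colons happen to appear in the rest of the comment. With a hypothetical
comment line:

    comm = "# Loop 4 (<code object f, file 'x.py', line 3>) : loop with 30 ops"

    old_name = comm[2:comm.find(':')-1]                # up to just before the ':'
    new_name = " ".join(comm[2:].split(" ", 2)[:2])    # just the first two words
    assert new_name == "Loop 4"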
diff --git a/pypy/tool/release/package.py b/pypy/tool/release/package.py
--- a/pypy/tool/release/package.py
+++ b/pypy/tool/release/package.py
@@ -82,6 +82,9 @@
     for file in ['LICENSE', 'README']:
         shutil.copy(str(basedir.join(file)), str(pypydir))
     pypydir.ensure('include', dir=True)
+    if sys.platform == 'win32':
+        shutil.copyfile(str(pypy_c.dirpath().join("libpypy-c.lib")),
+                        str(pypydir.join('include/python27.lib')))
     # we want to put there all *.h and *.inl from trunk/include
     # and from pypy/_interfaces
     includedir = basedir.join('include')
diff --git a/pypy/translator/c/gcc/trackgcroot.py b/pypy/translator/c/gcc/trackgcroot.py
--- a/pypy/translator/c/gcc/trackgcroot.py
+++ b/pypy/translator/c/gcc/trackgcroot.py
@@ -472,7 +472,7 @@
 
     IGNORE_OPS_WITH_PREFIXES = dict.fromkeys([
         'cmp', 'test', 'set', 'sahf', 'lahf', 'cld', 'std',
-        'rep', 'movs', 'lods', 'stos', 'scas', 'cwde', 'prefetch',
+        'rep', 'movs', 'movhp', 'lods', 'stos', 'scas', 'cwde', 'prefetch',
         # floating-point operations cannot produce GC pointers
         'f',
         'cvt', 'ucomi', 'comi', 'subs', 'subp' , 'adds', 'addp', 'xorp',
@@ -484,7 +484,7 @@
         'shl', 'shr', 'sal', 'sar', 'rol', 'ror', 'mul', 'imul', 'div', 'idiv',
         'bswap', 'bt', 'rdtsc',
         'punpck', 'pshufd', 'pcmp', 'pand', 'psllw', 'pslld', 'psllq',
-        'paddq', 'pinsr',
+        'paddq', 'pinsr', 'pmul', 'psrl',
         # sign-extending moves should not produce GC pointers
         'cbtw', 'cwtl', 'cwtd', 'cltd', 'cltq', 'cqto',
         # zero-extending moves should not produce GC pointers
diff --git a/pypy/translator/c/src/asm_gcc_x86.h b/pypy/translator/c/src/asm_gcc_x86.h
--- a/pypy/translator/c/src/asm_gcc_x86.h
+++ b/pypy/translator/c/src/asm_gcc_x86.h
@@ -102,6 +102,12 @@
 #endif  /* !PYPY_CPU_HAS_STANDARD_PRECISION */
 
 
+#ifdef PYPY_X86_CHECK_SSE2
+#define PYPY_X86_CHECK_SSE2_DEFINED
+extern void pypy_x86_check_sse2(void);
+#endif
+
+
 /* implementations */
 
 #ifndef PYPY_NOT_MAIN_FILE
@@ -113,4 +119,25 @@
 }
 #  endif
 
+#  ifdef PYPY_X86_CHECK_SSE2
+void pypy_x86_check_sse2(void)
+{
+    //Read the CPU features.
+    int features;
+    asm("mov $1, %%eax\n"
+        "cpuid\n"
+        "mov %%edx, %0"
+        : "=g"(features) : : "eax", "ebx", "edx", "ecx");
+    
+    //Check bits 25 and 26, this indicates SSE2 support
+    if (((features & (1 << 25)) == 0) || ((features & (1 << 26)) == 0))
+    {
+        fprintf(stderr, "Old CPU with no SSE2 support, cannot continue.\n"
+                        "You need to re-translate with "
+                        "'--jit-backend=x86-without-sse2'\n");
+        abort();
+    }
+}
+#  endif
+
 #endif
diff --git a/pypy/translator/c/src/debug_print.c b/pypy/translator/c/src/debug_print.c
--- a/pypy/translator/c/src/debug_print.c
+++ b/pypy/translator/c/src/debug_print.c
@@ -1,3 +1,4 @@
+#define PYPY_NOT_MAIN_FILE
 
 #include <string.h>
 #include <stddef.h>
diff --git a/pypy/translator/c/src/dtoa.c b/pypy/translator/c/src/dtoa.c
--- a/pypy/translator/c/src/dtoa.c
+++ b/pypy/translator/c/src/dtoa.c
@@ -46,13 +46,13 @@
  *     of return type *Bigint all return NULL to indicate a malloc failure.
  *     Similarly, rv_alloc and nrv_alloc (return type char *) return NULL on
  *     failure.  bigcomp now has return type int (it used to be void) and
- *     returns -1 on failure and 0 otherwise.  _Py_dg_dtoa returns NULL
- *     on failure.  _Py_dg_strtod indicates failure due to malloc failure
+ *     returns -1 on failure and 0 otherwise.  __Py_dg_dtoa returns NULL
+ *     on failure.  __Py_dg_strtod indicates failure due to malloc failure
  *     by returning -1.0, setting errno=ENOMEM and *se to s00.
  *
  *  4. The static variable dtoa_result has been removed.  Callers of
- *     _Py_dg_dtoa are expected to call _Py_dg_freedtoa to free
- *     the memory allocated by _Py_dg_dtoa.
+ *     __Py_dg_dtoa are expected to call __Py_dg_freedtoa to free
+ *     the memory allocated by __Py_dg_dtoa.
  *
  *  5. The code has been reformatted to better fit with Python's
  *     C style guide (PEP 7).
@@ -61,7 +61,7 @@
  *     that hasn't been MALLOC'ed, private_mem should only be used when k <=
  *     Kmax.
  *
- *  7. _Py_dg_strtod has been modified so that it doesn't accept strings with
+ *  7. __Py_dg_strtod has been modified so that it doesn't accept strings with
  *     leading whitespace.
  *
  ***************************************************************/
@@ -283,7 +283,7 @@
 #define Big0 (Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1))
 #define Big1 0xffffffff
 
-/* struct BCinfo is used to pass information from _Py_dg_strtod to bigcomp */
+/* struct BCinfo is used to pass information from __Py_dg_strtod to bigcomp */
 
 typedef struct BCinfo BCinfo;
 struct
@@ -494,7 +494,7 @@
 
 /* convert a string s containing nd decimal digits (possibly containing a
    decimal separator at position nd0, which is ignored) to a Bigint.  This
-   function carries on where the parsing code in _Py_dg_strtod leaves off: on
+   function carries on where the parsing code in __Py_dg_strtod leaves off: on
    entry, y9 contains the result of converting the first 9 digits.  Returns
    NULL on failure. */
 
@@ -1050,7 +1050,7 @@
 }
 
 /* Convert a scaled double to a Bigint plus an exponent.  Similar to d2b,
-   except that it accepts the scale parameter used in _Py_dg_strtod (which
+   except that it accepts the scale parameter used in __Py_dg_strtod (which
    should be either 0 or 2*P), and the normalization for the return value is
    different (see below).  On input, d should be finite and nonnegative, and d
    / 2**scale should be exactly representable as an IEEE 754 double.
@@ -1351,9 +1351,9 @@
 /* The bigcomp function handles some hard cases for strtod, for inputs
    with more than STRTOD_DIGLIM digits.  It's called once an initial
    estimate for the double corresponding to the input string has
-   already been obtained by the code in _Py_dg_strtod.
+   already been obtained by the code in __Py_dg_strtod.
 
-   The bigcomp function is only called after _Py_dg_strtod has found a
+   The bigcomp function is only called after __Py_dg_strtod has found a
    double value rv such that either rv or rv + 1ulp represents the
    correctly rounded value corresponding to the original string.  It
    determines which of these two values is the correct one by
@@ -1368,12 +1368,12 @@
      s0 points to the first significant digit of the input string.
 
      rv is a (possibly scaled) estimate for the closest double value to the
-        value represented by the original input to _Py_dg_strtod.  If
+        value represented by the original input to __Py_dg_strtod.  If
         bc->scale is nonzero, then rv/2^(bc->scale) is the approximation to
         the input value.
 
      bc is a struct containing information gathered during the parsing and
-        estimation steps of _Py_dg_strtod.  Description of fields follows:
+        estimation steps of __Py_dg_strtod.  Description of fields follows:
 
         bc->e0 gives the exponent of the input value, such that dv = (integer
            given by the bd->nd digits of s0) * 10**e0
@@ -1505,7 +1505,7 @@
 }
 
 static double
-_Py_dg_strtod(const char *s00, char **se)
+__Py_dg_strtod(const char *s00, char **se)
 {
     int bb2, bb5, bbe, bd2, bd5, bs2, c, dsign, e, e1, error;
     int esign, i, j, k, lz, nd, nd0, odd, sign;
@@ -1849,7 +1849,7 @@
 
     for(;;) {
 
-        /* This is the main correction loop for _Py_dg_strtod.
+        /* This is the main correction loop for __Py_dg_strtod.
 
            We've got a decimal value tdv, and a floating-point approximation
            srv=rv/2^bc.scale to tdv.  The aim is to determine whether srv is
@@ -2283,7 +2283,7 @@
  */
 
 static void
-_Py_dg_freedtoa(char *s)
+__Py_dg_freedtoa(char *s)
 {
     Bigint *b = (Bigint *)((int *)s - 1);
     b->maxwds = 1 << (b->k = *(int*)b);
@@ -2325,11 +2325,11 @@
  */
 
 /* Additional notes (METD): (1) returns NULL on failure.  (2) to avoid memory
-   leakage, a successful call to _Py_dg_dtoa should always be matched by a
-   call to _Py_dg_freedtoa. */
+   leakage, a successful call to __Py_dg_dtoa should always be matched by a
+   call to __Py_dg_freedtoa. */
 
 static char *
-_Py_dg_dtoa(double dd, int mode, int ndigits,
+__Py_dg_dtoa(double dd, int mode, int ndigits,
             int *decpt, int *sign, char **rve)
 {
     /*  Arguments ndigits, decpt, sign are similar to those
@@ -2926,7 +2926,7 @@
     if (b)
         Bfree(b);
     if (s0)
-        _Py_dg_freedtoa(s0);
+        __Py_dg_freedtoa(s0);
     return NULL;
 }
 
@@ -2947,7 +2947,7 @@
     _PyPy_SET_53BIT_PRECISION_HEADER;
 
     _PyPy_SET_53BIT_PRECISION_START;
-    result = _Py_dg_strtod(s00, se);
+    result = __Py_dg_strtod(s00, se);
     _PyPy_SET_53BIT_PRECISION_END;
     return result;
 }
@@ -2959,14 +2959,14 @@
     _PyPy_SET_53BIT_PRECISION_HEADER;
 
     _PyPy_SET_53BIT_PRECISION_START;
-    result = _Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve);
+    result = __Py_dg_dtoa(dd, mode, ndigits, decpt, sign, rve);
     _PyPy_SET_53BIT_PRECISION_END;
     return result;
 }
 
 void _PyPy_dg_freedtoa(char *s)
 {
-    _Py_dg_freedtoa(s);
+    __Py_dg_freedtoa(s);
 }
 /* End PYPY hacks */
 
diff --git a/pypy/translator/c/src/main.h b/pypy/translator/c/src/main.h
--- a/pypy/translator/c/src/main.h
+++ b/pypy/translator/c/src/main.h
@@ -36,6 +36,9 @@
     RPyListOfString *list;
 
     pypy_asm_stack_bottom();
+#ifdef PYPY_X86_CHECK_SSE2_DEFINED
+    pypy_x86_check_sse2();
+#endif
     instrument_setup();
 
     if (sizeof(void*) != SIZEOF_LONG) {
diff --git a/pypy/translator/driver.py b/pypy/translator/driver.py
--- a/pypy/translator/driver.py
+++ b/pypy/translator/driver.py
@@ -559,6 +559,9 @@
                 newsoname = newexename.new(basename=soname.basename)
                 shutil.copy(str(soname), str(newsoname))
                 self.log.info("copied: %s" % (newsoname,))
+                if sys.platform == 'win32':
+                    shutil.copyfile(str(soname.new(ext='lib')),
+                                    str(newsoname.new(ext='lib')))
             self.c_entryp = newexename
         self.log.info('usession directory: %s' % (udir,))
         self.log.info("created: %s" % (self.c_entryp,))
diff --git a/pypy/translator/sandbox/test/test_sandbox.py b/pypy/translator/sandbox/test/test_sandbox.py
--- a/pypy/translator/sandbox/test/test_sandbox.py
+++ b/pypy/translator/sandbox/test/test_sandbox.py
@@ -145,9 +145,9 @@
     g = pipe.stdin
     f = pipe.stdout
     expect(f, g, "ll_os.ll_os_getenv", ("PYPY_GENERATIONGC_NURSERY",), None)
-    if sys.platform.startswith('linux'):  # on Mac, uses another (sandboxsafe) approach
-        expect(f, g, "ll_os.ll_os_open", ("/proc/cpuinfo", 0, 420),
-               OSError(5232, "xyz"))
+    #if sys.platform.startswith('linux'):
+    #    expect(f, g, "ll_os.ll_os_open", ("/proc/cpuinfo", 0, 420),
+    #           OSError(5232, "xyz"))
     expect(f, g, "ll_os.ll_os_getenv", ("PYPY_GC_DEBUG",), None)
     g.close()
     tail = f.read()

