[pypy-commit] pypy numpy-comparison: merge default, making all equal to default

snus_mumrik noreply at buildbot.pypy.org
Wed Oct 19 23:49:13 CEST 2011


Author: Ilya Osadchiy <osadchiy.ilya at gmail.com>
Branch: numpy-comparison
Changeset: r48245:8e5dc0cfdf63
Date: 2011-10-01 15:31 +0300
http://bitbucket.org/pypy/pypy/changeset/8e5dc0cfdf63/

Log:	merge default, making all equal to default

diff too long, truncating to 10000 out of 22893 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -1,2 +1,3 @@
 b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5
 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked
+d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -36,48 +36,45 @@
     print >> sys.stderr, "Warning: could not guess file type, using 'dot'"
     return 'unknown'
 
-def dot2plain(content, contenttype, use_codespeak=False):
-    if contenttype == 'plain':
-        # already a .plain file
-        return content
+def dot2plain_graphviz(content, contenttype, use_codespeak=False):
+    if contenttype != 'neato':
+        cmdline = 'dot -Tplain'
+    else:
+        cmdline = 'neato -Tplain'
+    #print >> sys.stderr, '* running:', cmdline
+    close_fds = sys.platform != 'win32'
+    p = subprocess.Popen(cmdline, shell=True, close_fds=close_fds,
+                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+    (child_in, child_out) = (p.stdin, p.stdout)
+    try:
+        import thread
+    except ImportError:
+        bkgndwrite(child_in, content)
+    else:
+        thread.start_new_thread(bkgndwrite, (child_in, content))
+    plaincontent = child_out.read()
+    child_out.close()
+    if not plaincontent:    # 'dot' is likely not installed
+        raise PlainParseError("no result from running 'dot'")
+    return plaincontent
 
-    if not use_codespeak:
-        if contenttype != 'neato':
-            cmdline = 'dot -Tplain'
-        else:
-            cmdline = 'neato -Tplain'
-        #print >> sys.stderr, '* running:', cmdline
-        close_fds = sys.platform != 'win32'
-        p = subprocess.Popen(cmdline, shell=True, close_fds=close_fds,
-                             stdin=subprocess.PIPE, stdout=subprocess.PIPE)
-        (child_in, child_out) = (p.stdin, p.stdout)
-        try:
-            import thread
-        except ImportError:
-            bkgndwrite(child_in, content)
-        else:
-            thread.start_new_thread(bkgndwrite, (child_in, content))
-        plaincontent = child_out.read()
-        child_out.close()
-        if not plaincontent:    # 'dot' is likely not installed
-            raise PlainParseError("no result from running 'dot'")
-    else:
-        import urllib
-        request = urllib.urlencode({'dot': content})
-        url = 'http://codespeak.net/pypy/convertdot.cgi'
-        print >> sys.stderr, '* posting:', url
-        g = urllib.urlopen(url, data=request)
-        result = []
-        while True:
-            data = g.read(16384)
-            if not data:
-                break
-            result.append(data)
-        g.close()
-        plaincontent = ''.join(result)
-        # very simple-minded way to give a somewhat better error message
-        if plaincontent.startswith('<body'):
-            raise Exception("the dot on codespeak has very likely crashed")
+def dot2plain_codespeak(content, contenttype):
+    import urllib
+    request = urllib.urlencode({'dot': content})
+    url = 'http://codespeak.net/pypy/convertdot.cgi'
+    print >> sys.stderr, '* posting:', url
+    g = urllib.urlopen(url, data=request)
+    result = []
+    while True:
+        data = g.read(16384)
+        if not data:
+            break
+        result.append(data)
+    g.close()
+    plaincontent = ''.join(result)
+    # very simple-minded way to give a somewhat better error message
+    if plaincontent.startswith('<body'):
+        raise Exception("the dot on codespeak has very likely crashed")
     return plaincontent
 
 def bkgndwrite(f, data):
@@ -148,10 +145,13 @@
 
 def parse_dot(graph_id, content, links={}, fixedfont=False):
     contenttype = guess_type(content)
-    try:
-        plaincontent = dot2plain(content, contenttype, use_codespeak=False)
-        return list(parse_plain(graph_id, plaincontent, links, fixedfont))
-    except PlainParseError:
-        # failed, retry via codespeak
-        plaincontent = dot2plain(content, contenttype, use_codespeak=True)
-        return list(parse_plain(graph_id, plaincontent, links, fixedfont))
+    if contenttype == 'plain':
+        plaincontent = content
+    else:
+        try:
+            plaincontent = dot2plain_graphviz(content, contenttype)
+        except PlainParseError, e:
+            print e
+            # failed, retry via codespeak
+            plaincontent = dot2plain_codespeak(content, contenttype)
+    return list(parse_plain(graph_id, plaincontent, links, fixedfont))
diff --git a/lib-python/2.7/ssl.py b/lib-python/2.7/ssl.py
--- a/lib-python/2.7/ssl.py
+++ b/lib-python/2.7/ssl.py
@@ -62,7 +62,6 @@
 from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
 from _ssl import SSLError
 from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
-from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
 from _ssl import RAND_status, RAND_egd, RAND_add
 from _ssl import \
      SSL_ERROR_ZERO_RETURN, \
@@ -74,6 +73,18 @@
      SSL_ERROR_WANT_CONNECT, \
      SSL_ERROR_EOF, \
      SSL_ERROR_INVALID_ERROR_CODE
+from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
+_PROTOCOL_NAMES = {
+    PROTOCOL_TLSv1: "TLSv1",
+    PROTOCOL_SSLv23: "SSLv23",
+    PROTOCOL_SSLv3: "SSLv3",
+}
+try:
+    from _ssl import PROTOCOL_SSLv2
+except ImportError:
+    pass
+else:
+    _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
 
 from socket import socket, _fileobject, _delegate_methods, error as socket_error
 from socket import getnameinfo as _getnameinfo
@@ -408,16 +419,7 @@
     return DER_cert_to_PEM_cert(dercert)
 
 def get_protocol_name(protocol_code):
-    if protocol_code == PROTOCOL_TLSv1:
-        return "TLSv1"
-    elif protocol_code == PROTOCOL_SSLv23:
-        return "SSLv23"
-    elif protocol_code == PROTOCOL_SSLv2:
-        return "SSLv2"
-    elif protocol_code == PROTOCOL_SSLv3:
-        return "SSLv3"
-    else:
-        return "<unknown>"
+    return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
 
 
 # a replacement for the old socket.ssl function
diff --git a/lib-python/2.7/test/test_ssl.py b/lib-python/2.7/test/test_ssl.py
--- a/lib-python/2.7/test/test_ssl.py
+++ b/lib-python/2.7/test/test_ssl.py
@@ -58,32 +58,35 @@
 
 # Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
 def skip_if_broken_ubuntu_ssl(func):
-    # We need to access the lower-level wrapper in order to create an
-    # implicit SSL context without trying to connect or listen.
-    try:
-        import _ssl
-    except ImportError:
-        # The returned function won't get executed, just ignore the error
-        pass
-    @functools.wraps(func)
-    def f(*args, **kwargs):
+    if hasattr(ssl, 'PROTOCOL_SSLv2'):
+        # We need to access the lower-level wrapper in order to create an
+        # implicit SSL context without trying to connect or listen.
         try:
-            s = socket.socket(socket.AF_INET)
-            _ssl.sslwrap(s._sock, 0, None, None,
-                         ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None)
-        except ssl.SSLError as e:
-            if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
-                platform.linux_distribution() == ('debian', 'squeeze/sid', '')
-                and 'Invalid SSL protocol variant specified' in str(e)):
-                raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
-        return func(*args, **kwargs)
-    return f
+            import _ssl
+        except ImportError:
+            # The returned function won't get executed, just ignore the error
+            pass
+        @functools.wraps(func)
+        def f(*args, **kwargs):
+            try:
+                s = socket.socket(socket.AF_INET)
+                _ssl.sslwrap(s._sock, 0, None, None,
+                             ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None)
+            except ssl.SSLError as e:
+                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
+                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')
+                    and 'Invalid SSL protocol variant specified' in str(e)):
+                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
+            return func(*args, **kwargs)
+        return f
+    else:
+        return func
 
 
 class BasicSocketTests(unittest.TestCase):
 
     def test_constants(self):
-        ssl.PROTOCOL_SSLv2
+        #ssl.PROTOCOL_SSLv2
         ssl.PROTOCOL_SSLv23
         ssl.PROTOCOL_SSLv3
         ssl.PROTOCOL_TLSv1
@@ -964,7 +967,8 @@
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
-            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
+            if hasattr(ssl, 'PROTOCOL_SSLv2'):
+                try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
 
@@ -976,7 +980,8 @@
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
-            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
+            if hasattr(ssl, 'PROTOCOL_SSLv2'):
+                try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False)
 
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -317,7 +317,7 @@
     RegrTest('test_multibytecodec.py', usemodules='_multibytecodec'),
     RegrTest('test_multibytecodec_support.py', skip="not a test"),
     RegrTest('test_multifile.py'),
-    RegrTest('test_multiprocessing.py', skip='FIXME leaves subprocesses'),
+    RegrTest('test_multiprocessing.py', skip="FIXME leaves subprocesses"),
     RegrTest('test_mutants.py', core="possibly"),
     RegrTest('test_mutex.py'),
     RegrTest('test_netrc.py'),
@@ -359,7 +359,7 @@
     RegrTest('test_property.py', core=True),
     RegrTest('test_pstats.py'),
     RegrTest('test_pty.py', skip="unsupported extension module"),
-    RegrTest('test_pwd.py', skip=skip_win32),
+    RegrTest('test_pwd.py', usemodules="pwd", skip=skip_win32),
     RegrTest('test_py3kwarn.py'),
     RegrTest('test_pyclbr.py'),
     RegrTest('test_pydoc.py'),
diff --git a/lib-python/modified-2.7/gzip.py b/lib-python/modified-2.7/gzip.py
deleted file mode 100644
--- a/lib-python/modified-2.7/gzip.py
+++ /dev/null
@@ -1,514 +0,0 @@
-"""Functions that read and write gzipped files.
-
-The user of the file doesn't have to worry about the compression,
-but random access is not allowed."""
-
-# based on Andrew Kuchling's minigzip.py distributed with the zlib module
-
-import struct, sys, time, os
-import zlib
-import io
-import __builtin__
-
-__all__ = ["GzipFile","open"]
-
-FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
-
-READ, WRITE = 1, 2
-
-def write32u(output, value):
-    # The L format writes the bit pattern correctly whether signed
-    # or unsigned.
-    output.write(struct.pack("<L", value))
-
-def read32(input):
-    return struct.unpack("<I", input.read(4))[0]
-
-def open(filename, mode="rb", compresslevel=9):
-    """Shorthand for GzipFile(filename, mode, compresslevel).
-
-    The filename argument is required; mode defaults to 'rb'
-    and compresslevel defaults to 9.
-
-    """
-    return GzipFile(filename, mode, compresslevel)
-
-class GzipFile(io.BufferedIOBase):
-    """The GzipFile class simulates most of the methods of a file object with
-    the exception of the readinto() and truncate() methods.
-
-    """
-
-    myfileobj = None
-    max_read_chunk = 10 * 1024 * 1024   # 10Mb
-
-    def __init__(self, filename=None, mode=None,
-                 compresslevel=9, fileobj=None, mtime=None):
-        """Constructor for the GzipFile class.
-
-        At least one of fileobj and filename must be given a
-        non-trivial value.
-
-        The new class instance is based on fileobj, which can be a regular
-        file, a StringIO object, or any other object which simulates a file.
-        It defaults to None, in which case filename is opened to provide
-        a file object.
-
-        When fileobj is not None, the filename argument is only used to be
-        included in the gzip file header, which may includes the original
-        filename of the uncompressed file.  It defaults to the filename of
-        fileobj, if discernible; otherwise, it defaults to the empty string,
-        and in this case the original filename is not included in the header.
-
-        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
-        depending on whether the file will be read or written.  The default
-        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
-        Be aware that only the 'rb', 'ab', and 'wb' values should be used
-        for cross-platform portability.
-
-        The compresslevel argument is an integer from 1 to 9 controlling the
-        level of compression; 1 is fastest and produces the least compression,
-        and 9 is slowest and produces the most compression.  The default is 9.
-
-        The mtime argument is an optional numeric timestamp to be written
-        to the stream when compressing.  All gzip compressed streams
-        are required to contain a timestamp.  If omitted or None, the
-        current time is used.  This module ignores the timestamp when
-        decompressing; however, some programs, such as gunzip, make use
-        of it.  The format of the timestamp is the same as that of the
-        return value of time.time() and of the st_mtime member of the
-        object returned by os.stat().
-
-        """
-
-        # guarantee the file is opened in binary mode on platforms
-        # that care about that sort of thing
-        if mode and 'b' not in mode:
-            mode += 'b'
-        if fileobj is None:
-            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
-        if filename is None:
-            if hasattr(fileobj, 'name'): filename = fileobj.name
-            else: filename = ''
-        if mode is None:
-            if hasattr(fileobj, 'mode'): mode = fileobj.mode
-            else: mode = 'rb'
-
-        if mode[0:1] == 'r':
-            self.mode = READ
-            # Set flag indicating start of a new member
-            self._new_member = True
-            # Buffer data read from gzip file. extrastart is offset in
-            # stream where buffer starts. extrasize is number of
-            # bytes remaining in buffer from current stream position.
-            self.extrabuf = ""
-            self.extrasize = 0
-            self.extrastart = 0
-            self.name = filename
-            # Starts small, scales exponentially
-            self.min_readsize = 100
-
-        elif mode[0:1] == 'w' or mode[0:1] == 'a':
-            self.mode = WRITE
-            self._init_write(filename)
-            self.compress = zlib.compressobj(compresslevel,
-                                             zlib.DEFLATED,
-                                             -zlib.MAX_WBITS,
-                                             zlib.DEF_MEM_LEVEL,
-                                             0)
-        else:
-            raise IOError, "Mode " + mode + " not supported"
-
-        self.fileobj = fileobj
-        self.offset = 0
-        self.mtime = mtime
-
-        if self.mode == WRITE:
-            self._write_gzip_header()
-
-    @property
-    def filename(self):
-        import warnings
-        warnings.warn("use the name attribute", DeprecationWarning, 2)
-        if self.mode == WRITE and self.name[-3:] != ".gz":
-            return self.name + ".gz"
-        return self.name
-
-    def __repr__(self):
-        s = repr(self.fileobj)
-        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
-
-    def _check_closed(self):
-        """Raises a ValueError if the underlying file object has been closed.
-
-        """
-        if self.closed:
-            raise ValueError('I/O operation on closed file.')
-
-    def _init_write(self, filename):
-        self.name = filename
-        self.crc = zlib.crc32("") & 0xffffffffL
-        self.size = 0
-        self.writebuf = []
-        self.bufsize = 0
-
-    def _write_gzip_header(self):
-        self.fileobj.write('\037\213')             # magic header
-        self.fileobj.write('\010')                 # compression method
-        fname = os.path.basename(self.name)
-        if fname.endswith(".gz"):
-            fname = fname[:-3]
-        flags = 0
-        if fname:
-            flags = FNAME
-        self.fileobj.write(chr(flags))
-        mtime = self.mtime
-        if mtime is None:
-            mtime = time.time()
-        write32u(self.fileobj, long(mtime))
-        self.fileobj.write('\002')
-        self.fileobj.write('\377')
-        if fname:
-            self.fileobj.write(fname + '\000')
-
-    def _init_read(self):
-        self.crc = zlib.crc32("") & 0xffffffffL
-        self.size = 0
-
-    def _read_gzip_header(self):
-        magic = self.fileobj.read(2)
-        if magic != '\037\213':
-            raise IOError, 'Not a gzipped file'
-        method = ord( self.fileobj.read(1) )
-        if method != 8:
-            raise IOError, 'Unknown compression method'
-        flag = ord( self.fileobj.read(1) )
-        self.mtime = read32(self.fileobj)
-        # extraflag = self.fileobj.read(1)
-        # os = self.fileobj.read(1)
-        self.fileobj.read(2)
-
-        if flag & FEXTRA:
-            # Read & discard the extra field, if present
-            xlen = ord(self.fileobj.read(1))
-            xlen = xlen + 256*ord(self.fileobj.read(1))
-            self.fileobj.read(xlen)
-        if flag & FNAME:
-            # Read and discard a null-terminated string containing the filename
-            while True:
-                s = self.fileobj.read(1)
-                if not s or s=='\000':
-                    break
-        if flag & FCOMMENT:
-            # Read and discard a null-terminated string containing a comment
-            while True:
-                s = self.fileobj.read(1)
-                if not s or s=='\000':
-                    break
-        if flag & FHCRC:
-            self.fileobj.read(2)     # Read & discard the 16-bit header CRC
-
-    def write(self,data):
-        self._check_closed()
-        if self.mode != WRITE:
-            import errno
-            raise IOError(errno.EBADF, "write() on read-only GzipFile object")
-
-        if self.fileobj is None:
-            raise ValueError, "write() on closed GzipFile object"
-
-        # Convert data type if called by io.BufferedWriter.
-        if isinstance(data, memoryview):
-            data = data.tobytes()
-
-        if len(data) > 0:
-            self.size = self.size + len(data)
-            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
-            self.fileobj.write( self.compress.compress(data) )
-            self.offset += len(data)
-
-        return len(data)
-
-    def read(self, size=-1):
-        self._check_closed()
-        if self.mode != READ:
-            import errno
-            raise IOError(errno.EBADF, "read() on write-only GzipFile object")
-
-        if self.extrasize <= 0 and self.fileobj is None:
-            return ''
-
-        readsize = 1024
-        if size < 0:        # get the whole thing
-            try:
-                while True:
-                    self._read(readsize)
-                    readsize = min(self.max_read_chunk, readsize * 2)
-            except EOFError:
-                size = self.extrasize
-        elif size == 0:
-            return ""
-        else:               # just get some more of it
-            try:
-                while size > self.extrasize:
-                    self._read(readsize)
-                    readsize = min(self.max_read_chunk, readsize * 2)
-            except EOFError:
-                if size > self.extrasize:
-                    size = self.extrasize
-
-        offset = self.offset - self.extrastart
-        chunk = self.extrabuf[offset: offset + size]
-        self.extrasize = self.extrasize - size
-
-        self.offset += size
-        return chunk
-
-    def _unread(self, buf):
-        self.extrasize = len(buf) + self.extrasize
-        self.offset -= len(buf)
-
-    def _read(self, size=1024):
-        if self.fileobj is None:
-            raise EOFError, "Reached EOF"
-
-        if self._new_member:
-            # If the _new_member flag is set, we have to
-            # jump to the next member, if there is one.
-            #
-            # First, check if we're at the end of the file;
-            # if so, it's time to stop; no more members to read.
-            pos = self.fileobj.tell()   # Save current position
-            self.fileobj.seek(0, 2)     # Seek to end of file
-            if pos == self.fileobj.tell():
-                raise EOFError, "Reached EOF"
-            else:
-                self.fileobj.seek( pos ) # Return to original position
-
-            self._init_read()
-            self._read_gzip_header()
-            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
-            self._new_member = False
-
-        # Read a chunk of data from the file
-        buf = self.fileobj.read(size)
-
-        # If the EOF has been reached, flush the decompression object
-        # and mark this object as finished.
-
-        if buf == "":
-            uncompress = self.decompress.flush()
-            self._read_eof()
-            self._add_read_data( uncompress )
-            raise EOFError, 'Reached EOF'
-
-        uncompress = self.decompress.decompress(buf)
-        self._add_read_data( uncompress )
-
-        if self.decompress.unused_data != "":
-            # Ending case: we've come to the end of a member in the file,
-            # so seek back to the start of the unused data, finish up
-            # this member, and read a new gzip header.
-            # (The number of bytes to seek back is the length of the unused
-            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
-            self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
-
-            # Check the CRC and file size, and set the flag so we read
-            # a new member on the next call
-            self._read_eof()
-            self._new_member = True
-
-    def _add_read_data(self, data):
-        self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
-        offset = self.offset - self.extrastart
-        self.extrabuf = self.extrabuf[offset:] + data
-        self.extrasize = self.extrasize + len(data)
-        self.extrastart = self.offset
-        self.size = self.size + len(data)
-
-    def _read_eof(self):
-        # We've read to the end of the file, so we have to rewind in order
-        # to reread the 8 bytes containing the CRC and the file size.
-        # We check the that the computed CRC and size of the
-        # uncompressed data matches the stored values.  Note that the size
-        # stored is the true file size mod 2**32.
-        self.fileobj.seek(-8, 1)
-        crc32 = read32(self.fileobj)
-        isize = read32(self.fileobj)  # may exceed 2GB
-        if crc32 != self.crc:
-            raise IOError("CRC check failed %s != %s" % (hex(crc32),
-                                                         hex(self.crc)))
-        elif isize != (self.size & 0xffffffffL):
-            raise IOError, "Incorrect length of data produced"
-
-        # Gzip files can be padded with zeroes and still have archives.
-        # Consume all zero bytes and set the file position to the first
-        # non-zero byte. See http://www.gzip.org/#faq8
-        c = "\x00"
-        while c == "\x00":
-            c = self.fileobj.read(1)
-        if c:
-            self.fileobj.seek(-1, 1)
-
-    @property
-    def closed(self):
-        return self.fileobj is None
-
-    def close(self):
-        if self.fileobj is None:
-            return
-        if self.mode == WRITE:
-            self.fileobj.write(self.compress.flush())
-            write32u(self.fileobj, self.crc)
-            # self.size may exceed 2GB, or even 4GB
-            write32u(self.fileobj, self.size & 0xffffffffL)
-            self.fileobj = None
-        elif self.mode == READ:
-            self.fileobj = None
-        if self.myfileobj:
-            self.myfileobj.close()
-            self.myfileobj = None
-
-    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
-        self._check_closed()
-        if self.mode == WRITE:
-            # Ensure the compressor's buffer is flushed
-            self.fileobj.write(self.compress.flush(zlib_mode))
-            self.fileobj.flush()
-
-    def fileno(self):
-        """Invoke the underlying file object's fileno() method.
-
-        This will raise AttributeError if the underlying file object
-        doesn't support fileno().
-        """
-        return self.fileobj.fileno()
-
-    def rewind(self):
-        '''Return the uncompressed stream file position indicator to the
-        beginning of the file'''
-        if self.mode != READ:
-            raise IOError("Can't rewind in write mode")
-        self.fileobj.seek(0)
-        self._new_member = True
-        self.extrabuf = ""
-        self.extrasize = 0
-        self.extrastart = 0
-        self.offset = 0
-
-    def readable(self):
-        return self.mode == READ
-
-    def writable(self):
-        return self.mode == WRITE
-
-    def seekable(self):
-        return True
-
-    def seek(self, offset, whence=0):
-        if whence:
-            if whence == 1:
-                offset = self.offset + offset
-            else:
-                raise ValueError('Seek from end not supported')
-        if self.mode == WRITE:
-            if offset < self.offset:
-                raise IOError('Negative seek in write mode')
-            count = offset - self.offset
-            for i in range(count // 1024):
-                self.write(1024 * '\0')
-            self.write((count % 1024) * '\0')
-        elif self.mode == READ:
-            if offset == self.offset:
-                self.read(0) # to make sure that this file is open
-                return self.offset
-            if offset < self.offset:
-                # for negative seek, rewind and do positive seek
-                self.rewind()
-            count = offset - self.offset
-            for i in range(count // 1024):
-                self.read(1024)
-            self.read(count % 1024)
-
-        return self.offset
-
-    def readline(self, size=-1):
-        if size < 0:
-            # Shortcut common case - newline found in buffer.
-            offset = self.offset - self.extrastart
-            i = self.extrabuf.find('\n', offset) + 1
-            if i > 0:
-                self.extrasize -= i - offset
-                self.offset += i - offset
-                return self.extrabuf[offset: i]
-
-            size = sys.maxint
-            readsize = self.min_readsize
-        else:
-            readsize = size
-        bufs = []
-        while size != 0:
-            c = self.read(readsize)
-            i = c.find('\n')
-
-            # We set i=size to break out of the loop under two
-            # conditions: 1) there's no newline, and the chunk is
-            # larger than size, or 2) there is a newline, but the
-            # resulting line would be longer than 'size'.
-            if (size <= i) or (i == -1 and len(c) > size):
-                i = size - 1
-
-            if i >= 0 or c == '':
-                bufs.append(c[:i + 1])    # Add portion of last chunk
-                self._unread(c[i + 1:])   # Push back rest of chunk
-                break
-
-            # Append chunk to list, decrease 'size',
-            bufs.append(c)
-            size = size - len(c)
-            readsize = min(size, readsize * 2)
-        if readsize > self.min_readsize:
-            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
-        return ''.join(bufs) # Return resulting line
-
-
-def _test():
-    # Act like gzip; with -d, act like gunzip.
-    # The input file is not deleted, however, nor are any other gzip
-    # options or features supported.
-    args = sys.argv[1:]
-    decompress = args and args[0] == "-d"
-    if decompress:
-        args = args[1:]
-    if not args:
-        args = ["-"]
-    for arg in args:
-        if decompress:
-            if arg == "-":
-                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
-                g = sys.stdout
-            else:
-                if arg[-3:] != ".gz":
-                    print "filename doesn't end in .gz:", repr(arg)
-                    continue
-                f = open(arg, "rb")
-                g = __builtin__.open(arg[:-3], "wb")
-        else:
-            if arg == "-":
-                f = sys.stdin
-                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
-            else:
-                f = __builtin__.open(arg, "rb")
-                g = open(arg + ".gz", "wb")
-        while True:
-            chunk = f.read(1024)
-            if not chunk:
-                break
-            g.write(chunk)
-        if g is not sys.stdout:
-            g.close()
-        if f is not sys.stdin:
-            f.close()
-
-if __name__ == '__main__':
-    _test()
diff --git a/lib-python/modified-2.7/sqlite3/test/regression.py b/lib-python/modified-2.7/sqlite3/test/regression.py
--- a/lib-python/modified-2.7/sqlite3/test/regression.py
+++ b/lib-python/modified-2.7/sqlite3/test/regression.py
@@ -274,6 +274,18 @@
         cur.execute("UPDATE foo SET id = 3 WHERE id = 1")
         self.assertEqual(cur.description, None)
 
+    def CheckStatementCache(self):
+        cur = self.con.cursor()
+        cur.execute("CREATE TABLE foo (id INTEGER)")
+        values = [(i,) for i in xrange(5)]
+        cur.executemany("INSERT INTO foo (id) VALUES (?)", values)
+
+        cur.execute("SELECT id FROM foo")
+        self.assertEqual(list(cur), values)
+        self.con.commit()
+        cur.execute("SELECT id FROM foo")
+        self.assertEqual(list(cur), values)
+
 def suite():
     regression_suite = unittest.makeSuite(RegressionTests, "Check")
     return unittest.TestSuite((regression_suite,))
diff --git a/lib-python/modified-2.7/ssl.py b/lib-python/modified-2.7/ssl.py
--- a/lib-python/modified-2.7/ssl.py
+++ b/lib-python/modified-2.7/ssl.py
@@ -62,7 +62,6 @@
 from _ssl import OPENSSL_VERSION_NUMBER, OPENSSL_VERSION_INFO, OPENSSL_VERSION
 from _ssl import SSLError
 from _ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED
-from _ssl import PROTOCOL_SSLv2, PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
 from _ssl import RAND_status, RAND_egd, RAND_add
 from _ssl import \
      SSL_ERROR_ZERO_RETURN, \
@@ -74,6 +73,18 @@
      SSL_ERROR_WANT_CONNECT, \
      SSL_ERROR_EOF, \
      SSL_ERROR_INVALID_ERROR_CODE
+from _ssl import PROTOCOL_SSLv3, PROTOCOL_SSLv23, PROTOCOL_TLSv1
+_PROTOCOL_NAMES = {
+    PROTOCOL_TLSv1: "TLSv1",
+    PROTOCOL_SSLv23: "SSLv23",
+    PROTOCOL_SSLv3: "SSLv3",
+}
+try:
+    from _ssl import PROTOCOL_SSLv2
+except ImportError:
+    pass
+else:
+    _PROTOCOL_NAMES[PROTOCOL_SSLv2] = "SSLv2"
 
 from socket import socket, _fileobject, error as socket_error
 from socket import getnameinfo as _getnameinfo
@@ -400,16 +411,7 @@
     return DER_cert_to_PEM_cert(dercert)
 
 def get_protocol_name(protocol_code):
-    if protocol_code == PROTOCOL_TLSv1:
-        return "TLSv1"
-    elif protocol_code == PROTOCOL_SSLv23:
-        return "SSLv23"
-    elif protocol_code == PROTOCOL_SSLv2:
-        return "SSLv2"
-    elif protocol_code == PROTOCOL_SSLv3:
-        return "SSLv3"
-    else:
-        return "<unknown>"
+    return _PROTOCOL_NAMES.get(protocol_code, '<unknown>')
 
 
 # a replacement for the old socket.ssl function
diff --git a/lib-python/modified-2.7/tarfile.py b/lib-python/modified-2.7/tarfile.py
--- a/lib-python/modified-2.7/tarfile.py
+++ b/lib-python/modified-2.7/tarfile.py
@@ -252,8 +252,8 @@
        the high bit set. So we calculate two checksums, unsigned and
        signed.
     """
-    unsigned_chksum = 256 + sum(struct.unpack("148B8x356B", buf[:512]))
-    signed_chksum = 256 + sum(struct.unpack("148b8x356b", buf[:512]))
+    unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
+    signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
     return unsigned_chksum, signed_chksum
 
 def copyfileobj(src, dst, length=None):
@@ -265,6 +265,7 @@
     if length is None:
         shutil.copyfileobj(src, dst)
         return
+
     BUFSIZE = 16 * 1024
     blocks, remainder = divmod(length, BUFSIZE)
     for b in xrange(blocks):
@@ -801,19 +802,19 @@
         if self.closed:
             raise ValueError("I/O operation on closed file")
 
+        buf = ""
         if self.buffer:
             if size is None:
-                buf = self.buffer + self.fileobj.read()
+                buf = self.buffer
                 self.buffer = ""
             else:
                 buf = self.buffer[:size]
                 self.buffer = self.buffer[size:]
-                buf += self.fileobj.read(size - len(buf))
+
+        if size is None:
+            buf += self.fileobj.read()
         else:
-            if size is None:
-                buf = self.fileobj.read()
-            else:
-                buf = self.fileobj.read(size)
+            buf += self.fileobj.read(size - len(buf))
 
         self.position += len(buf)
         return buf
diff --git a/lib-python/modified-2.7/test/test_multiprocessing.py b/lib-python/modified-2.7/test/test_multiprocessing.py
--- a/lib-python/modified-2.7/test/test_multiprocessing.py
+++ b/lib-python/modified-2.7/test/test_multiprocessing.py
@@ -510,7 +510,6 @@
 
         p.join()
 
-    @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
     def test_qsize(self):
         q = self.Queue()
         try:
@@ -532,7 +531,6 @@
             time.sleep(DELTA)
             q.task_done()
 
-    @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
     def test_task_done(self):
         queue = self.JoinableQueue()
 
@@ -1091,7 +1089,6 @@
 class _TestPoolWorkerLifetime(BaseTestCase):
 
     ALLOWED_TYPES = ('processes', )
-    @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
     def test_pool_worker_lifetime(self):
         p = multiprocessing.Pool(3, maxtasksperchild=10)
         self.assertEqual(3, len(p._pool))
@@ -1280,7 +1277,6 @@
         queue = manager.get_queue()
         queue.put('hello world')
 
-    @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
     def test_rapid_restart(self):
         authkey = os.urandom(32)
         manager = QueueManager(
@@ -1297,6 +1293,7 @@
         queue = manager.get_queue()
         self.assertEqual(queue.get(), 'hello world')
         del queue
+        test_support.gc_collect()
         manager.shutdown()
         manager = QueueManager(
             address=addr, authkey=authkey, serializer=SERIALIZER)
@@ -1573,7 +1570,6 @@
 
     ALLOWED_TYPES = ('processes',)
 
-    @unittest.skipIf(os.name == 'posix', "PYPY: FIXME")
     def test_heap(self):
         iterations = 5000
         maxblocks = 50
diff --git a/lib-python/modified-2.7/test/test_ssl.py b/lib-python/modified-2.7/test/test_ssl.py
--- a/lib-python/modified-2.7/test/test_ssl.py
+++ b/lib-python/modified-2.7/test/test_ssl.py
@@ -58,32 +58,35 @@
 
 # Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
 def skip_if_broken_ubuntu_ssl(func):
-    # We need to access the lower-level wrapper in order to create an
-    # implicit SSL context without trying to connect or listen.
-    try:
-        import _ssl
-    except ImportError:
-        # The returned function won't get executed, just ignore the error
-        pass
-    @functools.wraps(func)
-    def f(*args, **kwargs):
+    if hasattr(ssl, 'PROTOCOL_SSLv2'):
+        # We need to access the lower-level wrapper in order to create an
+        # implicit SSL context without trying to connect or listen.
         try:
-            s = socket.socket(socket.AF_INET)
-            _ssl.sslwrap(s._sock, 0, None, None,
-                         ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None)
-        except ssl.SSLError as e:
-            if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
-                platform.linux_distribution() == ('debian', 'squeeze/sid', '')
-                and 'Invalid SSL protocol variant specified' in str(e)):
-                raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
-        return func(*args, **kwargs)
-    return f
+            import _ssl
+        except ImportError:
+            # The returned function won't get executed, just ignore the error
+            pass
+        @functools.wraps(func)
+        def f(*args, **kwargs):
+            try:
+                s = socket.socket(socket.AF_INET)
+                _ssl.sslwrap(s._sock, 0, None, None,
+                             ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None)
+            except ssl.SSLError as e:
+                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
+                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')
+                    and 'Invalid SSL protocol variant specified' in str(e)):
+                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
+            return func(*args, **kwargs)
+        return f
+    else:
+        return func
 
 
 class BasicSocketTests(unittest.TestCase):
 
     def test_constants(self):
-        ssl.PROTOCOL_SSLv2
+        #ssl.PROTOCOL_SSLv2
         ssl.PROTOCOL_SSLv23
         ssl.PROTOCOL_SSLv3
         ssl.PROTOCOL_TLSv1
@@ -966,7 +969,8 @@
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
-            try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
+            if hasattr(ssl, 'PROTOCOL_SSLv2'):
+                try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False)
             try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
 
@@ -978,7 +982,8 @@
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
-            try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
+            if hasattr(ssl, 'PROTOCOL_SSLv2'):
+                try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
             try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False)
 
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -54,7 +54,8 @@
     def get_ffi_argtype(self):
         if self._ffiargtype:
             return self._ffiargtype
-        return _shape_to_ffi_type(self._ffiargshape)
+        self._ffiargtype = _shape_to_ffi_type(self._ffiargshape)
+        return self._ffiargtype
 
     def _CData_output(self, resbuffer, base=None, index=-1):
         #assert isinstance(resbuffer, _rawffi.ArrayInstance)
@@ -225,6 +226,7 @@
     'Z' : _ffi.types.void_p,
     'X' : _ffi.types.void_p,
     'v' : _ffi.types.sshort,
+    '?' : _ffi.types.ubyte,
     }
 
 
diff --git a/lib_pypy/_elementtree.py b/lib_pypy/_elementtree.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_elementtree.py
@@ -0,0 +1,6 @@
+# Just use ElementTree.
+
+from xml.etree import ElementTree
+
+globals().update(ElementTree.__dict__)
+del __all__
diff --git a/lib_pypy/_functools.py b/lib_pypy/_functools.py
--- a/lib_pypy/_functools.py
+++ b/lib_pypy/_functools.py
@@ -14,10 +14,9 @@
             raise TypeError("the first argument must be callable")
         self.func = func
         self.args = args
-        self.keywords = keywords
+        self.keywords = keywords or None
 
     def __call__(self, *fargs, **fkeywords):
-        newkeywords = self.keywords.copy()
-        newkeywords.update(fkeywords)
-        return self.func(*(self.args + fargs), **newkeywords)
-
+        if self.keywords is not None:
+            fkeywords = dict(self.keywords, **fkeywords)
+        return self.func(*(self.args + fargs), **fkeywords)
diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py
--- a/lib_pypy/_pypy_interact.py
+++ b/lib_pypy/_pypy_interact.py
@@ -56,6 +56,10 @@
                 prompt = getattr(sys, 'ps1', '>>> ')
             try:
                 line = raw_input(prompt)
+                # Can be None if sys.stdin was redefined
+                encoding = getattr(sys.stdin, 'encoding', None)
+                if encoding and not isinstance(line, unicode):
+                    line = line.decode(encoding)
             except EOFError:
                 console.write("\n")
                 break
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -293,7 +293,7 @@
         #
         if stat.in_use:
             stat = Statement(self.connection, sql)
-        stat.set_cursor_and_factory(cursor, row_factory)
+        stat.set_row_factory(row_factory)
         return stat
 
 
@@ -705,6 +705,8 @@
         from sqlite3.dump import _iterdump
         return _iterdump(self)
 
+DML, DQL, DDL = range(3)
+
 class Cursor(object):
     def __init__(self, con):
         if not isinstance(con, Connection):
@@ -735,9 +737,9 @@
         self.statement = self.connection.statement_cache.get(sql, self, self.row_factory)
 
         if self.connection._isolation_level is not None:
-            if self.statement.kind == "DDL":
+            if self.statement.kind == DDL:
                 self.connection.commit()
-            elif self.statement.kind == "DML":
+            elif self.statement.kind == DML:
                 self.connection._begin()
 
         self.statement.set_params(params)
@@ -748,18 +750,18 @@
             self.statement.reset()
             raise self.connection._get_exception(ret)
 
-        if self.statement.kind == "DQL"and ret == SQLITE_ROW:
+        if self.statement.kind == DQL and ret == SQLITE_ROW:
             self.statement._build_row_cast_map()
-            self.statement._readahead()
+            self.statement._readahead(self)
         else:
             self.statement.item = None
             self.statement.exhausted = True
 
-        if self.statement.kind in ("DML", "DDL"):
+        if self.statement.kind == DML or self.statement.kind == DDL:
             self.statement.reset()
 
         self.rowcount = -1
-        if self.statement.kind == "DML":
+        if self.statement.kind == DML:
             self.rowcount = sqlite.sqlite3_changes(self.connection.db)
 
         return self
@@ -771,8 +773,8 @@
             sql = sql.encode("utf-8")
         self._check_closed()
         self.statement = self.connection.statement_cache.get(sql, self, self.row_factory)
-        
-        if self.statement.kind == "DML":
+
+        if self.statement.kind == DML:
             self.connection._begin()
         else:
             raise ProgrammingError, "executemany is only for DML statements"
@@ -824,7 +826,7 @@
         return self
 
     def __iter__(self):
-        return self.statement
+        return iter(self.fetchone, None)
 
     def _check_reset(self):
         if self.reset:
@@ -841,7 +843,7 @@
             return None
 
         try:
-            return self.statement.next()
+            return self.statement.next(self)
         except StopIteration:
             return None
 
@@ -855,7 +857,7 @@
         if size is None:
             size = self.arraysize
         lst = []
-        for row in self.statement:
+        for row in self:
             lst.append(row)
             if len(lst) == size:
                 break
@@ -866,7 +868,7 @@
         self._check_reset()
         if self.statement is None:
             return []
-        return list(self.statement)
+        return list(self)
 
     def _getdescription(self):
         if self._description is None:
@@ -904,16 +906,15 @@
         self.sql = sql # DEBUG ONLY
         first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper()
         if first_word in ("INSERT", "UPDATE", "DELETE", "REPLACE"):
-            self.kind = "DML"
+            self.kind = DML
         elif first_word in ("SELECT", "PRAGMA"):
-            self.kind = "DQL"
+            self.kind = DQL
         else:
-            self.kind = "DDL"
+            self.kind = DDL
         self.exhausted = False
         self.in_use = False
         #
-        # set by set_cursor_and_factory
-        self.cur = None
+        # set by set_row_factory
         self.row_factory = None
 
         self.statement = c_void_p()
@@ -923,7 +924,7 @@
         if ret == SQLITE_OK and self.statement.value is None:
             # an empty statement, we work around that, as it's the least trouble
             ret = sqlite.sqlite3_prepare_v2(self.con.db, "select 42", -1, byref(self.statement), byref(next_char))
-            self.kind = "DQL"
+            self.kind = DQL
 
         if ret != SQLITE_OK:
             raise self.con._get_exception(ret)
@@ -935,8 +936,7 @@
 
         self._build_row_cast_map()
 
-    def set_cursor_and_factory(self, cur, row_factory):
-        self.cur = weakref.ref(cur)
+    def set_row_factory(self, row_factory):
         self.row_factory = row_factory
 
     def _build_row_cast_map(self):
@@ -1039,10 +1039,7 @@
                     raise ProgrammingError("missing parameter '%s'" %param)
                 self.set_param(idx, param)
 
-    def __iter__(self):
-        return self
-
-    def next(self):
+    def next(self, cursor):
         self.con._check_closed()
         self.con._check_thread()
         if self.exhausted:
@@ -1058,10 +1055,10 @@
             sqlite.sqlite3_reset(self.statement)
             raise exc
 
-        self._readahead()
+        self._readahead(cursor)
         return item
 
-    def _readahead(self):
+    def _readahead(self, cursor):
         self.column_count = sqlite.sqlite3_column_count(self.statement)
         row = []
         for i in xrange(self.column_count):
@@ -1096,13 +1093,14 @@
 
         row = tuple(row)
         if self.row_factory is not None:
-            row = self.row_factory(self.cur(), row)
+            row = self.row_factory(cursor, row)
         self.item = row
 
     def reset(self):
         self.row_cast_map = None
         ret = sqlite.sqlite3_reset(self.statement)
         self.in_use = False
+        self.exhausted = False
         return ret
 
     def finalize(self):
@@ -1118,7 +1116,7 @@
         self.statement = None
 
     def _get_description(self):
-        if self.kind == "DML":
+        if self.kind == DML:
             return None
         desc = []
         for i in xrange(sqlite.sqlite3_column_count(self.statement)):
diff --git a/lib_pypy/distributed/test/test_distributed.py b/lib_pypy/distributed/test/test_distributed.py
--- a/lib_pypy/distributed/test/test_distributed.py
+++ b/lib_pypy/distributed/test/test_distributed.py
@@ -9,7 +9,7 @@
 class AppTestDistributed(object):
     def setup_class(cls):
         cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
-            "usemodules":("_stackless",)})
+            "usemodules":("_continuation",)})
 
     def test_init(self):
         import distributed
@@ -91,10 +91,8 @@
 
 class AppTestDistributedTasklets(object):
     spaceconfig = {"objspace.std.withtproxy": True,
-                   "objspace.usemodules._stackless": True}
+                   "objspace.usemodules._continuation": True}
     def setup_class(cls):
-        #cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
-        #    "usemodules":("_stackless",)})
         cls.w_test_env = cls.space.appexec([], """():
         from distributed import test_env
         return test_env
diff --git a/lib_pypy/distributed/test/test_greensock.py b/lib_pypy/distributed/test/test_greensock.py
--- a/lib_pypy/distributed/test/test_greensock.py
+++ b/lib_pypy/distributed/test/test_greensock.py
@@ -10,7 +10,7 @@
         if not option.runappdirect:
             py.test.skip("Cannot run this on top of py.py because of PopenGateway")
         cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
-                                       "usemodules":("_stackless",)})
+                                       "usemodules":("_continuation",)})
         cls.w_remote_side_code = cls.space.appexec([], """():
         import sys
         sys.path.insert(0, '%s')
diff --git a/lib_pypy/distributed/test/test_socklayer.py b/lib_pypy/distributed/test/test_socklayer.py
--- a/lib_pypy/distributed/test/test_socklayer.py
+++ b/lib_pypy/distributed/test/test_socklayer.py
@@ -9,7 +9,8 @@
 class AppTestSocklayer:
     def setup_class(cls):
         cls.space = gettestobjspace(**{"objspace.std.withtproxy": True,
-                                       "usemodules":("_stackless","_socket", "select")})
+                                       "usemodules":("_continuation",
+                                                     "_socket", "select")})
     
     def test_socklayer(self):
         class X(object):
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -48,18 +48,23 @@
     def switch(self, *args):
         "Switch execution to this greenlet, optionally passing the values "
         "given as argument(s).  Returns the value passed when switching back."
-        return self.__switch(_continulet.switch, args)
+        return self.__switch('switch', args)
 
     def throw(self, typ=GreenletExit, val=None, tb=None):
         "raise exception in greenlet, return value passed when switching back"
-        return self.__switch(_continulet.throw, typ, val, tb)
+        return self.__switch('throw', typ, val, tb)
 
-    def __switch(target, unbound_method, *args):
+    def __switch(target, methodname, *args):
         current = getcurrent()
         #
         while not target:
             if not target.__started:
-                _continulet.__init__(target, _greenlet_start, *args)
+                if methodname == 'switch':
+                    greenlet_func = _greenlet_start
+                else:
+                    greenlet_func = _greenlet_throw
+                _continulet.__init__(target, greenlet_func, *args)
+                methodname = 'switch'
                 args = ()
                 target.__started = True
                 break
@@ -70,22 +75,8 @@
             target = target.parent
         #
         try:
-            if current.__main:
-                if target.__main:
-                    # switch from main to main
-                    if unbound_method == _continulet.throw:
-                        raise args[0], args[1], args[2]
-                    (args,) = args
-                else:
-                    # enter from main to target
-                    args = unbound_method(target, *args)
-            else:
-                if target.__main:
-                    # leave to go to target=main
-                    args = unbound_method(current, *args)
-                else:
-                    # switch from non-main to non-main
-                    args = unbound_method(current, *args, to=target)
+            unbound_method = getattr(_continulet, methodname)
+            args = unbound_method(current, *args, to=target)
         except GreenletExit, e:
             args = (e,)
         finally:
@@ -105,7 +96,16 @@
 
     @property
     def gr_frame(self):
-        raise NotImplementedError("attribute 'gr_frame' of greenlet objects")
+        # xxx this doesn't work when called on either the current or
+        # the main greenlet of another thread
+        if self is getcurrent():
+            return None
+        if self.__main:
+            self = getcurrent()
+        f = _continulet.__reduce__(self)[2][0]
+        if not f:
+            return None
+        return f.f_back.f_back.f_back   # go past start(), __switch(), switch()
 
 # ____________________________________________________________
 # Internal stuff
@@ -133,6 +133,12 @@
     try:
         res = greenlet.run(*args)
     finally:
-        if greenlet.parent is not _tls.main:
-            _continuation.permute(greenlet, greenlet.parent)
+        _continuation.permute(greenlet, greenlet.parent)
     return (res,)
+
+def _greenlet_throw(greenlet, exc, value, tb):
+    _tls.current = greenlet
+    try:
+        raise exc, value, tb
+    finally:
+        _continuation.permute(greenlet, greenlet.parent)
diff --git a/lib_pypy/pypy_test/test_coroutine.py b/lib_pypy/pypy_test/test_coroutine.py
--- a/lib_pypy/pypy_test/test_coroutine.py
+++ b/lib_pypy/pypy_test/test_coroutine.py
@@ -2,7 +2,7 @@
 from py.test import skip, raises
 
 try:
-    from lib_pypy.stackless import coroutine, CoroutineExit
+    from stackless import coroutine, CoroutineExit
 except ImportError, e:
     skip('cannot import stackless: %s' % (e,))
 
@@ -20,10 +20,6 @@
         assert not co.is_zombie
 
     def test_is_zombie_del_without_frame(self):
-        try:
-            import _stackless # are we on pypy with a stackless build?
-        except ImportError:
-            skip("only works on pypy-c-stackless")
         import gc
         res = []
         class MyCoroutine(coroutine):
@@ -45,10 +41,6 @@
         assert res[0], "is_zombie was False in __del__"
 
     def test_is_zombie_del_with_frame(self):
-        try:
-            import _stackless # are we on pypy with a stackless build?
-        except ImportError:
-            skip("only works on pypy-c-stackless")
         import gc
         res = []
         class MyCoroutine(coroutine):
diff --git a/lib_pypy/pypy_test/test_stackless_pickling.py b/lib_pypy/pypy_test/test_stackless_pickling.py
--- a/lib_pypy/pypy_test/test_stackless_pickling.py
+++ b/lib_pypy/pypy_test/test_stackless_pickling.py
@@ -1,7 +1,3 @@
-"""
-this test should probably not run from CPython or py.py.
-I'm not entirely sure, how to do that.
-"""
 from __future__ import absolute_import
 from py.test import skip
 try:
@@ -16,11 +12,15 @@
 
 class Test_StacklessPickling:
 
+    def test_pickle_main_coroutine(self):
+        import stackless, pickle
+        s = pickle.dumps(stackless.coroutine.getcurrent())
+        print s
+        c = pickle.loads(s)
+        assert c is stackless.coroutine.getcurrent()
+
     def test_basic_tasklet_pickling(self):
-        try:
-            import stackless
-        except ImportError:
-            skip("can't load stackless and don't know why!!!")
+        import stackless
         from stackless import run, schedule, tasklet
         import pickle
 
diff --git a/lib_pypy/pyrepl/completing_reader.py b/lib_pypy/pyrepl/completing_reader.py
--- a/lib_pypy/pyrepl/completing_reader.py
+++ b/lib_pypy/pyrepl/completing_reader.py
@@ -229,7 +229,8 @@
 
     def after_command(self, cmd):
         super(CompletingReader, self).after_command(cmd)
-        if not isinstance(cmd, complete) and not isinstance(cmd, self_insert):
+        if not isinstance(cmd, self.commands['complete']) \
+           and not isinstance(cmd, self.commands['self_insert']):
             self.cmpltn_reset()
 
     def calc_screen(self):
diff --git a/lib_pypy/pyrepl/reader.py b/lib_pypy/pyrepl/reader.py
--- a/lib_pypy/pyrepl/reader.py
+++ b/lib_pypy/pyrepl/reader.py
@@ -576,7 +576,7 @@
         self.console.push_char(char)
         self.handle1(0)
     
-    def readline(self):
+    def readline(self, returns_unicode=False):
         """Read a line.  The implementation of this method also shows
         how to drive Reader if you want more control over the event
         loop."""
@@ -585,6 +585,8 @@
             self.refresh()
             while not self.finished:
                 self.handle1()
+            if returns_unicode:
+                return self.get_unicode()
             return self.get_buffer()
         finally:
             self.restore()
diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py
--- a/lib_pypy/pyrepl/readline.py
+++ b/lib_pypy/pyrepl/readline.py
@@ -198,7 +198,7 @@
         reader.ps1 = prompt
         return reader.readline()
 
-    def multiline_input(self, more_lines, ps1, ps2):
+    def multiline_input(self, more_lines, ps1, ps2, returns_unicode=False):
         """Read an input on possibly multiple lines, asking for more
         lines as long as 'more_lines(unicodetext)' returns an object whose
         boolean value is true.
@@ -209,7 +209,7 @@
             reader.more_lines = more_lines
             reader.ps1 = reader.ps2 = ps1
             reader.ps3 = reader.ps4 = ps2
-            return reader.readline()
+            return reader.readline(returns_unicode=returns_unicode)
         finally:
             reader.more_lines = saved
 
diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py
--- a/lib_pypy/pyrepl/simple_interact.py
+++ b/lib_pypy/pyrepl/simple_interact.py
@@ -54,7 +54,8 @@
             ps1 = getattr(sys, 'ps1', '>>> ')
             ps2 = getattr(sys, 'ps2', '... ')
             try:
-                statement = multiline_input(more_lines, ps1, ps2)
+                statement = multiline_input(more_lines, ps1, ps2,
+                                            returns_unicode=True)
             except EOFError:
                 break
             more = console.push(statement)
diff --git a/lib_pypy/stackless.py b/lib_pypy/stackless.py
--- a/lib_pypy/stackless.py
+++ b/lib_pypy/stackless.py
@@ -4,121 +4,110 @@
 Please refer to their documentation.
 """
 
-DEBUG = True
 
-def dprint(*args):
-    for arg in args:
-        print arg,
-    print
+import _continuation
 
-import traceback
-import sys
+class TaskletExit(Exception):
+    pass
+
+CoroutineExit = TaskletExit
+
+
+def _coroutine_getcurrent():
+    "Returns the current coroutine (i.e. the one which called this function)."
+    try:
+        return _tls.current_coroutine
+    except AttributeError:
+        # first call in this thread: current == main
+        return _coroutine_getmain()
+
+def _coroutine_getmain():
+    try:
+        return _tls.main_coroutine
+    except AttributeError:
+        # create the main coroutine for this thread
+        continulet = _continuation.continulet
+        main = coroutine()
+        main._frame = continulet.__new__(continulet)
+        main._is_started = -1
+        _tls.current_coroutine = _tls.main_coroutine = main
+        return _tls.main_coroutine
+
+
+class coroutine(object):
+    _is_started = 0      # 0=no, 1=yes, -1=main
+
+    def __init__(self):
+        self._frame = None
+
+    def bind(self, func, *argl, **argd):
+        """coro.bind(f, *argl, **argd) -> None.
+           binds function f to coro. f will be called with
+           arguments *argl, **argd
+        """
+        if self.is_alive:
+            raise ValueError("cannot bind a bound coroutine")
+        def run(c):
+            _tls.current_coroutine = self
+            self._is_started = 1
+            return func(*argl, **argd)
+        self._is_started = 0
+        self._frame = _continuation.continulet(run)
+
+    def switch(self):
+        """coro.switch() -> returnvalue
+           switches to coroutine coro. If the bound function
+           f finishes, the returnvalue is that of f, otherwise
+           None is returned
+        """
+        current = _coroutine_getcurrent()
+        try:
+            current._frame.switch(to=self._frame)
+        finally:
+            _tls.current_coroutine = current
+
+    def kill(self):
+        """coro.kill() : kill coroutine coro"""
+        current = _coroutine_getcurrent()
+        try:
+            current._frame.throw(CoroutineExit, to=self._frame)
+        finally:
+            _tls.current_coroutine = current
+
+    @property
+    def is_alive(self):
+        return self._is_started < 0 or (
+            self._frame is not None and self._frame.is_pending())
+
+    @property
+    def is_zombie(self):
+        return self._is_started > 0 and not self._frame.is_pending()
+
+    getcurrent = staticmethod(_coroutine_getcurrent)
+
+    def __reduce__(self):
+        if self._is_started < 0:
+            return _coroutine_getmain, ()
+        else:
+            return type(self), (), self.__dict__
+
+
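As a usage sketch of the rewritten class above (illustrative only, assuming a
PyPy build that provides the `_continuation` module so `stackless` imports):

    from stackless import coroutine

    def worker(name, count):
        # runs inside the coroutine once it is switched to
        for i in range(count):
            print name, i

    co = coroutine()
    co.bind(worker, "task", 3)   # bind worker() and its arguments
    co.switch()                  # run worker() to completion
    assert not co.is_alive       # the bound function has finished
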
 try:
-    # If _stackless can be imported then TaskletExit and CoroutineExit are 
-    # automatically added to the builtins.
-    from _stackless import coroutine, greenlet
-except ImportError: # we are running from CPython
-    from greenlet import greenlet, GreenletExit
-    TaskletExit = CoroutineExit = GreenletExit
-    del GreenletExit
-    try:
-        from functools import partial
-    except ImportError: # we are not running python 2.5
-        class partial(object):
-            # just enough of 'partial' to be usefull
-            def __init__(self, func, *argl, **argd):
-                self.func = func
-                self.argl = argl
-                self.argd = argd
+    from thread import _local
+except ImportError:
+    class _local(object):    # assume no threads
+        pass
 
-            def __call__(self):
-                return self.func(*self.argl, **self.argd)
+_tls = _local()
 
-    class GWrap(greenlet):
-        """This is just a wrapper around greenlets to allow
-           to stick additional attributes to a greenlet.
-           To be more concrete, we need a backreference to
-           the coroutine object"""
 
-    class MWrap(object):
-        def __init__(self,something):
-            self.something = something
+# ____________________________________________________________
 
-        def __getattr__(self, attr):
-            return getattr(self.something, attr)
-
-    class coroutine(object):
-        "we can't have greenlet as a base, because greenlets can't be rebound"
-
-        def __init__(self):
-            self._frame = None
-            self.is_zombie = False
-
-        def __getattr__(self, attr):
-            return getattr(self._frame, attr)
-
-        def __del__(self):
-            self.is_zombie = True
-            del self._frame
-            self._frame = None
-
-        def bind(self, func, *argl, **argd):
-            """coro.bind(f, *argl, **argd) -> None.
-               binds function f to coro. f will be called with
-               arguments *argl, **argd
-            """
-            if self._frame is None or self._frame.dead:
-                self._frame = frame = GWrap()
-                frame.coro = self
-            if hasattr(self._frame, 'run') and self._frame.run:
-                raise ValueError("cannot bind a bound coroutine")
-            self._frame.run = partial(func, *argl, **argd)
-
-        def switch(self):
-            """coro.switch() -> returnvalue
-               switches to coroutine coro. If the bound function
-               f finishes, the returnvalue is that of f, otherwise
-               None is returned
-            """
-            try:
-                return greenlet.switch(self._frame)
-            except TypeError, exp: # self._frame is the main coroutine
-                return greenlet.switch(self._frame.something)
-
-        def kill(self):
-            """coro.kill() : kill coroutine coro"""
-            self._frame.throw()
-
-        def _is_alive(self):
-            if self._frame is None:
-                return False
-            return not self._frame.dead
-        is_alive = property(_is_alive)
-        del _is_alive
-
-        def getcurrent():
-            """coroutine.getcurrent() -> the currently running coroutine"""
-            try:
-                return greenlet.getcurrent().coro
-            except AttributeError:
-                return _maincoro
-        getcurrent = staticmethod(getcurrent)
-
-        def __reduce__(self):
-            raise TypeError, 'pickling is not possible based upon greenlets'
-
-    _maincoro = coroutine()
-    maingreenlet = greenlet.getcurrent()
-    _maincoro._frame = frame = MWrap(maingreenlet)
-    frame.coro = _maincoro
-    del frame
-    del maingreenlet
 
 from collections import deque
 
 import operator
-__all__ = 'run getcurrent getmain schedule tasklet channel coroutine \
-                greenlet'.split()
+__all__ = 'run getcurrent getmain schedule tasklet channel coroutine'.split()
 
 _global_task_id = 0
 _squeue = None
@@ -131,7 +120,8 @@
 def _scheduler_remove(value):
     try:
         del _squeue[operator.indexOf(_squeue, value)]
-    except ValueError:pass
+    except ValueError:
+        pass
 
 def _scheduler_append(value, normal=True):
     if normal:
@@ -157,10 +147,7 @@
     _last_task = next
     assert not next.blocked
     if next is not current:
-        try:
-            next.switch()
-        except CoroutineExit:
-            raise TaskletExit
+        next.switch()
     return current
 
 def set_schedule_callback(callback):
@@ -184,34 +171,6 @@
         raise self.type, self.value, self.traceback
 
 #
-# helpers for pickling
-#
-
-_stackless_primitive_registry = {}
-
-def register_stackless_primitive(thang, retval_expr='None'):
-    import types
-    func = thang
-    if isinstance(thang, types.MethodType):
-        func = thang.im_func
-    code = func.func_code
-    _stackless_primitive_registry[code] = retval_expr
-    # It is not too nice to attach info via the code object, but
-    # I can't think of a better solution without a real transform.
-
-def rewrite_stackless_primitive(coro_state, alive, tempval):
-    flags, frame, thunk, parent = coro_state
-    while frame is not None:
-        retval_expr = _stackless_primitive_registry.get(frame.f_code)
-        if retval_expr:
-            # this tasklet needs to stop pickling here and return its value.
-            tempval = eval(retval_expr, globals(), frame.f_locals)
-            coro_state = flags, frame, thunk, parent
-            break
-        frame = frame.f_back
-    return coro_state, alive, tempval
-
-#
 #
 
 class channel(object):
@@ -363,8 +322,6 @@
         """
         return self._channel_action(None, -1)
 
-    register_stackless_primitive(receive, retval_expr='receiver.tempval')
-
     def send_exception(self, exp_type, msg):
         self.send(bomb(exp_type, exp_type(msg)))
 
@@ -381,9 +338,8 @@
         the runnables list.
         """
         return self._channel_action(msg, 1)
-            
-    register_stackless_primitive(send)
-            
+
+
 class tasklet(coroutine):
     """
     A tasklet object represents a tiny task in a Python thread.
@@ -455,6 +411,7 @@
         def _func():
             try:
                 try:
+                    coroutine.switch(back)
                     func(*argl, **argd)
                 except TaskletExit:
                     pass
@@ -464,6 +421,8 @@
 
         self.func = None
         coroutine.bind(self, _func)
+        back = _coroutine_getcurrent()
+        coroutine.switch(self)
         self.alive = True
         _scheduler_append(self)
         return self
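
The ordering above is worth spelling out: setup() binds `_func`, switches into
the new coroutine once so that `_func` starts and immediately yields back via
`coroutine.switch(back)`, and only then appends the tasklet to the scheduler
queue.  A rough sketch of the control flow (illustrative, not part of the
module):

    # creator                            new tasklet
    # -------                            -----------
    # coroutine.bind(self, _func)
    # back = _coroutine_getcurrent()
    # coroutine.switch(self)  -------->  _func() starts
    #                         <--------  coroutine.switch(back)   # yields at once
    # self.alive = True
    # _scheduler_append(self)
    # ... later, schedule() switches in again and func(*argl, **argd) runs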
@@ -486,39 +445,6 @@
             raise RuntimeError, "The current tasklet cannot be removed."
             # not sure if I will revive this  " Use t=tasklet().capture()"
         _scheduler_remove(self)
-        
-    def __reduce__(self):
-        one, two, coro_state = coroutine.__reduce__(self)
-        assert one is coroutine
-        assert two == ()
-        # we want to get rid of the parent thing.
-        # for now, we just drop it
-        a, frame, c, d = coro_state
-
-        # Removing all frames related to stackless.py.
-        # They point to stuff we don't want to be pickled.
-
-        pickleframe = frame
-        while frame is not None:
-            if frame.f_code == schedule.func_code:
-                # Removing everything including and after the
-                # call to stackless.schedule()
-                pickleframe = frame.f_back
-                break
-            frame = frame.f_back
-        if d:
-            assert isinstance(d, coroutine)
-        coro_state = a, pickleframe, c, None
-        coro_state, alive, tempval = rewrite_stackless_primitive(coro_state, self.alive, self.tempval)
-        inst_dict = self.__dict__.copy()
-        inst_dict.pop('tempval', None)
-        return self.__class__, (), (coro_state, alive, tempval, inst_dict)
-
-    def __setstate__(self, (coro_state, alive, tempval, inst_dict)):
-        coroutine.__setstate__(self, coro_state)
-        self.__dict__.update(inst_dict)
-        self.alive = alive
-        self.tempval = tempval
 
 def getmain():
     """
@@ -607,30 +533,7 @@
     global _last_task
     _global_task_id = 0
     _main_tasklet = coroutine.getcurrent()
-    try:
-        _main_tasklet.__class__ = tasklet
-    except TypeError: # we are running pypy-c
-        class TaskletProxy(object):
-            """TaskletProxy is needed to give the _main_coroutine tasklet behaviour"""
-            def __init__(self, coro):
-                self._coro = coro
-
-            def __getattr__(self,attr):
-                return getattr(self._coro,attr)
-
-            def __str__(self):
-                return '<tasklet %s a:%s>' % (self._task_id, self.is_alive)
-
-            def __reduce__(self):
-                return getmain, ()
-
-            __repr__ = __str__
-
-
-        global _main_coroutine
-        _main_coroutine = _main_tasklet
-        _main_tasklet = TaskletProxy(_main_tasklet)
-        assert _main_tasklet.is_alive and not _main_tasklet.is_zombie
+    _main_tasklet.__class__ = tasklet         # XXX HAAAAAAAAAAAAAAAAAAAAACK
     _last_task = _main_tasklet
     tasklet._init.im_func(_main_tasklet, label='main')
     _squeue = deque()
diff --git a/py/_code/source.py b/py/_code/source.py
--- a/py/_code/source.py
+++ b/py/_code/source.py
@@ -139,7 +139,7 @@
                 trysource = self[start:end]
                 if trysource.isparseable():
                     return start, end
-        return start, end
+        return start, len(self)
 
     def getblockend(self, lineno):
         # XXX
diff --git a/pypy/annotation/annrpython.py b/pypy/annotation/annrpython.py
--- a/pypy/annotation/annrpython.py
+++ b/pypy/annotation/annrpython.py
@@ -149,7 +149,7 @@
         desc = olddesc.bind_self(classdef)
         args = self.bookkeeper.build_args("simple_call", args_s[:])
         desc.consider_call_site(self.bookkeeper, desc.getcallfamily(), [desc],
-            args, annmodel.s_ImpossibleValue)
+            args, annmodel.s_ImpossibleValue, None)
         result = []
         def schedule(graph, inputcells):
             result.append((graph, inputcells))
diff --git a/pypy/annotation/bookkeeper.py b/pypy/annotation/bookkeeper.py
--- a/pypy/annotation/bookkeeper.py
+++ b/pypy/annotation/bookkeeper.py
@@ -209,8 +209,8 @@
                 self.consider_call_site(call_op)
 
             for pbc, args_s in self.emulated_pbc_calls.itervalues():
-                self.consider_call_site_for_pbc(pbc, 'simple_call', 
-                                                args_s, s_ImpossibleValue)
+                self.consider_call_site_for_pbc(pbc, 'simple_call',
+                                                args_s, s_ImpossibleValue, None)
             self.emulated_pbc_calls = {}
         finally:
             self.leave()
@@ -257,18 +257,18 @@
             args_s = [lltype_to_annotation(adtmeth.ll_ptrtype)] + args_s
         if isinstance(s_callable, SomePBC):
             s_result = binding(call_op.result, s_ImpossibleValue)
-            self.consider_call_site_for_pbc(s_callable,
-                                            call_op.opname,
-                                            args_s, s_result)
+            self.consider_call_site_for_pbc(s_callable, call_op.opname, args_s,
+                                            s_result, call_op)
 
-    def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result):
+    def consider_call_site_for_pbc(self, s_callable, opname, args_s, s_result,
+                                   call_op):
         descs = list(s_callable.descriptions)
         if not descs:
             return
         family = descs[0].getcallfamily()
         args = self.build_args(opname, args_s)
         s_callable.getKind().consider_call_site(self, family, descs, args,
-                                                s_result)
+                                                s_result, call_op)
 
     def getuniqueclassdef(self, cls):
         """Get the ClassDef associated with the given user cls.
@@ -656,6 +656,7 @@
                 whence = None
             else:
                 whence = emulated # callback case
+            op = None
             s_previous_result = s_ImpossibleValue
 
         def schedule(graph, inputcells):
@@ -663,7 +664,7 @@
 
         results = []
         for desc in descs:
-            results.append(desc.pycall(schedule, args, s_previous_result))
+            results.append(desc.pycall(schedule, args, s_previous_result, op))
         s_result = unionof(*results)
         return s_result
 
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py
--- a/pypy/annotation/description.py
+++ b/pypy/annotation/description.py
@@ -255,7 +255,11 @@
             raise TypeError, "signature mismatch: %s" % e.getmsg(self.name)
         return inputcells
 
-    def specialize(self, inputcells):
+    def specialize(self, inputcells, op=None):
+        if (op is None and
+            getattr(self.bookkeeper, "position_key", None) is not None):
+            _, block, i = self.bookkeeper.position_key
+            op = block.operations[i]
         if self.specializer is None:
             # get the specializer based on the tag of the 'pyobj'
             # (if any), according to the current policy
@@ -269,11 +273,14 @@
                 enforceargs = Sig(*enforceargs)
                 self.pyobj._annenforceargs_ = enforceargs
             enforceargs(self, inputcells) # can modify inputcells in-place
-        return self.specializer(self, inputcells)
+        if getattr(self.pyobj, '_annspecialcase_', '').endswith("call_location"):
+            return self.specializer(self, inputcells, op)
+        else:
+            return self.specializer(self, inputcells)
 
-    def pycall(self, schedule, args, s_previous_result):
+    def pycall(self, schedule, args, s_previous_result, op=None):
         inputcells = self.parse_arguments(args)
-        result = self.specialize(inputcells)
+        result = self.specialize(inputcells, op)
         if isinstance(result, FunctionGraph):
             graph = result         # common case
             # if that graph has a different signature, we need to re-parse
@@ -296,17 +303,17 @@
                                              None,       # selfclassdef
                                              name)
 
-    def consider_call_site(bookkeeper, family, descs, args, s_result):
+    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
         shape = rawshape(args)
-        row = FunctionDesc.row_to_consider(descs, args)
+        row = FunctionDesc.row_to_consider(descs, args, op)
         family.calltable_add_row(shape, row)
     consider_call_site = staticmethod(consider_call_site)
 
-    def variant_for_call_site(bookkeeper, family, descs, args):
+    def variant_for_call_site(bookkeeper, family, descs, args, op):
         shape = rawshape(args)
         bookkeeper.enter(None)
         try:
-            row = FunctionDesc.row_to_consider(descs, args)
+            row = FunctionDesc.row_to_consider(descs, args, op)
         finally:
             bookkeeper.leave()
         index = family.calltable_lookup_row(shape, row)
@@ -316,7 +323,7 @@
     def rowkey(self):
         return self
 
-    def row_to_consider(descs, args):
+    def row_to_consider(descs, args, op):
         # see comments in CallFamily
         from pypy.annotation.model import s_ImpossibleValue
         row = {}
@@ -324,7 +331,7 @@
             def enlist(graph, ignore):
                 row[desc.rowkey()] = graph
                 return s_ImpossibleValue   # meaningless
-            desc.pycall(enlist, args, s_ImpossibleValue)
+            desc.pycall(enlist, args, s_ImpossibleValue, op)
         return row
     row_to_consider = staticmethod(row_to_consider)
 
@@ -521,7 +528,7 @@
                             "specialization" % (self.name,))
         return self.getclassdef(None)
 
-    def pycall(self, schedule, args, s_previous_result):
+    def pycall(self, schedule, args, s_previous_result, op=None):
         from pypy.annotation.model import SomeInstance, SomeImpossibleValue
         if self.specialize:
             if self.specialize == 'specialize:ctr_location':
@@ -664,7 +671,7 @@
             cdesc = cdesc.basedesc
         return s_result     # common case
 
-    def consider_call_site(bookkeeper, family, descs, args, s_result):
+    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
         from pypy.annotation.model import SomeInstance, SomePBC, s_None
         if len(descs) == 1:
             # call to a single class, look at the result annotation
@@ -709,7 +716,7 @@
             initdescs[0].mergecallfamilies(*initdescs[1:])
             initfamily = initdescs[0].getcallfamily()
             MethodDesc.consider_call_site(bookkeeper, initfamily, initdescs,
-                                          args, s_None)
+                                          args, s_None, op)
     consider_call_site = staticmethod(consider_call_site)
 
     def getallbases(self):
@@ -782,13 +789,13 @@
     def getuniquegraph(self):
         return self.funcdesc.getuniquegraph()
 
-    def pycall(self, schedule, args, s_previous_result):
+    def pycall(self, schedule, args, s_previous_result, op=None):
         from pypy.annotation.model import SomeInstance
         if self.selfclassdef is None:
             raise Exception("calling %r" % (self,))
         s_instance = SomeInstance(self.selfclassdef, flags = self.flags)
         args = args.prepend(s_instance)
-        return self.funcdesc.pycall(schedule, args, s_previous_result)
+        return self.funcdesc.pycall(schedule, args, s_previous_result, op)
 
     def bind_under(self, classdef, name):
         self.bookkeeper.warning("rebinding an already bound %r" % (self,))
@@ -801,10 +808,10 @@
                                              self.name,
                                              flags)
 
-    def consider_call_site(bookkeeper, family, descs, args, s_result):
+    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
         shape = rawshape(args, nextra=1)     # account for the extra 'self'
         funcdescs = [methoddesc.funcdesc for methoddesc in descs]
-        row = FunctionDesc.row_to_consider(descs, args)
+        row = FunctionDesc.row_to_consider(descs, args, op)
         family.calltable_add_row(shape, row)
     consider_call_site = staticmethod(consider_call_site)
 
@@ -956,16 +963,16 @@
         return '<MethodOfFrozenDesc %r of %r>' % (self.funcdesc,
                                                   self.frozendesc)
 
-    def pycall(self, schedule, args, s_previous_result):
+    def pycall(self, schedule, args, s_previous_result, op=None):
         from pypy.annotation.model import SomePBC
         s_self = SomePBC([self.frozendesc])
         args = args.prepend(s_self)
-        return self.funcdesc.pycall(schedule, args, s_previous_result)
+        return self.funcdesc.pycall(schedule, args, s_previous_result, op)
 
-    def consider_call_site(bookkeeper, family, descs, args, s_result):
+    def consider_call_site(bookkeeper, family, descs, args, s_result, op):
         shape = rawshape(args, nextra=1)    # account for the extra 'self'
         funcdescs = [mofdesc.funcdesc for mofdesc in descs]
-        row = FunctionDesc.row_to_consider(descs, args)
+        row = FunctionDesc.row_to_consider(descs, args, op)
         family.calltable_add_row(shape, row)
     consider_call_site = staticmethod(consider_call_site)
 
diff --git a/pypy/annotation/policy.py b/pypy/annotation/policy.py
--- a/pypy/annotation/policy.py
+++ b/pypy/annotation/policy.py
@@ -1,7 +1,7 @@
 # base annotation policy for specialization
 from pypy.annotation.specialize import default_specialize as default
-from pypy.annotation.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype
-from pypy.annotation.specialize import memo
+from pypy.annotation.specialize import specialize_argvalue, specialize_argtype, specialize_arglistitemtype, specialize_arg_or_var
+from pypy.annotation.specialize import memo, specialize_call_location
 # for some reason, model must be imported first,
 # or we create a cycle.
 from pypy.annotation import model as annmodel
@@ -73,8 +73,10 @@
     default_specialize = staticmethod(default)
     specialize__memo = staticmethod(memo)
     specialize__arg = staticmethod(specialize_argvalue) # specialize:arg(N)
+    specialize__arg_or_var = staticmethod(specialize_arg_or_var)
     specialize__argtype = staticmethod(specialize_argtype) # specialize:argtype(N)
     specialize__arglistitemtype = staticmethod(specialize_arglistitemtype)
+    specialize__call_location = staticmethod(specialize_call_location)
 
     def specialize__ll(pol, *args):
         from pypy.rpython.annlowlevel import LowLevelAnnotatorPolicy
diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py
--- a/pypy/annotation/specialize.py
+++ b/pypy/annotation/specialize.py
@@ -353,6 +353,16 @@
     key = tuple(key)
     return maybe_star_args(funcdesc, key, args_s)
 
+def specialize_arg_or_var(funcdesc, args_s, *argindices):
+    for argno in argindices:
+        if not args_s[argno].is_constant():
+            break
+    else:
+        # all constant
+        return specialize_argvalue(funcdesc, args_s, *argindices)
+    # some not constant
+    return maybe_star_args(funcdesc, None, args_s)
+
 def specialize_argtype(funcdesc, args_s, *argindices):
     key = tuple([args_s[i].knowntype for i in argindices])
     for cls in key:
@@ -370,3 +380,7 @@
     else:
         key = s.listdef.listitem.s_value.knowntype
     return maybe_star_args(funcdesc, key, args_s)
+
+def specialize_call_location(funcdesc, args_s, op):
+    assert op is not None
+    return maybe_star_args(funcdesc, op, args_s)
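
For reference, the two new specializers are selected through `_annspecialcase_`
tags, as the tests added below also show; a minimal sketch:

    def fast_path(x):
        return x
    # one graph per constant argument value, or a single generic graph
    # when the argument is not a compile-time constant
    fast_path._annspecialcase_ = 'specialize:arg_or_var(0)'

    def helper(x):
        return x
    # one graph per call site, keyed by the calling operation
    helper._annspecialcase_ = 'specialize:call_location'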
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -1099,8 +1099,8 @@
         allocdesc = a.bookkeeper.getdesc(alloc)
         s_C1 = a.bookkeeper.immutablevalue(C1)
         s_C2 = a.bookkeeper.immutablevalue(C2)
-        graph1 = allocdesc.specialize([s_C1])
-        graph2 = allocdesc.specialize([s_C2])
+        graph1 = allocdesc.specialize([s_C1], None)
+        graph2 = allocdesc.specialize([s_C2], None)
         assert a.binding(graph1.getreturnvar()).classdef == C1df
         assert a.binding(graph2.getreturnvar()).classdef == C2df
         assert graph1 in a.translator.graphs
@@ -1135,8 +1135,8 @@
         allocdesc = a.bookkeeper.getdesc(alloc)
         s_C1 = a.bookkeeper.immutablevalue(C1)
         s_C2 = a.bookkeeper.immutablevalue(C2)
-        graph1 = allocdesc.specialize([s_C1, s_C2])
-        graph2 = allocdesc.specialize([s_C2, s_C2])
+        graph1 = allocdesc.specialize([s_C1, s_C2], None)
+        graph2 = allocdesc.specialize([s_C2, s_C2], None)
         assert a.binding(graph1.getreturnvar()).classdef == C1df
         assert a.binding(graph2.getreturnvar()).classdef == C2df
         assert graph1 in a.translator.graphs
@@ -1194,6 +1194,33 @@
         assert len(executedesc._cache[(0, 'star', 2)].startblock.inputargs) == 4
         assert len(executedesc._cache[(1, 'star', 3)].startblock.inputargs) == 5
 
+    def test_specialize_arg_or_var(self):
+        def f(a):
+            return 1
+        f._annspecialcase_ = 'specialize:arg_or_var(0)'
+
+        def fn(a):
+            return f(3) + f(a)
+
+        a = self.RPythonAnnotator()
+        a.build_types(fn, [int])
+        executedesc = a.bookkeeper.getdesc(f)
+        assert sorted(executedesc._cache.keys()) == [None, (3,)]
+        # we got two different specializations: one for the constant 3, one generic
+
+    def test_specialize_call_location(self):
+        def g(a):
+            return a
+        g._annspecialcase_ = "specialize:call_location"
+        def f(x):
+            return g(x)
+        f._annspecialcase_ = "specialize:argtype(0)"
+        def h(y):
+            w = f(y)
+            return int(f(str(y))) + w
+        a = self.RPythonAnnotator()
+        assert a.build_types(h, [int]) == annmodel.SomeInteger()
+
     def test_assert_list_doesnt_lose_info(self):
         class T(object):
             pass
@@ -3177,6 +3204,8 @@
         s = a.build_types(f, [])
         assert isinstance(s, annmodel.SomeList)
         assert not s.listdef.listitem.resized
+        assert not s.listdef.listitem.immutable
+        assert s.listdef.listitem.mutated
 
     def test_delslice(self):
         def f():
diff --git a/pypy/annotation/unaryop.py b/pypy/annotation/unaryop.py
--- a/pypy/annotation/unaryop.py
+++ b/pypy/annotation/unaryop.py
@@ -352,6 +352,7 @@
         check_negative_slice(s_start, s_stop)
         if not isinstance(s_iterable, SomeList):
             raise Exception("list[start:stop] = x: x must be a list")
+        lst.listdef.mutate()
         lst.listdef.agree(s_iterable.listdef)
         # note that setslice is not allowed to resize a list in RPython
 
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -27,7 +27,7 @@
 # --allworkingmodules
 working_modules = default_modules.copy()
 working_modules.update(dict.fromkeys(
-    ["_socket", "unicodedata", "mmap", "fcntl", "_locale",
+    ["_socket", "unicodedata", "mmap", "fcntl", "_locale", "pwd",
      "rctime" , "select", "zipimport", "_lsprof",
      "crypt", "signal", "_rawffi", "termios", "zlib", "bz2",
      "struct", "_hashlib", "_md5", "_sha", "_minimal_curses", "cStringIO",
@@ -58,6 +58,7 @@
     # unix only modules
     del working_modules["crypt"]
     del working_modules["fcntl"]
+    del working_modules["pwd"]
     del working_modules["termios"]
     del working_modules["_minimal_curses"]
 
diff --git a/pypy/config/test/test_config.py b/pypy/config/test/test_config.py
--- a/pypy/config/test/test_config.py
+++ b/pypy/config/test/test_config.py
@@ -281,11 +281,11 @@
 
 def test_underscore_in_option_name():
     descr = OptionDescription("opt", "", [
-        BoolOption("_stackless", "", default=False),
+        BoolOption("_foobar", "", default=False),
     ])
     config = Config(descr)
     parser = to_optparse(config)
-    assert parser.has_option("--_stackless")
+    assert parser.has_option("--_foobar")
 
 def test_none():
     dummy1 = BoolOption('dummy1', 'doc dummy', default=False, cmdline=None)
diff --git a/pypy/doc/config/objspace.usemodules._stackless.txt b/pypy/doc/config/objspace.usemodules._stackless.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.usemodules._stackless.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Deprecated.
diff --git a/pypy/doc/config/objspace.usemodules.pwd.txt b/pypy/doc/config/objspace.usemodules.pwd.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.usemodules.pwd.txt
@@ -0,0 +1,2 @@
+Use the 'pwd' module. 
+This module is expected to be fully working.
diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
--- a/pypy/doc/index.rst
+++ b/pypy/doc/index.rst
@@ -21,8 +21,6 @@
 
 * `Papers`_: Academic papers, talks, and related projects
 
-* `Videos`_: Videos of PyPy talks and presentations
-
 * `speed.pypy.org`_: Daily benchmarks of how fast PyPy is
 
 * `potential project ideas`_: In case you want to get your feet wet...
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -53,6 +53,18 @@
 this is an ideal task to get started, because it does not require any deep
 knowledge of the internals.
 
+Optimized Unicode Representation
+--------------------------------
+
+CPython 3.3 will use an `optimized unicode representation`_ which switches between
+different ways to represent a unicode string, depending on whether the string
+fits into ASCII, has only two-byte characters or needs four-byte characters.
+
+The actual details would be rather different in PyPy, but we would like to have
+the same optimization implemented.
+
+.. _`optimized unicode representation`: http://www.python.org/dev/peps/pep-0393/
+
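+A toy illustration of the idea (plain Python, not the eventual PyPy design):
+pick the narrowest storage width that can hold every character of the string.
+
+    def storage_width(u):
+        biggest = max([ord(ch) for ch in u] or [0])
+        if biggest < 0x80:
+            return 1    # plain ASCII, one byte per character
+        elif biggest < 0x10000:
+            return 2    # two bytes per character
+        else:
+            return 4    # four bytes per character
+
+    assert storage_width(u"abc") == 1
+    assert storage_width(u"\u0439") == 2
+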
 Translation Toolchain
 ---------------------
 
diff --git a/pypy/doc/stackless.rst b/pypy/doc/stackless.rst
--- a/pypy/doc/stackless.rst
+++ b/pypy/doc/stackless.rst
@@ -66,7 +66,7 @@
 In practice, in PyPy, you cannot change the ``f_back`` of an arbitrary
 frame, but only of frames stored in ``continulets``.
 
-Continulets are internally implemented using stacklets.  Stacklets are a
+Continulets are internally implemented using stacklets_.  Stacklets are a
 bit more primitive (they are really one-shot continuations), but that
 idea only works in C, not in Python.  The basic idea of continulets is
 to have at any point in time a complete valid stack; this is important
@@ -215,11 +215,6 @@
 
 * Support for other CPUs than x86 and x86-64
 
-* The app-level ``f_back`` field of frames crossing continulet boundaries
-  is None for now, unlike what I explain in the theoretical overview
-  above.  It mostly means that in a ``pdb.set_trace()`` you cannot go
-  ``up`` past countinulet boundaries.  This could be fixed.
-
 .. __: `recursion depth limit`_
 
 (*) Pickling, as well as changing threads, could be implemented by using
@@ -285,6 +280,24 @@
 to use other interfaces like genlets and greenlets.)
 
 
+Stacklets
++++++++++
+
+Continulets are internally implemented using stacklets, which is the
+generic RPython-level building block for "one-shot continuations".  For
+more information about them please see the documentation in the C source
+at `pypy/translator/c/src/stacklet/stacklet.h`_.
+
+The module ``pypy.rlib.rstacklet`` is a thin wrapper around the above
+functions.  The key point is that new() and switch() always return a
+fresh stacklet handle (or an empty one), and switch() additionally
+consumes one.  It makes no sense to have code in which the returned
+handle is ignored, or used more than once.  Note that ``stacklet.c`` is
+written assuming that the user knows that, and so no additional checking
+occurs; this can easily lead to obscure crashes if you don't use a
+wrapper like PyPy's '_continuation' module.
+
+
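+A pseudocode sketch of the handle discipline described above (the names and
+signatures here are placeholders for illustration, not the real
+``pypy.rlib.rstacklet`` API):
+
+    h = new(start_callback)   # suspends the caller, returns a fresh handle
+    h2 = switch(h)            # consumes h, returns another fresh handle
+    # h is dead after the switch; only h2 may be used from here on
+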
 Theory of composability
 +++++++++++++++++++++++
 
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -125,6 +125,7 @@
 
     ###  Manipulation  ###
 
+    @jit.look_inside_iff(lambda self: not self._dont_jit)
     def unpack(self): # slowish
         "Return a ([w1,w2...], {'kw':w3...}) pair."
         kwds_w = {}
@@ -245,6 +246,8 @@
 
     ###  Parsing for function calls  ###
 
+    # XXX: this should be @jit.look_inside_iff, but we need keyword arguments,
+    # and it doesn't support them for now.
     def _match_signature(self, w_firstarg, scope_w, signature, defaults_w=None,
                          blindargs=0):
         """Parse args and kwargs according to the signature of a code object,
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -3,18 +3,18 @@
 from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag
 from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction
 from pypy.interpreter.error import OperationError, operationerrfmt
-from pypy.interpreter.error import new_exception_class
+from pypy.interpreter.error import new_exception_class, typed_unwrap_error_msg
 from pypy.interpreter.argument import Arguments
 from pypy.interpreter.miscutils import ThreadLocals
 from pypy.tool.cache import Cache
 from pypy.tool.uid import HUGEVAL_BYTES
-from pypy.rlib.objectmodel import we_are_translated
+from pypy.rlib.objectmodel import we_are_translated, newlist, compute_unique_id
 from pypy.rlib.debug import make_sure_not_resized
 from pypy.rlib.timer import DummyTimer, Timer
 from pypy.rlib.rarithmetic import r_uint
 from pypy.rlib import jit
 from pypy.tool.sourcetools import func_with_new_name
-import os, sys, py
+import os, sys
 
 __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root']
 
@@ -186,6 +186,28 @@
     def _set_mapdict_storage_and_map(self, storage, map):
         raise NotImplementedError
 
+    # -------------------------------------------------------------------
+
+    def str_w(self, space):
+        w_msg = typed_unwrap_error_msg(space, "string", self)
+        raise OperationError(space.w_TypeError, w_msg)
+
+    def unicode_w(self, space):
+        raise OperationError(space.w_TypeError,
+                             typed_unwrap_error_msg(space, "unicode", self))
+
+    def int_w(self, space):
+        raise OperationError(space.w_TypeError,
+                             typed_unwrap_error_msg(space, "integer", self))
+    
+    def uint_w(self, space):
+        raise OperationError(space.w_TypeError,
+                             typed_unwrap_error_msg(space, "integer", self))
+    
+    def bigint_w(self, space):
+        raise OperationError(space.w_TypeError,
+                             typed_unwrap_error_msg(space, "integer", self))
+
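
The pattern being introduced here is a double dispatch: `W_Root` carries
default unwrapping methods that raise the type error, and (further down in this
file) the object space's `str_w()` and friends simply forward to the wrapped
object.  A self-contained sketch of the same shape (outside PyPy, so a plain
TypeError stands in for OperationError):

    class MyRoot(object):
        def str_w(self, space):
            raise TypeError("expected string, got %s object"
                            % (type(self).__name__,))

    class MyStr(MyRoot):
        def __init__(self, value):
            self.value = value
        def str_w(self, space):
            return self.value

    class MySpace(object):
        def str_w(self, w_obj):
            return w_obj.str_w(self)    # dispatch on the wrapped object

    space = MySpace()
    assert space.str_w(MyStr("abc")) == "abc"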
 
 class Wrappable(W_Root):
     """A subclass of Wrappable is an internal, interpreter-level class
@@ -757,7 +779,18 @@
         w_iterator = self.iter(w_iterable)
         # If we know the expected length we can preallocate.
         if expected_length == -1:
-            items = []
+            try:
+                lgt_estimate = self.len_w(w_iterable)
+            except OperationError, o:
+                if (not o.match(self, self.w_AttributeError) and
+                    not o.match(self, self.w_TypeError)):
+                    raise
+                items = []
+            else:
+                try:
+                    items = newlist(lgt_estimate)
+                except MemoryError:
+                    items = [] # it might have lied
         else:
             items = [None] * expected_length
         idx = 0
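
The intent of the change above: ask the iterable for a length estimate via
`len_w()`, preallocate with `newlist()` when it cooperates, and fall back to an
ordinary list when the object has no usable length or the estimate cannot be
honoured.  A plain-Python approximation (there is no `newlist()` size hint
outside RPython, so None placeholders stand in for it):

    def unpack(iterable):
        try:
            estimate = len(iterable)
        except (TypeError, AttributeError):
            items = []
        else:
            try:
                items = [None] * estimate
            except MemoryError:
                items = []          # the length estimate might have lied
        idx = 0
        for x in iterable:
            if idx < len(items):
                items[idx] = x      # fill the preallocated slot
            else:
                items.append(x)     # estimate was too small: grow normally
            idx += 1
        del items[idx:]             # estimate was too large: trim the tail
        return items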
@@ -890,7 +923,7 @@
         ec.c_call_trace(frame, w_func, args)
         try:
             w_res = self.call_args(w_func, args)
-        except OperationError, e:
+        except OperationError:
             ec.c_exception_trace(frame, w_func)
             raise
         ec.c_return_trace(frame, w_func, args)
@@ -936,6 +969,9 @@
     def isinstance_w(self, w_obj, w_type):
         return self.is_true(self.isinstance(w_obj, w_type))
 
+    def id(self, w_obj):
+        return self.wrap(compute_unique_id(w_obj))
+
     # The code below only works
     # for the simple case (new-style instance).
     # These methods are patched with the full logic by the __builtin__
@@ -988,8 +1024,6 @@
 
     def eval(self, expression, w_globals, w_locals, hidden_applevel=False):
         "NOT_RPYTHON: For internal debugging."
-        import types
-        from pypy.interpreter.pycode import PyCode
         if isinstance(expression, str):
             compiler = self.createcompiler()
             expression = compiler.compile(expression, '?', 'eval', 0,
@@ -1001,7 +1035,6 @@
     def exec_(self, statement, w_globals, w_locals, hidden_applevel=False,
               filename=None):
         "NOT_RPYTHON: For internal debugging."
-        import types
         if filename is None:
             filename = '?'
         from pypy.interpreter.pycode import PyCode
@@ -1199,6 +1232,18 @@
             return None
         return self.str_w(w_obj)
 
+    def str_w(self, w_obj):
+        return w_obj.str_w(self)
+
+    def int_w(self, w_obj):
+        return w_obj.int_w(self)
+
+    def uint_w(self, w_obj):
+        return w_obj.uint_w(self)
+
+    def bigint_w(self, w_obj):
+        return w_obj.bigint_w(self)
+
     def realstr_w(self, w_obj):
         # Like str_w, but only works if w_obj is really of type 'str'.
         if not self.is_true(self.isinstance(w_obj, self.w_str)):
@@ -1206,6 +1251,9 @@
                                  self.wrap('argument must be a string'))
         return self.str_w(w_obj)
 
+    def unicode_w(self, w_obj):
+        return w_obj.unicode_w(self)
+
     def realunicode_w(self, w_obj):
         # Like unicode_w, but only works if w_obj is really of type
         # 'unicode'.
diff --git a/pypy/interpreter/error.py b/pypy/interpreter/error.py
--- a/pypy/interpreter/error.py
+++ b/pypy/interpreter/error.py
@@ -458,3 +458,7 @@
     if module:
         space.setattr(w_exc, space.wrap("__module__"), space.wrap(module))
     return w_exc
+
+def typed_unwrap_error_msg(space, expected, w_obj):
+    type_name = space.type(w_obj).getname(space)
+    return space.wrap("expected %s, got %s object" % (expected, type_name))
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -1,5 +1,4 @@
 import sys
-from pypy.interpreter.miscutils import Stack
 from pypy.interpreter.error import OperationError
 from pypy.rlib.rarithmetic import LONG_BIT
 from pypy.rlib.unroll import unrolling_iterable
@@ -48,6 +47,7 @@
         return frame
 
     @staticmethod
+    @jit.unroll_safe  # should usually loop 0 times, very rarely more than once
     def getnextframe_nohidden(frame):
         frame = frame.f_backref()
         while frame and frame.hide():
@@ -81,58 +81,6 @@
 
     # ________________________________________________________________
 
-
-    class Subcontext(object):
-        # coroutine: subcontext support
-
-        def __init__(self):
-            self.topframe = None
-            self.w_tracefunc = None
-            self.profilefunc = None
-            self.w_profilefuncarg = None
-            self.is_tracing = 0
-
-        def enter(self, ec):
-            ec.topframeref = jit.non_virtual_ref(self.topframe)
-            ec.w_tracefunc = self.w_tracefunc
-            ec.profilefunc = self.profilefunc
-            ec.w_profilefuncarg = self.w_profilefuncarg
-            ec.is_tracing = self.is_tracing
-            ec.space.frame_trace_action.fire()
-
-        def leave(self, ec):
-            self.topframe = ec.gettopframe()
-            self.w_tracefunc = ec.w_tracefunc
-            self.profilefunc = ec.profilefunc
-            self.w_profilefuncarg = ec.w_profilefuncarg
-            self.is_tracing = ec.is_tracing
-
-        def clear_framestack(self):
-            self.topframe = None
-
-        # the following interface is for pickling and unpickling
-        def getstate(self, space):
-            if self.topframe is None:
-                return space.w_None
-            return self.topframe
-
-        def setstate(self, space, w_state):
-            from pypy.interpreter.pyframe import PyFrame
-            if space.is_w(w_state, space.w_None):
-                self.topframe = None
-            else:
-                self.topframe = space.interp_w(PyFrame, w_state)
-
-        def getframestack(self):
-            lst = []
-            f = self.topframe
-            while f is not None:
-                lst.append(f)
-                f = f.f_backref()
-            lst.reverse()
-            return lst
-        # coroutine: I think this is all, folks!
-
     def c_call_trace(self, frame, w_func, args=None):
         "Profile the call of a builtin function"
         self._c_call_return_trace(frame, w_func, args, 'c_call')
diff --git a/pypy/interpreter/function.py b/pypy/interpreter/function.py
--- a/pypy/interpreter/function.py
+++ b/pypy/interpreter/function.py
@@ -242,8 +242,10 @@
             # we have been seen by other means so rtyping should not choke
             # on us
             identifier = self.code.identifier
-            assert Function._all.get(identifier, self) is self, ("duplicate "
-                                                                 "function ids")
+            previous = Function._all.get(identifier, self)
+            assert previous is self, (
+                "duplicate function ids with identifier=%r: %r and %r" % (
+                identifier, previous, self))
             self.add_to_table()
         return False
 
diff --git a/pypy/interpreter/miscutils.py b/pypy/interpreter/miscutils.py
--- a/pypy/interpreter/miscutils.py
+++ b/pypy/interpreter/miscutils.py
@@ -2,154 +2,6 @@
 Miscellaneous utilities.
 """
 
-import types
-
-from pypy.rlib.rarithmetic import r_uint
-
-class RootStack:
-    pass
-
-class Stack(RootStack):
-    """Utility class implementing a stack."""
-
-    _annspecialcase_ = "specialize:ctr_location" # polymorphic
-
-    def __init__(self):
-        self.items = []
-
-    def clone(self):
-        s = self.__class__()
-        for item in self.items:
-            try:
-                item = item.clone()
-            except AttributeError:
-                pass
-            s.push(item)
-        return s
-
-    def push(self, item):
-        self.items.append(item)
-
-    def pop(self):
-        return self.items.pop()
-
-    def drop(self, n):
-        if n > 0:
-            del self.items[-n:]
-
-    def top(self, position=0):
-        """'position' is 0 for the top of the stack, 1 for the item below,
-        and so on.  It must not be negative."""
-        if position < 0:
-            raise ValueError, 'negative stack position'
-        if position >= len(self.items):
-            raise IndexError, 'not enough entries in stack'
-        return self.items[~position]
-
-    def set_top(self, value, position=0):
-        """'position' is 0 for the top of the stack, 1 for the item below,
-        and so on.  It must not be negative."""
-        if position < 0:
-            raise ValueError, 'negative stack position'
-        if position >= len(self.items):
-            raise IndexError, 'not enough entries in stack'
-        self.items[~position] = value
-
-    def depth(self):
-        return len(self.items)
-
-    def empty(self):
-        return len(self.items) == 0
-
-
-class FixedStack(RootStack):
-    _annspecialcase_ = "specialize:ctr_location" # polymorphic
-
-    # unfortunately, we have to re-do everything
-    def __init__(self):
-        pass
-
-    def setup(self, stacksize):
-        self.ptr = r_uint(0) # we point after the last element
-        self.items = [None] * stacksize
-
-    def clone(self):
-        # this is only needed if we support flow space
-        s = self.__class__()
-        s.setup(len(self.items))
-        for item in self.items[:self.ptr]:
-            try:
-                item = item.clone()
-            except AttributeError:
-                pass
-            s.push(item)
-        return s
-
-    def push(self, item):
-        ptr = self.ptr
-        self.items[ptr] = item
-        self.ptr = ptr + 1
-
-    def pop(self):
-        ptr = self.ptr - 1
-        ret = self.items[ptr]   # you get OverflowError if the stack is empty
-        self.items[ptr] = None
-        self.ptr = ptr
-        return ret
-
-    def drop(self, n):
-        while n > 0:
-            n -= 1
-            self.ptr -= 1
-            self.items[self.ptr] = None
-
-    def top(self, position=0):
-        # for a fixed stack, we assume correct indices
-        return self.items[self.ptr + ~position]
-
-    def set_top(self, value, position=0):
-        # for a fixed stack, we assume correct indices
-        self.items[self.ptr + ~position] = value
-
-    def depth(self):
-        return self.ptr
-
-    def empty(self):
-        return not self.ptr
-
-
-class InitializedClass(type):
-    """NOT_RPYTHON.  A meta-class that allows a class to initialize itself (or
-    its subclasses) by calling __initclass__() as a class method."""
-    def __init__(self, name, bases, dict):
-        super(InitializedClass, self).__init__(name, bases, dict)
-        for basecls in self.__mro__:
-            raw = basecls.__dict__.get('__initclass__')
-            if isinstance(raw, types.FunctionType):
-                raw(self)   # call it as a class method
-
-
-class RwDictProxy(object):
-    """NOT_RPYTHON.  A dict-like class standing for 'cls.__dict__', to work
-    around the fact that the latter is a read-only proxy for new-style
-    classes."""
-    
-    def __init__(self, cls):
-        self.cls = cls
-
-    def __getitem__(self, attr):
-        return self.cls.__dict__[attr]
-
-    def __setitem__(self, attr, value):
-        setattr(self.cls, attr, value)
-
-    def __contains__(self, value):
-        return value in self.cls.__dict__
-
-    def items(self):
-        return self.cls.__dict__.items()
-
-
 class ThreadLocals:
     """Pseudo thread-local storage, for 'space.threadlocals'.
     This is not really thread-local at all; the intention is that the PyPy
@@ -167,3 +19,7 @@
 
     def getmainthreadvalue(self):
         return self._value
+
+    def getallvalues(self):
+        return {0: self._value}
+
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -10,7 +10,7 @@
 from pypy.interpreter.argument import Signature
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.gateway import NoneNotWrapped, unwrap_spec
-from pypy.interpreter.astcompiler.consts import (CO_OPTIMIZED,
+from pypy.interpreter.astcompiler.consts import (
     CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS, CO_NESTED,
     CO_GENERATOR, CO_CONTAINSGLOBALS)
 from pypy.rlib.rarithmetic import intmask
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -614,7 +614,8 @@
         return self.get_builtin().getdict(space)
 
     def fget_f_back(self, space):
-        return self.space.wrap(self.f_backref())
+        f_back = ExecutionContext.getnextframe_nohidden(self)
+        return self.space.wrap(f_back)
 
     def fget_f_lasti(self, space):
         return self.space.wrap(self.last_instr)
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1523,10 +1523,8 @@
 
         if not isinstance(prog, codetype):
             filename = '<string>'
-            if not isinstance(prog, str):
-                if isinstance(prog, basestring):
-                    prog = str(prog)
-                elif isinstance(prog, file):
+            if not isinstance(prog, basestring):
+                if isinstance(prog, file):
                     filename = prog.name
                     prog = prog.read()
                 else:
diff --git a/pypy/interpreter/pyparser/future.py b/pypy/interpreter/pyparser/future.py
--- a/pypy/interpreter/pyparser/future.py
+++ b/pypy/interpreter/pyparser/future.py
@@ -109,25 +109,19 @@
             self.getc() == self.getc(+2)):
             self.pos += 3
             while 1: # Deal with a triple quoted docstring
-                if self.getc() == '\\':
-                    self.pos += 2
+                c = self.getc()
+                if c == '\\':
+                    self.pos += 1
+                    self._skip_next_char_from_docstring()
+                elif c != endchar:
+                    self._skip_next_char_from_docstring()
                 else:
-                    c = self.getc()
-                    if c != endchar:
-                        self.pos += 1
-                        if c == '\n':
-                            self.atbol()
-                        elif c == '\r':
-                            if self.getc() == '\n':
-                                self.pos += 1
-                                self.atbol()
-                    else:
-                        self.pos += 1
-                        if (self.getc() == endchar and
-                            self.getc(+1) == endchar):
-                            self.pos += 2
-                            self.consume_empty_line()
-                            break
+                    self.pos += 1
+                    if (self.getc() == endchar and
+                        self.getc(+1) == endchar):
+                        self.pos += 2
+                        self.consume_empty_line()
+                        break
 
         else: # Deal with a single quoted docstring
             self.pos += 1
@@ -138,17 +132,21 @@
                     self.consume_empty_line()
                     return
                 elif c == '\\':
-                    # Deal with linefeeds
-                    if self.getc() != '\r':
-                        self.pos += 1
-                    else:
-                        self.pos += 1
-                        if self.getc() == '\n':
-                            self.pos += 1
+                    self._skip_next_char_from_docstring()
                 elif c in '\r\n':
                     # Syntax error
                     return
 
+    def _skip_next_char_from_docstring(self):
+        c = self.getc()
+        self.pos += 1
+        if c == '\n':
+            self.atbol()
+        elif c == '\r':
+            if self.getc() == '\n':
+                self.pos += 1
+            self.atbol()
+
     def consume_continuation(self):
         c = self.getc()
         if c in '\n\r':
@@ -227,14 +225,16 @@
             raise DoneException
         self.consume_whitespace()
 
-    def consume_whitespace(self):
+    def consume_whitespace(self, newline_ok=False):
         while 1:
             c = self.getc()
             if c in whitespace:
                 self.pos += 1
                 continue
-            elif c == '\\':
-                self.pos += 1
+            elif c == '\\' or newline_ok:
+                slash = c == '\\'
+                if slash:
+                    self.pos += 1
                 c = self.getc()
                 if c == '\n':
                     self.pos += 1
@@ -245,8 +245,10 @@
                     if self.getc() == '\n':
                         self.pos += 1
                         self.atbol()
+                elif slash:
+                    raise DoneException
                 else:
-                    raise DoneException
+                    return
             else:
                 return
 
@@ -283,7 +285,7 @@
             return
         else:
             self.pos += 1
-            self.consume_whitespace()
+            self.consume_whitespace(paren_list)
             if paren_list and self.getc() == ')':
                 self.pos += 1
                 return # Handles trailing comma inside parenthesis
diff --git a/pypy/interpreter/pyparser/test/test_futureautomaton.py b/pypy/interpreter/pyparser/test/test_futureautomaton.py
--- a/pypy/interpreter/pyparser/test/test_futureautomaton.py
+++ b/pypy/interpreter/pyparser/test/test_futureautomaton.py
@@ -3,7 +3,7 @@
 from pypy.tool import stdlib___future__ as fut
 
 def run(s):
-    f = future.FutureAutomaton(future.futureFlags_2_5, s)
+    f = future.FutureAutomaton(future.futureFlags_2_7, s)
     try:
         f.start()
     except future.DoneException:
@@ -113,6 +113,14 @@
     assert f.lineno == 1
     assert f.col_offset == 0
 
+def test_paren_with_newline():
+    s = 'from __future__ import (division,\nabsolute_import)\n'
+    f = run(s)
+    assert f.pos == len(s)
+    assert f.flags == (fut.CO_FUTURE_DIVISION | fut.CO_FUTURE_ABSOLUTE_IMPORT)
+    assert f.lineno == 1
+    assert f.col_offset == 0
+
 def test_multiline():
     s = '"abc" #def\n  #ghi\nfrom  __future__ import (division as b, generators,)\nfrom __future__ import with_statement\n'
     f = run(s)
@@ -221,6 +229,14 @@
     assert f.lineno == 3
     assert f.col_offset == 0
 
+def test_lots_of_continuation_lines():
+    s = "\\\n\\\n\\\n\\\n\\\n\\\n\nfrom __future__ import with_statement\n"
+    f = run(s)
+    assert f.pos == len(s)
+    assert f.flags == fut.CO_FUTURE_WITH_STATEMENT
+    assert f.lineno == 8
+    assert f.col_offset == 0
+
 # This looks like a bug in cpython parser
 # and would require extensive modifications
 # to future.py in order to emulate the same behaviour
@@ -239,3 +255,19 @@
         raise AssertionError('IndentationError not raised')
     assert f.lineno == 2
     assert f.col_offset == 0
+
+def test_continuation_lines_in_docstring_single_quoted():
+    s = '"\\\n\\\n\\\n\\\n\\\n\\\n"\nfrom  __future__ import division\n'
+    f = run(s)
+    assert f.pos == len(s)
+    assert f.flags == fut.CO_FUTURE_DIVISION
+    assert f.lineno == 8
+    assert f.col_offset == 0
+
+def test_continuation_lines_in_docstring_triple_quoted():
+    s = '"""\\\n\\\n\\\n\\\n\\\n\\\n"""\nfrom  __future__ import division\n'
+    f = run(s)
+    assert f.pos == len(s)
+    assert f.flags == fut.CO_FUTURE_DIVISION
+    assert f.lineno == 8
+    assert f.col_offset == 0
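
For readers cross-checking the behaviour pinned down by the new tests: a parenthesised future import may now span several physical lines, and backslash continuations (including inside a leading docstring) must not confuse the automaton. Under a Python 2.7 compile() the same kind of source yields a code object whose co_flags carry the requested features, which gives a quick sanity check of the expected flags. A minimal sketch, not part of the changeset:

    import __future__

    # a future import split across lines inside parentheses
    src = "from __future__ import (division,\n                        absolute_import)\n"
    code = compile(src, "<test>", "exec")

    # the compiler records the requested features on the code object
    assert code.co_flags & __future__.division.compiler_flag
    assert code.co_flags & __future__.absolute_import.compiler_flag
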
diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
--- a/pypy/interpreter/test/test_exec.py
+++ b/pypy/interpreter/test/test_exec.py
@@ -219,3 +219,30 @@
             raise e
 
         assert res == 1
+
+    def test_exec_unicode(self):
+        # 's' is a string
+        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
+        # 'u' is a unicode
+        u = s.decode('utf-8')
+        exec u
+        assert len(x) == 6
+        assert ord(x[0]) == 0x0439
+        assert ord(x[1]) == 0x0446
+        assert ord(x[2]) == 0x0443
+        assert ord(x[3]) == 0x043a
+        assert ord(x[4]) == 0x0435
+        assert ord(x[5]) == 0x043d
+
+    def test_eval_unicode(self):
+        u = "u'%s'" % unichr(0x1234)
+        v = eval(u)
+        assert v == unichr(0x1234)
+
+    def test_compile_unicode(self):
+        s = "x = u'\xd0\xb9\xd1\x86\xd1\x83\xd0\xba\xd0\xb5\xd0\xbd'"
+        u = s.decode('utf-8')
+        c = compile(u, '<input>', 'exec')
+        exec c
+        assert len(x) == 6
+        assert ord(x[0]) == 0x0439
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -71,6 +71,23 @@
         assert err.value.match(space, space.w_ValueError)
         err = raises(OperationError, space.unpackiterable, w_l, 5)
         assert err.value.match(space, space.w_ValueError)
+        w_a = space.appexec((), """():
+        class A(object):
+            def __iter__(self):
+                return self
+            def next(self):
+                raise StopIteration
+            def __len__(self):
+                1/0
+        return A()
+        """)
+        try:
+            space.unpackiterable(w_a)
+        except OperationError, o:
+            if not o.match(space, space.w_ZeroDivisionError):
+                raise Exception("raised %r instead of ZeroDivisionError" % (o,))
+        else:
+            raise Exception("DID NOT RAISE")
 
     def test_fixedview(self):
         space = self.space
diff --git a/pypy/interpreter/test/test_pyframe.py b/pypy/interpreter/test/test_pyframe.py
--- a/pypy/interpreter/test/test_pyframe.py
+++ b/pypy/interpreter/test/test_pyframe.py
@@ -1,4 +1,5 @@
 from pypy.tool import udir
+from pypy.conftest import option
 
 
 class AppTestPyFrame:
@@ -6,6 +7,15 @@
     def setup_class(cls):
         cls.w_udir = cls.space.wrap(str(udir.udir))
         cls.w_tempfile1 = cls.space.wrap(str(udir.udir.join('tempfile1')))
+        if not option.runappdirect:
+            w_call_further = cls.space.appexec([], """():
+                def call_further(f):
+                    return f()
+                return call_further
+            """)
+            assert not w_call_further.code.hidden_applevel
+            w_call_further.code.hidden_applevel = True       # hack
+            cls.w_call_further = w_call_further
 
     # test for the presence of the attributes, not functionality
 
@@ -107,6 +117,22 @@
         frame = f()
         assert frame.f_back.f_code.co_name == 'f'
 
+    def test_f_back_hidden(self):
+        if not hasattr(self, 'call_further'):
+            skip("not for runappdirect testing")
+        import sys
+        def f():
+            return (sys._getframe(0),
+                    sys._getframe(1),
+                    sys._getframe(0).f_back)
+        def main():
+            return self.call_further(f)
+        f0, f1, f1bis = main()
+        assert f0.f_code.co_name == 'f'
+        assert f1.f_code.co_name == 'main'
+        assert f1bis is f1
+        assert f0.f_back is f1
+
     def test_f_exc_xxx(self):
         import sys
 
diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py
--- a/pypy/jit/backend/llsupport/llmodel.py
+++ b/pypy/jit/backend/llsupport/llmodel.py
@@ -496,6 +496,16 @@
         u = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
         u.chars[index] = unichr(newvalue)
 
+    def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
+        src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src)
+        dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst)
+        rstr.copy_string_contents(src, dst, srcstart, dststart, length)
+
+    def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
+        src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src)
+        dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst)
+        rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
+
     def bh_call_i(self, func, calldescr, args_i, args_r, args_f):
         assert isinstance(calldescr, BaseIntCallDescr)
         if not we_are_translated():
diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
--- a/pypy/jit/backend/llsupport/regalloc.py
+++ b/pypy/jit/backend/llsupport/regalloc.py
@@ -57,11 +57,13 @@
     all_regs              = []
     no_lower_byte_regs    = []
     save_around_call_regs = []
-    
+    frame_reg             = None
+
     def __init__(self, longevity, frame_manager=None, assembler=None):
         self.free_regs = self.all_regs[:]
         self.longevity = longevity
         self.reg_bindings = {}
+        self.bindings_to_frame_reg = {}
         self.position = -1
         self.frame_manager = frame_manager
         self.assembler = assembler
@@ -218,6 +220,10 @@
         self.reg_bindings[v] = loc
         return loc
 
+    def force_allocate_frame_reg(self, v):
+        """ Allocate the new variable v in the frame register."""
+        self.bindings_to_frame_reg[v] = None
+
     def force_spill_var(self, var):
         self._sync_var(var)
         try:
@@ -236,6 +242,8 @@
         try:
             return self.reg_bindings[box]
         except KeyError:
+            if box in self.bindings_to_frame_reg:
+                return self.frame_reg
             return self.frame_manager.loc(box)
 
     def return_constant(self, v, forbidden_vars=[], selected_reg=None):
@@ -264,8 +272,9 @@
         self._check_type(v)
         if isinstance(v, Const):
             return self.return_constant(v, forbidden_vars, selected_reg)
-        
         prev_loc = self.loc(v)
+        if prev_loc is self.frame_reg and selected_reg is None:
+            return prev_loc
         loc = self.force_allocate_reg(v, forbidden_vars, selected_reg,
                                       need_lower_byte=need_lower_byte)
         if prev_loc is not loc:
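
The point of the new frame_reg / bindings_to_frame_reg pair above is that a box can be pinned to the frame pointer without consuming one of the allocatable registers: force_allocate_frame_reg() only records the binding, and loc() resolves such a box to frame_reg. A toy sketch of that lookup order, with plain strings standing in for boxes and locations (hypothetical names, not the real classes):

    class TinyRegAlloc(object):
        # stripped-down stand-in for RegisterManager: only the lookup order matters
        frame_reg = 'ebp'

        def __init__(self):
            self.reg_bindings = {}            # box -> general-purpose register
            self.bindings_to_frame_reg = {}   # box -> None, i.e. pinned to ebp
            self.frame_bindings = {}          # box -> stack slot

        def force_allocate_frame_reg(self, box):
            # no register is taken from free_regs; the box just lives in ebp
            self.bindings_to_frame_reg[box] = None

        def loc(self, box):
            if box in self.reg_bindings:
                return self.reg_bindings[box]
            if box in self.bindings_to_frame_reg:
                return self.frame_reg
            return self.frame_bindings[box]

    ra = TinyRegAlloc()
    ra.force_allocate_frame_reg('force_token_box')
    assert ra.loc('force_token_box') == 'ebp'
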
diff --git a/pypy/jit/backend/model.py b/pypy/jit/backend/model.py
--- a/pypy/jit/backend/model.py
+++ b/pypy/jit/backend/model.py
@@ -78,7 +78,7 @@
         Optionally, return a ``ops_offset`` dictionary.  See the docstring of
         ``compiled_loop`` for more information about it.
         """
-        raise NotImplementedError    
+        raise NotImplementedError
 
     def dump_loop_token(self, looptoken):
         """Print a disassembled version of looptoken to stdout"""
@@ -298,6 +298,10 @@
         raise NotImplementedError
     def bh_unicodesetitem(self, string, index, newvalue):
         raise NotImplementedError
+    def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
+        raise NotImplementedError
+    def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
+        raise NotImplementedError
 
     def force(self, force_token):
         raise NotImplementedError
diff --git a/pypy/jit/backend/x86/assembler.py b/pypy/jit/backend/x86/assembler.py
--- a/pypy/jit/backend/x86/assembler.py
+++ b/pypy/jit/backend/x86/assembler.py
@@ -957,6 +957,7 @@
         if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm):
             self.mc.MOVSD(to_loc, from_loc)
         else:
+            assert to_loc is not ebp
             self.mc.MOV(to_loc, from_loc)
 
     regalloc_mov = mov # legacy interface
@@ -2510,11 +2511,6 @@
 
     genop_discard_cond_call_gc_wb_array = genop_discard_cond_call_gc_wb
 
-    def genop_force_token(self, op, arglocs, resloc):
-        # RegAlloc.consider_force_token ensures this:
-        assert isinstance(resloc, RegLoc)
-        self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS)
-
     def not_implemented_op_discard(self, op, arglocs):
         not_implemented("not implemented operation: %s" % op.getopname())
 
diff --git a/pypy/jit/backend/x86/regalloc.py b/pypy/jit/backend/x86/regalloc.py
--- a/pypy/jit/backend/x86/regalloc.py
+++ b/pypy/jit/backend/x86/regalloc.py
@@ -29,6 +29,7 @@
     all_regs = [eax, ecx, edx, ebx, esi, edi]
     no_lower_byte_regs = [esi, edi]
     save_around_call_regs = [eax, edx, ecx]
+    frame_reg = ebp
 
     REGLOC_TO_GCROOTMAP_REG_INDEX = {
         ebx: 1,
@@ -312,8 +313,11 @@
                     self.fm.frame_bindings[arg] = loc
             else:
                 if isinstance(loc, RegLoc):
-                    self.rm.reg_bindings[arg] = loc
-                    used[loc] = None
+                    if loc is ebp:
+                        self.rm.bindings_to_frame_reg[arg] = None
+                    else:
+                        self.rm.reg_bindings[arg] = loc
+                        used[loc] = None
                 else:
                     self.fm.frame_bindings[arg] = loc
         self.rm.free_regs = []
@@ -1358,8 +1362,8 @@
                                             self.assembler.datablockwrapper)
 
     def consider_force_token(self, op):
-        loc = self.rm.force_allocate_reg(op.result)
-        self.Perform(op, [], loc)
+        # the FORCE_TOKEN operation returns 'ebp' directly
+        self.rm.force_allocate_frame_reg(op.result)
 
     def not_implemented_op(self, op):
         not_implemented("not implemented operation: %s" % op.getopname())
diff --git a/pypy/jit/backend/x86/runner.py b/pypy/jit/backend/x86/runner.py
--- a/pypy/jit/backend/x86/runner.py
+++ b/pypy/jit/backend/x86/runner.py
@@ -119,7 +119,8 @@
             setitem(index, null)
 
     def get_latest_force_token(self):
-        return self.assembler.fail_ebp + FORCE_INDEX_OFS
+        # the FORCE_TOKEN operation and this helper both return 'ebp'.
+        return self.assembler.fail_ebp
 
     def execute_token(self, executable_token):
         addr = executable_token._x86_bootstrap_code
@@ -153,8 +154,9 @@
                                        flavor='raw', zero=True,
                                        immortal=True)
 
-    def force(self, addr_of_force_index):
+    def force(self, addr_of_force_token):
         TP = rffi.CArrayPtr(lltype.Signed)
+        addr_of_force_index = addr_of_force_token + FORCE_INDEX_OFS
         fail_index = rffi.cast(TP, addr_of_force_index)[0]
         assert fail_index >= 0, "already forced!"
         faildescr = self.get_fail_descr_from_number(fail_index)
@@ -164,7 +166,7 @@
         # start of "no gc operation!" block
         fail_index_2 = self.assembler.grab_frame_values(
             bytecode,
-            addr_of_force_index - FORCE_INDEX_OFS,
+            addr_of_force_token,
             self.all_null_registers)
         self.assembler.leave_jitted_hook()
         # end of "no gc operation!" block
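
The convention after this change is that the force token handed around is the saved frame pointer itself, and the fail-index word sits at a fixed offset from it, so get_latest_force_token() and force() stay consistent. Numerically the old and new conventions address the same word; a sketch with illustrative values only (not the real offset):

    FORCE_INDEX_OFS = -4        # placeholder, not the actual constant
    fail_ebp = 0x7fff0000       # pretend frame pointer captured at guard failure

    old_token = fail_ebp + FORCE_INDEX_OFS   # previously handed out by get_latest_force_token()
    new_token = fail_ebp                     # now the token is ebp itself
    # force() compensates by adding the offset before reading the fail index
    assert new_token + FORCE_INDEX_OFS == old_token
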
diff --git a/pypy/jit/codewriter/jtransform.py b/pypy/jit/codewriter/jtransform.py
--- a/pypy/jit/codewriter/jtransform.py
+++ b/pypy/jit/codewriter/jtransform.py
@@ -1158,6 +1158,12 @@
             return SpaceOperation('%s_assert_green' % kind, args, None)
         elif oopspec_name == 'jit.current_trace_length':
             return SpaceOperation('current_trace_length', [], op.result)
+        elif oopspec_name == 'jit.isconstant':
+            kind = getkind(args[0].concretetype)
+            return SpaceOperation('%s_isconstant' % kind, args, op.result)
+        elif oopspec_name == 'jit.isvirtual':
+            kind = getkind(args[0].concretetype)
+            return SpaceOperation('%s_isvirtual' % kind, args, op.result)
         else:
             raise AssertionError("missing support for %r" % oopspec_name)
 
@@ -1415,6 +1421,14 @@
         else:
             assert 0, "args[0].concretetype must be STR or UNICODE"
         #
+        if oopspec_name == 'stroruni.copy_contents':
+            if SoU.TO == rstr.STR:
+                new_op = 'copystrcontent'
+            elif SoU.TO == rstr.UNICODE:
+                new_op = 'copyunicodecontent'
+            else:
+                assert 0
+            return SpaceOperation(new_op, args, op.result)
         if oopspec_name == "stroruni.equal":
             for otherindex, othername, argtypes, resulttype in [
                 (EffectInfo.OS_STREQ_SLICE_CHECKNULL,
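
The rewrites added above are name-plus-kind driven: the oopspec string selects the operation family, and either the argument's kind or the STR/UNICODE type selects the concrete opcode. A stripped-down sketch of that mapping for the two new jit.* hints (hypothetical helper, not the real transformer):

    def rewrite_jit_hint(oopspec_name, argkind):
        """Map a jit.* oopspec plus its argument kind ('int', 'ref', 'float')
        to the name of the space operation it becomes."""
        if oopspec_name == 'jit.isconstant':
            return '%s_isconstant' % argkind
        if oopspec_name == 'jit.isvirtual':
            return '%s_isvirtual' % argkind
        raise AssertionError("missing support for %r" % (oopspec_name,))

    assert rewrite_jit_hint('jit.isconstant', 'int') == 'int_isconstant'
    assert rewrite_jit_hint('jit.isvirtual', 'ref') == 'ref_isvirtual'
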
diff --git a/pypy/jit/metainterp/blackhole.py b/pypy/jit/metainterp/blackhole.py
--- a/pypy/jit/metainterp/blackhole.py
+++ b/pypy/jit/metainterp/blackhole.py
@@ -835,6 +835,18 @@
     def bhimpl_current_trace_length():
         return -1
 
+    @arguments("i", returns="i")
+    def bhimpl_int_isconstant(x):
+        return False
+
+    @arguments("r", returns="i")
+    def bhimpl_ref_isconstant(x):
+        return False
+
+    @arguments("r", returns="i")
+    def bhimpl_ref_isvirtual(x):
+        return False
+
     # ----------
     # the main hints and recursive calls
 
@@ -1224,6 +1236,9 @@
     @arguments("cpu", "r", "i", "i")
     def bhimpl_strsetitem(cpu, string, index, newchr):
         cpu.bh_strsetitem(string, index, newchr)
+    @arguments("cpu", "r", "r", "i", "i", "i")
+    def bhimpl_copystrcontent(cpu, src, dst, srcstart, dststart, length):
+        cpu.bh_copystrcontent(src, dst, srcstart, dststart, length)
 
     @arguments("cpu", "i", returns="r")
     def bhimpl_newunicode(cpu, length):
@@ -1237,6 +1252,9 @@
     @arguments("cpu", "r", "i", "i")
     def bhimpl_unicodesetitem(cpu, unicode, index, newchr):
         cpu.bh_unicodesetitem(unicode, index, newchr)
+    @arguments("cpu", "r", "r", "i", "i", "i")
+    def bhimpl_copyunicodecontent(cpu, src, dst, srcstart, dststart, length):
+        cpu.bh_copyunicodecontent(src, dst, srcstart, dststart, length)
 
     @arguments(returns=(longlong.is_64_bit and "i" or "f"))
     def bhimpl_ll_read_timestamp():
@@ -1441,7 +1459,7 @@
 def resume_in_blackhole(metainterp_sd, jitdriver_sd, resumedescr,
                         all_virtuals=None):
     from pypy.jit.metainterp.resume import blackhole_from_resumedata
-    debug_start('jit-blackhole')
+    #debug_start('jit-blackhole')
     metainterp_sd.profiler.start_blackhole()
     blackholeinterp = blackhole_from_resumedata(
         metainterp_sd.blackholeinterpbuilder,
@@ -1460,12 +1478,12 @@
         _run_forever(blackholeinterp, current_exc)
     finally:
         metainterp_sd.profiler.end_blackhole()
-        debug_stop('jit-blackhole')
+        #debug_stop('jit-blackhole')
 
 def convert_and_run_from_pyjitpl(metainterp, raising_exception=False):
     # Get a chain of blackhole interpreters and fill them by copying
     # 'metainterp.framestack'.
-    debug_start('jit-blackhole')
+    #debug_start('jit-blackhole')
     metainterp_sd = metainterp.staticdata
     metainterp_sd.profiler.start_blackhole()
     nextbh = None
@@ -1488,4 +1506,4 @@
         _run_forever(firstbh, current_exc)
     finally:
         metainterp_sd.profiler.end_blackhole()
-        debug_stop('jit-blackhole')
+        #debug_stop('jit-blackhole')
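
The blackhole implementations of isconstant/isvirtual always answer False: once execution has fallen out of tracing there is nothing left to specialise on, so interpreter code written against these hints needs a correct general path. A hedged sketch of the intended usage pattern, with a local stand-in for the hint (the real helper presumably lives in pypy.rlib.jit, which is outside this excerpt):

    def isconstant(x):
        # stand-in for the JIT hint: in the blackhole interpreter (see
        # bhimpl_int_isconstant above) and in plain interpretation it is False
        return False

    def repeat(s, n):
        if isconstant(n) and n == 2:
            # a fast path the tracer could pick when n is a trace-time constant
            return s + s
        # general path; the blackhole interpreter always ends up here
        return s * n

    assert repeat("ab", 3) == "ababab"
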
diff --git a/pypy/jit/metainterp/heapcache.py b/pypy/jit/metainterp/heapcache.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/metainterp/heapcache.py
@@ -0,0 +1,210 @@
+from pypy.jit.metainterp.history import ConstInt
+from pypy.jit.metainterp.resoperation import rop
+
+
+class HeapCache(object):
+    def __init__(self):
+        self.reset()
+
+    def reset(self):
+        # contains boxes where the class is already known
+        self.known_class_boxes = {}
+        # store the boxes that contain newly allocated objects: this maps each
+        # box to a bool telling whether the object has escaped the trace
+        # (True means the box never escaped, False means it did escape); a
+        # box's presence in the mapping shows that it was allocated inside
+        # the trace
+        self.new_boxes = {}
+        # Tracks which boxes should be marked as escaped when the key box
+        # escapes.
+        self.dependencies = {}
+        # contains frame boxes that are not virtualizables
+        self.nonstandard_virtualizables = {}
+        # heap cache
+        # maps descrs to {from_box, to_box} dicts
+        self.heap_cache = {}
+        # heap array cache
+        # maps descrs to {index: {from_box: to_box}} dicts
+        self.heap_array_cache = {}
+        # cache the length of arrays
+        self.length_cache = {}
+
+    def invalidate_caches(self, opnum, descr, argboxes):
+        self.mark_escaped(opnum, argboxes)
+        self.clear_caches(opnum, descr, argboxes)
+
+    def mark_escaped(self, opnum, argboxes):
+        idx = 0
+        if opnum == rop.SETFIELD_GC:
+            assert len(argboxes) == 2
+            box, valuebox = argboxes
+            if self.is_unescaped(box) and self.is_unescaped(valuebox):
+                self.dependencies.setdefault(box, []).append(valuebox)
+            else:
+                self._escape(valuebox)
+        # GETFIELD_GC doesn't escape its argument
+        elif opnum != rop.GETFIELD_GC:
+            for box in argboxes:
+                # setarrayitem_gc doesn't escape its first argument
+                if not (idx == 0 and opnum in [rop.SETARRAYITEM_GC]):
+                    self._escape(box)
+                idx += 1
+
+    def _escape(self, box):
+        if box in self.new_boxes:
+            self.new_boxes[box] = False
+        if box in self.dependencies:
+            for dep in self.dependencies[box]:
+                self._escape(dep)
+            del self.dependencies[box]
+
+    def clear_caches(self, opnum, descr, argboxes):
+        if opnum == rop.SETFIELD_GC:
+            return
+        if opnum == rop.SETARRAYITEM_GC:
+            return
+        if opnum == rop.SETFIELD_RAW:
+            return
+        if opnum == rop.SETARRAYITEM_RAW:
+            return
+        if rop._OVF_FIRST <= opnum <= rop._OVF_LAST:
+            return
+        if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST:
+            return
+        if opnum == rop.CALL or opnum == rop.CALL_LOOPINVARIANT:
+            effectinfo = descr.get_extra_info()
+            ef = effectinfo.extraeffect
+            if ef == effectinfo.EF_LOOPINVARIANT or \
+               ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \
+               ef == effectinfo.EF_ELIDABLE_CAN_RAISE:
+                return
+            # A special case for ll_arraycopy, because it is so common, and its
+            # effects are so well defined.
+            elif effectinfo.oopspecindex == effectinfo.OS_ARRAYCOPY:
+                # The destination box
+                if argboxes[2] in self.new_boxes:
+                    # XXX: no descr here so we invalidate any of them, not just
+                    # of the correct type
+                    # XXX: in theory the indices of the copy could be looked at
+                    # as well
+                    for descr, cache in self.heap_array_cache.iteritems():
+                        for idx, cache in cache.iteritems():
+                            for frombox in cache.keys():
+                                if frombox not in self.new_boxes:
+                                    del cache[frombox]
+                    return
+
+        self.heap_cache.clear()
+        self.heap_array_cache.clear()
+
+    def is_class_known(self, box):
+        return box in self.known_class_boxes
+
+    def class_now_known(self, box):
+        self.known_class_boxes[box] = None
+
+    def is_nonstandard_virtualizable(self, box):
+        return box in self.nonstandard_virtualizables
+
+    def nonstandard_virtualizables_now_known(self, box):
+        self.nonstandard_virtualizables[box] = None
+
+    def is_unescaped(self, box):
+        return self.new_boxes.get(box, False)
+
+    def new(self, box):
+        self.new_boxes[box] = True
+
+    def new_array(self, box, lengthbox):
+        self.new(box)
+        self.arraylen_now_known(box, lengthbox)
+
+    def getfield(self, box, descr):
+        d = self.heap_cache.get(descr, None)
+        if d:
+            tobox = d.get(box, None)
+            if tobox:
+                return tobox
+        return None
+
+    def getfield_now_known(self, box, descr, fieldbox):
+        self.heap_cache.setdefault(descr, {})[box] = fieldbox
+
+    def setfield(self, box, descr, fieldbox):
+        d = self.heap_cache.get(descr, None)
+        new_d = self._do_write_with_aliasing(d, box, fieldbox)
+        self.heap_cache[descr] = new_d
+
+    def _do_write_with_aliasing(self, d, box, fieldbox):
+        # slightly subtle logic here
+        # a write to an arbitrary box: all other boxes can alias this one
+        if not d or box not in self.new_boxes:
+            # therefore we throw away the cache
+            return {box: fieldbox}
+        # the object we are writing to is freshly allocated
+        # only remove some boxes from the cache
+        new_d = {}
+        for frombox, tobox in d.iteritems():
+            # the other box is *also* freshly allocated
+            # therefore frombox and box *must* contain different objects
+            # thus we can keep it in the cache
+            if frombox in self.new_boxes:
+                new_d[frombox] = tobox
+        new_d[box] = fieldbox
+        return new_d
+
+    def getarrayitem(self, box, descr, indexbox):
+        if not isinstance(indexbox, ConstInt):
+            return
+        index = indexbox.getint()
+        cache = self.heap_array_cache.get(descr, None)
+        if cache:
+            indexcache = cache.get(index, None)
+            if indexcache is not None:
+                return indexcache.get(box, None)
+
+    def getarrayitem_now_known(self, box, descr, indexbox, valuebox):
+        if not isinstance(indexbox, ConstInt):
+            return
+        index = indexbox.getint()
+        cache = self.heap_array_cache.setdefault(descr, {})
+        indexcache = cache.get(index, None)
+        if indexcache is not None:
+            indexcache[box] = valuebox
+        else:
+            cache[index] = {box: valuebox}
+
+    def setarrayitem(self, box, descr, indexbox, valuebox):
+        if not isinstance(indexbox, ConstInt):
+            cache = self.heap_array_cache.get(descr, None)
+            if cache is not None:
+                cache.clear()
+            return
+        index = indexbox.getint()
+        cache = self.heap_array_cache.setdefault(descr, {})
+        indexcache = cache.get(index, None)
+        cache[index] = self._do_write_with_aliasing(indexcache, box, valuebox)
+
+    def arraylen(self, box):
+        return self.length_cache.get(box, None)
+
+    def arraylen_now_known(self, box, lengthbox):
+        self.length_cache[box] = lengthbox
+
+    def _replace_box(self, d, oldbox, newbox):
+        new_d = {}
+        for frombox, tobox in d.iteritems():
+            if frombox is oldbox:
+                frombox = newbox
+            if tobox is oldbox:
+                tobox = newbox
+            new_d[frombox] = tobox
+        return new_d
+
+    def replace_box(self, oldbox, newbox):
+        for descr, d in self.heap_cache.iteritems():
+            self.heap_cache[descr] = self._replace_box(d, oldbox, newbox)
+        for descr, d in self.heap_array_cache.iteritems():
+            for index, cache in d.iteritems():
+                d[index] = self._replace_box(cache, oldbox, newbox)
+        self.length_cache = self._replace_box(self.length_cache, oldbox, newbox)
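
The subtle part of the new cache is _do_write_with_aliasing(): a write through a box that is not known to be a fresh, unescaped allocation may alias anything, so all cached entries for that descr are dropped; a write through a trace-local allocation cannot alias other trace-local allocations, so entries keyed by those survive. A small self-contained illustration of that rule (plain strings stand in for boxes; this mirrors, but does not import, the class above):

    def write_with_aliasing(cache, box, fieldbox, new_boxes):
        # 'cache' maps target box -> cached field value for a single descr
        if not cache or box not in new_boxes:
            # the target may alias any cached box: throw the cache away
            return {box: fieldbox}
        # the target was allocated inside the trace, so it cannot alias other
        # trace-local allocations; keep exactly those entries
        kept = dict((b, v) for b, v in cache.items() if b in new_boxes)
        kept[box] = fieldbox
        return kept

    new_boxes = {'p_new': True}                       # allocated inside the trace
    cache = {'p_new': 'old_value', 'p_escaped': 'x'}
    # write through a possibly-escaped box: everything for this descr is dropped
    assert write_with_aliasing(cache, 'p_escaped', 'y', new_boxes) == {'p_escaped': 'y'}
    # write through a fresh box: only entries for other fresh boxes are kept
    assert write_with_aliasing(cache, 'p_new', 'z', new_boxes) == {'p_new': 'z'}
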
diff --git a/pypy/jit/metainterp/optimizeopt/heap.py b/pypy/jit/metainterp/optimizeopt/heap.py
--- a/pypy/jit/metainterp/optimizeopt/heap.py
+++ b/pypy/jit/metainterp/optimizeopt/heap.py
@@ -25,7 +25,7 @@
         #      'cached_fields'.
         #
         self._cached_fields = {}
-        self._cached_fields_getfield_op = {}        
+        self._cached_fields_getfield_op = {}
         self._lazy_setfield = None
         self._lazy_setfield_registered = False
 
@@ -37,6 +37,12 @@
             self.force_lazy_setfield(optheap)
             assert not self.possible_aliasing(optheap, structvalue)
         cached_fieldvalue = self._cached_fields.get(structvalue, None)
+
+        # Hack to ensure constants are imported from the preamble
+        if cached_fieldvalue and fieldvalue.is_constant(): 
+            optheap.optimizer.ensure_imported(cached_fieldvalue)
+            cached_fieldvalue = self._cached_fields.get(structvalue, None)
+
         if cached_fieldvalue is not fieldvalue:
             # common case: store the 'op' as lazy_setfield, and register
             # myself in the optheap's _lazy_setfields_and_arrayitems list
@@ -75,7 +81,7 @@
     def remember_field_value(self, structvalue, fieldvalue, getfield_op=None):
         assert self._lazy_setfield is None
         self._cached_fields[structvalue] = fieldvalue
-        self._cached_fields_getfield_op[structvalue] = getfield_op        
+        self._cached_fields_getfield_op[structvalue] = getfield_op
 
     def force_lazy_setfield(self, optheap, can_cache=True):
         op = self._lazy_setfield
@@ -132,9 +138,7 @@
                         result = newresult
                     getop = ResOperation(rop.GETFIELD_GC, [op.getarg(0)],
                                          result, op.getdescr())
-                    getop = shortboxes.add_potential(getop)
-                    self._cached_fields_getfield_op[structvalue] = getop
-                    self._cached_fields[structvalue] = optimizer.getvalue(result)
+                    shortboxes.add_potential(getop, synthetic=True)
                 elif op.result is not None:
                     shortboxes.add_potential(op)
 
@@ -163,7 +167,7 @@
 
     def new(self):
         return OptHeap()
-        
+
     def produce_potential_short_preamble_ops(self, sb):
         descrkeys = self.cached_fields.keys()
         if not we_are_translated():
diff --git a/pypy/jit/metainterp/optimizeopt/optimizer.py b/pypy/jit/metainterp/optimizeopt/optimizer.py
--- a/pypy/jit/metainterp/optimizeopt/optimizer.py
+++ b/pypy/jit/metainterp/optimizeopt/optimizer.py
@@ -10,6 +10,7 @@
 from pypy.jit.metainterp.typesystem import llhelper, oohelper
 from pypy.tool.pairtype import extendabletype
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
+from pypy.rlib.objectmodel import specialize
 
 LEVEL_UNKNOWN    = '\x00'
 LEVEL_NONNULL    = '\x01'
@@ -25,6 +26,9 @@
         self.descr = descr
         self.bound = bound
 
+    def clone(self):
+        return LenBound(self.mode, self.descr, self.bound.clone())
+
 class OptValue(object):
     __metaclass__ = extendabletype
     _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound', 'lenbound')
@@ -67,7 +71,7 @@
             guards.append(op)
         elif self.level == LEVEL_KNOWNCLASS:
             op = ResOperation(rop.GUARD_NONNULL, [box], None)
-            guards.append(op)            
+            guards.append(op)
             op = ResOperation(rop.GUARD_CLASS, [box, self.known_class], None)
             guards.append(op)
         else:
@@ -88,8 +92,27 @@
                     assert False
                 guards.append(op)
                 self.lenbound.bound.make_guards(lenbox, guards)
+        return guards
 
-        return guards
+    def import_from(self, other, optimizer):
+        assert self.level <= LEVEL_NONNULL
+        if other.level == LEVEL_CONSTANT:
+            self.make_constant(other.get_key_box())
+            optimizer.turned_constant(self)
+        elif other.level == LEVEL_KNOWNCLASS:
+            self.make_constant_class(other.known_class, -1)
+        else:
+            if other.level == LEVEL_NONNULL:
+                self.ensure_nonnull()
+            self.intbound.intersect(other.intbound)
+            if other.lenbound:
+                if self.lenbound:
+                    assert other.lenbound.mode == self.lenbound.mode
+                    assert other.lenbound.descr == self.lenbound.descr
+                    self.lenbound.bound.intersect(other.lenbound.bound)
+                else:
+                    self.lenbound = other.lenbound.clone()
+
 
     def force_box(self):
         return self.box
@@ -123,7 +146,7 @@
         assert isinstance(constbox, Const)
         self.box = constbox
         self.level = LEVEL_CONSTANT
-        
+
         if isinstance(constbox, ConstInt):
             val = constbox.getint()
             self.intbound = IntBound(val, val)
@@ -200,6 +223,9 @@
     def __init__(self, box):
         self.make_constant(box)
 
+    def __repr__(self):
+        return 'Constant(%r)' % (self.box,)
+
 CONST_0      = ConstInt(0)
 CONST_1      = ConstInt(1)
 CVAL_ZERO    = ConstantValue(CONST_0)
@@ -308,7 +334,6 @@
         self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd)
         self.bool_boxes = {}
         self.pure_operations = args_dict()
-        self.emitted_pure_operations = {}
         self.producer = {}
         self.pendingfields = []
         self.posponedop = None
@@ -316,12 +341,11 @@
         self.quasi_immutable_deps = None
         self.opaque_pointers = {}
         self.newoperations = []
-        self.emitting_dissabled = False
-        self.emitted_guards = 0        
         if loop is not None:
             self.call_pure_results = loop.call_pure_results
 
         self.set_optimizations(optimizations)
+        self.setup()
 
     def set_optimizations(self, optimizations):
         if optimizations:
@@ -348,23 +372,18 @@
         assert self.posponedop is None
 
     def new(self):
+        new = Optimizer(self.metainterp_sd, self.loop)
+        return self._new(new)
+
+    def _new(self, new):
         assert self.posponedop is None
-        new = Optimizer(self.metainterp_sd, self.loop)
         optimizations = [o.new() for o in self.optimizations]
         new.set_optimizations(optimizations)
         new.quasi_immutable_deps = self.quasi_immutable_deps
         return new
-        
+
     def produce_potential_short_preamble_ops(self, sb):
-        for op in self.emitted_pure_operations:
-            if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \
-               op.getopnum() == rop.STRGETITEM or \
-               op.getopnum() == rop.UNICODEGETITEM:
-                if not self.getvalue(op.getarg(1)).is_constant():
-                    continue
-            sb.add_potential(op)
-        for opt in self.optimizations:
-            opt.produce_potential_short_preamble_ops(sb)
+        raise NotImplementedError('This is implemented in unroll.UnrollableOptimizer')
 
     def turned_constant(self, value):
         for o in self.optimizations:
@@ -386,19 +405,26 @@
         else:
             return box
 
+    @specialize.argtype(0)
     def getvalue(self, box):
         box = self.getinterned(box)
         try:
             value = self.values[box]
         except KeyError:
             value = self.values[box] = OptValue(box)
+        self.ensure_imported(value)
         return value
 
+    def ensure_imported(self, value):
+        pass
+
+    @specialize.argtype(0)
     def get_constant_box(self, box):
         if isinstance(box, Const):
             return box
         try:
             value = self.values[box]
+            self.ensure_imported(value)
         except KeyError:
             return None
         if value.is_constant():
@@ -481,18 +507,22 @@
     def emit_operation(self, op):
         if op.returns_bool_result():
             self.bool_boxes[self.getvalue(op.result)] = None
-        if self.emitting_dissabled:
-            return
-        
+        self._emit_operation(op)
+
+    @specialize.argtype(0)
+    def _emit_operation(self, op):
         for i in range(op.numargs()):
             arg = op.getarg(i)
-            if arg in self.values:
-                box = self.values[arg].force_box()
-                op.setarg(i, box)
+            try:
+                value = self.values[arg]
+            except KeyError:
+                pass
+            else:
+                self.ensure_imported(value)
+                op.setarg(i, value.force_box())
         self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
         if op.is_guard():
             self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
-            self.emitted_guards += 1 # FIXME: can we reuse above counter?
             op = self.store_final_boxes_in_guard(op)
         elif op.can_raise():
             self.exception_might_have_happened = True
@@ -541,9 +571,10 @@
                 arg = value.get_key_box()
             args[i] = arg
         args[n] = ConstInt(op.getopnum())
-        args[n+1] = op.getdescr()
+        args[n + 1] = op.getdescr()
         return args
 
+    @specialize.argtype(0)
     def optimize_default(self, op):
         canfold = op.is_always_pure()
         if op.is_ovf():
@@ -579,13 +610,16 @@
                 return
             else:
                 self.pure_operations[args] = op
-                self.emitted_pure_operations[op] = True
+                self.remember_emitting_pure(op)
 
         # otherwise, the operation remains
         self.emit_operation(op)
         if nextop:
             self.emit_operation(nextop)
 
+    def remember_emitting_pure(self, op):
+        pass
+
     def constant_fold(self, op):
         argboxes = [self.get_constant_box(op.getarg(i))
                     for i in range(op.numargs())]
@@ -627,9 +661,9 @@
             arrayvalue = self.getvalue(op.getarg(0))
             arrayvalue.make_len_gt(MODE_UNICODE, op.getdescr(), indexvalue.box.getint())
         self.optimize_default(op)
-        
 
-    
+
+
 
 dispatch_opt = make_dispatcher_method(Optimizer, 'optimize_',
         default=Optimizer.optimize_default)
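
The new import_from() above merges what the preamble learned about a value into the corresponding value of the peeled loop: constants and known classes are adopted outright, otherwise nullness is propagated and the integer and length bounds are intersected. A reduced sketch of the bound-intersection step, with a toy interval in place of the real IntBound/LenBound classes:

    class Interval(object):
        # toy stand-in for IntBound: a closed integer interval
        def __init__(self, lo, hi):
            self.lo, self.hi = lo, hi

        def intersect(self, other):
            # keep only what both sides have proven
            self.lo = max(self.lo, other.lo)
            self.hi = min(self.hi, other.hi)

    loop_bound = Interval(-100, 100)      # what the peeled loop knows so far
    preamble_bound = Interval(0, 50)      # what the preamble proved
    loop_bound.intersect(preamble_bound)  # what import_from() does for intbound
    assert (loop_bound.lo, loop_bound.hi) == (0, 50)
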
diff --git a/pypy/jit/metainterp/optimizeopt/rewrite.py b/pypy/jit/metainterp/optimizeopt/rewrite.py
--- a/pypy/jit/metainterp/optimizeopt/rewrite.py
+++ b/pypy/jit/metainterp/optimizeopt/rewrite.py
@@ -19,7 +19,7 @@
 
     def new(self):
         return OptRewrite()
-        
+
     def produce_potential_short_preamble_ops(self, sb):
         for op in self.loop_invariant_producer.values():
             sb.add_potential(op)
@@ -231,6 +231,17 @@
             else:
                 self.make_constant(op.result, result)
                 return
+
+        args = self.optimizer.make_args_key(op)
+        oldop = self.optimizer.pure_operations.get(args, None)
+        if oldop is not None and oldop.getdescr() is op.getdescr():
+            assert oldop.getopnum() == op.getopnum()
+            self.make_equal_to(op.result, self.getvalue(oldop.result))
+            return
+        else:
+            self.optimizer.pure_operations[args] = op
+            self.optimizer.remember_emitting_pure(op)
+
         # replace CALL_PURE with just CALL
         args = op.getarglist()
         self.emit_operation(ResOperation(rop.CALL, args, op.result,
@@ -351,7 +362,7 @@
         # expects a compile-time constant
         assert isinstance(arg, Const)
         key = make_hashable_int(arg.getint())
-        
+
         resvalue = self.loop_invariant_results.get(key, None)
         if resvalue is not None:
             self.make_equal_to(op.result, resvalue)
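
The CALL_PURE block added above memoises pure calls: if a pure call with the same argument key and the same descr was already recorded, its result box is reused and nothing is emitted; otherwise the operation is remembered and then residualised as a plain CALL. A minimal sketch of that memoisation pattern (ordinary dict keys instead of the real args_dict machinery):

    pure_results = {}       # (opname, args, descr) -> result of the earlier call
    calls_made = []

    def call_pure(key, perform):
        """Reuse the result of an identical earlier pure call, mirroring the
        pure_operations lookup above; otherwise perform and remember it."""
        if key in pure_results:
            return pure_results[key]
        result = perform()                    # stands in for emitting the CALL
        pure_results[key] = result
        return result

    def do_call():
        calls_made.append(1)
        return 42

    key = ('call_pure', ('p1', 3), 'plaincalldescr')
    assert call_pure(key, do_call) == 42
    assert call_pure(key, do_call) == 42      # folded away: no second real call
    assert len(calls_made) == 1
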
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizebasic.py
@@ -4711,6 +4711,83 @@
         """
         self.optimize_loop(ops, expected)
 
+    def test_empty_copystrunicontent(self):
+        ops = """
+        [p0, p1, i0, i2, i3]
+        i4 = int_eq(i3, 0)
+        guard_true(i4) []
+        copystrcontent(p0, p1, i0, i2, i3)
+        jump(p0, p1, i0, i2, i3)
+        """
+        expected = """
+        [p0, p1, i0, i2, i3]
+        i4 = int_eq(i3, 0)
+        guard_true(i4) []
+        jump(p0, p1, i0, i2, 0)
+        """
+        self.optimize_strunicode_loop(ops, expected)
+
+    def test_empty_copystrunicontent_virtual(self):
+        ops = """
+        [p0]
+        p1 = newstr(23)
+        copystrcontent(p0, p1, 0, 0, 0)
+        jump(p0)
+        """
+        expected = """
+        [p0]
+        jump(p0)
+        """
+        self.optimize_strunicode_loop(ops, expected)
+
+    def test_forced_virtuals_aliasing(self):
+        ops = """
+        [i0, i1]
+        p0 = new(descr=ssize)
+        p1 = new(descr=ssize)
+        escape(p0)
+        escape(p1)
+        setfield_gc(p0, i0, descr=adescr)
+        setfield_gc(p1, i1, descr=adescr)
+        i2 = getfield_gc(p0, descr=adescr)
+        jump(i2, i2)
+        """
+        expected = """
+        [i0, i1]
+        p0 = new(descr=ssize)
+        escape(p0)
+        p1 = new(descr=ssize)
+        escape(p1)
+        setfield_gc(p0, i0, descr=adescr)
+        setfield_gc(p1, i1, descr=adescr)
+        jump(i0, i0)
+        """
+        py.test.skip("not implemented")
+        # setfields on things that used to be virtual still can't alias each
+        # other
+        self.optimize_loop(ops, expected)
+
+    def test_plain_virtual_string_copy_content(self):
+        ops = """
+        []
+        p0 = newstr(6)
+        copystrcontent(s"hello!", p0, 0, 0, 6)
+        p1 = call(0, p0, s"abc123", descr=strconcatdescr)
+        i0 = strgetitem(p1, 0)
+        finish(i0)
+        """
+        expected = """
+        []
+        p0 = newstr(6)
+        copystrcontent(s"hello!", p0, 0, 0, 6)
+        p1 = newstr(12)
+        copystrcontent(p0, p1, 0, 0, 6)
+        copystrcontent(s"abc123", p1, 0, 6, 6)
+        i0 = strgetitem(p1, 0)
+        finish(i0)
+        """
+        self.optimize_strunicode_loop(ops, expected)
+
 
 class TestLLtype(BaseTestOptimizeBasic, LLtypeMixin):
     pass
diff --git a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
--- a/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
+++ b/pypy/jit/metainterp/optimizeopt/test/test_optimizeopt.py
@@ -102,9 +102,9 @@
             print "Short Preamble:"
             short = loop.preamble.token.short_preamble[0]
             print short.inputargs
-            print '\n'.join([str(o) for o in short.operations])        
+            print '\n'.join([str(o) for o in short.operations])
             print
-        
+
         assert expected != "crash!", "should have raised an exception"
         self.assert_equal(loop, expected)
         if expected_preamble:
@@ -113,7 +113,7 @@
         if expected_short:
             self.assert_equal(short, expected_short,
                               text_right='expected short preamble')
-            
+
         return loop
 
 class OptimizeOptTest(BaseTestWithUnroll):
@@ -472,7 +472,13 @@
         [i0]
         jump(i0)
         """
-        self.optimize_loop(ops, expected, preamble)
+        short = """
+        [i0]
+        i1 = int_is_true(i0)
+        guard_value(i1, 1) []
+        jump(i0)
+        """
+        self.optimize_loop(ops, expected, preamble, expected_short=short)
 
     def test_bound_int_is_true(self):
         ops = """
@@ -860,10 +866,10 @@
         setfield_gc(p3sub, i1, descr=valuedescr)
         setfield_gc(p1, p3sub, descr=nextdescr)
         # XXX: We get two extra operations here because the setfield
-        #      above is the result of forcing p1 and thus not 
+        #      above is the result of forcing p1 and thus not
         #      registered with the heap optimizer. I've marked tests
         #      below with VIRTUALHEAP if they suffer from this issue
-        p3sub2 = getfield_gc(p1, descr=nextdescr) 
+        p3sub2 = getfield_gc(p1, descr=nextdescr)
         guard_nonnull_class(p3sub2, ConstClass(node_vtable2)) []
         jump(i1, p1, p3sub2)
         """
@@ -1405,7 +1411,7 @@
         guard_isnull(p18) [p0, p8]
         p31 = new(descr=ssize)
         p35 = new_with_vtable(ConstClass(node_vtable))
-        setfield_gc(p35, p31, descr=valuedescr)        
+        setfield_gc(p35, p31, descr=valuedescr)
         jump(p0, p35)
         """
         expected = """
@@ -1420,7 +1426,7 @@
         guard_isnull(p18) [p0, p8]
         p31 = new(descr=ssize)
         p35 = new_with_vtable(ConstClass(node_vtable))
-        setfield_gc(p35, p31, descr=valuedescr)        
+        setfield_gc(p35, p31, descr=valuedescr)
         jump(p0, p35, p19, p18)
         """
         expected = """
@@ -1429,7 +1435,7 @@
         jump(p0, NULL)
         """
         self.optimize_loop(ops, expected)
-        
+
     def test_varray_1(self):
         ops = """
         [i1]
@@ -2175,7 +2181,7 @@
         jump(p1)
         """
         self.optimize_loop(ops, expected)
-        
+
     def test_duplicate_getarrayitem_2(self):
         ops = """
         [p1, i0]
@@ -2193,7 +2199,7 @@
         jump(p1, i7, i6)
         """
         self.optimize_loop(ops, expected)
-        
+
     def test_duplicate_getarrayitem_after_setarrayitem_1(self):
         ops = """
         [p1, p2]
@@ -2806,14 +2812,14 @@
         guard_no_overflow() []
         i3b = int_is_true(i3)
         guard_true(i3b) []
-        setfield_gc(p1, i1, descr=valuedescr)        
+        setfield_gc(p1, i1, descr=valuedescr)
         escape(i3)
         escape(i3)
         jump(i1, p1, i3)
         """
         expected = """
         [i1, p1, i3]
-        setfield_gc(p1, i1, descr=valuedescr)        
+        setfield_gc(p1, i1, descr=valuedescr)
         escape(i3)
         escape(i3)
         jump(i1, p1, i3)
@@ -2824,7 +2830,7 @@
         ops = """
         [p8, p11, i24]
         p26 = new_with_vtable(ConstClass(node_vtable))
-        setfield_gc(p26, i24, descr=adescr)        
+        setfield_gc(p26, i24, descr=adescr)
         i34 = getfield_gc_pure(p11, descr=valuedescr)
         i35 = getfield_gc_pure(p26, descr=adescr)
         i36 = int_add_ovf(i34, i35)
@@ -2833,10 +2839,10 @@
         """
         expected = """
         [p8, p11, i26]
-        jump(p8, p11, i26)        
-        """
-        self.optimize_loop(ops, expected)
-        
+        jump(p8, p11, i26)
+        """
+        self.optimize_loop(ops, expected)
+
     def test_ovf_guard_in_short_preamble2(self):
         ops = """
         [p8, p11, p12]
@@ -3185,13 +3191,18 @@
         jump(p1, i4, i3)
         '''
         expected = '''
+        [p1, i4, i3, i5]
+        setfield_gc(p1, i5, descr=valuedescr)
+        jump(p1, i3, i5, i5)
+        '''
+        preamble = '''
         [p1, i1, i4]
         setfield_gc(p1, i1, descr=valuedescr)
         i3 = call(p1, descr=plaincalldescr)
         setfield_gc(p1, i3, descr=valuedescr)
-        jump(p1, i4, i3)
+        jump(p1, i4, i3, i3)
         '''
-        self.optimize_loop(ops, expected, expected)
+        self.optimize_loop(ops, expected, preamble)
 
     def test_call_pure_invalidates_heap_knowledge(self):
         # CALL_PURE should still force the setfield_gc() to occur before it
@@ -3203,21 +3214,20 @@
         jump(p1, i4, i3)
         '''
         expected = '''
+        [p1, i4, i3, i5]
+        setfield_gc(p1, i4, descr=valuedescr)
+        jump(p1, i3, i5, i5)
+        '''
+        preamble = '''
         [p1, i1, i4]
         setfield_gc(p1, i1, descr=valuedescr)
         i3 = call(p1, descr=plaincalldescr)
         setfield_gc(p1, i1, descr=valuedescr)
-        jump(p1, i4, i3)
+        jump(p1, i4, i3, i3)
         '''
-        self.optimize_loop(ops, expected, expected)
+        self.optimize_loop(ops, expected, preamble)
 
     def test_call_pure_constant_folding(self):
-        # CALL_PURE is not marked as is_always_pure(), because it is wrong
-        # to call the function arbitrary many times at arbitrary points in
-        # time.  Check that it is either constant-folded (and replaced by
-        # the result of the call, recorded as the first arg), or turned into
-        # a regular CALL.
-        # XXX can this test be improved with unrolling?
         arg_consts = [ConstInt(i) for i in (123456, 4, 5, 6)]
         call_pure_results = {tuple(arg_consts): ConstInt(42)}
         ops = '''
@@ -3233,14 +3243,13 @@
         escape(i1)
         escape(i2)
         i4 = call(123456, 4, i0, 6, descr=plaincalldescr)
-        jump(i0, i4)
+        jump(i0, i4, i4)
         '''
         expected = '''
-        [i0, i2]
+        [i0, i4, i5]
         escape(42)
-        escape(i2)
-        i4 = call(123456, 4, i0, 6, descr=plaincalldescr)
-        jump(i0, i4)
+        escape(i4)
+        jump(i0, i5, i5)
         '''
         self.optimize_loop(ops, expected, preamble, call_pure_results)
 
@@ -3264,18 +3273,43 @@
         escape(i2)
         i4 = call(123456, 4, i0, 6, descr=plaincalldescr)
         guard_no_exception() []
-        jump(i0, i4)
+        jump(i0, i4, i4)
         '''
         expected = '''
-        [i0, i2]
+        [i0, i2, i3]
         escape(42)
         escape(i2)
-        i4 = call(123456, 4, i0, 6, descr=plaincalldescr)
-        guard_no_exception() []
-        jump(i0, i4)
+        jump(i0, i3, i3)
         '''
         self.optimize_loop(ops, expected, preamble, call_pure_results)
 
+    def test_call_pure_returning_virtual(self):
+        # XXX: This kind of loop invariant call_pure will be forced
+        #      both in the preamble and in the peeled loop
+        ops = '''
+        [p1, i1, i2]
+        p2 = call_pure(0, p1, i1, i2, descr=strslicedescr)
+        escape(p2)
+        jump(p1, i1, i2)
+        '''
+        preamble = '''
+        [p1, i1, i2]
+        i6 = int_sub(i2, i1)
+        p2 = newstr(i6)
+        copystrcontent(p1, p2, i1, 0, i6)
+        escape(p2)
+        jump(p1, i1, i2, i6)
+        '''
+        expected = '''
+        [p1, i1, i2, i6]
+        p2 = newstr(i6)
+        copystrcontent(p1, p2, i1, 0, i6)
+        escape(p2)
+        jump(p1, i1, i2, i6)
+        '''
+        self.optimize_loop(ops, expected, preamble)
+        
+
     # ----------
 
     def test_vref_nonvirtual_nonescape(self):
@@ -5144,14 +5178,14 @@
         [i0, i1, i10, i11, i2, i3, i4]
         escape(i2)
         escape(i3)
-        escape(i4)        
+        escape(i4)
         i24 = int_mul_ovf(i10, i11)
         guard_no_overflow() []
         i23 = int_sub_ovf(i10, i11)
         guard_no_overflow() []
         i22 = int_add_ovf(i10, i11)
         guard_no_overflow() []
-        jump(i0, i1, i10, i11, i2, i3, i4) 
+        jump(i0, i1, i10, i11, i2, i3, i4)
         """
         self.optimize_loop(ops, expected)
 
@@ -5360,6 +5394,8 @@
         """
         self.optimize_strunicode_loop(ops, expected, expected)
 
+    # XXX Should some of the call's below now be call_pure?
+
     def test_str_concat_1(self):
         ops = """
         [p1, p2]
@@ -5693,14 +5729,14 @@
         ops = """
         [p0, i0]
         i1 = unicodegetitem(p0, i0)
-        i10 = unicodegetitem(p0, i0)        
+        i10 = unicodegetitem(p0, i0)
         i2 = int_lt(i1, 0)
         guard_false(i2) []
         jump(p0, i0)
         """
         expected = """
         [p0, i0]
-        i1 = unicodegetitem(p0, i0)        
+        i1 = unicodegetitem(p0, i0)
         jump(p0, i0)
         """
         self.optimize_loop(ops, expected)
@@ -5859,7 +5895,7 @@
         """
         preamble = """
         [p1, i1, i2, p3]
-        guard_nonnull(p3) []        
+        guard_nonnull(p3) []
         i4 = int_sub(i2, i1)
         i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr)
         escape(i0)
@@ -6468,7 +6504,7 @@
         setfield_gc(p3, i1, descr=adescr)
         setfield_gc(p3, i2, descr=bdescr)
         i5 = int_gt(ii, 42)
-        guard_true(i5) []        
+        guard_true(i5) []
         jump(p0, p1, p3, ii2, ii, i1, i2)
         """
         self.optimize_loop(ops, expected)
@@ -6494,7 +6530,7 @@
         p1 = getfield_gc(p0, descr=nextdescr)
         guard_nonnull_class(p1, ConstClass(node_vtable)) []
         p2 = getfield_gc(p1, descr=nextdescr)
-        guard_nonnull_class(p2, ConstClass(node_vtable)) []        
+        guard_nonnull_class(p2, ConstClass(node_vtable)) []
         jump(p0)
         """
         expected = """
@@ -6508,11 +6544,11 @@
         guard_class(p1, ConstClass(node_vtable)) []
         p2 = getfield_gc(p1, descr=nextdescr)
         guard_nonnull(p2) []
-        guard_class(p2, ConstClass(node_vtable)) []        
+        guard_class(p2, ConstClass(node_vtable)) []
         jump(p0)
         """
         self.optimize_loop(ops, expected, expected_short=short)
-        
+
     def test_forced_virtual_pure_getfield(self):
         ops = """
         [p0]
@@ -6576,7 +6612,7 @@
         jump(p1, i2)
         """
         self.optimize_loop(ops, expected)
-        
+
     def test_loopinvariant_strlen(self):
         ops = """
         [p9]
@@ -6709,7 +6745,7 @@
         [p0, p1]
         p2 = new_with_vtable(ConstClass(node_vtable))
         p3 = new_with_vtable(ConstClass(node_vtable))
-        setfield_gc(p2, p3, descr=nextdescr) 
+        setfield_gc(p2, p3, descr=nextdescr)
         jump(p2, p3)
         """
         expected = """
@@ -6728,7 +6764,7 @@
         jump(p2, i2)
         """
         expected = """
-        [p1]        
+        [p1]
         p2 = getarrayitem_gc(p1, 7, descr=<GcPtrArrayDescr>)
         i1 = arraylen_gc(p1)
         jump(p2)
@@ -6769,8 +6805,8 @@
         jump(p0, p2, p1)
         """
         self.optimize_loop(ops, expected, expected_short=short)
-        
-        
+
+
     def test_loopinvariant_constant_strgetitem(self):
         ops = """
         [p0]
@@ -6824,11 +6860,11 @@
         expected = """
         [p0, i22, p1]
         call(i22, descr=nonwritedescr)
-        i3 = unicodelen(p1) # Should be killed by backend        
+        i3 = unicodelen(p1) # Should be killed by backend
         jump(p0, i22, p1)
         """
         self.optimize_loop(ops, expected, expected_short=short)
-        
+
     def test_propagate_virtual_arryalen(self):
         ops = """
         [p0]
@@ -6897,7 +6933,7 @@
         [p0, p1, p10, p11]
         i1 = arraylen_gc(p10, descr=arraydescr)
         getarrayitem_gc(p11, 1, descr=arraydescr)
-        call(i1, descr=nonwritedescr)        
+        call(i1, descr=nonwritedescr)
         jump(p1, p0, p11, p10)
         """
         self.optimize_loop(ops, expected)
@@ -6906,20 +6942,20 @@
         ops = """
         [p5]
         i10 = getfield_gc(p5, descr=valuedescr)
-        call(i10, descr=nonwritedescr) 
+        call(i10, descr=nonwritedescr)
         setfield_gc(p5, 1, descr=valuedescr)
         jump(p5)
         """
         preamble = """
         [p5]
         i10 = getfield_gc(p5, descr=valuedescr)
-        call(i10, descr=nonwritedescr) 
+        call(i10, descr=nonwritedescr)
         setfield_gc(p5, 1, descr=valuedescr)
         jump(p5)
         """
         expected = """
         [p5]
-        call(1, descr=nonwritedescr) 
+        call(1, descr=nonwritedescr)
         jump(p5)
         """
         self.optimize_loop(ops, expected, preamble)
@@ -6957,7 +6993,7 @@
         [p9]
         call_assembler(0, descr=asmdescr)
         i18 = getfield_gc(p9, descr=valuedescr)
-        guard_value(i18, 0) []        
+        guard_value(i18, 0) []
         jump(p9)
         """
         self.optimize_loop(ops, expected)
@@ -6986,17 +7022,37 @@
         i10 = getfield_gc(p5, descr=valuedescr)
         i11 = getfield_gc(p6, descr=nextdescr)
         call(i10, i11, descr=nonwritedescr)
-        setfield_gc(p6, i10, descr=nextdescr)        
+        setfield_gc(p6, i10, descr=nextdescr)
         jump(p5, p6)
         """
         expected = """
         [p5, p6, i10, i11]
         call(i10, i11, descr=nonwritedescr)
-        setfield_gc(p6, i10, descr=nextdescr)        
+        setfield_gc(p6, i10, descr=nextdescr)
         jump(p5, p6, i10, i10)
         """
         self.optimize_loop(ops, expected)
-        
+
+    def test_cached_pure_func_of_equal_fields(self):
+        ops = """
+        [p5, p6]
+        i10 = getfield_gc(p5, descr=valuedescr)
+        i11 = getfield_gc(p6, descr=nextdescr)
+        i12 = int_add(i10, 7)
+        i13 = int_add(i11, 7)
+        call(i12, i13, descr=nonwritedescr)
+        setfield_gc(p6, i10, descr=nextdescr)
+        jump(p5, p6)
+        """
+        expected = """
+        [p5, p6, i14, i12, i10]
+        i13 = int_add(i14, 7)
+        call(i12, i13, descr=nonwritedescr)
+        setfield_gc(p6, i10, descr=nextdescr)
+        jump(p5, p6, i10, i12, i10)
+        """
+        self.optimize_loop(ops, expected)
+
     def test_forced_counter(self):
         # XXX: VIRTUALHEAP (see above)
         py.test.skip("would be fixed by make heap optimizer aware of virtual setfields")
@@ -7086,8 +7142,94 @@
         """
         self.optimize_loop(ops, expected)
 
-        
+    def test_import_constants_when_folding_pure_operations(self):
+        ops = """
+        [p0]
+        f1 = getfield_gc(p0, descr=valuedescr)
+        f2 = float_abs(f1)
+        call(7.0, descr=nonwritedescr)
+        setfield_gc(p0, -7.0, descr=valuedescr)
+        jump(p0)
+        """
+        expected = """
+        [p0]
+        call(7.0, descr=nonwritedescr)
+        jump(p0)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_exploding_duplicatipon(self):
+        ops = """
+        [i1, i2]
+        i3 = int_add(i1, i1)
+        i4 = int_add(i3, i3)
+        i5 = int_add(i4, i4)
+        i6 = int_add(i5, i5)
+        call(i6, descr=nonwritedescr)
+        jump(i1, i3)
+        """
+        expected = """
+        [i1, i2, i6, i3]
+        call(i6, descr=nonwritedescr)
+        jump(i1, i3, i6, i3)
+        """
+        short = """
+        [i1, i2]
+        i3 = int_add(i1, i1)
+        i4 = int_add(i3, i3)
+        i5 = int_add(i4, i4)
+        i6 = int_add(i5, i5)
+        jump(i1, i2, i6, i3)
+        """
+        self.optimize_loop(ops, expected, expected_short=short)
+
+    def test_prioritize_getfield1(self):
+        ops = """
+        [p1, p2]
+        i1 = getfield_gc(p1, descr=valuedescr)
+        setfield_gc(p2, i1, descr=nextdescr)
+        i2 = int_neg(i1)
+        call(i2, descr=nonwritedescr)
+        jump(p1, p2)
+        """
+        expected = """
+        [p1, p2, i2, i1]
+        call(i2, descr=nonwritedescr)
+        setfield_gc(p2, i1, descr=nextdescr)
+        jump(p1, p2, i2, i1)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_prioritize_getfield2(self):
+        # Same as previous, but with descrs interchanged which means
+        # that the getfield is discovered first when looking for
+        # potential short boxes during tests
+        ops = """
+        [p1, p2]
+        i1 = getfield_gc(p1, descr=nextdescr)
+        setfield_gc(p2, i1, descr=valuedescr)
+        i2 = int_neg(i1)
+        call(i2, descr=nonwritedescr)
+        jump(p1, p2)
+        """
+        expected = """
+        [p1, p2, i2, i1]
+        call(i2, descr=nonwritedescr)
+        setfield_gc(p2, i1, descr=valuedescr)
+        jump(p1, p2, i2, i1)
+        """
+        self.optimize_loop(ops, expected)
+
+    def test_setarrayitem_followed_by_arraycopy(self):
+        ops = """
+        [p1, p2]
+        setarrayitem_gc(p1, 2, 10, descr=arraydescr)
+        setarrayitem_gc(p2, 3, 13, descr=arraydescr)        
+        call(0, p1, p2, 0, 0, 10, descr=arraycopydescr)
+        jump(p1, p2)
+        """
+        self.optimize_loop(ops, ops)
 
 class TestLLtype(OptimizeOptTest, LLtypeMixin):
     pass
-        
+
diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -70,6 +70,47 @@
         self.snapshot_map[snapshot] = new_snapshot
         return new_snapshot
 
+class UnrollableOptimizer(Optimizer):
+    def setup(self):
+        self.importable_values = {}
+        self.emitting_dissabled = False
+        self.emitted_guards = 0
+        self.emitted_pure_operations = {}
+
+    def ensure_imported(self, value):
+        if not self.emitting_dissabled and value in self.importable_values:
+            imp = self.importable_values[value]
+            del self.importable_values[value]
+            imp.import_value(value)
+
+    def emit_operation(self, op):
+        if op.returns_bool_result():
+            self.bool_boxes[self.getvalue(op.result)] = None
+        if self.emitting_dissabled:
+            return
+        if op.is_guard():
+            self.emitted_guards += 1 # FIXME: can we use a counter in self._emit_operation?
+        self._emit_operation(op)
+
+    def new(self):
+        new = UnrollableOptimizer(self.metainterp_sd, self.loop)
+        return self._new(new)
+
+    def remember_emitting_pure(self, op):
+        self.emitted_pure_operations[op] = True
+
+    def produce_potential_short_preamble_ops(self, sb):
+        for op in self.emitted_pure_operations:
+            if op.getopnum() == rop.GETARRAYITEM_GC_PURE or \
+               op.getopnum() == rop.STRGETITEM or \
+               op.getopnum() == rop.UNICODEGETITEM:
+                if not self.getvalue(op.getarg(1)).is_constant():
+                    continue
+            sb.add_potential(op)
+        for opt in self.optimizations:
+            opt.produce_potential_short_preamble_ops(sb)
+
+
 
 class UnrollOptimizer(Optimization):
     """Unroll the loop into two iterations. The first one will
@@ -77,7 +118,7 @@
     distinction anymore)"""
 
     def __init__(self, metainterp_sd, loop, optimizations):
-        self.optimizer = Optimizer(metainterp_sd, loop, optimizations)
+        self.optimizer = UnrollableOptimizer(metainterp_sd, loop, optimizations)
         self.cloned_operations = []
         for op in self.optimizer.loop.operations:
             newop = op.clone()
@@ -150,6 +191,7 @@
                 args = ", ".join([logops.repr_of_arg(arg) for arg in short_inputargs])
                 debug_print('short inputargs: ' + args)
                 self.short_boxes.debug_print(logops)
+                
 
             # Force virtuals among the jump_args of the preamble to get the
             # operations needed to setup the proper state of those virtuals
@@ -161,8 +203,9 @@
                 if box in seen:
                     continue
                 seen[box] = True
-                value = preamble_optimizer.getvalue(box)
-                inputarg_setup_ops.extend(value.make_guards(box))
+                preamble_value = preamble_optimizer.getvalue(box)
+                value = self.optimizer.getvalue(box)
+                value.import_from(preamble_value, self.optimizer)
             for box in short_inputargs:
                 if box in seen:
                     continue
@@ -181,23 +224,17 @@
             for op in self.short_boxes.operations():
                 self.ensure_short_op_emitted(op, self.optimizer, seen)
                 if op and op.result:
-                    # The order of these guards is not important as 
-                    # self.optimizer.emitting_dissabled is False
-                    value = preamble_optimizer.getvalue(op.result)
-                    for guard in value.make_guards(op.result):
-                        self.optimizer.send_extra_operation(guard)
+                    preamble_value = preamble_optimizer.getvalue(op.result)
+                    value = self.optimizer.getvalue(op.result)
+                    if not value.is_virtual():
+                        imp = ValueImporter(self, preamble_value, op)
+                        self.optimizer.importable_values[value] = imp
                     newresult = self.optimizer.getvalue(op.result).get_key_box()
                     if newresult is not op.result:
                         self.short_boxes.alias(newresult, op.result)
             self.optimizer.flush()
             self.optimizer.emitting_dissabled = False
 
-            # XXX Hack to prevent the arraylen/strlen/unicodelen ops generated
-            #     by value.make_guards() from ending up in pure_operations
-            for key, op in self.optimizer.pure_operations.items():
-                if not self.short_boxes.has_producer(op.result):
-                    del self.optimizer.pure_operations[key]
-
             initial_inputargs_len = len(inputargs)
             self.inliner = Inliner(loop.inputargs, jump_args)
 
@@ -276,16 +313,11 @@
 
         short_jumpargs = inputargs[:]
 
-        short = []
-        short_seen = {}
+        short = self.short = []
+        short_seen = self.short_seen = {}
         for box, const in self.constant_inputargs.items():
             short_seen[box] = True
 
-        for op in self.short_boxes.operations():
-            if op is not None:
-                if len(self.getvalue(op.result).make_guards(op.result)) > 0:
-                    self.add_op_to_short(op, short, short_seen, False, True)
-
         # This loop is equivalent to the main optimization loop in
         # Optimizer.propagate_all_forward
         jumpop = None
@@ -380,7 +412,7 @@
         if op.is_ovf():
             guard = ResOperation(rop.GUARD_NO_OVERFLOW, [], None)
             optimizer.send_extra_operation(guard)
-        
+
     def add_op_to_short(self, op, short, short_seen, emit=True, guards_needed=False):
         if op is None:
             return None
@@ -536,6 +568,13 @@
                         loop_token.failed_states.append(virtual_state)
         self.emit_operation(op)
 
+class ValueImporter(object):
+    def __init__(self, unroll, value, op):
+        self.unroll = unroll
+        self.preamble_value = value
+        self.op = op
 
-
-
+    def import_value(self, value):
+        value.import_from(self.preamble_value, self.unroll.optimizer)
+        self.unroll.add_op_to_short(self.op, self.unroll.short, self.unroll.short_seen, False, True)
+
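
The UnrollableOptimizer/ValueImporter pair above implements a lazy import scheme: instead of emitting guards for every preamble value up front, each value that might be needed in the peeled loop gets a ValueImporter registered in importable_values, and ensure_imported() materializes it (copying the preamble state and appending the producing operation to the short preamble) only when the value is actually used. A minimal sketch of that pattern, using made-up stand-in classes rather than the real OptValue/ShortBoxes machinery:

    class Value(object):
        # Stand-in for OptValue: just a bag of known facts.
        def __init__(self, **facts):
            self.facts = dict(facts)

    class LazyImporter(object):
        # Stand-in for ValueImporter: remembers the preamble value and a
        # callback, and materializes both only on first use.
        def __init__(self, preamble_value, on_import):
            self.preamble_value = preamble_value
            self.on_import = on_import

        def import_value(self, value):
            value.facts.update(self.preamble_value.facts)
            self.on_import()

    class LazyOptimizer(object):
        def __init__(self):
            self.importable_values = {}          # Value -> LazyImporter

        def ensure_imported(self, value):
            imp = self.importable_values.pop(value, None)
            if imp is not None:                  # import at most once
                imp.import_value(value)

    short_preamble = []
    preamble_v = Value(known_class='W_IntObject')
    loop_v = Value()
    opt = LazyOptimizer()
    opt.importable_values[loop_v] = LazyImporter(
        preamble_v, lambda: short_preamble.append('getfield_gc(p0)'))
    opt.ensure_imported(loop_v)                  # first real use of loop_v
    assert loop_v.facts['known_class'] == 'W_IntObject'
    assert short_preamble == ['getfield_gc(p0)']

The point of the indirection is that values the peeled loop never touches are never imported and so never lengthen the short preamble.
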
diff --git a/pypy/jit/metainterp/optimizeopt/virtualize.py b/pypy/jit/metainterp/optimizeopt/virtualize.py
--- a/pypy/jit/metainterp/optimizeopt/virtualize.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualize.py
@@ -58,6 +58,9 @@
     def _really_force(self):
         raise NotImplementedError("abstract base")
 
+    def import_from(self, other, optimizer):
+        raise NotImplementedError("should not be called at this level")
+
 def get_fielddescrlist_cache(cpu):
     if not hasattr(cpu, '_optimizeopt_fielddescrlist_cache'):
         result = descrlist_dict()
diff --git a/pypy/jit/metainterp/optimizeopt/virtualstate.py b/pypy/jit/metainterp/optimizeopt/virtualstate.py
--- a/pypy/jit/metainterp/optimizeopt/virtualstate.py
+++ b/pypy/jit/metainterp/optimizeopt/virtualstate.py
@@ -12,6 +12,7 @@
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
 from pypy.rlib.objectmodel import we_are_translated
+import os
 
 class AbstractVirtualStateInfo(resume.AbstractVirtualInfo):
     position = -1
@@ -461,8 +462,10 @@
 class ShortBoxes(object):
     def __init__(self, optimizer, surviving_boxes):
         self.potential_ops = {}
-        self.duplicates = {}
+        self.alternatives = {}
+        self.synthetic = {}
         self.aliases = {}
+        self.rename = {}
         self.optimizer = optimizer
         for box in surviving_boxes:
             self.potential_ops[box] = None
@@ -476,33 +479,81 @@
             except BoxNotProducable:
                 pass
 
+    def prioritized_alternatives(self, box):
+        if box not in self.alternatives:
+            return [self.potential_ops[box]]
+        alts = self.alternatives[box]
+        hi, lo = 0, len(alts) - 1
+        while hi < lo:
+            if alts[lo] is None: # Inputarg, lowest priority
+                alts[lo], alts[-1] = alts[-1], alts[lo]
+                lo -= 1
+            elif alts[lo] not in self.synthetic: # Hi priority
+                alts[hi], alts[lo] = alts[lo], alts[hi]
+                hi += 1
+            else: # Low priority
+                lo -= 1
+        return alts
+
+    def renamed(self, box):
+        if box in self.rename:
+            return self.rename[box]
+        return box
+
+    def add_to_short(self, box, op):
+        if op:
+            op = op.clone()
+            for i in range(op.numargs()):
+                op.setarg(i, self.renamed(op.getarg(i)))
+        if box in self.short_boxes:
+            if op is None:
+                oldop = self.short_boxes[box].clone()
+                oldres = oldop.result
+                newbox = oldop.result = oldres.clonebox()
+                self.rename[box] = newbox
+                self.short_boxes[box] = None
+                self.short_boxes[newbox] = oldop
+            else:
+                newop = op.clone()
+                newbox = newop.result = op.result.clonebox()
+                self.short_boxes[newop.result] = newop
+            value = self.optimizer.getvalue(box)
+            self.optimizer.make_equal_to(newbox, value)
+        else:
+            self.short_boxes[box] = op
+
     def produce_short_preamble_box(self, box):
         if box in self.short_boxes:
             return 
         if isinstance(box, Const):
             return 
         if box in self.potential_ops:
-            op = self.potential_ops[box]
-            if op:
-                for arg in op.getarglist():
-                    self.produce_short_preamble_box(arg)
-            self.short_boxes[box] = op
+            ops = self.prioritized_alternatives(box)
+            produced_one = False
+            for op in ops:
+                try:
+                    if op:
+                        for arg in op.getarglist():
+                            self.produce_short_preamble_box(arg)
+                except BoxNotProducable:
+                    pass
+                else:
+                    produced_one = True
+                    self.add_to_short(box, op)
+            if not produced_one:
+                raise BoxNotProducable
         else:
             raise BoxNotProducable
 
-    def add_potential(self, op):
+    def add_potential(self, op, synthetic=False):
         if op.result not in self.potential_ops:
             self.potential_ops[op.result] = op
-            return op
-        newop = op.clone()
-        newop.result = op.result.clonebox()
-        self.potential_ops[newop.result] = newop
-        if op.result in self.duplicates:
-            self.duplicates[op.result].append(newop.result)
         else:
-            self.duplicates[op.result] = [newop.result]
-        self.optimizer.make_equal_to(newop.result, self.optimizer.getvalue(op.result))
-        return newop
+            if op.result not in self.alternatives:
+                self.alternatives[op.result] = [self.potential_ops[op.result]]
+            self.alternatives[op.result].append(op)
+        if synthetic:
+            self.synthetic[op] = True
 
     def debug_print(self, logops):
         debug_start('jit-short-boxes')
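
With the change above, ShortBoxes can record several alternative operations producing the same box (plus a synthetic flag for operations the optimizer invented itself), and prioritized_alternatives() partitions them so that real operations are tried first, synthetic ones next, and plain inputargs (stored as None) last. The in-place swap loop above is roughly equivalent to the following sketch, a simplified and order-preserving version with hypothetical names, not the real method:

    def prioritized(alternatives, synthetic):
        # alternatives: list of candidate ops for one box, where None means
        # "take the value straight from an inputarg"; synthetic: set of ops
        # that were invented by the optimizer rather than seen in the trace.
        real = [op for op in alternatives if op is not None and op not in synthetic]
        synth = [op for op in alternatives if op is not None and op in synthetic]
        inputargs = [op for op in alternatives if op is None]
        return real + synth + inputargs

    # A real getfield wins over a synthetic op, which wins over an inputarg.
    real_op, synth_op = 'getfield_gc(p1)', 'same_as(i1)'
    assert prioritized([None, synth_op, real_op], set([synth_op])) == \
           [real_op, synth_op, None]

produce_short_preamble_box() then walks the alternatives in that order and keeps every candidate whose arguments can themselves be produced, raising BoxNotProducable only if none of them work.
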
diff --git a/pypy/jit/metainterp/optimizeopt/vstring.py b/pypy/jit/metainterp/optimizeopt/vstring.py
--- a/pypy/jit/metainterp/optimizeopt/vstring.py
+++ b/pypy/jit/metainterp/optimizeopt/vstring.py
@@ -141,6 +141,11 @@
                                    for c in self._chars])
 
     def string_copy_parts(self, optimizer, targetbox, offsetbox, mode):
+        if not self.is_virtual() and targetbox is not self.box:
+            lengthbox = self.getstrlen(optimizer, mode)
+            srcbox = self.force_box()
+            return copy_str_content(optimizer, srcbox, targetbox,
+                                CONST_0, offsetbox, lengthbox, mode)
         for i in range(len(self._chars)):
             charbox = self._chars[i].force_box()
             if not (isinstance(charbox, Const) and charbox.same_constant(CONST_0)):
@@ -296,7 +301,7 @@
 
 
 def copy_str_content(optimizer, srcbox, targetbox,
-                     srcoffsetbox, offsetbox, lengthbox, mode):
+                     srcoffsetbox, offsetbox, lengthbox, mode, need_next_offset=True):
     if isinstance(srcbox, ConstPtr) and isinstance(srcoffsetbox, Const):
         M = 5
     else:
@@ -313,7 +318,10 @@
                                               None))
             offsetbox = _int_add(optimizer, offsetbox, CONST_1)
     else:
-        nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox)
+        if need_next_offset:
+            nextoffsetbox = _int_add(optimizer, offsetbox, lengthbox)
+        else:
+            nextoffsetbox = None
         op = ResOperation(mode.COPYSTRCONTENT, [srcbox, targetbox,
                                                 srcoffsetbox, offsetbox,
                                                 lengthbox], None)
@@ -365,7 +373,7 @@
 
     def new(self):
         return OptString()
-    
+
     def make_vstring_plain(self, box, source_op, mode):
         vvalue = VStringPlainValue(self.optimizer, box, source_op, mode)
         self.make_equal_to(box, vvalue)
@@ -435,7 +443,11 @@
         #
         if isinstance(value, VStringPlainValue):  # even if no longer virtual
             if vindex.is_constant():
-                return value.getitem(vindex.box.getint())
+                res = value.getitem(vindex.box.getint())
+                # If it is uninitialized we can't return it, it was set by a
+                # COPYSTRCONTENT, not a STRSETITEM
+                if res is not optimizer.CVAL_UNINITIALIZED_ZERO:
+                    return res
         #
         resbox = _strgetitem(self.optimizer, value.force_box(), vindex.force_box(), mode)
         return self.getvalue(resbox)
@@ -450,6 +462,30 @@
         lengthbox = value.getstrlen(self.optimizer, mode)
         self.make_equal_to(op.result, self.getvalue(lengthbox))
 
+    def optimize_COPYSTRCONTENT(self, op):
+        self._optimize_COPYSTRCONTENT(op, mode_string)
+    def optimize_COPYUNICODECONTENT(self, op):
+        self._optimize_COPYSTRCONTENT(op, mode_unicode)
+
+    def _optimize_COPYSTRCONTENT(self, op, mode):
+        # args: src dst srcstart dststart length
+        src = self.getvalue(op.getarg(0))
+        dst = self.getvalue(op.getarg(1))
+        srcstart = self.getvalue(op.getarg(2))
+        dststart = self.getvalue(op.getarg(3))
+        length = self.getvalue(op.getarg(4))
+
+        if length.is_constant() and length.box.getint() == 0:
+            return
+        copy_str_content(self.optimizer,
+            src.force_box(),
+            dst.force_box(),
+            srcstart.force_box(),
+            dststart.force_box(),
+            length.force_box(),
+            mode, need_next_offset=False
+        )
+
     def optimize_CALL(self, op):
         # dispatch based on 'oopspecindex' to a method that handles
         # specifically the given oopspec call.  For non-oopspec calls,
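
The new optimize_COPYSTRCONTENT/optimize_COPYUNICODECONTENT handlers follow a simple rule: a copy whose length is the constant 0 is dropped entirely, and anything else is forced and re-emitted through copy_str_content() (with need_next_offset=False, since no running offset is needed afterwards). A small sketch of just that guard, with a simplified stand-in for OptValue (the real code reads the constant via length.box.getint()):

    class FakeValue(object):
        # Minimal stand-in for OptValue: optionally knows a constant int.
        def __init__(self, const=None):
            self.const = const
        def is_constant(self):
            return self.const is not None
        def getint(self):
            return self.const

    def optimize_copy(length_value, emit):
        if length_value.is_constant() and length_value.getint() == 0:
            return                      # zero-length copy: drop the operation
        emit('copystrcontent')          # otherwise force args and re-emit

    emitted = []
    optimize_copy(FakeValue(0), emitted.append)   # eliminated
    optimize_copy(FakeValue(3), emitted.append)   # kept
    optimize_copy(FakeValue(), emitted.append)    # unknown length: kept
    assert emitted == ['copystrcontent', 'copystrcontent']
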
diff --git a/pypy/jit/metainterp/pyjitpl.py b/pypy/jit/metainterp/pyjitpl.py
--- a/pypy/jit/metainterp/pyjitpl.py
+++ b/pypy/jit/metainterp/pyjitpl.py
@@ -17,6 +17,7 @@
 from pypy.jit.metainterp.jitprof import ABORT_TOO_LONG, ABORT_BRIDGE, \
                                         ABORT_FORCE_QUASIIMMUT, ABORT_BAD_LOOP
 from pypy.jit.metainterp.jitexc import JitException, get_llexception
+from pypy.jit.metainterp.heapcache import HeapCache
 from pypy.rlib.objectmodel import specialize
 from pypy.jit.codewriter.jitcode import JitCode, SwitchDictDescr
 from pypy.jit.codewriter import heaptracker
@@ -209,7 +210,8 @@
                 self.metainterp.clear_exception()
                 resbox = self.execute(rop.%s, b1, b2)
                 self.make_result_of_lastop(resbox)  # same as execute_varargs()
-                self.metainterp.handle_possible_overflow_error()
+                if not isinstance(resbox, Const):
+                    self.metainterp.handle_possible_overflow_error()
                 return resbox
         ''' % (_opimpl, _opimpl.upper())).compile()
 
@@ -321,7 +323,7 @@
     def _establish_nullity(self, box, orgpc):
         value = box.nonnull()
         if value:
-            if box not in self.metainterp.known_class_boxes:
+            if not self.metainterp.heapcache.is_class_known(box):
                 self.generate_guard(rop.GUARD_NONNULL, box, resumepc=orgpc)
         else:
             if not isinstance(box, Const):
@@ -366,14 +368,17 @@
 
     @arguments("descr")
     def opimpl_new(self, sizedescr):
-        return self.execute_with_descr(rop.NEW, sizedescr)
+        resbox = self.execute_with_descr(rop.NEW, sizedescr)
+        self.metainterp.heapcache.new(resbox)
+        return resbox
 
     @arguments("descr")
     def opimpl_new_with_vtable(self, sizedescr):
         cpu = self.metainterp.cpu
         cls = heaptracker.descr2vtable(cpu, sizedescr)
         resbox = self.execute(rop.NEW_WITH_VTABLE, ConstInt(cls))
-        self.metainterp.known_class_boxes[resbox] = None
+        self.metainterp.heapcache.new(resbox)
+        self.metainterp.heapcache.class_now_known(resbox)
         return resbox
 
 ##    @FixME  #arguments("box")
@@ -392,26 +397,30 @@
 ##        self.execute(rop.SUBCLASSOF, box1, box2)
 
     @arguments("descr", "box")
-    def opimpl_new_array(self, itemsizedescr, countbox):
-        return self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, countbox)
+    def opimpl_new_array(self, itemsizedescr, lengthbox):
+        resbox = self.execute_with_descr(rop.NEW_ARRAY, itemsizedescr, lengthbox)
+        self.metainterp.heapcache.new_array(resbox, lengthbox)
+        return resbox
+
+    @specialize.arg(1)
+    def _do_getarrayitem_gc_any(self, op, arraybox, arraydescr, indexbox):
+        tobox = self.metainterp.heapcache.getarrayitem(
+                arraybox, arraydescr, indexbox)
+        if tobox:
+            # sanity check: see whether the current array value
+            # corresponds to what the cache thinks the value is
+            resbox = executor.execute(self.metainterp.cpu, self.metainterp, op,
+                                      arraydescr, arraybox, indexbox)
+            assert resbox.constbox().same_constant(tobox.constbox())
+            return tobox
+        resbox = self.execute_with_descr(op, arraydescr, arraybox, indexbox)
+        self.metainterp.heapcache.getarrayitem_now_known(
+                arraybox, arraydescr, indexbox, resbox)
+        return resbox
 
     @arguments("box", "descr", "box")
     def _opimpl_getarrayitem_gc_any(self, arraybox, arraydescr, indexbox):
-        cache = self.metainterp.heap_array_cache.get(arraydescr, None)
-        if cache and isinstance(indexbox, ConstInt):
-            index = indexbox.getint()
-            frombox, tobox = cache.get(index, (None, None))
-            if frombox is arraybox:
-                return tobox
-        resbox = self.execute_with_descr(rop.GETARRAYITEM_GC,
-                                         arraydescr, arraybox, indexbox)
-        if isinstance(indexbox, ConstInt):
-            if not cache:
-                cache = self.metainterp.heap_array_cache[arraydescr] = {}
-            index = indexbox.getint()
-            cache[index] = arraybox, resbox
-        return resbox
-
+        return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC, arraybox, arraydescr, indexbox)
 
     opimpl_getarrayitem_gc_i = _opimpl_getarrayitem_gc_any
     opimpl_getarrayitem_gc_r = _opimpl_getarrayitem_gc_any
@@ -427,8 +436,7 @@
 
     @arguments("box", "descr", "box")
     def _opimpl_getarrayitem_gc_pure_any(self, arraybox, arraydescr, indexbox):
-        return self.execute_with_descr(rop.GETARRAYITEM_GC_PURE,
-                                       arraydescr, arraybox, indexbox)
+        return self._do_getarrayitem_gc_any(rop.GETARRAYITEM_GC_PURE, arraybox, arraydescr, indexbox)
 
     opimpl_getarrayitem_gc_pure_i = _opimpl_getarrayitem_gc_pure_any
     opimpl_getarrayitem_gc_pure_r = _opimpl_getarrayitem_gc_pure_any
@@ -439,13 +447,8 @@
                                     indexbox, itembox):
         self.execute_with_descr(rop.SETARRAYITEM_GC, arraydescr, arraybox,
                                 indexbox, itembox)
-        if isinstance(indexbox, ConstInt):
-            cache = self.metainterp.heap_array_cache.setdefault(arraydescr, {})
-            cache[indexbox.getint()] = arraybox, itembox
-        else:
-            cache = self.metainterp.heap_array_cache.get(arraydescr, None)
-            if cache:
-                cache.clear()
+        self.metainterp.heapcache.setarrayitem(
+                arraybox, arraydescr, indexbox, itembox)
 
     opimpl_setarrayitem_gc_i = _opimpl_setarrayitem_gc_any
     opimpl_setarrayitem_gc_r = _opimpl_setarrayitem_gc_any
@@ -462,7 +465,12 @@
 
     @arguments("box", "descr")
     def opimpl_arraylen_gc(self, arraybox, arraydescr):
-        return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox)
+        lengthbox = self.metainterp.heapcache.arraylen(arraybox)
+        if lengthbox is None:
+            lengthbox = self.execute_with_descr(
+                    rop.ARRAYLEN_GC, arraydescr, arraybox)
+            self.metainterp.heapcache.arraylen_now_known(arraybox, lengthbox)
+        return lengthbox
 
     @arguments("orgpc", "box", "descr", "box")
     def opimpl_check_neg_index(self, orgpc, arraybox, arraydescr, indexbox):
@@ -471,19 +479,17 @@
         negbox = self.implement_guard_value(orgpc, negbox)
         if negbox.getint():
             # the index is < 0; add the array length to it
-            lenbox = self.metainterp.execute_and_record(
-                rop.ARRAYLEN_GC, arraydescr, arraybox)
+            lengthbox = self.opimpl_arraylen_gc(arraybox, arraydescr)
             indexbox = self.metainterp.execute_and_record(
-                rop.INT_ADD, None, indexbox, lenbox)
+                rop.INT_ADD, None, indexbox, lengthbox)
         return indexbox
 
     @arguments("descr", "descr", "descr", "descr", "box")
     def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
                        sizebox):
-        sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
+        sbox = self.opimpl_new(structdescr)
         self._opimpl_setfield_gc_any(sbox, lengthdescr, sizebox)
-        abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
-                                                  sizebox)
+        abox = self.opimpl_new_array(arraydescr, sizebox)
         self._opimpl_setfield_gc_any(sbox, itemsdescr, abox)
         return sbox
 
@@ -540,11 +546,15 @@
 
     @specialize.arg(1)
     def _opimpl_getfield_gc_any_pureornot(self, opnum, box, fielddescr):
-        frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None))
-        if frombox is box:
+        tobox = self.metainterp.heapcache.getfield(box, fielddescr)
+        if tobox is not None:
+            # sanity check: see whether the current struct value
+            # corresponds to what the cache thinks the value is
+            resbox = executor.execute(self.metainterp.cpu, self.metainterp,
+                                      rop.GETFIELD_GC, fielddescr, box)
             return tobox
         resbox = self.execute_with_descr(opnum, fielddescr, box)
-        self.metainterp.heap_cache[fielddescr] = (box, resbox)
+        self.metainterp.heapcache.getfield_now_known(box, fielddescr, resbox)
         return resbox
 
     @arguments("orgpc", "box", "descr")
@@ -565,11 +575,11 @@
 
     @arguments("box", "descr", "box")
     def _opimpl_setfield_gc_any(self, box, fielddescr, valuebox):
-        frombox, tobox = self.metainterp.heap_cache.get(fielddescr, (None, None))
-        if frombox is box and tobox is valuebox:
+        tobox = self.metainterp.heapcache.getfield(box, fielddescr)
+        if tobox is valuebox:
             return
         self.execute_with_descr(rop.SETFIELD_GC, fielddescr, box, valuebox)
-        self.metainterp.heap_cache[fielddescr] = (box, valuebox)
+        self.metainterp.heapcache.setfield(box, fielddescr, valuebox)
     opimpl_setfield_gc_i = _opimpl_setfield_gc_any
     opimpl_setfield_gc_r = _opimpl_setfield_gc_any
     opimpl_setfield_gc_f = _opimpl_setfield_gc_any
@@ -633,7 +643,7 @@
         standard_box = self.metainterp.virtualizable_boxes[-1]
         if standard_box is box:
             return False
-        if box in self.metainterp.nonstandard_virtualizables:
+        if self.metainterp.heapcache.is_nonstandard_virtualizable(box):
             return True
         eqbox = self.metainterp.execute_and_record(rop.PTR_EQ, None,
                                                    box, standard_box)
@@ -642,7 +652,7 @@
         if isstandard:
             self.metainterp.replace_box(box, standard_box)
         else:
-            self.metainterp.nonstandard_virtualizables[box] = None
+            self.metainterp.heapcache.nonstandard_virtualizables_now_known(box)
         return not isstandard
 
     def _get_virtualizable_field_index(self, fielddescr):
@@ -727,7 +737,7 @@
     def opimpl_arraylen_vable(self, pc, box, fdescr, adescr):
         if self._nonstandard_virtualizable(pc, box):
             arraybox = self._opimpl_getfield_gc_any(box, fdescr)
-            return self.execute_with_descr(rop.ARRAYLEN_GC, adescr, arraybox)
+            return self.opimpl_arraylen_gc(arraybox, adescr)
         vinfo = self.metainterp.jitdriver_sd.virtualizable_info
         virtualizable_box = self.metainterp.virtualizable_boxes[-1]
         virtualizable = vinfo.unwrap_virtualizable_box(virtualizable_box)
@@ -858,6 +868,14 @@
     def opimpl_newunicode(self, lengthbox):
         return self.execute(rop.NEWUNICODE, lengthbox)
 
+    @arguments("box", "box", "box", "box", "box")
+    def opimpl_copystrcontent(self, srcbox, dstbox, srcstartbox, dststartbox, lengthbox):
+        return self.execute(rop.COPYSTRCONTENT, srcbox, dstbox, srcstartbox, dststartbox, lengthbox)
+
+    @arguments("box", "box", "box", "box", "box")
+    def opimpl_copyunicodecontent(self, srcbox, dstbox, srcstartbox, dststartbox, lengthbox):
+        return self.execute(rop.COPYUNICODECONTENT, srcbox, dstbox, srcstartbox, dststartbox, lengthbox)
+
 ##    @FixME  #arguments("descr", "varargs")
 ##    def opimpl_residual_oosend_canraise(self, methdescr, varargs):
 ##        return self.execute_varargs(rop.OOSEND, varargs, descr=methdescr,
@@ -884,9 +902,9 @@
     @arguments("orgpc", "box")
     def opimpl_guard_class(self, orgpc, box):
         clsbox = self.cls_of_box(box)
-        if box not in self.metainterp.known_class_boxes:
+        if not self.metainterp.heapcache.is_class_known(box):
             self.generate_guard(rop.GUARD_CLASS, box, [clsbox], resumepc=orgpc)
-            self.metainterp.known_class_boxes[box] = None
+            self.metainterp.heapcache.class_now_known(box)
         return clsbox
 
     @arguments("int", "orgpc")
@@ -1052,6 +1070,18 @@
         return ConstInt(trace_length)
 
     @arguments("box")
+    def _opimpl_isconstant(self, box):
+        return ConstInt(isinstance(box, Const))
+
+    opimpl_int_isconstant = opimpl_ref_isconstant = _opimpl_isconstant
+
+    @arguments("box")
+    def _opimpl_isvirtual(self, box):
+        return ConstInt(self.metainterp.heapcache.is_unescaped(box))
+
+    opimpl_ref_isvirtual = _opimpl_isvirtual
+
+    @arguments("box")
     def opimpl_virtual_ref(self, box):
         # Details on the content of metainterp.virtualref_boxes:
         #
@@ -1492,16 +1522,7 @@
         self.last_exc_value_box = None
         self.retracing_loop_from = None
         self.call_pure_results = args_dict_box()
-        # contains boxes where the class is already known
-        self.known_class_boxes = {}
-        # contains frame boxes that are not virtualizables
-        self.nonstandard_virtualizables = {}
-        # heap cache
-        # maps descrs to (from_box, to_box) tuples
-        self.heap_cache = {}
-        # heap array cache
-        # maps descrs to {index: (from_box, to_box)} dicts
-        self.heap_array_cache = {}
+        self.heapcache = HeapCache()
 
     def perform_call(self, jitcode, boxes, greenkey=None):
         # causes the metainterp to enter the given subfunction
@@ -1674,32 +1695,18 @@
 
     def _record_helper_nonpure_varargs(self, opnum, resbox, descr, argboxes):
         assert resbox is None or isinstance(resbox, Box)
+        if (rop._OVF_FIRST <= opnum <= rop._OVF_LAST and
+            self.last_exc_value_box is None and
+            self._all_constants_varargs(argboxes)):
+            return resbox.constbox()
         # record the operation
         profiler = self.staticdata.profiler
         profiler.count_ops(opnum, RECORDED_OPS)
-        self._invalidate_caches(opnum, descr)
+        self.heapcache.invalidate_caches(opnum, descr, argboxes)
         op = self.history.record(opnum, argboxes, resbox, descr)
         self.attach_debug_info(op)
         return resbox
 
-    def _invalidate_caches(self, opnum, descr):
-        if opnum == rop.SETFIELD_GC:
-            return
-        if opnum == rop.SETARRAYITEM_GC:
-            return
-        if rop._NOSIDEEFFECT_FIRST <= opnum <= rop._NOSIDEEFFECT_LAST:
-            return
-        if opnum == rop.CALL:
-            effectinfo = descr.get_extra_info()
-            ef = effectinfo.extraeffect
-            if ef == effectinfo.EF_LOOPINVARIANT or \
-               ef == effectinfo.EF_ELIDABLE_CANNOT_RAISE or \
-               ef == effectinfo.EF_ELIDABLE_CAN_RAISE:
-                return
-        if self.heap_cache:
-            self.heap_cache.clear()
-        if self.heap_array_cache:
-            self.heap_array_cache.clear()
 
     def attach_debug_info(self, op):
         if (not we_are_translated() and op is not None
@@ -1862,10 +1869,7 @@
                 duplicates[box] = None
 
     def reached_loop_header(self, greenboxes, redboxes, resumedescr):
-        self.known_class_boxes = {}
-        self.nonstandard_virtualizables = {} # XXX maybe not needed?
-        self.heap_cache = {}
-        self.heap_array_cache = {}
+        self.heapcache.reset()
 
         duplicates = {}
         self.remove_consts_and_duplicates(redboxes, len(redboxes),
@@ -2373,17 +2377,7 @@
             for i in range(len(boxes)):
                 if boxes[i] is oldbox:
                     boxes[i] = newbox
-        for descr, (frombox, tobox) in self.heap_cache.iteritems():
-            change = False
-            if frombox is oldbox:
-                change = True
-                frombox = newbox
-            if tobox is oldbox:
-                change = True
-                tobox = newbox
-            if change:
-                self.heap_cache[descr] = frombox, tobox
-        # XXX what about self.heap_array_cache?
+        self.heapcache.replace_box(oldbox, newbox)
 
     def find_biggest_function(self):
         start_stack = []
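
Besides routing all field and array caching through the new HeapCache object, _record_helper_nonpure_varargs above gains a constant-folding shortcut: an overflow-checked arithmetic operation whose arguments are all constants, and which did not raise, is turned into a constant instead of being recorded (which is also why the opimpl wrappers now skip handle_possible_overflow_error when the result box is a Const). A rough, self-contained sketch of that shortcut with hypothetical stand-ins for the box classes:

    class ConstIntBox(object):
        # Hypothetical stand-in for ConstInt; constbox() returns itself.
        def __init__(self, value):
            self.value = value
        def constbox(self):
            return self

    def record_or_fold(is_ovf_op, argboxes, resbox, exc_raised, trace):
        all_const = all(isinstance(b, ConstIntBox) for b in argboxes)
        if is_ovf_op and not exc_raised and all_const:
            return resbox.constbox()    # folded away, nothing is traced
        trace.append(resbox)            # otherwise record the operation
        return resbox

    trace = []
    res = record_or_fold(True, [ConstIntBox(2), ConstIntBox(3)],
                         ConstIntBox(5), False, trace)
    assert res.value == 5 and trace == []
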
diff --git a/pypy/jit/metainterp/test/test_ajit.py b/pypy/jit/metainterp/test/test_ajit.py
--- a/pypy/jit/metainterp/test/test_ajit.py
+++ b/pypy/jit/metainterp/test/test_ajit.py
@@ -1,23 +1,25 @@
+import sys
+
 import py
-import sys
-from pypy.rlib.jit import JitDriver, we_are_jitted, hint, dont_look_inside
-from pypy.rlib.jit import loop_invariant, elidable, promote
-from pypy.rlib.jit import jit_debug, assert_green, AssertGreenFailed
-from pypy.rlib.jit import unroll_safe, current_trace_length
+
+from pypy import conftest
+from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy
 from pypy.jit.metainterp import pyjitpl, history
+from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT
+from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst
+from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper
+from pypy.jit.metainterp.warmspot import get_stats
 from pypy.jit.metainterp.warmstate import set_future_value
-from pypy.jit.metainterp.warmspot import get_stats
-from pypy.jit.codewriter.policy import JitPolicy, StopAtXPolicy
-from pypy import conftest
+from pypy.rlib.jit import (JitDriver, we_are_jitted, hint, dont_look_inside,
+    loop_invariant, elidable, promote, jit_debug, assert_green,
+    AssertGreenFailed, unroll_safe, current_trace_length, look_inside_iff,
+    isconstant, isvirtual)
 from pypy.rlib.rarithmetic import ovfcheck
-from pypy.jit.metainterp.typesystem import LLTypeHelper, OOTypeHelper
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.rpython.ootypesystem import ootype
-from pypy.jit.metainterp.optimizeopt import ALL_OPTS_DICT
-from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin, noConst
+
 
 class BasicTests:
-
     def test_basic(self):
         def f(x, y):
             return x + y
@@ -99,14 +101,14 @@
                 myjitdriver.jit_merge_point(x=x, y=y, res=res)
                 res += x * x
                 x += 1
-                res += x * x                
+                res += x * x
                 y -= 1
             return res
         res = self.meta_interp(f, [6, 7])
         assert res == 1323
         self.check_loop_count(1)
         self.check_loops(int_mul=1)
-        
+
     def test_loop_variant_mul_ovf(self):
         myjitdriver = JitDriver(greens = [], reds = ['y', 'res', 'x'])
         def f(x, y):
@@ -1372,7 +1374,7 @@
             return x
         res = self.meta_interp(f, [299], listops=True)
         assert res == f(299)
-        self.check_loops(guard_class=0, guard_value=3)        
+        self.check_loops(guard_class=0, guard_value=3)
         self.check_loops(guard_class=0, guard_value=6, everywhere=True)
 
     def test_merge_guardnonnull_guardclass(self):
@@ -2118,7 +2120,7 @@
             return sa
         res = self.meta_interp(f, [32, 7])
         assert res == f(32, 7)
-        
+
     def test_caching_setarrayitem_fixed(self):
         myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'node'])
         def f(n, a):
@@ -2138,7 +2140,7 @@
             return sa
         res = self.meta_interp(f, [32, 7])
         assert res == f(32, 7)
-        
+
     def test_caching_setarrayitem_var(self):
         myjitdriver = JitDriver(greens = [], reds = ['sa', 'i', 'n', 'a', 'b', 'node'])
         def f(n, a, b):
@@ -2668,7 +2670,7 @@
             myjitdriver.set_param('threshold', 3)
             myjitdriver.set_param('trace_eagerness', 1)
             myjitdriver.set_param('retrace_limit', 5)
-            myjitdriver.set_param('function_threshold', -1)            
+            myjitdriver.set_param('function_threshold', -1)
             pc = sa = i = 0
             while pc < len(bytecode):
                 myjitdriver.jit_merge_point(pc=pc, n=n, sa=sa, i=i)
@@ -2693,12 +2695,12 @@
         def g(n1, n2):
             for i in range(10):
                 f(n1)
-            for i in range(10):                
+            for i in range(10):
                 f(n2)
 
         nn = [10, 3]
         assert self.meta_interp(g, nn) == g(*nn)
-        
+
         # The attempts of retracing first loop will end up retracing the
         # second and thus fail 5 times, saturating the retrace_count. Instead a
         # bridge back to the preamble of the first loop is produced. A guard in
@@ -2709,7 +2711,7 @@
         self.check_tree_loop_count(2 + 3)
 
         # FIXME: Add a global retrace counter and test that we are not trying more than 5 times.
-        
+
         def g(n):
             for i in range(n):
                 for j in range(10):
@@ -2945,15 +2947,15 @@
             a = [0, 1, 2, 3, 4]
             while i < n:
                 myjitdriver.jit_merge_point(sa=sa, n=n, a=a, i=i)
-                if i < n/2:
+                if i < n / 2:
                     sa += a[4]
-                elif i == n/2:
+                elif i == n / 2:
                     a.pop()
                 i += 1
         res = self.meta_interp(f, [32])
         assert res == f(32)
         self.check_loops(arraylen_gc=2)
-        
+
 class TestOOtype(BasicTests, OOJitMixin):
 
     def test_oohash(self):
@@ -3173,7 +3175,7 @@
         res = self.meta_interp(f, [32])
         assert res == f(32)
         self.check_tree_loop_count(3)
-        
+
     def test_two_loopinvariant_arrays3(self):
         from pypy.rpython.lltypesystem import lltype, llmemory, rffi
         myjitdriver = JitDriver(greens = [], reds = ['sa', 'n', 'i', 'a'])
@@ -3197,7 +3199,7 @@
         res = self.meta_interp(f, [32])
         assert res == f(32)
         self.check_tree_loop_count(2)
-        
+
     def test_two_loopinvariant_arrays_boxed(self):
         class A(object):
             def __init__(self, a):
@@ -3222,7 +3224,7 @@
         res = self.meta_interp(f, [32])
         assert res == f(32)
         self.check_loops(arraylen_gc=2, everywhere=True)
-        
+
     def test_release_gil_flush_heap_cache(self):
         if sys.platform == "win32":
             py.test.skip("needs 'time'")
@@ -3276,7 +3278,205 @@
             return n
 
         self.meta_interp(f, [10], repeat=3)
+
+    def test_jit_merge_point_with_pbc(self):
+        driver = JitDriver(greens = [], reds = ['x'])
+
+        class A(object):
+            def __init__(self, x):
+                self.x = x
+            def _freeze_(self):
+                return True
+        pbc = A(1)
+
+        def main(x):
+            return f(x, pbc)
+
+        def f(x, pbc):
+            while x > 0:
+                driver.jit_merge_point(x = x)
+                x -= pbc.x
+            return x
+
+        self.meta_interp(main, [10])
+
+    def test_look_inside_iff_const(self):
+        @look_inside_iff(lambda arg: isconstant(arg))
+        def f(arg):
+            s = 0
+            while arg > 0:
+                s += arg
+                arg -= 1
+            return s
+
+        driver = JitDriver(greens = ['code'], reds = ['n', 'arg', 's'])
+
+        def main(code, n, arg):
+            s = 0
+            while n > 0:
+                driver.jit_merge_point(code=code, n=n, arg=arg, s=s)
+                if code == 0:
+                    s += f(arg)
+                else:
+                    s += f(1)
+                n -= 1
+            return s
+
+        res = self.meta_interp(main, [0, 10, 2], enable_opts='')
+        assert res == main(0, 10, 2)
+        self.check_loops(call=1)
+        res = self.meta_interp(main, [1, 10, 2], enable_opts='')
+        assert res == main(1, 10, 2)
+        self.check_loops(call=0)
+
+    def test_look_inside_iff_virtual(self):
+        # There's no good reason for this to be look_inside_iff, but it's a test!
+        @look_inside_iff(lambda arg, n: isvirtual(arg))
+        def f(arg, n):
+            if n == 100:
+                for i in xrange(n):
+                    n += i
+            return arg.x
+        class A(object):
+            def __init__(self, x):
+                self.x = x
+        driver = JitDriver(greens=['n'], reds=['i', 'a'])
+        def main(n):
+            i = 0
+            a = A(3)
+            while i < 20:
+                driver.jit_merge_point(i=i, n=n, a=a)
+                if n == 0:
+                    i += f(a, n)
+                else:
+                    i += f(A(2), n)
+        res = self.meta_interp(main, [0], enable_opts='')
+        assert res == main(0)
+        self.check_loops(call=1, getfield_gc=0)
+        res = self.meta_interp(main, [1], enable_opts='')
+        assert res == main(1)
+        self.check_loops(call=0, getfield_gc=0)
+
+    def test_reuse_elidable_result(self):
+        driver = JitDriver(reds=['n', 's'], greens = [])
+        def main(n):
+            s = 0
+            while n > 0:
+                driver.jit_merge_point(s=s, n=n)
+                s += len(str(n)) + len(str(n))
+                n -= 1
+            return s
+        res = self.meta_interp(main, [10])
+        assert res == main(10)
+        self.check_loops({
+            'call': 1, 'guard_no_exception': 1, 'guard_true': 1, 'int_add': 2,
+            'int_gt': 1, 'int_sub': 1, 'strlen': 1, 'jump': 1,
+        })
+
+    def test_look_inside_iff_const_getarrayitem_gc_pure(self):
+        driver = JitDriver(greens=['unroll'], reds=['s', 'n'])
+
+        class A(object):
+            _immutable_fields_ = ["x[*]"]
+            def __init__(self, x):
+                self.x = [x]
+
+        @look_inside_iff(lambda x: isconstant(x))
+        def f(x):
+            i = 0
+            for c in x:
+                i += 1
+            return i
+
+        def main(unroll, n):
+            s = 0
+            while n > 0:
+                driver.jit_merge_point(s=s, n=n, unroll=unroll)
+                if unroll:
+                    x = A("xx")
+                else:
+                    x = A("x" * n)
+                s += f(x.x[0])
+                n -= 1
+            return s
+
+        res = self.meta_interp(main, [0, 10])
+        assert res == main(0, 10)
+        # 2 calls, one for f() and one for char_mul
+        self.check_loops(call=2)
+        res = self.meta_interp(main, [1, 10])
+        assert res == main(1, 10)
+        self.check_loops(call=0)
+
+    def test_setarrayitem_followed_by_arraycopy(self):
+        myjitdriver = JitDriver(greens = [], reds = ['n', 'sa', 'x', 'y'])
+        def f(n):
+            sa = 0
+            x = [1,2,n]
+            y = [1,2,3]
+            while n > 0:
+                myjitdriver.jit_merge_point(sa=sa, n=n, x=x, y=y)
+                y[0] = n
+                x[0:3] = y
+                sa += x[0]
+                n -= 1
+            return sa
+        res = self.meta_interp(f, [16])
+        assert res == f(16)
         
 
+
 class TestLLtype(BaseLLtypeTests, LLJitMixin):
-    pass
+    def test_tagged(self):
+        py.test.skip("implement me")
+        from pypy.rlib.objectmodel import UnboxedValue
+        class Base(object):
+            __slots__ = ()
+
+        class Int(UnboxedValue, Base):
+            __slots__ = ["a"]
+
+            def is_pos(self):
+                return self.a > 0
+
+            def dec(self):
+                return Int(self.a - 1)
+
+
+        class Float(Base):
+            def __init__(self, a):
+                self.a = a
+
+            def is_pos(self):
+                return self.a > 0
+
+            def dec(self):
+                return Float(self.a - 1)
+
+        driver = JitDriver(greens=['pc', 's'], reds=['o'])
+
+        def main(fl, n, s):
+            if s:
+                s = "--j"
+            else:
+                s = "---j"
+            if fl:
+                o = Float(float(n))
+            else:
+                o = Int(n)
+            pc = 0
+            while True:
+                driver.jit_merge_point(s=s, pc=pc, o=o)
+                c = s[pc]
+                if c == "j":
+                    driver.can_enter_jit(s=s, pc=pc, o=o)
+                    if o.is_pos():
+                        pc = 0
+                        continue
+                    else:
+                        break
+                elif c == "-":
+                    o = o.dec()
+                pc += 1
+            return pc
+        res = self.meta_interp(main, [False, 100, True], taggedpointers=True)
diff --git a/pypy/jit/metainterp/test/test_dict.py b/pypy/jit/metainterp/test/test_dict.py
--- a/pypy/jit/metainterp/test/test_dict.py
+++ b/pypy/jit/metainterp/test/test_dict.py
@@ -153,11 +153,7 @@
 
         res = self.meta_interp(f, [100], listops=True)
         assert res == f(50)
-        # XXX: ideally there would be 7 calls here, but repeated CALL_PURE with
-        # the same arguments are not folded, because we have conflicting
-        # definitions of pure, once strhash can be appropriately folded
-        # this should be decreased to seven.
-        self.check_loops({"call": 8, "guard_false": 1, "guard_no_exception": 6,
+        self.check_loops({"call": 7, "guard_false": 1, "guard_no_exception": 6,
                           "guard_true": 1, "int_and": 1, "int_gt": 1,
                           "int_is_true": 1, "int_sub": 1, "jump": 1,
                           "new_with_vtable": 1, "setfield_gc": 1})
diff --git a/pypy/jit/metainterp/test/test_heapcache.py b/pypy/jit/metainterp/test/test_heapcache.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/metainterp/test/test_heapcache.py
@@ -0,0 +1,365 @@
+from pypy.jit.metainterp.heapcache import HeapCache
+from pypy.jit.metainterp.resoperation import rop
+from pypy.jit.metainterp.history import ConstInt
+
+box1 = object()
+box2 = object()
+box3 = object()
+box4 = object()
+lengthbox1 = object()
+lengthbox2 = object()
+descr1 = object()
+descr2 = object()
+descr3 = object()
+
+index1 = ConstInt(0)
+index2 = ConstInt(1)
+
+
+class FakeEffektinfo(object):
+    EF_ELIDABLE_CANNOT_RAISE           = 0 #elidable function (and cannot raise)
+    EF_LOOPINVARIANT                   = 1 #special: call it only once per loop
+    EF_CANNOT_RAISE                    = 2 #a function which cannot raise
+    EF_ELIDABLE_CAN_RAISE              = 3 #elidable function (but can raise)
+    EF_CAN_RAISE                       = 4 #normal function (can raise)
+    EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 5 #can raise and force virtualizables
+    EF_RANDOM_EFFECTS                  = 6 #can do whatever
+
+    OS_ARRAYCOPY = 0
+
+    def __init__(self, extraeffect, oopspecindex):
+        self.extraeffect = extraeffect
+        self.oopspecindex = oopspecindex
+
+class FakeCallDescr(object):
+    def __init__(self, extraeffect, oopspecindex=None):
+        self.extraeffect = extraeffect
+        self.oopspecindex = oopspecindex
+
+    def get_extra_info(self):
+        return FakeEffektinfo(self.extraeffect, self.oopspecindex)
+
+class TestHeapCache(object):
+    def test_known_class_box(self):
+        h = HeapCache()
+        assert not h.is_class_known(1)
+        assert not h.is_class_known(2)
+        h.class_now_known(1)
+        assert h.is_class_known(1)
+        assert not h.is_class_known(2)
+
+        h.reset()
+        assert not h.is_class_known(1)
+        assert not h.is_class_known(2)
+
+    def test_nonstandard_virtualizable(self):
+        h = HeapCache()
+        assert not h.is_nonstandard_virtualizable(1)
+        assert not h.is_nonstandard_virtualizable(2)
+        h.nonstandard_virtualizables_now_known(1)
+        assert h.is_nonstandard_virtualizable(1)
+        assert not h.is_nonstandard_virtualizable(2)
+
+        h.reset()
+        assert not h.is_nonstandard_virtualizable(1)
+        assert not h.is_nonstandard_virtualizable(2)
+
+
+    def test_heapcache_fields(self):
+        h = HeapCache()
+        assert h.getfield(box1, descr1) is None
+        assert h.getfield(box1, descr2) is None
+        h.setfield(box1, descr1, box2)
+        assert h.getfield(box1, descr1) is box2
+        assert h.getfield(box1, descr2) is None
+        h.setfield(box1, descr2, box3)
+        assert h.getfield(box1, descr1) is box2
+        assert h.getfield(box1, descr2) is box3
+        h.setfield(box1, descr1, box3)
+        assert h.getfield(box1, descr1) is box3
+        assert h.getfield(box1, descr2) is box3
+        h.setfield(box3, descr1, box1)
+        assert h.getfield(box3, descr1) is box1
+        assert h.getfield(box1, descr1) is None
+        assert h.getfield(box1, descr2) is box3
+
+        h.reset()
+        assert h.getfield(box1, descr1) is None
+        assert h.getfield(box1, descr2) is None
+        assert h.getfield(box3, descr1) is None
+
+    def test_heapcache_read_fields_multiple(self):
+        h = HeapCache()
+        h.getfield_now_known(box1, descr1, box2)
+        h.getfield_now_known(box3, descr1, box4)
+        assert h.getfield(box1, descr1) is box2
+        assert h.getfield(box1, descr2) is None
+        assert h.getfield(box3, descr1) is box4
+        assert h.getfield(box3, descr2) is None
+
+        h.reset()
+        assert h.getfield(box1, descr1) is None
+        assert h.getfield(box1, descr2) is None
+        assert h.getfield(box3, descr1) is None
+        assert h.getfield(box3, descr2) is None
+
+    def test_heapcache_write_fields_multiple(self):
+        h = HeapCache()
+        h.setfield(box1, descr1, box2)
+        assert h.getfield(box1, descr1) is box2
+        h.setfield(box3, descr1, box4)
+        assert h.getfield(box3, descr1) is box4
+        assert h.getfield(box1, descr1) is None # box1 and box3 can alias
+
+        h = HeapCache()
+        h.new(box1)
+        h.setfield(box1, descr1, box2)
+        assert h.getfield(box1, descr1) is box2
+        h.setfield(box3, descr1, box4)
+        assert h.getfield(box3, descr1) is box4
+        assert h.getfield(box1, descr1) is None # box1 and box3 can alias
+
+        h = HeapCache()
+        h.new(box1)
+        h.new(box3)
+        h.setfield(box1, descr1, box2)
+        assert h.getfield(box1, descr1) is box2
+        h.setfield(box3, descr1, box4)
+        assert h.getfield(box3, descr1) is box4
+        assert h.getfield(box1, descr1) is box2 # box1 and box3 cannot alias
+        h.setfield(box1, descr1, box3)
+        assert h.getfield(box1, descr1) is box3
+
+
+    def test_heapcache_arrays(self):
+        h = HeapCache()
+        assert h.getarrayitem(box1, descr1, index1) is None
+        assert h.getarrayitem(box1, descr2, index1) is None
+        assert h.getarrayitem(box1, descr1, index2) is None
+        assert h.getarrayitem(box1, descr2, index2) is None
+
+        h.setarrayitem(box1, descr1, index1, box2)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        assert h.getarrayitem(box1, descr2, index1) is None
+        assert h.getarrayitem(box1, descr1, index2) is None
+        assert h.getarrayitem(box1, descr2, index2) is None
+        h.setarrayitem(box1, descr1, index2, box4)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        assert h.getarrayitem(box1, descr2, index1) is None
+        assert h.getarrayitem(box1, descr1, index2) is box4
+        assert h.getarrayitem(box1, descr2, index2) is None
+
+        h.setarrayitem(box1, descr2, index1, box3)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        assert h.getarrayitem(box1, descr2, index1) is box3
+        assert h.getarrayitem(box1, descr1, index2) is box4
+        assert h.getarrayitem(box1, descr2, index2) is None
+
+        h.setarrayitem(box1, descr1, index1, box3)
+        assert h.getarrayitem(box1, descr1, index1) is box3
+        assert h.getarrayitem(box1, descr2, index1) is box3
+        assert h.getarrayitem(box1, descr1, index2) is box4
+        assert h.getarrayitem(box1, descr2, index2) is None
+
+        h.setarrayitem(box3, descr1, index1, box1)
+        assert h.getarrayitem(box3, descr1, index1) is box1
+        assert h.getarrayitem(box1, descr1, index1) is None
+        assert h.getarrayitem(box1, descr2, index1) is box3
+        assert h.getarrayitem(box1, descr1, index2) is box4
+        assert h.getarrayitem(box1, descr2, index2) is None
+
+        h.reset()
+        assert h.getarrayitem(box1, descr1, index1) is None
+        assert h.getarrayitem(box1, descr2, index1) is None
+        assert h.getarrayitem(box3, descr1, index1) is None
+
+    def test_heapcache_array_nonconst_index(self):
+        h = HeapCache()
+        h.setarrayitem(box1, descr1, index1, box2)
+        h.setarrayitem(box1, descr1, index2, box4)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        assert h.getarrayitem(box1, descr1, index2) is box4
+        h.setarrayitem(box1, descr1, box2, box3)
+        assert h.getarrayitem(box1, descr1, index1) is None
+        assert h.getarrayitem(box1, descr1, index2) is None
+
+    def test_heapcache_read_fields_multiple_array(self):
+        h = HeapCache()
+        h.getarrayitem_now_known(box1, descr1, index1, box2)
+        h.getarrayitem_now_known(box3, descr1, index1, box4)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        assert h.getarrayitem(box1, descr2, index1) is None
+        assert h.getarrayitem(box3, descr1, index1) is box4
+        assert h.getarrayitem(box3, descr2, index1) is None
+
+        h.reset()
+        assert h.getarrayitem(box1, descr1, index1) is None
+        assert h.getarrayitem(box1, descr2, index1) is None
+        assert h.getarrayitem(box3, descr1, index1) is None
+        assert h.getarrayitem(box3, descr2, index1) is None
+
+    def test_heapcache_write_fields_multiple_array(self):
+        h = HeapCache()
+        h.setarrayitem(box1, descr1, index1, box2)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        h.setarrayitem(box3, descr1, index1, box4)
+        assert h.getarrayitem(box3, descr1, index1) is box4
+        assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias
+
+        h = HeapCache()
+        h.new(box1)
+        h.setarrayitem(box1, descr1, index1, box2)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        h.setarrayitem(box3, descr1, index1, box4)
+        assert h.getarrayitem(box3, descr1, index1) is box4
+        assert h.getarrayitem(box1, descr1, index1) is None # box1 and box3 can alias
+
+        h = HeapCache()
+        h.new(box1)
+        h.new(box3)
+        h.setarrayitem(box1, descr1, index1, box2)
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        h.setarrayitem(box3, descr1, index1, box4)
+        assert h.getarrayitem(box3, descr1, index1) is box4
+        assert h.getarrayitem(box1, descr1, index1) is box2 # box1 and box3 cannot alias
+        h.setarrayitem(box1, descr1, index1, box3)
+        assert h.getarrayitem(box3, descr1, index1) is box4
+        assert h.getarrayitem(box1, descr1, index1) is box3 # box1 and box3 cannot alias
+
+    def test_length_cache(self):
+        h = HeapCache()
+        h.new_array(box1, lengthbox1)
+        assert h.arraylen(box1) is lengthbox1
+
+        assert h.arraylen(box2) is None
+        h.arraylen_now_known(box2, lengthbox2)
+        assert h.arraylen(box2) is lengthbox2
+
+
+    def test_invalidate_cache(self):
+        h = HeapCache()
+        h.setfield(box1, descr1, box2)
+        h.setarrayitem(box1, descr1, index1, box2)
+        h.setarrayitem(box1, descr1, index2, box4)
+        h.invalidate_caches(rop.INT_ADD, None, [])
+        h.invalidate_caches(rop.INT_ADD_OVF, None, [])
+        h.invalidate_caches(rop.SETFIELD_RAW, None, [])
+        h.invalidate_caches(rop.SETARRAYITEM_RAW, None, [])
+        assert h.getfield(box1, descr1) is box2
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        assert h.getarrayitem(box1, descr1, index2) is box4
+
+        h.invalidate_caches(
+            rop.CALL, FakeCallDescr(FakeEffektinfo.EF_ELIDABLE_CANNOT_RAISE), [])
+        assert h.getfield(box1, descr1) is box2
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        assert h.getarrayitem(box1, descr1, index2) is box4
+
+        h.invalidate_caches(
+            rop.CALL_LOOPINVARIANT, FakeCallDescr(FakeEffektinfo.EF_LOOPINVARIANT), [])
+
+        h.invalidate_caches(
+            rop.CALL, FakeCallDescr(FakeEffektinfo.EF_RANDOM_EFFECTS), [])
+        assert h.getfield(box1, descr1) is None
+        assert h.getarrayitem(box1, descr1, index1) is None
+        assert h.getarrayitem(box1, descr1, index2) is None
+
+
+    def test_replace_box(self):
+        h = HeapCache()
+        h.setfield(box1, descr1, box2)
+        h.setfield(box1, descr2, box3)
+        h.setfield(box2, descr3, box3)
+        h.replace_box(box1, box4)
+        assert h.getfield(box1, descr1) is None
+        assert h.getfield(box1, descr2) is None
+        assert h.getfield(box4, descr1) is box2
+        assert h.getfield(box4, descr2) is box3
+        assert h.getfield(box2, descr3) is box3
+
+    def test_replace_box_array(self):
+        h = HeapCache()
+        h.setarrayitem(box1, descr1, index1, box2)
+        h.setarrayitem(box1, descr2, index1, box3)
+        h.arraylen_now_known(box1, lengthbox1)
+        h.setarrayitem(box2, descr1, index2, box1)
+        h.setarrayitem(box3, descr2, index2, box1)
+        h.setarrayitem(box2, descr3, index2, box3)
+        h.replace_box(box1, box4)
+        assert h.getarrayitem(box1, descr1, index1) is None
+        assert h.getarrayitem(box1, descr2, index1) is None
+        assert h.arraylen(box1) is None
+        assert h.arraylen(box4) is lengthbox1
+        assert h.getarrayitem(box4, descr1, index1) is box2
+        assert h.getarrayitem(box4, descr2, index1) is box3
+        assert h.getarrayitem(box2, descr1, index2) is box4
+        assert h.getarrayitem(box3, descr2, index2) is box4
+        assert h.getarrayitem(box2, descr3, index2) is box3
+
+        h.replace_box(lengthbox1, lengthbox2)
+        assert h.arraylen(box4) is lengthbox2
+
+    def test_ll_arraycopy(self):
+        h = HeapCache()
+        h.new_array(box1, lengthbox1)
+        h.setarrayitem(box1, descr1, index1, box2)
+        h.new_array(box2, lengthbox1)
+        # Just need the destination box for this call
+        h.invalidate_caches(
+            rop.CALL,
+            FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY),
+            [None, None, box2, None, None]
+        )
+        assert h.getarrayitem(box1, descr1, index1) is box2
+        h.invalidate_caches(
+            rop.CALL,
+            FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY),
+            [None, None, box3, None, None]
+        )
+        assert h.getarrayitem(box1, descr1, index1) is None
+
+        h.setarrayitem(box4, descr1, index1, box2)
+        assert h.getarrayitem(box4, descr1, index1) is box2
+        h.invalidate_caches(
+            rop.CALL,
+            FakeCallDescr(FakeEffektinfo.EF_CANNOT_RAISE, FakeEffektinfo.OS_ARRAYCOPY),
+            [None, None, box2, None, None]
+        )
+        assert h.getarrayitem(box4, descr1, index1) is None
+
+    def test_unescaped(self):
+        h = HeapCache()
+        assert not h.is_unescaped(box1)
+        h.new(box2)
+        assert h.is_unescaped(box2)
+        h.invalidate_caches(rop.SETFIELD_GC, None, [box2, box1])
+        assert h.is_unescaped(box2)
+        h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2])
+        assert not h.is_unescaped(box2)
+
+    def test_unescaped_testing(self):
+        h = HeapCache()
+        h.new(box1)
+        h.new(box2)
+        assert h.is_unescaped(box1)
+        assert h.is_unescaped(box2)
+        # Putting a virtual inside of another virtual doesn't escape it.
+        h.invalidate_caches(rop.SETFIELD_GC, None, [box1, box2])
+        assert h.is_unescaped(box2)
+        # Reading a field from a virtual doesn't escape it.
+        h.invalidate_caches(rop.GETFIELD_GC, None, [box1])
+        assert h.is_unescaped(box1)
+        # Escaping a virtual transitively escapes anything inside of it.
+        assert not h.is_unescaped(box3)
+        h.invalidate_caches(rop.SETFIELD_GC, None, [box3, box1])
+        assert not h.is_unescaped(box1)
+        assert not h.is_unescaped(box2)
+
+    def test_unescaped_array(self):
+        h = HeapCache()
+        h.new_array(box1, lengthbox1)
+        assert h.is_unescaped(box1)
+        h.invalidate_caches(rop.SETARRAYITEM_GC, None, [box1, index1, box2])
+        assert h.is_unescaped(box1)
+        h.invalidate_caches(rop.SETARRAYITEM_GC, None, [box2, index1, box1])
+        assert not h.is_unescaped(box1)
\ No newline at end of file
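
For readers new to the heapcache, here is a toy, self-contained sketch of the escape rule that the last three tests above exercise (this is not PyPy's HeapCache; all names below are invented for illustration): a freshly allocated box stays unescaped while it is only stored into other unescaped boxes, and escaping a box transitively escapes everything that was stored into it.

    class EscapeTrackerSketch(object):
        def __init__(self):
            self.unescaped = set()   # boxes known not to have escaped yet
            self.contents = {}       # box -> set of boxes stored into it

        def new(self, box):
            self.unescaped.add(box)
            self.contents[box] = set()

        def setfield(self, target, value):
            if target in self.unescaped:
                # storing into an unescaped box does not escape the value
                self.contents[target].add(value)
            else:
                self._escape(value)

        def _escape(self, box):
            if box in self.unescaped:
                self.unescaped.discard(box)
                for stored in self.contents.pop(box, ()):
                    self._escape(stored)    # transitive escape

    t = EscapeTrackerSketch()
    t.new("box1"); t.new("box2")
    t.setfield("box1", "box2")        # virtual inside virtual: still unescaped
    assert "box2" in t.unescaped
    t.setfield("box3", "box1")        # box3 already escaped, so box1 escapes
    assert "box1" not in t.unescaped
    assert "box2" not in t.unescaped  # ...and box2 escapes transitively
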
diff --git a/pypy/jit/metainterp/test/test_list.py b/pypy/jit/metainterp/test/test_list.py
--- a/pypy/jit/metainterp/test/test_list.py
+++ b/pypy/jit/metainterp/test/test_list.py
@@ -34,7 +34,7 @@
                 l = [x + 1]
                 n -= 1
             return l[0]
-        
+
         res = self.meta_interp(f, [10], listops=True)
         assert res == f(10)
         self.check_all_virtualized()
@@ -60,7 +60,7 @@
 
     def test_ll_fixed_setitem_fast(self):
         jitdriver = JitDriver(greens = [], reds = ['n', 'l'])
-        
+
         def f(n):
             l = [1, 2, 3]
 
@@ -116,7 +116,7 @@
         assert res == f(10)
         py.test.skip("'[non-null] * n' gives a residual call so far")
         self.check_loops(setarrayitem_gc=0, getarrayitem_gc=0, call=0)
-    
+
     def test_arraycopy_simpleoptimize(self):
         def f():
             l = [1, 2, 3, 4]
@@ -208,6 +208,26 @@
         assert res == f(15)
         self.check_loops(guard_exception=0)
 
+    def test_virtual_resize(self):
+        jitdriver = JitDriver(greens = [], reds = ['n', 's'])
+        def f(n):
+            s = 0
+            while n > 0:
+                jitdriver.jit_merge_point(n=n, s=s)
+                lst = []
+                lst += [1]
+                n -= len(lst)
+                s += lst[0]
+                lst.pop()
+                lst.append(1)
+                s /= lst.pop()
+            return s
+        res = self.meta_interp(f, [15], listops=True)
+        assert res == f(15)
+        self.check_loops({"int_add": 1, "int_sub": 1, "int_gt": 1,
+                          "guard_true": 1, "jump": 1})
+
+
 class TestOOtype(ListTests, OOJitMixin):
     pass
 
@@ -236,8 +256,6 @@
             return a * b
         res = self.meta_interp(f, [37])
         assert res == f(37)
-        # There is the one actual field on a, plus 2 getfield's from the list
-        # itself, 1 to get the length (which is then incremented and passed to
-        # the resize func), and then a read of the items field to actually
-        # perform the setarrayitem on
-        self.check_loops(getfield_gc=5, everywhere=True)
+        # There is the one actual field on a, plus several fields on the list
+        # itself
+        self.check_loops(getfield_gc=10, everywhere=True)
diff --git a/pypy/jit/metainterp/test/test_slist.py b/pypy/jit/metainterp/test/test_slist.py
--- a/pypy/jit/metainterp/test/test_slist.py
+++ b/pypy/jit/metainterp/test/test_slist.py
@@ -5,7 +5,6 @@
 class ListTests(object):
 
     def test_basic_list(self):
-        py.test.skip("not yet")
         myjitdriver = JitDriver(greens = [], reds = ['n', 'lst'])
         def f(n):
             lst = []
@@ -34,7 +33,7 @@
             return m
         res = self.interp_operations(f, [11], listops=True)
         assert res == 49
-        self.check_operations_history(call=5)
+        self.check_operations_history(call=3)
 
     def test_list_of_voids(self):
         myjitdriver = JitDriver(greens = [], reds = ['n', 'lst'])
@@ -93,7 +92,7 @@
             return x
         res = self.meta_interp(f, [-2], listops=True)
         assert res == 41
-        self.check_loops(call=1, guard_value=0)
+        self.check_loops(call=0, guard_value=0)
 
 # we don't support resizable lists on ootype
 #class TestOOtype(ListTests, OOJitMixin):
diff --git a/pypy/jit/metainterp/test/test_string.py b/pypy/jit/metainterp/test/test_string.py
--- a/pypy/jit/metainterp/test/test_string.py
+++ b/pypy/jit/metainterp/test/test_string.py
@@ -1,9 +1,11 @@
 import py
+
+from pypy.jit.codewriter.policy import StopAtXPolicy
+from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
+from pypy.rlib.debug import debug_print
 from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted
-from pypy.rlib.debug import debug_print
-from pypy.jit.codewriter.policy import StopAtXPolicy
+from pypy.rlib.rstring import StringBuilder
 from pypy.rpython.ootypesystem import ootype
-from pypy.jit.metainterp.test.support import LLJitMixin, OOJitMixin
 
 
 class StringTests:
@@ -27,7 +29,7 @@
             return i
         res = self.meta_interp(f, [10, True, _str('h')], listops=True)
         assert res == 5
-        self.check_loops(**{self.CALL: 1, self.CALL_PURE: 0})
+        self.check_loops(**{self.CALL: 1, self.CALL_PURE: 0, 'everywhere': True})
 
     def test_eq_folded(self):
         _str = self._str
@@ -327,7 +329,7 @@
     def test_str_slice_len_surviving(self):
         _str = self._str
         longstring = _str("Unrolling Trouble")
-        mydriver = JitDriver(reds = ['i', 'a', 'sa'], greens = []) 
+        mydriver = JitDriver(reds = ['i', 'a', 'sa'], greens = [])
         def f(a):
             i = sa = a
             while i < len(longstring):
@@ -343,7 +345,7 @@
         fillers = _str("abcdefghijklmnopqrstuvwxyz")
         data = _str("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
 
-        mydriver = JitDriver(reds = ['line', 'noise', 'res'], greens = []) 
+        mydriver = JitDriver(reds = ['line', 'noise', 'res'], greens = [])
         def f():
             line = data
             noise = fillers
@@ -370,7 +372,7 @@
             def __init__(self, value):
                 self.value = value
         mydriver = JitDriver(reds = ['ratio', 'line', 'noise', 'res'],
-                             greens = []) 
+                             greens = [])
         def f():
             line = Str(data)
             noise = Str(fillers)
@@ -408,7 +410,7 @@
             return len(sa)
         assert self.meta_interp(f, [16]) == f(16)
 
-    def test_loop_invariant_string_slize(self):
+    def test_loop_invariant_string_slice(self):
         _str = self._str
         mydriver = JitDriver(reds = ['i', 'n', 'sa', 's', 's1'], greens = [])
         def f(n, c):
@@ -425,7 +427,7 @@
             return sa
         assert self.meta_interp(f, [16, 'a']) == f(16, 'a')
 
-    def test_loop_invariant_string_slize_boxed(self):
+    def test_loop_invariant_string_slice_boxed(self):
         class Str(object):
             def __init__(self, value):
                 self.value = value
@@ -445,7 +447,7 @@
             return sa
         assert self.meta_interp(f, [16, 'a']) == f(16, 'a')
 
-    def test_loop_invariant_string_slize_in_array(self):
+    def test_loop_invariant_string_slice_in_array(self):
         _str = self._str
         mydriver = JitDriver(reds = ['i', 'n', 'sa', 's', 's1'], greens = [])
         def f(n, c):
@@ -513,7 +515,7 @@
                 m -= 1
             return 42
         self.meta_interp(f, [6, 7])
-        self.check_loops(call=3,    # str(), _str(), escape()
+        self.check_loops(call=1,    # escape()
                          newunicode=1, unicodegetitem=0,
                          unicodesetitem=1, copyunicodecontent=1)
 
@@ -536,3 +538,55 @@
         self.check_loops(call_pure=0, call=1,
                          newunicode=0, unicodegetitem=0,
                          unicodesetitem=0, copyunicodecontent=0)
+
+    def test_join_chars(self):
+        jitdriver = JitDriver(reds=['a', 'b', 'c', 'i'], greens=[])
+        def f(a, b, c):
+            i = 0
+            while i < 10:
+                jitdriver.jit_merge_point(a=a, b=b, c=c, i=i)
+                x = []
+                if a:
+                    x.append("a")
+                if b:
+                    x.append("b")
+                if c:
+                    x.append("c")
+                i += len("".join(x))
+            return i
+        res = self.meta_interp(f, [1, 1, 1])
+        assert res == f(True, True, True)
+        # The "".join should be unrolled, since the length of x is known since

+        # it is virtual, ensure there are no calls to ll_join_chars, or
+        # allocations.
+        self.check_loops({
+            "guard_true": 5, "int_is_true": 3, "int_lt": 2, "int_add": 2, "jump": 2,
+        }, everywhere=True)
+
+    def test_virtual_copystringcontent(self):
+        jitdriver = JitDriver(reds=['n', 'result'], greens=[])
+        def main(n):
+            result = 0
+            while n >= 0:
+                jitdriver.jit_merge_point(n=n, result=result)
+                b = StringBuilder(6)
+                b.append("Hello!")
+                result += ord(b.build()[0])
+                n -= 1
+            return result
+        res = self.meta_interp(main, [9])
+        assert res == main(9)
+
+    def test_virtual_copystringcontent2(self):
+        jitdriver = JitDriver(reds=['n', 'result'], greens=[])
+        def main(n):
+            result = 0
+            while n >= 0:
+                jitdriver.jit_merge_point(n=n, result=result)
+                b = StringBuilder(6)
+                b.append("Hello!")
+                result += ord((b.build() + "xyz")[0])
+                n -= 1
+            return result
+        res = self.meta_interp(main, [9])
+        assert res == main(9)
diff --git a/pypy/jit/metainterp/test/test_tracingopts.py b/pypy/jit/metainterp/test/test_tracingopts.py
--- a/pypy/jit/metainterp/test/test_tracingopts.py
+++ b/pypy/jit/metainterp/test/test_tracingopts.py
@@ -1,7 +1,10 @@
+import sys
+
+from pypy.jit.metainterp.test.support import LLJitMixin
+from pypy.rlib import jit
+from pypy.rlib.rarithmetic import ovfcheck
+
 import py
-import sys
-from pypy.rlib import jit
-from pypy.jit.metainterp.test.support import LLJitMixin
 
 
 class TestLLtype(LLJitMixin):
@@ -257,6 +260,28 @@
         self.check_operations_history(setarrayitem_gc=2, setfield_gc=2,
                                       getarrayitem_gc=0, getfield_gc=2)
 
+    def test_promote_changes_array_cache(self):
+        a1 = [0, 0]
+        a2 = [0, 0]
+        def fn(n):
+            if n > 0:
+                a = a1
+            else:
+                a = a2
+            a[0] = n
+            jit.hint(n, promote=True)
+            x1 = a[0]
+            jit.hint(x1, promote=True)
+            a[n - n] = n + 1
+            return a[0] + x1
+        res = self.interp_operations(fn, [7])
+        assert res == 7 + 7 + 1
+        self.check_operations_history(getarrayitem_gc=0, guard_value=1)
+        res = self.interp_operations(fn, [-7])
+        assert res == -7 - 7 + 1
+        self.check_operations_history(getarrayitem_gc=0, guard_value=1)
+
+
     def test_list_caching(self):
         a1 = [0, 0]
         a2 = [0, 0]
@@ -357,7 +382,7 @@
         assert res == f(10, 1, 1)
         self.check_history(getarrayitem_gc=0, getfield_gc=0)
 
-    def test_heap_caching_pure(self):
+    def test_heap_caching_array_pure(self):
         class A(object):
             pass
         p1 = A()
@@ -405,3 +430,164 @@
         assert res == -7 + 7
         self.check_operations_history(getfield_gc=0)
         return
+
+    def test_heap_caching_multiple_objects(self):
+        class Gbl(object):
+            pass
+        g = Gbl()
+        class A(object):
+            pass
+        a1 = A()
+        g.a1 = a1
+        a1.x = 7
+        a2 = A()
+        g.a2 = a2
+        a2.x = 7
+        def gn(a1, a2):
+            return a1.x + a2.x
+        def fn(n):
+            if n < 0:
+                a1 = A()
+                g.a1 = a1
+                a1.x = n
+                a2 = A()
+                g.a2 = a2
+                a2.x = n - 1
+            else:
+                a1 = g.a1
+                a2 = g.a2
+            return a1.x + a2.x + gn(a1, a2)
+        res = self.interp_operations(fn, [-7])
+        assert res == 2 * -7 + 2 * -8
+        self.check_operations_history(setfield_gc=4, getfield_gc=0)
+        res = self.interp_operations(fn, [7])
+        assert res == 4 * 7
+        self.check_operations_history(getfield_gc=4)
+
+    def test_heap_caching_multiple_tuples(self):
+        class Gbl(object):
+            pass
+        g = Gbl()
+        def gn(a1, a2):
+            return a1[0] + a2[0]
+        def fn(n):
+            a1 = (n, )
+            g.a = a1
+            a2 = (n - 1, )
+            g.a = a2
+            jit.promote(n)
+            return a1[0] + a2[0] + gn(a1, a2)
+        res = self.interp_operations(fn, [7])
+        assert res == 2 * 7 + 2 * 6
+        self.check_operations_history(getfield_gc_pure=0)
+        res = self.interp_operations(fn, [-7])
+        assert res == 2 * -7 + 2 * -8
+        self.check_operations_history(getfield_gc_pure=0)
+
+    def test_heap_caching_multiple_arrays(self):
+        class Gbl(object):
+            pass
+        g = Gbl()
+        def fn(n):
+            a1 = [n, n, n]
+            g.a = a1
+            a1[0] = n
+            a2 = [n, n, n]
+            g.a = a2
+            a2[0] = n - 1
+            return a1[0] + a2[0] + a1[0] + a2[0]
+        res = self.interp_operations(fn, [7])
+        assert res == 2 * 7 + 2 * 6
+        self.check_operations_history(getarrayitem_gc=0)
+        res = self.interp_operations(fn, [-7])
+        assert res == 2 * -7 + 2 * -8
+        self.check_operations_history(getarrayitem_gc=0)
+
+    def test_heap_caching_multiple_arrays_getarrayitem(self):
+        class Gbl(object):
+            pass
+        g = Gbl()
+        g.a1 = [7, 8, 9]
+        g.a2 = [8, 9, 10, 11]
+
+        def fn(i):
+            if i < 0:
+                g.a1 = [7, 8, 9]
+                g.a2 = [7, 8, 9, 10]
+            jit.promote(i)
+            a1 = g.a1
+            a1[i + 1] = 15 # make lists mutable
+            a2 = g.a2
+            a2[i + 1] = 19
+            return a1[i] + a2[i] + a1[i] + a2[i]
+        res = self.interp_operations(fn, [0])
+        assert res == 2 * 7 + 2 * 8
+        self.check_operations_history(getarrayitem_gc=2)
+
+
+    def test_heap_caching_multiple_lists(self):
+        class Gbl(object):
+            pass
+        g = Gbl()
+        g.l = []
+        def fn(n):
+            if n < -100:
+                g.l.append(1)
+            a1 = [n, n, n]
+            g.l = a1
+            a1[0] = n
+            a2 = [n, n, n]
+            g.l = a2
+            a2[0] = n - 1
+            return a1[0] + a2[0] + a1[0] + a2[0]
+        res = self.interp_operations(fn, [7])
+        assert res == 2 * 7 + 2 * 6
+        self.check_operations_history(getarrayitem_gc=0, getfield_gc=0)
+        res = self.interp_operations(fn, [-7])
+        assert res == 2 * -7 + 2 * -8
+        self.check_operations_history(getarrayitem_gc=0, getfield_gc=0)
+
+    def test_length_caching(self):
+        class Gbl(object):
+            pass
+        g = Gbl()
+        g.a = [0] * 7
+        def fn(n):
+            a = g.a
+            res = len(a) + len(a)
+            a1 = [0] * n
+            g.a = a1
+            return len(a1) + res
+        res = self.interp_operations(fn, [7])
+        assert res == 7 * 3
+        self.check_operations_history(arraylen_gc=1)
+
+    def test_arraycopy(self):
+        class Gbl(object):
+            pass
+        g = Gbl()
+        g.a = [0] * 7
+        def fn(n):
+            assert n >= 0
+            a = g.a
+            x = [0] * n
+            x[2] = 21
+            return len(a[:n]) + x[2]
+        res = self.interp_operations(fn, [3])
+        assert res == 24
+        self.check_operations_history(getarrayitem_gc=0)
+
+    def test_fold_int_add_ovf(self):
+        def fn(n):
+            jit.promote(n)
+            try:
+                n = ovfcheck(n + 1)
+            except OverflowError:
+                return 12
+            else:
+                return n
+        res = self.interp_operations(fn, [3])
+        assert res == 4
+        self.check_operations_history(int_add_ovf=0)
+        res = self.interp_operations(fn, [sys.maxint])
+        assert res == 12
\ No newline at end of file
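
The test_fold_int_add_ovf test above relies on rlib's ovfcheck(); roughly, it raises OverflowError when the result no longer fits a signed machine word, and after the promote the trace can fold the overflow check away. A plain-Python approximation of the behaviour being tested (illustrative only, not the rlib implementation):

    import sys

    def ovfcheck_sketch(value):
        # the real ovfcheck() guards machine-integer overflow after translation
        if not (-sys.maxint - 1 <= value <= sys.maxint):
            raise OverflowError
        return value

    def fn(n):
        try:
            n = ovfcheck_sketch(n + 1)
        except OverflowError:
            return 12
        return n

    assert fn(3) == 4
    assert fn(sys.maxint) == 12
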
diff --git a/pypy/jit/metainterp/test/test_virtualstate.py b/pypy/jit/metainterp/test/test_virtualstate.py
--- a/pypy/jit/metainterp/test/test_virtualstate.py
+++ b/pypy/jit/metainterp/test/test_virtualstate.py
@@ -2,7 +2,7 @@
 import py
 from pypy.jit.metainterp.optimize import InvalidLoop
 from pypy.jit.metainterp.optimizeopt.virtualstate import VirtualStateInfo, VStructStateInfo, \
-     VArrayStateInfo, NotVirtualStateInfo, VirtualState
+     VArrayStateInfo, NotVirtualStateInfo, VirtualState, ShortBoxes
 from pypy.jit.metainterp.optimizeopt.optimizer import OptValue
 from pypy.jit.metainterp.history import BoxInt, BoxFloat, BoxPtr, ConstInt, ConstPtr
 from pypy.rpython.lltypesystem import lltype
@@ -11,6 +11,7 @@
 from pypy.jit.metainterp.history import TreeLoop, LoopToken
 from pypy.jit.metainterp.optimizeopt.test.test_optimizeopt import FakeDescr, FakeMetaInterpStaticData
 from pypy.jit.metainterp.optimize import RetraceLoop
+from pypy.jit.metainterp.resoperation import ResOperation, rop
 
 class TestBasic:
     someptr1 = LLtypeMixin.myptr
@@ -129,6 +130,7 @@
             info.fieldstate = [info]
             assert info.generalization_of(info, {}, {})
 
+
 class BaseTestGenerateGuards(BaseTest):
     def guards(self, info1, info2, box, expected):
         info1.position = info2.position = 0
@@ -910,3 +912,111 @@
 class TestLLtypeBridges(BaseTestBridges, LLtypeMixin):
     pass
 
+class FakeOptimizer:
+    def make_equal_to(*args):
+        pass
+    def getvalue(*args):
+        pass
+
+class TestShortBoxes:
+    p1 = BoxPtr()
+    p2 = BoxPtr()
+    p3 = BoxPtr()
+    p4 = BoxPtr()
+    i1 = BoxInt()
+    i2 = BoxInt()
+    i3 = BoxInt()
+    i4 = BoxInt()
+    
+    def test_short_box_duplication_direct(self):
+        class Optimizer(FakeOptimizer):
+            def produce_potential_short_preamble_ops(_self, sb):
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1))
+        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
+        assert len(sb.short_boxes) == 4
+        assert self.i1 in sb.short_boxes
+        assert sum([op.result is self.i1 for op in sb.short_boxes.values() if op]) == 1
+
+    def test_dont_duplicate_potential_boxes(self):
+        class Optimizer(FakeOptimizer):
+            def produce_potential_short_preamble_ops(_self, sb):
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [BoxPtr()], self.i1))
+                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
+                sb.add_potential(ResOperation(rop.INT_ADD, [ConstInt(7), self.i2],
+                                              self.i3))
+        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
+        assert len(sb.short_boxes) == 5
+
+    def test_prioritize1(self):
+        class Optimizer(FakeOptimizer):
+            def produce_potential_short_preamble_ops(_self, sb):
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1))
+                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
+        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
+        assert len(sb.short_boxes.values()) == 5
+        int_neg = [op for op in sb.short_boxes.values()
+                   if op and op.getopnum() == rop.INT_NEG]
+        assert len(int_neg) == 1
+        int_neg = int_neg[0]
+        getfield = [op for op in sb.short_boxes.values()
+                    if op and op.result == int_neg.getarg(0)]
+        assert len(getfield) == 1
+        assert getfield[0].getarg(0) in [self.p1, self.p2]
+
+    def test_prioritize1bis(self):
+        class Optimizer(FakeOptimizer):
+            def produce_potential_short_preamble_ops(_self, sb):
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1),
+                                 synthetic=True)
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1),
+                                 synthetic=True)
+                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
+        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
+        assert len(sb.short_boxes.values()) == 5
+        int_neg = [op for op in sb.short_boxes.values()
+                   if op and op.getopnum() == rop.INT_NEG]
+        assert len(int_neg) == 1
+        int_neg = int_neg[0]
+        getfield = [op for op in sb.short_boxes.values()
+                    if op and op.result == int_neg.getarg(0)]
+        assert len(getfield) == 1
+        assert getfield[0].getarg(0) in [self.p1, self.p2]
+        
+    def test_prioritize2(self):
+        class Optimizer(FakeOptimizer):
+            def produce_potential_short_preamble_ops(_self, sb):
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1),
+                                 synthetic=True)
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1))
+                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
+        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
+        assert len(sb.short_boxes.values()) == 5
+        int_neg = [op for op in sb.short_boxes.values()
+                   if op and op.getopnum() == rop.INT_NEG]
+        assert len(int_neg) == 1
+        int_neg = int_neg[0]
+        getfield = [op for op in sb.short_boxes.values()
+                    if op and op.result == int_neg.getarg(0)]
+        assert len(getfield) == 1
+        assert getfield[0].getarg(0) == self.p2
+        
+    def test_prioritize3(self):
+        class Optimizer(FakeOptimizer):
+            def produce_potential_short_preamble_ops(_self, sb):
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p1], self.i1))
+                sb.add_potential(ResOperation(rop.GETFIELD_GC, [self.p2], self.i1),
+                                 synthetic=True)
+                sb.add_potential(ResOperation(rop.INT_NEG, [self.i1], self.i2))
+        sb = ShortBoxes(Optimizer(), [self.p1, self.p2])
+        assert len(sb.short_boxes.values()) == 5
+        int_neg = [op for op in sb.short_boxes.values()
+                   if op and op.getopnum() == rop.INT_NEG]
+        assert len(int_neg) == 1
+        int_neg = int_neg[0]
+        getfield = [op for op in sb.short_boxes.values()
+                    if op and op.result == int_neg.getarg(0)]
+        assert len(getfield) == 1
+        assert getfield[0].getarg(0) == self.p1
diff --git a/pypy/jit/metainterp/warmspot.py b/pypy/jit/metainterp/warmspot.py
--- a/pypy/jit/metainterp/warmspot.py
+++ b/pypy/jit/metainterp/warmspot.py
@@ -53,6 +53,8 @@
         extraconfigopts = {'translation.list_comprehension_operations': True}
     else:
         extraconfigopts = {}
+    if kwds.pop("taggedpointers", False):
+        extraconfigopts["translation.taggedpointers"] = True
     interp, graph = get_interpreter(function, args,
                                     backendopt=False,  # will be done below
                                     type_system=type_system,
@@ -130,8 +132,15 @@
     results = _find_jit_marker(graphs, 'jit_merge_point')
     if not results:
         raise Exception("no jit_merge_point found!")
+    seen = set([graph for graph, block, pos in results])
+    assert len(seen) == len(results), (
+        "found several jit_merge_points in the same graph")
     return results
 
+def locate_jit_merge_point(graph):
+    [(graph, block, pos)] = find_jit_merge_points([graph])
+    return block, pos, block.operations[pos]
+
 def find_set_param(graphs):
     return _find_jit_marker(graphs, 'set_param')
 
@@ -235,7 +244,7 @@
     def split_graph_and_record_jitdriver(self, graph, block, pos):
         op = block.operations[pos]
         jd = JitDriverStaticData()
-        jd._jit_merge_point_pos = (graph, op)
+        jd._jit_merge_point_in = graph
         args = op.args[2:]
         s_binding = self.translator.annotator.binding
         jd._portal_args_s = [s_binding(v) for v in args]
@@ -504,7 +513,8 @@
             self.make_args_specification(jd)
 
     def make_args_specification(self, jd):
-        graph, op = jd._jit_merge_point_pos
+        graph = jd._jit_merge_point_in
+        _, _, op = locate_jit_merge_point(graph)
         greens_v, reds_v = support.decode_hp_hint_args(op)
         ALLARGS = [v.concretetype for v in (greens_v + reds_v)]
         jd._green_args_spec = [v.concretetype for v in greens_v]
@@ -552,7 +562,7 @@
             assert jitdriver in sublists, \
                    "can_enter_jit with no matching jit_merge_point"
             jd, sublist = sublists[jitdriver]
-            origportalgraph = jd._jit_merge_point_pos[0]
+            origportalgraph = jd._jit_merge_point_in
             if graph is not origportalgraph:
                 sublist.append((graph, block, index))
                 jd.no_loop_header = False
@@ -582,7 +592,7 @@
             can_enter_jits = [(jd.portal_graph, jd.portal_graph.startblock, 0)]
 
         for graph, block, index in can_enter_jits:
-            if graph is jd._jit_merge_point_pos[0]:
+            if graph is jd._jit_merge_point_in:
                 continue
 
             op = block.operations[index]
@@ -640,7 +650,7 @@
         #           while 1:
         #               more stuff
         #
-        origportalgraph = jd._jit_merge_point_pos[0]
+        origportalgraph = jd._jit_merge_point_in
         portalgraph = jd.portal_graph
         PORTALFUNC = jd._PORTAL_FUNCTYPE
 
@@ -794,14 +804,7 @@
         # ____________________________________________________________
         # Now mutate origportalgraph to end with a call to portal_runner_ptr
         #
-        _, op = jd._jit_merge_point_pos
-        for origblock in origportalgraph.iterblocks():
-            if op in origblock.operations:
-                break
-        else:
-            assert False, "lost the operation %r in the graph %r" % (
-                op, origportalgraph)
-        origindex = origblock.operations.index(op)
+        origblock, origindex, op = locate_jit_merge_point(origportalgraph)
         assert op.opname == 'jit_marker'
         assert op.args[0].value == 'jit_merge_point'
         greens_v, reds_v = support.decode_hp_hint_args(op)
diff --git a/pypy/jit/metainterp/warmstate.py b/pypy/jit/metainterp/warmstate.py
--- a/pypy/jit/metainterp/warmstate.py
+++ b/pypy/jit/metainterp/warmstate.py
@@ -367,9 +367,9 @@
             # ---------- execute assembler ----------
             while True:     # until interrupted by an exception
                 metainterp_sd.profiler.start_running()
-                debug_start("jit-running")
+                #debug_start("jit-running")
                 fail_descr = warmrunnerdesc.execute_token(loop_token)
-                debug_stop("jit-running")
+                #debug_stop("jit-running")
                 metainterp_sd.profiler.end_running()
                 loop_token = None     # for test_memmgr
                 if vinfo is not None:
diff --git a/pypy/jit/tl/pypyjit.py b/pypy/jit/tl/pypyjit.py
--- a/pypy/jit/tl/pypyjit.py
+++ b/pypy/jit/tl/pypyjit.py
@@ -40,7 +40,7 @@
 config.objspace.usemodules.array = False
 config.objspace.usemodules._weakref = True
 config.objspace.usemodules._sre = False
-config.objspace.usemodules._lsprof = True
+config.objspace.usemodules._lsprof = False
 #
 config.objspace.usemodules._ffi = True
 config.objspace.usemodules.micronumpy = False
@@ -77,7 +77,7 @@
 
 def read_code():
     from pypy.module.marshal.interp_marshal import dumps
-    
+
     filename = 'pypyjit_demo.py'
     source = readfile(filename)
     ec = space.getexecutioncontext()
diff --git a/pypy/jit/tl/pypyjit_demo.py b/pypy/jit/tl/pypyjit_demo.py
--- a/pypy/jit/tl/pypyjit_demo.py
+++ b/pypy/jit/tl/pypyjit_demo.py
@@ -2,22 +2,16 @@
 pypyjit.set_param(threshold=200)
 
 
-def main(a, b):
-    i = sa = 0
-    while i < 300:
-        if a > 0: # Specialises the loop
-            pass
-        if b < 2 and b > 0:
-            pass
-        if (a >> b) >= 0:
-            sa += 1
-        if (a << b) > 2:
-            sa += 10000
-        i += 1
-    return sa
+def f(n):
+    pairs = [(0.0, 1.0), (2.0, 3.0)] * n
+    mag = 0
+    for (x1, x2) in pairs:
+        dx = x1 - x2
+        mag += ((dx * dx) ** (-1.5))
+    return n
 
 try:
-    print main(2, 1)
+    print f(301)
 
 except Exception, e:
     print "Exception: ", type(e)
diff --git a/pypy/module/__builtin__/functional.py b/pypy/module/__builtin__/functional.py
--- a/pypy/module/__builtin__/functional.py
+++ b/pypy/module/__builtin__/functional.py
@@ -3,13 +3,13 @@
 
 """
 
+from pypy.interpreter.baseobjspace import Wrappable
 from pypy.interpreter.error import OperationError
-from pypy.interpreter.gateway import NoneNotWrapped
-from pypy.interpreter.gateway import interp2app, unwrap_spec
+from pypy.interpreter.gateway import NoneNotWrapped, interp2app, unwrap_spec
 from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.baseobjspace import Wrappable
+from pypy.rlib import jit
+from pypy.rlib.objectmodel import specialize
 from pypy.rlib.rarithmetic import r_uint, intmask
-from pypy.rlib.objectmodel import specialize
 from pypy.rlib.rbigint import rbigint
 
 
@@ -134,29 +134,15 @@
 
 
 @specialize.arg(2)
+@jit.look_inside_iff(lambda space, args, implementation_of:
+    jit.isconstant(len(args.arguments_w)) and
+    len(args.arguments_w) == 2
+)
 def min_max(space, args, implementation_of):
     if implementation_of == "max":
         compare = space.gt
     else:
         compare = space.lt
-
-    args_w = args.arguments_w
-    if len(args_w) == 2 and not args.keywords:
-        # simple case, suitable for the JIT
-        w_arg0, w_arg1 = args_w
-        if space.is_true(compare(w_arg0, w_arg1)):
-            return w_arg0
-        else:
-            return w_arg1
-    else:
-        return min_max_loop(space, args, implementation_of)
-
- at specialize.arg(2)
-def min_max_loop(space, args, implementation_of):
-    if implementation_of == "max":
-        compare = space.gt
-    else:
-        compare = space.lt
     args_w = args.arguments_w
     if len(args_w) > 1:
         w_sequence = space.newtuple(args_w)
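
The hunk above replaces the hand-written two-argument fast path with jit.look_inside_iff: roughly, min_max is only traced into (and its loop unrolled) when it is called with a constant number of exactly two positional arguments; otherwise it stays a residual call. A hypothetical plain-Python stand-in for the decorator, just to show its shape (the real one lives in pypy.rlib.jit and only matters under the JIT):

    def look_inside_iff_sketch(predicate):
        def decorator(func):
            # outside the JIT this is a no-op; the predicate is only metadata
            func._look_inside_iff = predicate
            return func
        return decorator

    @look_inside_iff_sketch(lambda args: len(args) == 2)
    def my_max(*args):
        best = args[0]
        for x in args[1:]:
            if x > best:
                best = x
        return best

    assert my_max(3, 7) == 7        # two args: the JIT would unroll this loop
    assert my_max(3, 9, 5) == 9     # otherwise it stays a residual call
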
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -8,6 +8,7 @@
     appleveldefs = {}
 
     interpleveldefs = {
+        "StringBuilder": "interp_builders.W_StringBuilder",
         "UnicodeBuilder": "interp_builders.W_UnicodeBuilder",
     }
 
diff --git a/pypy/module/__pypy__/interp_builders.py b/pypy/module/__pypy__/interp_builders.py
--- a/pypy/module/__pypy__/interp_builders.py
+++ b/pypy/module/__pypy__/interp_builders.py
@@ -2,49 +2,55 @@
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.gateway import interp2app, unwrap_spec
 from pypy.interpreter.typedef import TypeDef
-from pypy.rlib.rstring import UnicodeBuilder
+from pypy.rlib.rstring import UnicodeBuilder, StringBuilder
+from pypy.tool.sourcetools import func_with_new_name
 
 
-class W_UnicodeBuilder(Wrappable):
-    def __init__(self, space, size):
-        if size < 0:
-            self.builder = UnicodeBuilder()
-        else:
-            self.builder = UnicodeBuilder(size)
-        self.done = False
+def create_builder(name, strtype, builder_cls):
+    class W_Builder(Wrappable):
+        def __init__(self, space, size):
+            if size < 0:
+                self.builder = builder_cls()
+            else:
+                self.builder = builder_cls(size)
 
-    def _check_done(self, space):
-        if self.done:
-            raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder"))
+        def _check_done(self, space):
+            if self.builder is None:
+                raise OperationError(space.w_ValueError, space.wrap("Can't operate on a done builder"))
 
-    @unwrap_spec(size=int)
-    def descr__new__(space, w_subtype, size=-1):
-        return W_UnicodeBuilder(space, size)
+        @unwrap_spec(size=int)
+        def descr__new__(space, w_subtype, size=-1):
+            return W_Builder(space, size)
 
-    @unwrap_spec(s=unicode)
-    def descr_append(self, space, s):
-        self._check_done(space)
-        self.builder.append(s)
+        @unwrap_spec(s=strtype)
+        def descr_append(self, space, s):
+            self._check_done(space)
+            self.builder.append(s)
 
-    @unwrap_spec(s=unicode, start=int, end=int)
-    def descr_append_slice(self, space, s, start, end):
-        self._check_done(space)
-        if not 0 <= start <= end <= len(s):
-            raise OperationError(space.w_ValueError, space.wrap("bad start/stop"))
-        self.builder.append_slice(s, start, end)
+        @unwrap_spec(s=strtype, start=int, end=int)
+        def descr_append_slice(self, space, s, start, end):
+            self._check_done(space)
+            if not 0 <= start <= end <= len(s):
+                raise OperationError(space.w_ValueError, space.wrap("bad start/stop"))
+            self.builder.append_slice(s, start, end)
 
-    def descr_build(self, space):
-        self._check_done(space)
-        w_s = space.wrap(self.builder.build())
-        self.done = True
-        return w_s
+        def descr_build(self, space):
+            self._check_done(space)
+            w_s = space.wrap(self.builder.build())
+            self.builder = None
+            return w_s
 
+    W_Builder.__name__ = "W_%s" % name
+    W_Builder.typedef = TypeDef(name,
+        __new__ = interp2app(func_with_new_name(
+                                    W_Builder.descr__new__.im_func,
+                                    '%s_new' % (name,))),
+        append = interp2app(W_Builder.descr_append),
+        append_slice = interp2app(W_Builder.descr_append_slice),
+        build = interp2app(W_Builder.descr_build),
+    )
+    W_Builder.typedef.acceptable_as_base_class = False
+    return W_Builder
 
-W_UnicodeBuilder.typedef = TypeDef("UnicodeBuilder",
-    __new__ = interp2app(W_UnicodeBuilder.descr__new__.im_func),
-
-    append = interp2app(W_UnicodeBuilder.descr_append),
-    append_slice = interp2app(W_UnicodeBuilder.descr_append_slice),
-    build = interp2app(W_UnicodeBuilder.descr_build),
-)
-W_UnicodeBuilder.typedef.acceptable_as_base_class = False
+W_StringBuilder = create_builder("StringBuilder", str, StringBuilder)
+W_UnicodeBuilder = create_builder("UnicodeBuilder", unicode, UnicodeBuilder)
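
The refactoring above turns the single W_UnicodeBuilder class into a create_builder() factory, so the str and unicode variants share one implementation; func_with_new_name is only used to give the two generated __new__ functions distinct names. A simplified, hypothetical sketch of the same pattern in plain Python, using a list-based stand-in instead of rlib's builders:

    class _ListBuilder(object):           # stand-in for rlib's builders
        def __init__(self, size_hint=0):
            self.pieces = []
        def append(self, s):
            self.pieces.append(s)
        def build(self):
            return "".join(self.pieces)

    def create_builder_sketch(name, item_type):
        class Builder(object):
            def __init__(self, size=-1):
                self.builder = _ListBuilder(max(size, 0))
            def _check_done(self):
                if self.builder is None:
                    raise ValueError("Can't operate on a done builder")
            def append(self, s):
                self._check_done()
                assert isinstance(s, item_type)
                self.builder.append(s)
            def build(self):
                self._check_done()
                s = self.builder.build()
                self.builder = None       # 'done' marker, as in the diff
                return s
        Builder.__name__ = name           # distinct name per generated class
        return Builder

    StringBuilderSketch = create_builder_sketch("StringBuilder", str)
    b = StringBuilderSketch()
    b.append("abc"); b.append("123")
    assert b.build() == "abc123"
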
diff --git a/pypy/module/__pypy__/test/test_builders.py b/pypy/module/__pypy__/test/test_builders.py
--- a/pypy/module/__pypy__/test/test_builders.py
+++ b/pypy/module/__pypy__/test/test_builders.py
@@ -31,4 +31,14 @@
         raises(ValueError, b.append_slice, u"1", 2, 1)
         s = b.build()
         assert s == "cde"
-        raises(ValueError, b.append_slice, u"abc", 1, 2)
\ No newline at end of file
+        raises(ValueError, b.append_slice, u"abc", 1, 2)
+
+    def test_stringbuilder(self):
+        from __pypy__.builders import StringBuilder
+        b = StringBuilder()
+        b.append("abc")
+        b.append("123")
+        b.append("you and me")
+        s = b.build()
+        assert s == "abc123you and me"
+        raises(ValueError, b.build)
\ No newline at end of file
diff --git a/pypy/module/_continuation/__init__.py b/pypy/module/_continuation/__init__.py
--- a/pypy/module/_continuation/__init__.py
+++ b/pypy/module/_continuation/__init__.py
@@ -37,4 +37,5 @@
     interpleveldefs = {
         'continulet': 'interp_continuation.W_Continulet',
         'permute': 'interp_continuation.permute',
+        '_p': 'interp_continuation.unpickle',      # pickle support
     }
diff --git a/pypy/module/_continuation/interp_continuation.py b/pypy/module/_continuation/interp_continuation.py
--- a/pypy/module/_continuation/interp_continuation.py
+++ b/pypy/module/_continuation/interp_continuation.py
@@ -5,6 +5,8 @@
 from pypy.interpreter.baseobjspace import Wrappable
 from pypy.interpreter.typedef import TypeDef
 from pypy.interpreter.gateway import interp2app
+from pypy.interpreter.pycode import PyCode
+from pypy.interpreter.pyframe import PyFrame
 
 
 class W_Continulet(Wrappable):
@@ -20,66 +22,69 @@
     def check_sthread(self):
         ec = self.space.getexecutioncontext()
         if ec.stacklet_thread is not self.sthread:
-            start_state.clear()
+            global_state.clear()
             raise geterror(self.space, "inter-thread support is missing")
-        return ec
 
     def descr_init(self, w_callable, __args__):
         if self.sthread is not None:
             raise geterror(self.space, "continulet already __init__ialized")
-        start_state.origin = self
-        start_state.w_callable = w_callable
-        start_state.args = __args__
-        self.sthread = build_sthread(self.space)
-        try:
-            self.h = self.sthread.new(new_stacklet_callback)
-            if self.sthread.is_empty_handle(self.h):    # early return
-                raise MemoryError
-        except MemoryError:
-            self.sthread = None
-            start_state.clear()
-            raise getmemoryerror(self.space)
+        #
+        # hackish: build the frame "by hand", passing it the correct arguments
+        space = self.space
+        w_args, w_kwds = __args__.topacked()
+        bottomframe = space.createframe(get_entrypoint_pycode(space),
+                                        get_w_module_dict(space), None)
+        bottomframe.locals_stack_w[0] = space.wrap(self)
+        bottomframe.locals_stack_w[1] = w_callable
+        bottomframe.locals_stack_w[2] = w_args
+        bottomframe.locals_stack_w[3] = w_kwds
+        self.bottomframe = bottomframe
+        #
+        global_state.origin = self
+        sthread = build_sthread(self.space)
+        self.sthread = sthread
+        h = sthread.new(new_stacklet_callback)
+        post_switch(sthread, h)
 
     def switch(self, w_to):
+        sthread = self.sthread
+        if sthread is not None and sthread.is_empty_handle(self.h):
+            global_state.clear()
+            raise geterror(self.space, "continulet already finished")
         to = self.space.interp_w(W_Continulet, w_to, can_be_None=True)
+        if to is not None and to.sthread is None:
+            to = None
+        if sthread is None:      # if self is non-initialized:
+            if to is not None:   #     if we are given a 'to'
+                self = to        #         then just use it and ignore 'self'
+                sthread = self.sthread
+                to = None
+            else:
+                return get_result()  # else: no-op
         if to is not None:
+            if to.sthread is not sthread:
+                global_state.clear()
+                raise geterror(self.space, "cross-thread double switch")
             if self is to:    # double-switch to myself: no-op
                 return get_result()
-            if to.sthread is None:
-                start_state.clear()
-                raise geterror(self.space, "continulet not initialized yet")
-        if self.sthread is None:
-            start_state.clear()
-            raise geterror(self.space, "continulet not initialized yet")
-        ec = self.check_sthread()
-        saved_topframeref = ec.topframeref
+            if sthread.is_empty_handle(to.h):
+                global_state.clear()
+                raise geterror(self.space, "continulet already finished")
+        self.check_sthread()
         #
-        start_state.origin = self
+        global_state.origin = self
         if to is None:
             # simple switch: going to self.h
-            start_state.destination = self
+            global_state.destination = self
         else:
             # double switch: the final destination is to.h
-            start_state.destination = to
+            global_state.destination = to
         #
-        h = start_state.destination.h
-        sthread = self.sthread
-        if sthread.is_empty_handle(h):
-            start_state.clear()
-            raise geterror(self.space, "continulet already finished")
-        #
-        try:
-            do_switch(sthread, h)
-        except MemoryError:
-            start_state.clear()
-            raise getmemoryerror(self.space)
-        #
-        ec = sthread.ec
-        ec.topframeref = saved_topframeref
-        return get_result()
+        h = sthread.switch(global_state.destination.h)
+        return post_switch(sthread, h)
 
     def descr_switch(self, w_value=None, w_to=None):
-        start_state.w_value = w_value
+        global_state.w_value = w_value
         return self.switch(w_to)
 
     def descr_throw(self, w_type, w_val=None, w_tb=None, w_to=None):
@@ -94,8 +99,8 @@
         #
         operr = OperationError(w_type, w_val, tb)
         operr.normalize_exception(space)
-        start_state.w_value = None
-        start_state.propagate_exception = operr
+        global_state.w_value = None
+        global_state.propagate_exception = operr
         return self.switch(w_to)
 
     def descr_is_pending(self):
@@ -103,12 +108,26 @@
                  and not self.sthread.is_empty_handle(self.h))
         return self.space.newbool(valid)
 
+    def descr__reduce__(self):
+        from pypy.module._continuation import interp_pickle
+        return interp_pickle.reduce(self)
+
+    def descr__setstate__(self, w_args):
+        from pypy.module._continuation import interp_pickle
+        interp_pickle.setstate(self, w_args)
+
 
 def W_Continulet___new__(space, w_subtype, __args__):
     r = space.allocate_instance(W_Continulet, w_subtype)
     r.__init__(space)
     return space.wrap(r)
 
+def unpickle(space, w_subtype):
+    """Pickle support."""
+    r = space.allocate_instance(W_Continulet, w_subtype)
+    r.__init__(space)
+    return space.wrap(r)
+
 
 W_Continulet.typedef = TypeDef(
     'continulet',
@@ -118,26 +137,52 @@
     switch      = interp2app(W_Continulet.descr_switch),
     throw       = interp2app(W_Continulet.descr_throw),
     is_pending  = interp2app(W_Continulet.descr_is_pending),
+    __reduce__  = interp2app(W_Continulet.descr__reduce__),
+    __setstate__= interp2app(W_Continulet.descr__setstate__),
     )
 
-
 # ____________________________________________________________
 
+# Continulet objects maintain a dummy frame object in order to ensure
+# that the 'f_back' chain is consistent.  We hide this dummy frame
+# object by giving it a dummy code object with hidden_applevel=True.
 
 class State:
     def __init__(self, space):
-        self.space = space 
+        self.space = space
         w_module = space.getbuiltinmodule('_continuation')
         self.w_error = space.getattr(w_module, space.wrap('error'))
-        self.w_memoryerror = OperationError(space.w_MemoryError, space.w_None)
+        # the following function switches away immediately, so that
+        # continulet.__init__() doesn't immediately run func(), but it
+        # also has the hidden purpose of making sure we have a single
+        # bottomframe for the whole duration of the continulet's run.
+        # Hackish: only the func_code is used, and used in the context
+        # of w_globals == this module, so we can access the name
+        # 'continulet' directly.
+        w_code = space.appexec([], '''():
+            def start(c, func, args, kwds):
+                if continulet.switch(c) is not None:
+                    raise TypeError(
+                     "can\'t send non-None value to a just-started continulet")
+                return func(c, *args, **kwds)
+            return start.func_code
+        ''')
+        self.entrypoint_pycode = space.interp_w(PyCode, w_code)
+        self.entrypoint_pycode.hidden_applevel = True
+        self.w_unpickle = w_module.get('_p')
+        self.w_module_dict = w_module.getdict(space)
 
 def geterror(space, message):
     cs = space.fromcache(State)
     return OperationError(cs.w_error, space.wrap(message))
 
-def getmemoryerror(space):
+def get_entrypoint_pycode(space):
     cs = space.fromcache(State)
-    return cs.w_memoryerror
+    return cs.entrypoint_pycode
+
+def get_w_module_dict(space):
+    cs = space.fromcache(State)
+    return cs.w_module_dict
 
 # ____________________________________________________________
 
@@ -148,71 +193,63 @@
         StackletThread.__init__(self, space.config)
         self.space = space
         self.ec = ec
+        # for unpickling
+        from pypy.rlib.rweakref import RWeakKeyDictionary
+        self.frame2continulet = RWeakKeyDictionary(PyFrame, W_Continulet)
 
 ExecutionContext.stacklet_thread = None
 
 # ____________________________________________________________
 
 
-class StartState:   # xxx a single global to pass around the function to start
+class GlobalState:
     def clear(self):
         self.origin = None
         self.destination = None
-        self.w_callable = None
-        self.args = None
         self.w_value = None
         self.propagate_exception = None
-start_state = StartState()
-start_state.clear()
+global_state = GlobalState()
+global_state.clear()
 
 
 def new_stacklet_callback(h, arg):
-    self       = start_state.origin
-    w_callable = start_state.w_callable
-    args       = start_state.args
-    start_state.clear()
-    try:
-        do_switch(self.sthread, h)
-    except MemoryError:
-        return h       # oups!  do an early return in this case
-    #
+    self = global_state.origin
+    self.h = h
+    global_state.clear()
     space = self.space
     try:
-        ec = self.sthread.ec
-        ec.topframeref = jit.vref_None
-
-        if start_state.propagate_exception is not None:
-            raise start_state.propagate_exception   # just propagate it further
-        if start_state.w_value is not space.w_None:
-            raise OperationError(space.w_TypeError, space.wrap(
-                "can't send non-None value to a just-started continulet"))
-
-        args = args.prepend(self.space.wrap(self))
-        w_result = space.call_args(w_callable, args)
+        frame = self.bottomframe
+        w_result = frame.execute_frame()
     except Exception, e:
-        start_state.propagate_exception = e
+        global_state.propagate_exception = e
     else:
-        start_state.w_value = w_result
-    start_state.origin = self
-    start_state.destination = self
+        global_state.w_value = w_result
+    self.sthread.ec.topframeref = jit.vref_None
+    global_state.origin = self
+    global_state.destination = self
     return self.h
 
-
-def do_switch(sthread, h):
-    h = sthread.switch(h)
-    origin = start_state.origin
-    self = start_state.destination
-    start_state.origin = None
-    start_state.destination = None
+def post_switch(sthread, h):
+    origin = global_state.origin
+    self = global_state.destination
+    global_state.origin = None
+    global_state.destination = None
     self.h, origin.h = origin.h, h
+    #
+    current = sthread.ec.topframeref
+    sthread.ec.topframeref = self.bottomframe.f_backref
+    self.bottomframe.f_backref = origin.bottomframe.f_backref
+    origin.bottomframe.f_backref = current
+    #
+    return get_result()
 
 def get_result():
-    if start_state.propagate_exception:
-        e = start_state.propagate_exception
-        start_state.propagate_exception = None
+    if global_state.propagate_exception:
+        e = global_state.propagate_exception
+        global_state.propagate_exception = None
         raise e
-    w_value = start_state.w_value
-    start_state.w_value = None
+    w_value = global_state.w_value
+    global_state.w_value = None
     return w_value
 
 def build_sthread(space):
@@ -232,7 +269,7 @@
         cont = space.interp_w(W_Continulet, w_cont)
         if cont.sthread is not sthread:
             if cont.sthread is None:
-                raise geterror(space, "got a non-initialized continulet")
+                continue   # ignore non-initialized continulets
             else:
                 raise geterror(space, "inter-thread support is missing")
         elif sthread.is_empty_handle(cont.h):
@@ -240,6 +277,9 @@
         contlist.append(cont)
     #
     if len(contlist) > 1:
-        other = contlist[-1].h
+        otherh = contlist[-1].h
+        otherb = contlist[-1].bottomframe.f_backref
         for cont in contlist:
-            other, cont.h = cont.h, other
+            otherh, cont.h = cont.h, otherh
+            b = cont.bottomframe
+            otherb, b.f_backref = b.f_backref, otherb
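
For context, the app-level API these changes are built around (only meaningful on a PyPy build with the _continuation module enabled; the producer example below is illustrative and not part of this commit): a continulet wraps a callable, and each switch() transfers control, plus a value, between the caller and the paused callable.

    from _continuation import continulet

    def producer(c):
        # runs inside the continulet; each switch hands a value back out
        for value in [1, 2, 3]:
            c.switch(value)
        return "done"

    c = continulet(producer)
    assert c.switch() == 1       # starts producer(), pauses at its first switch
    assert c.switch() == 2
    assert c.switch() == 3
    assert c.switch() == "done"  # producer returned, continulet is finished
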
diff --git a/pypy/module/_continuation/interp_pickle.py b/pypy/module/_continuation/interp_pickle.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_continuation/interp_pickle.py
@@ -0,0 +1,128 @@
+from pypy.tool import stdlib_opcode as pythonopcode
+from pypy.rlib import jit
+from pypy.interpreter.error import OperationError
+from pypy.interpreter.pyframe import PyFrame
+from pypy.module._continuation.interp_continuation import State, global_state
+from pypy.module._continuation.interp_continuation import build_sthread
+from pypy.module._continuation.interp_continuation import post_switch
+from pypy.module._continuation.interp_continuation import get_result, geterror
+
+
+def getunpickle(space):
+    cs = space.fromcache(State)
+    return cs.w_unpickle
+
+
+def reduce(self):
+    # xxx this is known to be not completely correct with respect
+    # to subclasses, e.g. no __slots__ support, no looking for a
+    # __getnewargs__ or __getstate__ defined in the subclass, etc.
+    # Doing the right thing looks involved, though...
+    space = self.space
+    if self.sthread is None:
+        w_frame = space.w_False
+    elif self.sthread.is_empty_handle(self.h):
+        w_frame = space.w_None
+    else:
+        w_frame = space.wrap(self.bottomframe)
+    w_continulet_type = space.type(space.wrap(self))
+    w_dict = self.getdict(space) or space.w_None
+    args = [getunpickle(space),
+            space.newtuple([w_continulet_type]),
+            space.newtuple([w_frame, w_dict]),
+            ]
+    return space.newtuple(args)
+
+def setstate(self, w_args):
+    space = self.space
+    if self.sthread is not None:
+        raise geterror(space, "continulet.__setstate__() on an already-"
+                              "initialized continulet")
+    w_frame, w_dict = space.fixedview(w_args, expected_length=2)
+    if not space.is_w(w_dict, space.w_None):
+        self.setdict(space, w_dict)
+    if space.is_w(w_frame, space.w_False):
+        return    # not initialized
+    sthread = build_sthread(self.space)
+    self.sthread = sthread
+    self.bottomframe = space.interp_w(PyFrame, w_frame, can_be_None=True)
+    #
+    global_state.origin = self
+    if self.bottomframe is not None:
+        sthread.frame2continulet.set(self.bottomframe, self)
+    self.h = sthread.new(resume_trampoline_callback)
+    get_result()    # propagate the eventual MemoryError
+
+# ____________________________________________________________
+
+def resume_trampoline_callback(h, arg):
+    self = global_state.origin
+    self.h = h
+    space = self.space
+    sthread = self.sthread
+    try:
+        global_state.clear()
+        if self.bottomframe is None:
+            w_result = space.w_None
+        else:
+            h = sthread.switch(self.h)
+            try:
+                w_result = post_switch(sthread, h)
+                operr = None
+            except OperationError, e:
+                w_result = None
+                operr = e
+            #
+            while True:
+                ec = sthread.ec
+                frame = ec.topframeref()
+                assert frame is not None     # XXX better error message
+                exit_continulet = sthread.frame2continulet.get(frame)
+                #
+                continue_after_call(frame)
+                #
+                # small hack: unlink frame out of the execution context,
+                # because execute_frame will add it there again
+                ec.topframeref = frame.f_backref
+                #
+                try:
+                    w_result = frame.execute_frame(w_result, operr)
+                    operr = None
+                except OperationError, e:
+                    w_result = None
+                    operr = e
+                if exit_continulet is not None:
+                    self = exit_continulet
+                    break
+            sthread.ec.topframeref = jit.vref_None
+            if operr:
+                raise operr
+    except Exception, e:
+        global_state.propagate_exception = e
+    else:
+        global_state.w_value = w_result
+    global_state.origin = self
+    global_state.destination = self
+    return self.h
+
+def continue_after_call(frame):
+    code = frame.pycode.co_code
+    instr = frame.last_instr
+    opcode = ord(code[instr])
+    map = pythonopcode.opmap
+    call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'],
+                map['CALL_FUNCTION_VAR'], map['CALL_FUNCTION_VAR_KW'],
+                map['CALL_METHOD']]
+    assert opcode in call_ops   # XXX check better, and complain better
+    instr += 1
+    oparg = ord(code[instr]) | ord(code[instr + 1]) << 8
+    nargs = oparg & 0xff
+    nkwds = (oparg >> 8) & 0xff
+    if nkwds == 0:     # only positional arguments
+        # the fast paths leave things on the stack; pop them
+        if (frame.space.config.objspace.opcodes.CALL_METHOD and
+            opcode == map['CALL_METHOD']):
+            frame.dropvalues(nargs + 2)
+        elif opcode == map['CALL_FUNCTION']:
+            frame.dropvalues(nargs + 1)
+    frame.last_instr = instr + 1    # continue after the call
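
To make the oparg arithmetic in continue_after_call() concrete: for CPython 2.x CALL_FUNCTION-style opcodes, the two bytes following the opcode encode the positional-argument count in the low byte and the keyword-argument count in the high byte. A small worked example (standalone, for illustration):

    def decode_call_oparg(low_byte, high_byte):
        oparg = low_byte | (high_byte << 8)
        nargs = oparg & 0xff          # positional arguments
        nkwds = (oparg >> 8) & 0xff   # keyword arguments
        return nargs, nkwds

    # f(a, b, x=1) compiles to CALL_FUNCTION with oparg 0x0102:
    assert decode_call_oparg(0x02, 0x01) == (2, 1)
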
diff --git a/pypy/module/_continuation/test/support.py b/pypy/module/_continuation/test/support.py
--- a/pypy/module/_continuation/test/support.py
+++ b/pypy/module/_continuation/test/support.py
@@ -9,4 +9,4 @@
             import pypy.rlib.rstacklet
         except CompilationError, e:
             py.test.skip("cannot import rstacklet: %s" % e)
-        cls.space = gettestobjspace(usemodules=['_continuation'])
+        cls.space = gettestobjspace(usemodules=['_continuation'], continuation=True)
diff --git a/pypy/module/_continuation/test/test_stacklet.py b/pypy/module/_continuation/test/test_stacklet.py
--- a/pypy/module/_continuation/test/test_stacklet.py
+++ b/pypy/module/_continuation/test/test_stacklet.py
@@ -13,7 +13,7 @@
         from _continuation import continulet
         #
         def empty_callback(c):
-            pass
+            never_called
         #
         c = continulet(empty_callback)
         assert type(c) is continulet
@@ -36,7 +36,7 @@
         from _continuation import continulet, error
         #
         def empty_callback(c1):
-            pass
+            never_called
         #
         c = continulet(empty_callback)
         raises(error, c.__init__, empty_callback)
@@ -135,12 +135,6 @@
         e = raises(error, c.switch)
         assert str(e.value) == "continulet already finished"
 
-    def test_not_initialized_yet(self):
-        from _continuation import continulet, error
-        c = continulet.__new__(continulet)
-        e = raises(error, c.switch)
-        assert str(e.value) == "continulet not initialized yet"
-
     def test_go_depth2(self):
         from _continuation import continulet
         #
@@ -254,6 +248,15 @@
         res = c_upper.switch('D')
         assert res == 'E'
 
+    def test_switch_not_initialized(self):
+        from _continuation import continulet
+        c0 = continulet.__new__(continulet)
+        res = c0.switch()
+        assert res is None
+        res = c0.switch(123)
+        assert res == 123
+        raises(ValueError, c0.throw, ValueError)
+
     def test_exception_with_switch_depth2(self):
         from _continuation import continulet
         #
@@ -312,7 +315,7 @@
         res = f()
         assert res == 2002
 
-    def test_f_back_is_None_for_now(self):
+    def test_f_back(self):
         import sys
         from _continuation import continulet
         #
@@ -321,6 +324,7 @@
             c.switch(sys._getframe(0).f_back)
             c.switch(sys._getframe(1))
             c.switch(sys._getframe(1).f_back)
+            assert sys._getframe(2) is f3.f_back
             c.switch(sys._getframe(2))
         def f(c):
             g(c)
@@ -331,10 +335,21 @@
         f2 = c.switch()
         assert f2.f_code.co_name == 'f'
         f3 = c.switch()
-        assert f3.f_code.co_name == 'f'
-        f4 = c.switch()
-        assert f4 is None
-        raises(ValueError, c.switch)    # "call stack is not deep enough"
+        assert f3 is f2
+        assert f1.f_back is f3
+        def main():
+            f4 = c.switch()
+            assert f4.f_code.co_name == 'main', repr(f4.f_code.co_name)
+            assert f3.f_back is f1    # not running, so a loop
+        def main2():
+            f5 = c.switch()
+            assert f5.f_code.co_name == 'main2', repr(f5.f_code.co_name)
+            assert f3.f_back is f1    # not running, so a loop
+        main()
+        main2()
+        res = c.switch()
+        assert res is None
+        assert f3.f_back is None
 
     def test_traceback_is_complete(self):
         import sys
@@ -487,16 +502,31 @@
         assert res == 'z'
         raises(TypeError, c1.switch, to=c2)  # "can't send non-None value"
 
-    def test_switch2_not_initialized_yet(self):
-        from _continuation import continulet, error
+    def test_switch2_not_initialized(self):
+        from _continuation import continulet
+        c0 = continulet.__new__(continulet)
+        c0bis = continulet.__new__(continulet)
+        res = c0.switch(123, to=c0)
+        assert res == 123
+        res = c0.switch(123, to=c0bis)
+        assert res == 123
+        raises(ValueError, c0.throw, ValueError, to=c0)
+        raises(ValueError, c0.throw, ValueError, to=c0bis)
         #
         def f1(c1):
-            not_reachable
-        #
+            c1.switch('a')
+            raises(ValueError, c1.switch, 'b')
+            raises(KeyError, c1.switch, 'c')
+            return 'd'
         c1 = continulet(f1)
-        c2 = continulet.__new__(continulet)
-        e = raises(error, c1.switch, to=c2)
-        assert str(e.value) == "continulet not initialized yet"
+        res = c0.switch(to=c1)
+        assert res == 'a'
+        res = c1.switch(to=c0)
+        assert res == 'b'
+        res = c1.throw(ValueError, to=c0)
+        assert res == 'c'
+        res = c0.throw(KeyError, to=c1)
+        assert res == 'd'
 
     def test_switch2_already_finished(self):
         from _continuation import continulet, error
@@ -609,6 +639,7 @@
         assert res == "ok"
 
     def test_permute(self):
+        import sys
         from _continuation import continulet, permute
         #
         def f1(c1):
@@ -617,14 +648,34 @@
             return "done"
         #
         def f2(c2):
+            assert sys._getframe(1).f_code.co_name == 'main'
             permute(c1, c2)
+            assert sys._getframe(1).f_code.co_name == 'f1'
             return "ok"
         #
         c1 = continulet(f1)
         c2 = continulet(f2)
+        def main():
+            c1.switch()
+            res = c2.switch()
+            assert res == "done"
+        main()
+
+    def test_permute_noninitialized(self):
+        from _continuation import continulet, permute
+        permute(continulet.__new__(continulet))    # ignored
+        permute(continulet.__new__(continulet),    # ignored
+                continulet.__new__(continulet))
+
+    def test_bug_finish_with_already_finished_stacklet(self):
+        from _continuation import continulet, error
+        # make an already-finished continulet
+        c1 = continulet(lambda x: x)
         c1.switch()
-        res = c2.switch()
-        assert res == "done"
+        # make another continulet
+        c2 = continulet(lambda x: x)
+        # this switch is forbidden, because it causes a crash when c2 finishes
+        raises(error, c1.switch, to=c2)
 
     def test_various_depths(self):
         skip("may fail on top of CPython")
diff --git a/pypy/module/_continuation/test/test_zpickle.py b/pypy/module/_continuation/test/test_zpickle.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/_continuation/test/test_zpickle.py
@@ -0,0 +1,262 @@
+from pypy.conftest import gettestobjspace
+
+
+class AppTestCopy:
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=('_continuation',),
+                                    CALL_METHOD=True)
+        cls.space.config.translation.continuation = True
+
+    def test_basic_setup(self):
+        from _continuation import continulet
+        lst = [4]
+        co = continulet(lst.append)
+        assert lst == [4]
+        res = co.switch()
+        assert res is None
+        assert lst == [4, co]
+
+    def test_copy_continulet_not_started(self):
+        from _continuation import continulet, error
+        import copy
+        lst = []
+        co = continulet(lst.append)
+        co2, lst2 = copy.deepcopy((co, lst))
+        #
+        assert lst == []
+        co.switch()
+        assert lst == [co]
+        #
+        assert lst2 == []
+        co2.switch()
+        assert lst2 == [co2]
+
+    def test_copy_continulet_not_started_multiple(self):
+        from _continuation import continulet, error
+        import copy
+        lst = []
+        co = continulet(lst.append)
+        co2, lst2 = copy.deepcopy((co, lst))
+        co3, lst3 = copy.deepcopy((co, lst))
+        co4, lst4 = copy.deepcopy((co, lst))
+        #
+        assert lst == []
+        co.switch()
+        assert lst == [co]
+        #
+        assert lst2 == []
+        co2.switch()
+        assert lst2 == [co2]
+        #
+        assert lst3 == []
+        co3.switch()
+        assert lst3 == [co3]
+        #
+        assert lst4 == []
+        co4.switch()
+        assert lst4 == [co4]
+
+    def test_copy_continulet_real(self):
+        import new, sys
+        mod = new.module('test_copy_continulet_real')
+        sys.modules['test_copy_continulet_real'] = mod
+        exec '''if 1:
+            from _continuation import continulet
+            import copy
+            def f(co, x):
+                co.switch(x + 1)
+                co.switch(x + 2)
+                return x + 3
+            co = continulet(f, 40)
+            res = co.switch()
+            assert res == 41
+            co2 = copy.deepcopy(co)
+            #
+            res = co2.switch()
+            assert res == 42
+            assert co2.is_pending()
+            res = co2.switch()
+            assert res == 43
+            assert not co2.is_pending()
+            #
+            res = co.switch()
+            assert res == 42
+            assert co.is_pending()
+            res = co.switch()
+            assert res == 43
+            assert not co.is_pending()
+        ''' in mod.__dict__
+
+    def test_copy_continulet_already_finished(self):
+        from _continuation import continulet, error
+        import copy
+        lst = []
+        co = continulet(lst.append)
+        co.switch()
+        co2 = copy.deepcopy(co)
+        assert not co.is_pending()
+        assert not co2.is_pending()
+        raises(error, co.__init__, lst.append)
+        raises(error, co2.__init__, lst.append)
+        raises(error, co.switch)
+        raises(error, co2.switch)
+
+
+class AppTestPickle:
+    version = 0
+
+    def setup_class(cls):
+        cls.space = gettestobjspace(usemodules=('_continuation',),
+                                    CALL_METHOD=True)
+        cls.space.appexec([], """():
+            global continulet, A, __name__
+
+            import sys
+            __name__ = 'test_pickle_continulet'
+            thismodule = type(sys)(__name__)
+            sys.modules[__name__] = thismodule
+
+            from _continuation import continulet
+            class A(continulet):
+                pass
+
+            thismodule.__dict__.update(globals())
+        """)
+        cls.w_version = cls.space.wrap(cls.version)
+
+    def test_pickle_continulet_empty(self):
+        from _continuation import continulet
+        lst = [4]
+        co = continulet.__new__(continulet)
+        import pickle
+        pckl = pickle.dumps(co, self.version)
+        print repr(pckl)
+        co2 = pickle.loads(pckl)
+        assert co2 is not co
+        assert not co.is_pending()
+        assert not co2.is_pending()
+        # the empty unpickled coroutine can still be used:
+        result = [5]
+        co2.__init__(result.append)
+        res = co2.switch()
+        assert res is None
+        assert result == [5, co2]
+
+    def test_pickle_continulet_empty_subclass(self):
+        from test_pickle_continulet import continulet, A
+        lst = [4]
+        co = continulet.__new__(A)
+        co.foo = 'bar'
+        co.bar = 'baz'
+        import pickle
+        pckl = pickle.dumps(co, self.version)
+        print repr(pckl)
+        co2 = pickle.loads(pckl)
+        assert co2 is not co
+        assert not co.is_pending()
+        assert not co2.is_pending()
+        assert type(co) is type(co2) is A
+        assert co.foo == co2.foo == 'bar'
+        assert co.bar == co2.bar == 'baz'
+        # the empty unpickled coroutine can still be used:
+        result = [5]
+        co2.__init__(result.append)
+        res = co2.switch()
+        assert res is None
+        assert result == [5, co2]
+
+    def test_pickle_continulet_not_started(self):
+        from _continuation import continulet, error
+        import pickle
+        lst = []
+        co = continulet(lst.append)
+        pckl = pickle.dumps((co, lst))
+        print pckl
+        del co, lst
+        for i in range(2):
+            print 'resume...'
+            co2, lst2 = pickle.loads(pckl)
+            assert lst2 == []
+            co2.switch()
+            assert lst2 == [co2]
+
+    def test_pickle_continulet_real(self):
+        import new, sys
+        mod = new.module('test_pickle_continulet_real')
+        sys.modules['test_pickle_continulet_real'] = mod
+        mod.version = self.version
+        exec '''if 1:
+            from _continuation import continulet
+            import pickle
+            def f(co, x):
+                co.switch(x + 1)
+                co.switch(x + 2)
+                return x + 3
+            co = continulet(f, 40)
+            res = co.switch()
+            assert res == 41
+            pckl = pickle.dumps(co, version)
+            print repr(pckl)
+            co2 = pickle.loads(pckl)
+            #
+            res = co2.switch()
+            assert res == 42
+            assert co2.is_pending()
+            res = co2.switch()
+            assert res == 43
+            assert not co2.is_pending()
+            #
+            res = co.switch()
+            assert res == 42
+            assert co.is_pending()
+            res = co.switch()
+            assert res == 43
+            assert not co.is_pending()
+        ''' in mod.__dict__
+
+    def test_pickle_continulet_real_subclass(self):
+        import new, sys
+        mod = new.module('test_pickle_continulet_real_subclass')
+        sys.modules['test_pickle_continulet_real_subclass'] = mod
+        mod.version = self.version
+        exec '''if 1:
+            from _continuation import continulet
+            import pickle
+            class A(continulet):
+                def __init__(self):
+                    crash
+            def f(co):
+                co.switch(co.x + 1)
+                co.switch(co.x + 2)
+                return co.x + 3
+            co = A.__new__(A)
+            continulet.__init__(co, f)
+            co.x = 40
+            res = co.switch()
+            assert res == 41
+            pckl = pickle.dumps(co, version)
+            print repr(pckl)
+            co2 = pickle.loads(pckl)
+            #
+            assert type(co2) is A
+            res = co2.switch()
+            assert res == 42
+            assert co2.is_pending()
+            res = co2.switch()
+            assert res == 43
+            assert not co2.is_pending()
+            #
+            res = co.switch()
+            assert res == 42
+            assert co.is_pending()
+            res = co.switch()
+            assert res == 43
+            assert not co.is_pending()
+        ''' in mod.__dict__
+
+
+class AppTestPickle_v1(AppTestPickle):
+    version = 1
+
+class AppTestPickle_v2(AppTestPickle):
+    version = 2
diff --git a/pypy/module/_multiprocessing/interp_connection.py b/pypy/module/_multiprocessing/interp_connection.py
--- a/pypy/module/_multiprocessing/interp_connection.py
+++ b/pypy/module/_multiprocessing/interp_connection.py
@@ -225,7 +225,9 @@
             except OSError:
                 pass
 
-    def __init__(self, fd, flags):
+    def __init__(self, space, fd, flags):
+        if fd == self.INVALID_HANDLE_VALUE or fd < 0:
+            raise OperationError(space.w_IOError, space.wrap("invalid handle %d" % fd))
         W_BaseConnection.__init__(self, flags)
         self.fd = fd
 
@@ -234,7 +236,7 @@
         flags = (readable and READABLE) | (writable and WRITABLE)
 
         self = space.allocate_instance(W_FileConnection, w_subtype)
-        W_FileConnection.__init__(self, fd, flags)
+        W_FileConnection.__init__(self, space, fd, flags)
         return space.wrap(self)
 
     def fileno(self, space):
diff --git a/pypy/module/_multiprocessing/interp_semaphore.py b/pypy/module/_multiprocessing/interp_semaphore.py
--- a/pypy/module/_multiprocessing/interp_semaphore.py
+++ b/pypy/module/_multiprocessing/interp_semaphore.py
@@ -468,6 +468,9 @@
 
         self.count -= 1
 
+    def after_fork(self):
+        self.count = 0
+
     @unwrap_spec(kind=int, maxvalue=int)
     def rebuild(space, w_cls, w_handle, kind, maxvalue):
         self = space.allocate_instance(W_SemLock, w_cls)
@@ -512,6 +515,7 @@
     acquire = interp2app(W_SemLock.acquire),
     release = interp2app(W_SemLock.release),
     _rebuild = interp2app(W_SemLock.rebuild.im_func, as_classmethod=True),
+    _after_fork = interp2app(W_SemLock.after_fork),
     __enter__=interp2app(W_SemLock.enter),
     __exit__=interp2app(W_SemLock.exit),
     SEM_VALUE_MAX=SEM_VALUE_MAX,
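The new _after_fork() method is needed because CPython's pure-Python multiprocessing.synchronize.SemLock registers an after-fork hook that calls _after_fork() on the underlying _multiprocessing.SemLock, so that the recursion count held by the parent does not leak into the child. A rough sketch of that stdlib pattern (paraphrased from memory of CPython 2.7's synchronize.py, so details may differ):

    import sys
    import _multiprocessing
    from multiprocessing.util import register_after_fork

    class SemLock(object):
        def __init__(self, kind, value, maxvalue):
            self._semlock = _multiprocessing.SemLock(kind, value, maxvalue)
            if sys.platform != 'win32':
                def _after_fork(obj):
                    # the count inherited from the parent is meaningless
                    # in the child process: reset it to zero
                    obj._semlock._after_fork()
                register_after_fork(self, _after_fork)
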
diff --git a/pypy/module/_multiprocessing/test/test_connection.py b/pypy/module/_multiprocessing/test/test_connection.py
--- a/pypy/module/_multiprocessing/test/test_connection.py
+++ b/pypy/module/_multiprocessing/test/test_connection.py
@@ -145,3 +145,9 @@
             else:
                 c.close()
         space.delslice(w_connections, space.wrap(0), space.wrap(100))
+
+    def test_bad_fd(self):
+        import _multiprocessing
+
+        raises(IOError, _multiprocessing.Connection, -1)
+        raises(IOError, _multiprocessing.Connection, -15)
\ No newline at end of file
diff --git a/pypy/module/_multiprocessing/test/test_semaphore.py b/pypy/module/_multiprocessing/test/test_semaphore.py
--- a/pypy/module/_multiprocessing/test/test_semaphore.py
+++ b/pypy/module/_multiprocessing/test/test_semaphore.py
@@ -39,6 +39,10 @@
         sem.release()
         assert sem._count() == 0
 
+        sem.acquire()
+        sem._after_fork()
+        assert sem._count() == 0
+
     def test_recursive(self):
         from _multiprocessing import SemLock
         kind = self.RECURSIVE
diff --git a/pypy/module/_rawffi/test/test__rawffi.py b/pypy/module/_rawffi/test/test__rawffi.py
--- a/pypy/module/_rawffi/test/test__rawffi.py
+++ b/pypy/module/_rawffi/test/test__rawffi.py
@@ -639,33 +639,6 @@
         a1.free()
         cb.free()
 
-    def test_another_callback_in_stackless(self):
-        try:
-            import _stackless
-        except ImportError:
-            skip("only valid in a stackless pypy-c")
-
-        import _rawffi
-        lib = _rawffi.CDLL(self.lib_name)
-        runcallback = lib.ptr('runcallback', ['P'], 'q')
-        def callback():
-            co = _stackless.coroutine()
-            def f():
-                pass
-            try:
-                co.bind(f)
-                co.switch()
-            except RuntimeError:
-                return 1<<42
-            return -5
-
-        cb = _rawffi.CallbackPtr(callback, [], 'q')
-        a1 = cb.byptr()
-        res = runcallback(a1)
-        assert res[0] == 1<<42
-        a1.free()
-        cb.free()
-
     def test_raising_callback(self):
         import _rawffi, sys
         import StringIO
diff --git a/pypy/module/_sre/interp_sre.py b/pypy/module/_sre/interp_sre.py
--- a/pypy/module/_sre/interp_sre.py
+++ b/pypy/module/_sre/interp_sre.py
@@ -99,6 +99,7 @@
 # SRE_Pattern class
 
 class W_SRE_Pattern(Wrappable):
+    _immutable_fields_ = ["code", "flags"]
 
     def cannot_copy_w(self):
         space = self.space
diff --git a/pypy/module/_ssl/interp_ssl.py b/pypy/module/_ssl/interp_ssl.py
--- a/pypy/module/_ssl/interp_ssl.py
+++ b/pypy/module/_ssl/interp_ssl.py
@@ -52,7 +52,8 @@
 constants["CERT_OPTIONAL"] = PY_SSL_CERT_OPTIONAL
 constants["CERT_REQUIRED"] = PY_SSL_CERT_REQUIRED
 
-constants["PROTOCOL_SSLv2"]  = PY_SSL_VERSION_SSL2
+if not OPENSSL_NO_SSL2:
+    constants["PROTOCOL_SSLv2"]  = PY_SSL_VERSION_SSL2
 constants["PROTOCOL_SSLv3"]  = PY_SSL_VERSION_SSL3
 constants["PROTOCOL_SSLv23"] = PY_SSL_VERSION_SSL23
 constants["PROTOCOL_TLSv1"]  = PY_SSL_VERSION_TLS1
@@ -673,7 +674,7 @@
         method = libssl_TLSv1_method()
     elif protocol == PY_SSL_VERSION_SSL3:
         method = libssl_SSLv3_method()
-    elif protocol == PY_SSL_VERSION_SSL2:
+    elif protocol == PY_SSL_VERSION_SSL2 and not OPENSSL_NO_SSL2:
         method = libssl_SSLv2_method()
     elif protocol == PY_SSL_VERSION_SSL23:
         method = libssl_SSLv23_method()
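At app level the effect of this change is that ssl.PROTOCOL_SSLv2 simply does not exist on OpenSSL builds compiled without SSLv2, which is presumably what CPython does as well. A small sketch of how user code typically copes with the conditionally-missing constant (a general usage pattern, not part of this changeset):

    import ssl

    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        protocol = ssl.PROTOCOL_SSLv2     # only present when OpenSSL has SSLv2
    else:
        protocol = ssl.PROTOCOL_SSLv23    # always-available fallback
    print 'using protocol', protocol
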
diff --git a/pypy/module/_stackless/__init__.py b/pypy/module/_stackless/__init__.py
deleted file mode 100644
--- a/pypy/module/_stackless/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Package initialisation
-from pypy.interpreter.mixedmodule import MixedModule
-
-class Module(MixedModule):
-    """
-    This module implements Stackless for applications.
-    """
-
-    appleveldefs = {
-        'GreenletExit' : 'app_greenlet.GreenletExit',
-        'GreenletError' : 'app_greenlet.GreenletError',
-    }
-
-    interpleveldefs = {
-        'tasklet'    : 'interp_stackless.tasklet',
-        'coroutine'  : 'interp_coroutine.AppCoroutine',
-        'greenlet'   : 'interp_greenlet.AppGreenlet',
-        'usercostate': 'interp_composable_coroutine.W_UserCoState',
-        '_return_main' : 'interp_coroutine.return_main',
-        'get_stack_depth_limit': 'interp_coroutine.get_stack_depth_limit',
-        'set_stack_depth_limit': 'interp_coroutine.set_stack_depth_limit',
-    }
-
-    def setup_after_space_initialization(self):
-        # post-installing classmethods/staticmethods which
-        # are not yet directly supported
-        from pypy.module._stackless.interp_coroutine import post_install as post_install_coro
-        post_install_coro(self)
-        from pypy.module._stackless.interp_greenlet import post_install as post_install_greenlet
-        post_install_greenlet(self)
-
-        if self.space.config.translation.gc == 'marksweep':
-            from pypy.module._stackless.interp_clonable import post_install as post_install_clonable
-            self.extra_interpdef('clonable', 'interp_clonable.AppClonableCoroutine')
-            self.extra_interpdef('fork',     'interp_clonable.fork')
-            post_install_clonable(self)
diff --git a/pypy/module/_stackless/app_greenlet.py b/pypy/module/_stackless/app_greenlet.py
deleted file mode 100644
--- a/pypy/module/_stackless/app_greenlet.py
+++ /dev/null
@@ -1,5 +0,0 @@
-class GreenletExit(Exception):
-    pass
-
-class GreenletError(Exception):
-    pass
diff --git a/pypy/module/_stackless/interp_clonable.py b/pypy/module/_stackless/interp_clonable.py
deleted file mode 100644
--- a/pypy/module/_stackless/interp_clonable.py
+++ /dev/null
@@ -1,106 +0,0 @@
-from pypy.interpreter.error import OperationError
-from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.gateway import interp2app
-from pypy.module._stackless.interp_coroutine import AppCoroutine, AppCoState
-from pypy.module._stackless.interp_coroutine import makeStaticMethod
-from pypy.module._stackless.rcoroutine import AbstractThunk
-from pypy.module._stackless.rclonable import InterpClonableMixin
-
-
-class AppClonableCoroutine(AppCoroutine, InterpClonableMixin):
-
-    def newsubctx(self):
-        self.hello_local_pool()
-        AppCoroutine.newsubctx(self)
-        self.goodbye_local_pool()
-
-    def hello(self):
-        self.hello_local_pool()
-        AppCoroutine.hello(self)
-
-    def goodbye(self):
-        AppCoroutine.goodbye(self)
-        self.goodbye_local_pool()
-
-    def descr_method__new__(space, w_subtype):
-        co = space.allocate_instance(AppClonableCoroutine, w_subtype)
-        costate = AppClonableCoroutine._get_state(space)
-        AppClonableCoroutine.__init__(co, space, state=costate)
-        return space.wrap(co)
-
-    def _get_state(space):
-        return space.fromcache(AppClonableCoState)
-    _get_state = staticmethod(_get_state)
-
-    def w_getcurrent(space):
-        return space.wrap(AppClonableCoroutine._get_state(space).current)
-    w_getcurrent = staticmethod(w_getcurrent)
-
-    def w_clone(self):
-        space = self.space
-        costate = self.costate
-        if costate.current is self:
-            raise OperationError(space.w_RuntimeError,
-                                 space.wrap("clone() cannot clone the "
-                                            "current coroutine"
-                                            "; use fork() instead"))
-        copy = AppClonableCoroutine(space, state=costate)
-        copy.subctx = self.clone_into(copy, self.subctx)
-        return space.wrap(copy)
-
-    def descr__reduce__(self, space):
-        raise OperationError(space.w_TypeError,
-                             space.wrap("_stackless.clonable instances are "
-                                        "not picklable"))
-
-
-AppClonableCoroutine.typedef = TypeDef("clonable", AppCoroutine.typedef,
-    __new__    = interp2app(AppClonableCoroutine.descr_method__new__.im_func),
-    getcurrent = interp2app(AppClonableCoroutine.w_getcurrent),
-    clone      = interp2app(AppClonableCoroutine.w_clone),
-    __reduce__ = interp2app(AppClonableCoroutine.descr__reduce__),
-)
-
-class AppClonableCoState(AppCoState):
-    def post_install(self):
-        self.current = self.main = AppClonableCoroutine(self.space, state=self)
-        self.main.subctx.clear_framestack()      # wack
-
-def post_install(module):
-    makeStaticMethod(module, 'clonable', 'getcurrent')
-    space = module.space
-    AppClonableCoroutine._get_state(space).post_install()
-
-# ____________________________________________________________
-
-class ForkThunk(AbstractThunk):
-    def __init__(self, coroutine):
-        self.coroutine = coroutine
-        self.newcoroutine = None
-    def call(self):
-        oldcoro = self.coroutine
-        self.coroutine = None
-        newcoro = AppClonableCoroutine(oldcoro.space, state=oldcoro.costate)
-        newcoro.subctx = oldcoro.clone_into(newcoro, oldcoro.subctx)
-        newcoro.parent = oldcoro
-        self.newcoroutine = newcoro
-
-def fork(space):
-    """Fork, as in the Unix fork(): the call returns twice, and the return
-    value of the call is either the new 'child' coroutine object (if returning
-    into the parent), or None (if returning into the child).  This returns
-    into the parent first, which can switch to the child later.
-    """
-    costate = AppClonableCoroutine._get_state(space)
-    current = costate.current
-    if current is costate.main:
-        raise OperationError(space.w_RuntimeError,
-                             space.wrap("cannot fork() in the main "
-                                        "clonable coroutine"))
-    thunk = ForkThunk(current)
-    coro_fork = AppClonableCoroutine(space, state=costate)
-    coro_fork.bind(thunk)
-    coro_fork.switch()
-    # we resume here twice.  The following would need explanations about
-    # why it returns the correct thing in both the parent and the child...
-    return space.wrap(thunk.newcoroutine)
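To make the "returns twice" behaviour described in the docstring above concrete, here is a hedged sketch of how application code used the old fork()/clonable API that this changeset removes; the names are taken from the deleted sources in this diff, and this only ever worked on a pypy-c built with the marksweep GC:

    import _stackless                     # module removed by this changeset

    def run():
        child = _stackless.fork()         # the call returns twice
        if child is not None:
            # back in the parent clonable coroutine; 'child' is the clone
            child.switch()
        else:
            # running inside the child: fork() returned None
            print 'hello from the forked coroutine'

    co = _stackless.clonable()
    co.bind(run)
    co.switch()
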
diff --git a/pypy/module/_stackless/interp_composable_coroutine b/pypy/module/_stackless/interp_composable_coroutine
deleted file mode 100644
--- a/pypy/module/_stackless/interp_composable_coroutine
+++ /dev/null
@@ -1,33 +0,0 @@
-from pypy.interpreter.baseobjspace import Wrappable
-from pypy.interpreter.typedef import TypeDef, interp2app
-from pypy.module._stackless.coroutine import AppCoState, AppCoroutine
-
-
-class W_UserCoState(Wrappable):
-    def __init__(self, space):
-        self.costate = AppCoState(space)
-        self.costate.post_install()
-
-    def descr_method__new__(space, w_subtype):
-        costate = space.allocate_instance(W_UserCoState, w_subtype)
-        W_UserCoState.__init__(costate, space)
-        return space.wrap(costate)
-
-    def w_getcurrent(self):
-        space = self.costate.space
-        return space.wrap(self.costate.current)
-
-    def w_spawn(self, w_subtype=None):
-        space = self.costate.space
-        if space.is_w(w_subtype, space.w_None):
-            w_subtype = space.gettypeobject(AppCoroutine.typedef)
-        co = space.allocate_instance(AppCoroutine, w_subtype)
-        AppCoroutine.__init__(co, space, state=self.costate)
-        return space.wrap(co)
-
-W_UserCoState.typedef = TypeDef("usercostate",
-    __new__ = interp2app(W_UserCoState.descr_method__new__.im_func),
-    __module__ = '_stackless',
-    getcurrent = interp2app(W_UserCoState.w_getcurrent),
-    spawn      = interp2app(W_UserCoState.w_spawn),
-)
diff --git a/pypy/module/_stackless/interp_composable_coroutine.py b/pypy/module/_stackless/interp_composable_coroutine.py
deleted file mode 100644
--- a/pypy/module/_stackless/interp_composable_coroutine.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from pypy.interpreter.baseobjspace import Wrappable
-from pypy.interpreter.typedef import TypeDef, interp2app
-from pypy.module._stackless.interp_coroutine import AppCoState, AppCoroutine
-
-
-class W_UserCoState(Wrappable):
-    def __init__(self, space):
-        self.costate = AppCoState(space)
-        self.costate.post_install()
-
-    def descr_method__new__(space, w_subtype):
-        costate = space.allocate_instance(W_UserCoState, w_subtype)
-        W_UserCoState.__init__(costate, space)
-        return space.wrap(costate)
-
-    def w_getcurrent(self):
-        space = self.costate.space
-        return space.wrap(self.costate.current)
-
-    def w_spawn(self, w_subtype=None):
-        space = self.costate.space
-        if space.is_w(w_subtype, space.w_None):
-            w_subtype = space.gettypeobject(AppCoroutine.typedef)
-        co = space.allocate_instance(AppCoroutine, w_subtype)
-        AppCoroutine.__init__(co, space, state=self.costate)
-        return space.wrap(co)
-
-W_UserCoState.typedef = TypeDef("usercostate",
-    __new__ = interp2app(W_UserCoState.descr_method__new__.im_func),
-    __module__ = '_stackless',
-    getcurrent = interp2app(W_UserCoState.w_getcurrent),
-    spawn      = interp2app(W_UserCoState.w_spawn),
-)
-W_UserCoState.acceptable_as_base_class = False
diff --git a/pypy/module/_stackless/interp_coroutine.py b/pypy/module/_stackless/interp_coroutine.py
deleted file mode 100644
--- a/pypy/module/_stackless/interp_coroutine.py
+++ /dev/null
@@ -1,403 +0,0 @@
-"""
-Coroutine implementation for application level on top
-of the internal coroutines.
-This is an extensible concept. Multiple implementations
-of concurrency can exist together, if they follow the
-basic concept of maintaining their own costate.
-
-There is also some diversification possible by using
-multiple costates for the same type. This leads to
-disjoint switchable sets within the same type.
-
-I'm not so sure to what extent the opposite is possible, too.
-I.e., merging the costate of tasklets and greenlets would
-allow them to be parents of each other. Needs a bit more
-experience to decide where to set the limits.
-"""
-
-from pypy.interpreter.argument import Arguments
-from pypy.interpreter.typedef import GetSetProperty, TypeDef
-from pypy.interpreter.gateway import interp2app, unwrap_spec
-from pypy.interpreter.error import OperationError, operationerrfmt
-
-from pypy.module._stackless.stackless_flags import StacklessFlags
-from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState, AbstractThunk, CoroutineExit
-
-from pypy.module.exceptions.interp_exceptions import W_SystemExit, _new_exception
-
-from pypy.rlib import rstack, jit # for resume points
-from pypy.tool import stdlib_opcode as pythonopcode
-
-class _AppThunk(AbstractThunk):
-
-    def __init__(self, space, costate, w_obj, args):
-        self.space = space
-        self.costate = costate
-        if not space.is_true(space.callable(w_obj)):
-            raise operationerrfmt(
-                space.w_TypeError,
-                "'%s' object is not callable",
-                space.type(w_obj).getname(space))
-        self.w_func = w_obj
-        self.args = args
-
-    def call(self):
-        costate = self.costate
-        w_result = self.space.call_args(self.w_func, self.args)
-        costate.w_tempval = w_result
-
-class _ResumeThunk(AbstractThunk):
-    def __init__(self, space, costate, w_frame):
-        self.space = space
-        self.costate = costate
-        self.w_frame = w_frame
-
-    def call(self):
-        w_result = resume_frame(self.space, self.w_frame)
-        # costate.w_tempval = w_result #XXX?
-
-
-W_CoroutineExit = _new_exception('CoroutineExit', W_SystemExit,
-                        """Coroutine killed manually.""")
-
-# Should be moved to interp_stackless.py if it's ever implemented... Currently
-# used by pypy/lib/stackless.py.
-W_TaskletExit = _new_exception('TaskletExit', W_SystemExit,
-            """Tasklet killed manually.""")
-
-class AppCoroutine(Coroutine): # XXX, StacklessFlags):
-
-    def __init__(self, space, state=None):
-        self.space = space
-        if state is None:
-            state = AppCoroutine._get_state(space)
-        Coroutine.__init__(self, state)
-        self.flags = 0
-        self.newsubctx()
-
-    def newsubctx(self):
-        ec = self.space.getexecutioncontext()
-        self.subctx = ec.Subcontext()
-
-    def descr_method__new__(space, w_subtype):
-        co = space.allocate_instance(AppCoroutine, w_subtype)
-        AppCoroutine.__init__(co, space)
-        return space.wrap(co)
-
-    def _get_state(space):
-        return space.fromcache(AppCoState)
-    _get_state = staticmethod(_get_state)
-
-    def w_bind(self, w_func, __args__):
-        space = self.space
-        if self.frame is not None:
-            raise OperationError(space.w_ValueError, space.wrap(
-                "cannot bind a bound Coroutine"))
-        state = self.costate
-        thunk = _AppThunk(space, state, w_func, __args__)
-        self.bind(thunk)
-
-    def w_switch(self):
-        space = self.space
-        if self.frame is None:
-            raise OperationError(space.w_ValueError, space.wrap(
-                "cannot switch to an unbound Coroutine"))
-        state = self.costate
-        self.switch()
-        w_ret, state.w_tempval = state.w_tempval, space.w_None
-        return w_ret
-
-    def switch(self):
-        space = self.space
-        try:
-            Coroutine.switch(self)
-        except CoroutineExit:
-            raise OperationError(self.costate.w_CoroutineExit, space.w_None)
-
-    def w_finished(self, w_excinfo):
-        pass
-
-    def finish(self, operror=None):
-        space = self.space
-        if isinstance(operror, OperationError):
-            w_exctype = operror.w_type
-            w_excvalue = operror.get_w_value(space)
-            w_exctraceback = operror.get_traceback()
-            w_excinfo = space.newtuple([w_exctype, w_excvalue, w_exctraceback])
-
-            if w_exctype is self.costate.w_CoroutineExit:
-                self.coroutine_exit = True
-        else:
-            w_N = space.w_None
-            w_excinfo = space.newtuple([w_N, w_N, w_N])
-
-        return space.call_method(space.wrap(self),'finished', w_excinfo)
-
-    def hello(self):
-        ec = self.space.getexecutioncontext()
-        self.subctx.enter(ec)
-
-    def goodbye(self):
-        ec = self.space.getexecutioncontext()
-        self.subctx.leave(ec)
-
-    def w_kill(self):
-        self.kill()
-
-    def w_throw(self, w_type, w_value=None, w_traceback=None):
-        space = self.space
-
-        operror = OperationError(w_type, w_value)
-        operror.normalize_exception(space)
-
-        if not space.is_w(w_traceback, space.w_None):
-            from pypy.interpreter import pytraceback
-            tb = space.interpclass_w(w_traceback)
-            if tb is None or not space.is_true(space.isinstance(tb,
-                space.gettypeobject(pytraceback.PyTraceback.typedef))):
-                raise OperationError(space.w_TypeError,
-                      space.wrap("throw: arg 3 must be a traceback or None"))
-            operror.set_traceback(tb)
-
-        self._kill(operror)
-
-    def _userdel(self):
-        if self.get_is_zombie():
-            return
-        self.set_is_zombie(True)
-        self.space.userdel(self.space.wrap(self))
-
-    def w_getcurrent(space):
-        return space.wrap(AppCoroutine._get_state(space).current)
-    w_getcurrent = staticmethod(w_getcurrent)
-
-    def w_getmain(space):
-        return space.wrap(AppCoroutine._get_state(space).main)
-    w_getmain = staticmethod(w_getmain)
-
-    # pickling interface
-    def descr__reduce__(self, space):
-        # this is trying to be simplistic at the moment.
-        # we neither allow to pickle main (which can become a mess
-        # since it has some deep anchestor frames)
-        # nor we allow to pickle the current coroutine.
-        # rule: switch before pickling.
-        # you cannot construct the tree that you are climbing.
-        from pypy.interpreter.mixedmodule import MixedModule
-        w_mod    = space.getbuiltinmodule('_stackless')
-        mod      = space.interp_w(MixedModule, w_mod)
-        w_mod2    = space.getbuiltinmodule('_pickle_support')
-        mod2      = space.interp_w(MixedModule, w_mod2)
-        w_new_inst = mod.get('coroutine')
-        w        = space.wrap
-        nt = space.newtuple
-        ec = self.space.getexecutioncontext()
-
-        if self is self.costate.main:
-            return nt([mod.get('_return_main'), nt([])])
-
-        thunk = self.thunk
-        if isinstance(thunk, _AppThunk):
-            w_args, w_kwds = thunk.args.topacked()
-            w_thunk = nt([thunk.w_func, w_args, w_kwds])
-        else:
-            w_thunk = space.w_None
-
-        tup_base = [
-            ]
-        tup_state = [
-            w(self.flags),
-            self.subctx.getstate(space),
-            w_thunk,
-            w(self.parent),
-            ]
-
-        return nt([w_new_inst, nt(tup_base), nt(tup_state)])
-
-    def descr__setstate__(self, space, w_args):
-        w_flags, w_state, w_thunk, w_parent = space.unpackiterable(w_args,
-                                                        expected_length=4)
-        self.flags = space.int_w(w_flags)
-        if space.is_w(w_parent, space.w_None):
-            w_parent = self.w_getmain(space)
-        self.parent = space.interp_w(AppCoroutine, w_parent)
-        ec = self.space.getexecutioncontext()
-        self.subctx.setstate(space, w_state)
-        if space.is_w(w_thunk, space.w_None):
-            if space.is_w(w_state, space.w_None):
-                self.thunk = None
-            else:
-                self.bind(_ResumeThunk(space, self.costate, self.subctx.topframe))
-        else:
-            w_func, w_args, w_kwds = space.unpackiterable(w_thunk,
-                                                          expected_length=3)
-            args = Arguments.frompacked(space, w_args, w_kwds)
-            self.bind(_AppThunk(space, self.costate, w_func, args))
-
-
-# _mixin_ did not work
-for methname in StacklessFlags.__dict__:
-    meth = getattr(StacklessFlags, methname)
-    if hasattr(meth, 'im_func'):
-        setattr(AppCoroutine, meth.__name__, meth.im_func)
-del meth, methname
-
-def w_get_is_zombie(self, space):
-    return space.wrap(self.get_is_zombie())
-AppCoroutine.w_get_is_zombie = w_get_is_zombie
-
-def w_get_is_alive(self, space):
-    return space.wrap(self.is_alive())
-AppCoroutine.w_get_is_alive = w_get_is_alive
-
-def w_descr__framestack(self, space):
-    assert isinstance(self, AppCoroutine)
-    counter = 0
-    f = self.subctx.topframe
-    while f is not None:
-        counter += 1
-        f = f.f_backref()
-    items = [None] * counter
-    f = self.subctx.topframe
-    while f is not None:
-        counter -= 1
-        assert counter >= 0
-        items[counter] = space.wrap(f)
-        f = f.f_backref()
-    assert counter == 0
-    return space.newtuple(items)
-
-def makeStaticMethod(module, classname, funcname):
-    "NOT_RPYTHON"
-    space = module.space
-    w_klass = space.getattr(space.wrap(module), space.wrap(classname))
-    # HACK HACK HACK
-    # make the typeobject mutable for a while
-    from pypy.objspace.std.typeobject import W_TypeObject
-    assert isinstance(w_klass, W_TypeObject)
-    old_flag = w_klass.flag_heaptype
-    w_klass.flag_heaptype = True
-
-    space.appexec([w_klass, space.wrap(funcname)], """
-        (klass, funcname):
-            func = getattr(klass, funcname)
-            setattr(klass, funcname, staticmethod(func.im_func))
-    """)
-    w_klass.flag_heaptype = old_flag
-
-def post_install(module):
-    makeStaticMethod(module, 'coroutine', 'getcurrent')
-    makeStaticMethod(module, 'coroutine', 'getmain')
-    space = module.space
-    AppCoroutine._get_state(space).post_install()
-
-# space.appexec("""() :
-
-# maybe use __spacebind__ for postprocessing
-
-AppCoroutine.typedef = TypeDef("coroutine",
-    __new__ = interp2app(AppCoroutine.descr_method__new__.im_func),
-    bind = interp2app(AppCoroutine.w_bind),
-    switch = interp2app(AppCoroutine.w_switch),
-    kill = interp2app(AppCoroutine.w_kill),
-    throw = interp2app(AppCoroutine.w_throw),
-    finished = interp2app(AppCoroutine.w_finished),
-    is_alive = GetSetProperty(AppCoroutine.w_get_is_alive),
-    is_zombie = GetSetProperty(AppCoroutine.w_get_is_zombie,
-      doc=AppCoroutine.get_is_zombie.__doc__), #--- this flag is a bit obscure
-      # and not useful (it's totally different from Coroutine.is_zombie(), too)
-      # but lib/stackless.py uses it
-    _framestack = GetSetProperty(w_descr__framestack),
-    getcurrent = interp2app(AppCoroutine.w_getcurrent),
-    getmain = interp2app(AppCoroutine.w_getmain),
-    __reduce__   = interp2app(AppCoroutine.descr__reduce__),
-    __setstate__ = interp2app(AppCoroutine.descr__setstate__),
-    __module__ = '_stackless',
-)
-
-class AppCoState(BaseCoState):
-    def __init__(self, space):
-        BaseCoState.__init__(self)
-        self.w_tempval = space.w_None
-        self.space = space
-
-        # XXX Workaround: for now we need to instantiate these classes
-        # explicitly for translation to work
-        W_CoroutineExit(space)
-        W_TaskletExit(space)
-
-        # Exporting new exception to space
-        self.w_CoroutineExit = space.gettypefor(W_CoroutineExit)
-        space.setitem(
-                      space.exceptions_module.w_dict,
-                      space.new_interned_str('CoroutineExit'),
-                      self.w_CoroutineExit)
-        space.setitem(space.builtin.w_dict,
-                      space.new_interned_str('CoroutineExit'),
-                      self.w_CoroutineExit)
-
-        # Should be moved to interp_stackless.py if it's ever implemented...
-        self.w_TaskletExit = space.gettypefor(W_TaskletExit)
-        space.setitem(
-                      space.exceptions_module.w_dict,
-                      space.new_interned_str('TaskletExit'),
-                      self.w_TaskletExit)
-        space.setitem(space.builtin.w_dict,
-                      space.new_interned_str('TaskletExit'),
-                      self.w_TaskletExit)
-
-    def post_install(self):
-        self.current = self.main = AppCoroutine(self.space, state=self)
-        self.main.subctx.clear_framestack()      # wack
-
-def return_main(space):
-    return AppCoroutine._get_state(space).main
-
-def get_stack_depth_limit(space):
-    return space.wrap(rstack.get_stack_depth_limit())
-
- at unwrap_spec(limit=int)
-def set_stack_depth_limit(space, limit):
-    rstack.set_stack_depth_limit(limit)
-
-
-# ___________________________________________________________________
-# unpickling trampoline
-
-def resume_frame(space, w_frame):
-    from pypy.interpreter.pyframe import PyFrame
-    frame = space.interp_w(PyFrame, w_frame, can_be_None=True)
-    w_result = space.w_None
-    operr = None
-    executioncontext = frame.space.getexecutioncontext()
-    while frame is not None:
-        code = frame.pycode.co_code
-        instr = frame.last_instr
-        opcode = ord(code[instr])
-        map = pythonopcode.opmap
-        call_ops = [map['CALL_FUNCTION'], map['CALL_FUNCTION_KW'], map['CALL_FUNCTION_VAR'],
-                    map['CALL_FUNCTION_VAR_KW'], map['CALL_METHOD']]
-        assert opcode in call_ops
-        instr += 1
-        oparg = ord(code[instr]) | ord(code[instr + 1]) << 8
-        nargs = oparg & 0xff
-        nkwds = (oparg >> 8) & 0xff
-        if nkwds == 0:     # only positional arguments
-            # fast paths leaves things on the stack, pop them
-            if space.config.objspace.opcodes.CALL_METHOD and opcode == map['CALL_METHOD']:
-                frame.dropvalues(nargs + 2)
-            elif opcode == map['CALL_FUNCTION']:
-                frame.dropvalues(nargs + 1)
-
-        # small hack: unlink frame out of the execution context, because
-        # execute_frame will add it there again
-        executioncontext.topframeref = jit.non_virtual_ref(frame.f_backref())
-        frame.last_instr = instr + 1 # continue after the call
-        try:
-            w_result = frame.execute_frame(w_result, operr)
-        except OperationError, operr:
-            pass
-        frame = frame.f_backref()
-    if operr:
-        raise operr
-    return w_result
diff --git a/pypy/module/_stackless/interp_greenlet.py b/pypy/module/_stackless/interp_greenlet.py
deleted file mode 100644
--- a/pypy/module/_stackless/interp_greenlet.py
+++ /dev/null
@@ -1,238 +0,0 @@
-from pypy.interpreter.argument import Arguments
-from pypy.interpreter.typedef import GetSetProperty, TypeDef
-from pypy.interpreter.gateway import interp2app
-from pypy.interpreter.gateway import NoneNotWrapped
-from pypy.interpreter.error import OperationError
-
-from pypy.module._stackless.rcoroutine import Coroutine, BaseCoState
-from pypy.module._stackless.rcoroutine import AbstractThunk, syncstate
-from pypy.module._stackless.interp_coroutine import makeStaticMethod
-
-
-class GreenletThunk(AbstractThunk):
-
-    def __init__(self, greenlet):
-        self.greenlet = greenlet
-
-    def call(self):
-        greenlet = self.greenlet
-        greenlet.active = True
-        try:
-            space = greenlet.space
-            args_w = greenlet.costate.args_w
-            __args__ = Arguments(space, args_w)
-            try:
-                w_run = space.getattr(space.wrap(greenlet), space.wrap('run'))
-                greenlet.w_callable = None
-                w_result = space.call_args(w_run, __args__)
-            except OperationError, operror:
-                if not operror.match(space, greenlet.costate.w_GreenletExit):
-                    raise
-                w_result = operror.get_w_value(space)
-        finally:
-            greenlet.active = False
-        greenlet.costate.args_w = [w_result]
-
-class AppGreenletCoState(BaseCoState):
-    def __init__(self, space):
-        BaseCoState.__init__(self)
-        self.args_w = None
-        self.space = space
-        self.w_GreenletExit  = get(space, "GreenletExit")
-        self.w_GreenletError = get(space, "GreenletError")
-
-    def post_install(self):
-        self.current = self.main = AppGreenlet(self.space, is_main=True)
-
-class AppGreenlet(Coroutine):
-    def __init__(self, space, w_callable=None, is_main=False):
-        Coroutine.__init__(self, self._get_state(space))
-        self.space = space
-        self.w_callable = w_callable
-        self.active = is_main
-        self.subctx = space.getexecutioncontext().Subcontext()
-        if is_main:
-            self.subctx.clear_framestack()      # wack
-        else:
-            self.bind(GreenletThunk(self))
-
-    def descr_method__new__(space, w_subtype, __args__):
-        co = space.allocate_instance(AppGreenlet, w_subtype)
-        AppGreenlet.__init__(co, space)
-        return space.wrap(co)
-
-    def descr_method__init__(self, w_run=NoneNotWrapped,
-                                   w_parent=NoneNotWrapped):
-        if w_run is not None:
-            self.set_run(w_run)
-        if w_parent is not None:
-            self.set_parent(w_parent)
-
-    def _get_state(space):
-        return space.fromcache(AppGreenletCoState)
-    _get_state = staticmethod(_get_state)
-
-    def hello(self):
-        ec = self.space.getexecutioncontext()
-        self.subctx.enter(ec)
-
-    def goodbye(self):
-        ec = self.space.getexecutioncontext()
-        self.subctx.leave(ec)
-
-    def w_getcurrent(space):
-        return space.wrap(AppGreenlet._get_state(space).current)
-    w_getcurrent = staticmethod(w_getcurrent)
-
-    def w_switch(self, args_w):
-        # Find the switch target - it might be a parent greenlet
-        space = self.space
-        costate = self.costate
-        target = self
-        while target.isdead():
-            target = target.parent
-            assert isinstance(target, AppGreenlet)
-        # Switch to it
-        costate.args_w = args_w
-        if target is not costate.current:
-            target.switch()
-        else:
-            # case not handled in Coroutine.switch()
-            syncstate._do_things_to_do()
-        result_w = costate.args_w
-        costate.args_w = None
-        # costate.args_w can be set to None above for throw(), but then
-        # switch() should have raised.  At this point cosstate.args_w != None.
-        assert result_w is not None
-        # Return the result of a switch, packaging it in a tuple if
-        # there is more than one value.
-        if len(result_w) == 1:
-            return result_w[0]
-        return space.newtuple(result_w)
-
-    def w_throw(self, w_type=None, w_value=None, w_traceback=None):
-        space = self.space
-        if space.is_w(w_type, space.w_None):
-            w_type = self.costate.w_GreenletExit
-        # Code copied from RAISE_VARARGS but slightly modified.  Not too nice.
-        operror = OperationError(w_type, w_value)
-        operror.normalize_exception(space)
-        if not space.is_w(w_traceback, space.w_None):
-            from pypy.interpreter import pytraceback
-            tb = space.interpclass_w(w_traceback)
-            if tb is None or not space.is_true(space.isinstance(tb, 
-                space.gettypeobject(pytraceback.PyTraceback.typedef))):
-                raise OperationError(space.w_TypeError,
-                      space.wrap("throw: arg 3 must be a traceback or None"))
-            operror.set_traceback(tb)
-        # Dead greenlet: turn GreenletExit into a regular return
-        if self.isdead() and operror.match(space, self.costate.w_GreenletExit):
-            args_w = [operror.get_w_value(space)]
-        else:
-            syncstate.push_exception(operror)
-            args_w = None
-        return self.w_switch(args_w)
-
-    def _userdel(self):
-        self.space.userdel(self.space.wrap(self))
-
-    def isdead(self):
-        return self.thunk is None and not self.active
-
-    def w_get_is_dead(self, space):
-        return space.newbool(self.isdead())
-
-    def descr__nonzero__(self):
-        return self.space.newbool(self.active)
-
-    def w_get_run(self, space):
-        w_run = self.w_callable
-        if w_run is None:
-            raise OperationError(space.w_AttributeError, space.wrap("run"))
-        return w_run
-
-    def set_run(self, w_run):
-        space = self.space
-        if self.thunk is None:
-            raise OperationError(space.w_AttributeError,
-                                 space.wrap("run cannot be set "
-                                            "after the start of the greenlet"))
-        self.w_callable = w_run
-
-    def w_set_run(self, space, w_run):
-        self.set_run(w_run)
-
-    def w_del_run(self, space):
-        if self.w_callable is None:
-            raise OperationError(space.w_AttributeError, space.wrap("run"))
-        self.w_callable = None
-
-    def w_get_parent(self, space):
-        return space.wrap(self.parent)
-
-    def set_parent(self, w_parent):
-        space = self.space
-        newparent = space.interp_w(AppGreenlet, w_parent)
-        if newparent.costate is not self.costate:
-            raise OperationError(self.costate.w_GreenletError,
-                                 space.wrap("invalid foreign parent"))
-        curr = newparent
-        while curr:
-            if curr is self:
-                raise OperationError(space.w_ValueError,
-                                     space.wrap("cyclic parent chain"))
-            curr = curr.parent
-        self.parent = newparent
-
-    def w_set_parent(self, space, w_parent):
-        self.set_parent(w_parent)
-
-    def w_get_frame(self, space):
-        if not self.active or self.costate.current is self:
-            f = None
-        else:
-            f = self.subctx.topframe
-        return space.wrap(f)
-
-def get(space, name):
-    w_module = space.getbuiltinmodule('_stackless')
-    return space.getattr(w_module, space.wrap(name))
-
-def post_install(module):
-    "NOT_RPYTHON"
-    makeStaticMethod(module, 'greenlet', 'getcurrent')
-    space = module.space
-    state = AppGreenlet._get_state(space)
-    state.post_install()
-    w_greenlet = get(space, 'greenlet')
-    # HACK HACK HACK
-    # make the typeobject mutable for a while
-    from pypy.objspace.std.typeobject import W_TypeObject
-    assert isinstance(w_greenlet, W_TypeObject)
-    old_flag = w_greenlet.flag_heaptype
-    w_greenlet.flag_heaptype = True
-    space.appexec([w_greenlet,
-                   state.w_GreenletExit,
-                   state.w_GreenletError], """
-    (greenlet, exit, error):
-        greenlet.GreenletExit = exit
-        greenlet.error = error
-    """)
-    w_greenlet.flag_heaptype = old_flag
-
-AppGreenlet.typedef = TypeDef("greenlet",
-    __new__ = interp2app(AppGreenlet.descr_method__new__.im_func),
-    __init__ = interp2app(AppGreenlet.descr_method__init__),
-    switch = interp2app(AppGreenlet.w_switch),
-    dead = GetSetProperty(AppGreenlet.w_get_is_dead),
-    run = GetSetProperty(AppGreenlet.w_get_run,
-                         AppGreenlet.w_set_run,
-                         AppGreenlet.w_del_run),
-    parent = GetSetProperty(AppGreenlet.w_get_parent,
-                            AppGreenlet.w_set_parent),
-    getcurrent = interp2app(AppGreenlet.w_getcurrent),
-    throw = interp2app(AppGreenlet.w_throw),
-    gr_frame = GetSetProperty(AppGreenlet.w_get_frame),
-    __nonzero__ = interp2app(AppGreenlet.descr__nonzero__),
-    __module__ = '_stackless',
-)
diff --git a/pypy/module/_stackless/interp_stackless.py b/pypy/module/_stackless/interp_stackless.py
deleted file mode 100644
--- a/pypy/module/_stackless/interp_stackless.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from pypy.interpreter.baseobjspace import Wrappable
-from pypy.interpreter.typedef import TypeDef
-from pypy.interpreter.gateway import interp2app
-import os
-
-
-class tasklet(Wrappable):
-
-    def __init__(self, space):
-        self.space = space
-        self.flags = 0
-        self.state = None
-
-    def descr_method__new__(space, w_subtype):
-        t = space.allocate_instance(tasklet, w_subtype)
-        tasklet.__init__(t, space)
-        return space.wrap(t)
-
-    def w_demo(self):
-        output("42")
-
-tasklet.typedef = TypeDef("tasklet",
-    __new__ = interp2app(tasklet.descr_method__new__.im_func),
-    demo = interp2app(tasklet.w_demo),
-)
-
-def output(stuff):
-    os.write(2, stuff + '\n')
diff --git a/pypy/module/_stackless/rclonable.py b/pypy/module/_stackless/rclonable.py
deleted file mode 100644
--- a/pypy/module/_stackless/rclonable.py
+++ /dev/null
@@ -1,87 +0,0 @@
-from pypy.module._stackless.interp_coroutine import AbstractThunk, Coroutine
-from pypy.rlib.rgc import gc_swap_pool, gc_clone
-from pypy.rlib.objectmodel import we_are_translated
-
-
-class InterpClonableMixin:
-    local_pool = None
-    _mixin_ = True
-
-    def hello_local_pool(self):
-        if we_are_translated():
-            self.saved_pool = gc_swap_pool(self.local_pool)
-
-    def goodbye_local_pool(self):
-        if we_are_translated():
-            self.local_pool = gc_swap_pool(self.saved_pool)
-            self.saved_pool = None
-
-    def clone_into(self, copy, extradata=None):
-        if not we_are_translated():
-            raise NotImplementedError
-        # cannot gc_clone() directly self, because it is not in its own
-        # local_pool.  Moreover, it has a __del__, which cloning doesn't
-        # support properly at the moment.
-        copy.parent = self.parent
-        # the hello/goodbye pair has two purposes: it forces
-        # self.local_pool to be computed even if it was None up to now,
-        # and it puts the 'data' tuple in the correct pool to be cloned.
-        self.hello_local_pool()
-        data = (self.frame, extradata)
-        self.goodbye_local_pool()
-        # clone!
-        data, copy.local_pool = gc_clone(data, self.local_pool)
-        copy.frame, extradata = data
-        copy.thunk = self.thunk # in case we haven't switched to self yet
-        return extradata
-
-
-class InterpClonableCoroutine(Coroutine, InterpClonableMixin):
-
-    def hello(self):
-        self.hello_local_pool()
-
-    def goodbye(self):
-        self.goodbye_local_pool()
-
-    def clone(self):
-        # hack, this is overridden in AppClonableCoroutine
-        if self.getcurrent() is self:
-            raise RuntimeError("clone() cannot clone the current coroutine; "
-                               "use fork() instead")
-        copy = InterpClonableCoroutine(self.costate)
-        self.clone_into(copy)
-        return copy
-
-
-class ForkThunk(AbstractThunk):
-    def __init__(self, coroutine):
-        self.coroutine = coroutine
-        self.newcoroutine = None
-    def call(self):
-        oldcoro = self.coroutine
-        self.coroutine = None
-        newcoro = oldcoro.clone()
-        newcoro.parent = oldcoro
-        self.newcoroutine = newcoro
-
-def fork():
-    """Fork, as in the Unix fork(): the call returns twice, and the return
-    value of the call is either the new 'child' coroutine object (if returning
-    into the parent), or None (if returning into the child).  This returns
-    into the parent first, which can switch to the child later.
-    """
-    current = InterpClonableCoroutine.getcurrent()
-    if not isinstance(current, InterpClonableCoroutine):
-        raise RuntimeError("fork() in a non-clonable coroutine")
-    thunk = ForkThunk(current)
-    coro_fork = InterpClonableCoroutine()
-    coro_fork.bind(thunk)
-    coro_fork.switch()
-    # we resume here twice.  The following would need explanations about
-    # why it returns the correct thing in both the parent and the child...
-    return thunk.newcoroutine
-
-##    from pypy.rpython.lltypesystem import lltype, lloperation
-##    lloperation.llop.debug_view(lltype.Void, current, thunk,
-##        lloperation.llop.gc_x_size_header(lltype.Signed))
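The fork() docstring above describes the key property of the removed API: the call returns twice, handing the new child coroutine back in the parent and None in the child. For illustration only (this targets the _stackless module that this very diff deletes, and mirrors the test_fork test further down), app-level code used it to enumerate a binary choice like this:

    import _stackless

    def zero_or_one():
        # fork() returns the child coroutine in the parent and None in the child
        child = _stackless.fork()
        if child is not None:
            child.switch()      # run the child's branch (answer 0) first
            return 1            # then continue in the parent with answer 1
        return 0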
diff --git a/pypy/module/_stackless/rcoroutine.py b/pypy/module/_stackless/rcoroutine.py
deleted file mode 100644
--- a/pypy/module/_stackless/rcoroutine.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from pypy.rlib.rcoroutine import make_coroutine_classes
-from pypy.interpreter.baseobjspace import Wrappable
-
-d = make_coroutine_classes(Wrappable)
-
-Coroutine = d['Coroutine']
-BaseCoState = d['BaseCoState']
-AbstractThunk = d['AbstractThunk']
-syncstate = d['syncstate']
-CoroutineExit = d['CoroutineExit']
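The file above is only a thin instantiation of the make_coroutine_classes factory with Wrappable as the base class. As a sketch of that pattern in general (a hypothetical toy factory, not PyPy's actual rlib code), building a family of classes over a caller-supplied base and handing them back in a dict looks like this:

    def make_classes(baseclass):
        # hypothetical sketch of the factory pattern used by rcoroutine.py
        class AbstractThunk(baseclass):
            def call(self):
                raise NotImplementedError

        class Coroutine(baseclass):
            def bind(self, thunk):
                self.thunk = thunk

        return {'AbstractThunk': AbstractThunk, 'Coroutine': Coroutine}

    d = make_classes(object)
    Coroutine = d['Coroutine']
    AbstractThunk = d['AbstractThunk']

The point of the indirection is that the same class bodies can be reused with different bases (Wrappable at interp-level, a plain object elsewhere) without duplicating the code.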
diff --git a/pypy/module/_stackless/stackless_flags.py b/pypy/module/_stackless/stackless_flags.py
deleted file mode 100644
--- a/pypy/module/_stackless/stackless_flags.py
+++ /dev/null
@@ -1,201 +0,0 @@
-"""
-basic definitions for tasklet flags.
-For simplicity and compatibility,
-they are defined the same for coroutines,
-even if they are not used.
-
-taken from tasklet_structs.h
-----------------------------
-
-/***************************************************************************
-
-    Tasklet Flag Definition
-    -----------------------
-
-    blocked:        The tasklet is either waiting in a channel for
-                    writing (1) or reading (-1) or not blocked (0).
-                    Maintained by the channel logic. Do not change.
-
-    atomic:         If true, schedulers will never switch. Driven by
-                    the code object or dynamically, see below.
-
-    ignore_nesting: Allows auto-scheduling, even if nesting_level
-                    is not zero.
-
-    autoschedule:   The tasklet likes to be auto-scheduled. User driven.
-
-    block_trap:     Debugging aid. Whenever the tasklet would be
-                    blocked by a channel, an exception is raised.
-
-    is_zombie:      This tasklet is almost dead, its deallocation has
-                    started. The tasklet *must* die at some time, or the
-                    process can never end.
-
-    pending_irq:    If set, an interrupt was issued during an atomic
-                    operation, and should be handled when possible.
-
-
-    Policy for atomic/autoschedule and switching:
-    ---------------------------------------------
-    A tasklet switch can always be done explicitly by calling schedule().
-    Atomic and schedule are concerned with automatic features.
-
-    atomic  autoschedule
-
-        1       any     Neither a scheduler nor a watchdog will
-                        try to switch this tasklet.
-
-        0       0       The tasklet can be stopped on desire, or it
-                        can be killed by an exception.
-
-        0       1       Like above, plus auto-scheduling is enabled.
-
-    Default settings:
-    -----------------
-    All flags are zero by default.
-
- ***************************************************************************/
-
-typedef struct _tasklet_flags {
-        int blocked: 2;
-        unsigned int atomic: 1;
-        unsigned int ignore_nesting: 1;
-        unsigned int autoschedule: 1;
-        unsigned int block_trap: 1;
-        unsigned int is_zombie: 1;
-        unsigned int pending_irq: 1;
-} PyTaskletFlagStruc;
-"""
-
-from pypy.rlib.rarithmetic import LONG_BIT, intmask
-
-class BitSetDef(object):
-    __slots__ = "_names __dict__ _attrname".split()
-
-    def __init__(self, _attrname):
-        self._names = []
-        self._attrname = _attrname
-        
-    def __setattr__(self, key, value):
-        if key not in self.__slots__:
-            assert key not in self.__dict__
-            self._names.append(key)
-        object.__setattr__(self, key, value)
-
-    def __iter__(self):
-        return self._enum_objects()
-    
-    def _enum_objects(self):
-        for name in self._names:
-            yield name, getattr(self, name)
-
-# negative values are user-writable
-flags = BitSetDef("flags")
-flags.blocked           =   2, """writing (1) or reading (-1) or not blocked (0)"""
-flags.atomic            =  -1, """If true, schedulers will never switch"""
-flags.ignore_nesting    =  -1, """allow auto-scheduling in nested interpreters"""
-flags.autoschedule      =  -1, """enable auto-scheduling"""
-flags.block_trap        =  -1, """raise an exception instead of blocking"""
-flags.is_zombie         =   1, """__del__ is in progress"""
-flags.pending_irq       =   1, """an interrupt occurred while being atomic"""
-
-def make_get_bits(name, bits, shift):
-    """ return a bool for single bits, signed int otherwise """
-    signmask = 1 << (bits - 1 + shift)
-    lshift = bits + shift
-    rshift = bits
-    if bits == 1:
-        return "bool(%s & 0x%x)" % (name, signmask)
-    else:
-        return "intmask(%s << (LONG_BIT-%d)) >> (LONG_BIT-%d)" % (name, lshift, rshift)
-
-def make_set_bits(name, bits, shift):
-    datamask = int('1' * bits, 2)
-    clearmask = datamask << shift
-    return "%s & ~0x%x | (value & 0x%x) << %d" % (name, clearmask, datamask, shift)
-
-def gen_code():
-    from cStringIO import StringIO
-    f = StringIO()
-    print >> f, "class StacklessFlags(object):"
-    print >> f, "    _mixin_ = True"
-    shift = 0
-    field = "self.%s" % flags._attrname
-    for name, (bits, doc) in flags:
-        write, bits = bits < 0, abs(bits)
-        print >> f
-        print >> f, '    def get_%s(self):' % name
-        print >> f, '        """%s"""' % doc
-        print >> f, '        return %s' % make_get_bits(field, bits, shift)
-        print >> f, '    def set_%s(self, value):' % name
-        print >> f, '        """%s"""' % doc
-        print >> f, '        %s = %s' % (field, make_set_bits(field, bits, shift))
-        print >> f, '    set_%s._public = %s' % (name, write)
-        shift += bits
-    return f.getvalue()
-
-# BEGIN generated code
-class StacklessFlags(object):
-    _mixin_ = True
-
-    def get_blocked(self):
-        """writing (1) or reading (-1) or not blocked (0)"""
-        return intmask(self.flags << (LONG_BIT-2)) >> (LONG_BIT-2)
-    def set_blocked(self, value):
-        """writing (1) or reading (-1) or not blocked (0)"""
-        self.flags = self.flags & ~0x3 | (value & 0x3) << 0
-    set_blocked._public = False
-
-    def get_atomic(self):
-        """If true, schedulers will never switch"""
-        return bool(self.flags & 0x4)
-    def set_atomic(self, value):
-        """If true, schedulers will never switch"""
-        self.flags = self.flags & ~0x4 | (value & 0x1) << 2
-    set_atomic._public = True
-
-    def get_ignore_nesting(self):
-        """allow auto-scheduling in nested interpreters"""
-        return bool(self.flags & 0x8)
-    def set_ignore_nesting(self, value):
-        """allow auto-scheduling in nested interpreters"""
-        self.flags = self.flags & ~0x8 | (value & 0x1) << 3
-    set_ignore_nesting._public = True
-
-    def get_autoschedule(self):
-        """enable auto-scheduling"""
-        return bool(self.flags & 0x10)
-    def set_autoschedule(self, value):
-        """enable auto-scheduling"""
-        self.flags = self.flags & ~0x10 | (value & 0x1) << 4
-    set_autoschedule._public = True
-
-    def get_block_trap(self):
-        """raise an exception instead of blocking"""
-        return bool(self.flags & 0x20)
-    def set_block_trap(self, value):
-        """raise an exception instead of blocking"""
-        self.flags = self.flags & ~0x20 | (value & 0x1) << 5
-    set_block_trap._public = True
-
-    def get_is_zombie(self):
-        """__del__ is in progress"""
-        return bool(self.flags & 0x40)
-    def set_is_zombie(self, value):
-        """__del__ is in progress"""
-        self.flags = self.flags & ~0x40 | (value & 0x1) << 6
-    set_is_zombie._public = False
-
-    def get_pending_irq(self):
-        """an interrupt occured while being atomic"""
-        return bool(self.flags & 0x80)
-    def set_pending_irq(self, value):
-        """an interrupt occured while being atomic"""
-        self.flags = self.flags & ~0x80 | (value & 0x1) << 7
-    set_pending_irq._public = False
-
-# END generated code
-
-if __name__ == '__main__':
-    # paste this into the file
-    print gen_code()
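The generated getters and setters above pack every tasklet flag into one integer field: each single-bit flag is tested with a mask, and the only multi-bit field (blocked, 2 bits, signed) is recovered by shifting it into the top of the word and back down so the sign extends. A plain-Python sketch of the same arithmetic, with the field layout taken from the generated code and the helper names invented for the example:

    def get_blocked(flags):
        # signed 2-bit field at bit 0: yields 1, -1 or 0
        value = flags & 0x3
        return value - 4 if value & 0x2 else value

    def set_blocked(flags, value):
        return flags & ~0x3 | (value & 0x3) << 0

    def get_atomic(flags):
        # single bit at position 2
        return bool(flags & 0x4)

    def set_atomic(flags, value):
        return flags & ~0x4 | (value & 0x1) << 2

    flags = set_blocked(set_atomic(0, 1), -1)
    assert get_blocked(flags) == -1 and get_atomic(flags)

The intmask/LONG_BIT dance in the generated code performs the same sign extension, but with RPython's machine-sized integers.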
diff --git a/pypy/module/_stackless/test/__init__.py b/pypy/module/_stackless/test/__init__.py
deleted file mode 100644
--- a/pypy/module/_stackless/test/__init__.py
+++ /dev/null
@@ -1,1 +0,0 @@
-#
\ No newline at end of file
diff --git a/pypy/module/_stackless/test/conftest.py b/pypy/module/_stackless/test/conftest.py
deleted file mode 100644
--- a/pypy/module/_stackless/test/conftest.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import sys
-import py.test
-
-def pytest_runtest_setup(item):
-    py.test.importorskip('greenlet')
-    if sys.platform == 'win32':
-        py.test.skip("stackless tests segfault on Windows")
-
diff --git a/pypy/module/_stackless/test/slp_test_pickle.py b/pypy/module/_stackless/test/slp_test_pickle.py
deleted file mode 100644
--- a/pypy/module/_stackless/test/slp_test_pickle.py
+++ /dev/null
@@ -1,35 +0,0 @@
-from pypy.conftest import gettestobjspace
-
-# app-level testing of coroutine pickling
-
-class AppTest_Pickle:
-
-    def setup_class(cls):
-        space = gettestobjspace(usemodules=('_stackless',))
-        cls.space = space
-
-    def test_simple_ish(self):
-
-        output = []
-        import _stackless
-        def f(coro, n, x):
-            if n == 0:
-                coro.switch()
-                return
-            f(coro, n-1, 2*x)
-            output.append(x)
-
-        def example():
-            main_coro = _stackless.coroutine.getcurrent()
-            sub_coro = _stackless.coroutine()
-            sub_coro.bind(f, main_coro, 5, 1)
-            sub_coro.switch()
-
-            import pickle
-            pckl = pickle.dumps(sub_coro)
-            new_coro = pickle.loads(pckl)
-
-            new_coro.switch()
-
-        example()
-        assert output == [16, 8, 4, 2, 1]
diff --git a/pypy/module/_stackless/test/test_choicepoint.py b/pypy/module/_stackless/test/test_choicepoint.py
deleted file mode 100644
--- a/pypy/module/_stackless/test/test_choicepoint.py
+++ /dev/null
@@ -1,85 +0,0 @@
-import py; py.test.skip("clonable coroutines not really maintained any more")
-
-from pypy.rlib.rcoroutine import AbstractThunk
-from pypy.module._stackless.rclonable import InterpClonableCoroutine as ClonableCoroutine
-
-class ChoicePointHolder(object):
-    def __init__(self):
-        self.choicepoints = []
-        self.clone_me = False
-        self.answer = 0
-        self.solutions_count = 0
-
-    def next_choice(self):
-        return self.choicepoints.pop()
-
-    def add(self, choice, answer=0):
-        self.choicepoints.append((choice, answer))
-
-    def more_choices(self):
-        return bool(self.choicepoints)
-
-    def choice(self):
-        #os.write(1, "choice\n")
-        self.clone_me = True
-        self.g_main.switch()
-        #os.write(1, "answer: %d\n" % (self.answer,))
-        return self.answer
-
-    def fail(self):
-        self.g_main.switch()
-        assert False
-
-choicepoints = ChoicePointHolder()
-
-# ____________________________________________________________
-
-class SearchTask(AbstractThunk):
-    def call(self):
-        path = []
-        for i in range(10):
-            res = choicepoints.choice()
-            assert len(path) == i
-            path.append(res)
-            #os.write(1, "{%x} trying: %s\n" % (id(path), path))
-            if i == 3:
-                import gc; gc.collect()
-        #os.write(1, "{%x} found a solution: %s\n" % (id(path), path))
-        choicepoints.solutions_count += 1
-
-# ____________________________________________________________
-
-
-class SearchAllTask(AbstractThunk):
-    def call(self):
-        search_coro = ClonableCoroutine()
-        search_coro.bind(SearchTask())
-        choicepoints.add(search_coro)
-
-        #os.write(1, "starting\n")
-        while choicepoints.more_choices():
-            searcher, nextvalue = choicepoints.next_choice()
-            choicepoints.clone_me = False
-            choicepoints.answer = nextvalue
-            #os.write(1, '<<< {%x} %d\n' % (id(searcher), nextvalue))
-            searcher.switch()
-            #os.write(1, '>>> %d\n' % (choicepoints.clone_me,))
-            if choicepoints.clone_me:
-                searcher2 = searcher.clone()
-                #os.write(1, 'searcher = {%x}, searcher2 = {%x}\n' % (
-                #    id(searcher), id(searcher2)))
-                choicepoints.add(searcher, 5)
-                choicepoints.add(searcher2, 4)
-
-def entry_point():
-    choicepoints.g_main = ClonableCoroutine()
-    choicepoints.g_main.bind(SearchAllTask())
-    choicepoints.g_main.switch()
-    return choicepoints.solutions_count
-
-def test_choicepoint():
-    from pypy.translator.c.test import test_newgc
-    tester = test_newgc.TestUsingStacklessFramework()
-    fn = tester.getcompiled(entry_point)
-    res = fn()
-    assert res == 2 ** 10
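The expected count of 2 ** 10 follows from the shape of the search: SearchTask asks for ten choices, and each time choice() switches back, SearchAllTask clones the searcher and re-queues both copies (with answers 5 and 4), so the run explores a complete binary tree of depth ten and every leaf counts as one solution. The same enumeration with an explicit work list instead of clonable coroutines (illustrative only):

    def count_solutions(depth=10):
        solutions = 0
        pending = [[]]               # each entry is a partial path of answers
        while pending:
            path = pending.pop()
            if len(path) == depth:
                solutions += 1       # a complete path is one solution
            else:
                # mirror the clone: continue once with 5 and once with 4
                pending.append(path + [5])
                pending.append(path + [4])
        return solutions

    assert count_solutions() == 2 ** 10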
diff --git a/pypy/module/_stackless/test/test_clonable.py b/pypy/module/_stackless/test/test_clonable.py
deleted file mode 100644
--- a/pypy/module/_stackless/test/test_clonable.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import py; py.test.skip("clonable coroutines not really maintained any more")
-
-from pypy.conftest import gettestobjspace, option
-import py, sys
-
-# app-level testing of coroutine cloning
-
-class AppTestClonable:
-
-    def setup_class(cls):
-        if not option.runappdirect:
-            py.test.skip('pure appdirect test (run with -A)')
-        cls.space = space = gettestobjspace(usemodules=('_stackless',))
-        if not space.is_true(space.appexec([], """():
-            import _stackless
-            return hasattr(_stackless, 'clonable')
-        """)):
-            py.test.skip('no _stackless.clonable')
-
-
-    def test_solver(self):
-        import _stackless
-
-        class Fail(Exception):
-            pass
-
-        class Success(Exception):
-            pass
-
-        def first_solution(func):
-            global next_answer
-            co = _stackless.clonable()
-            co.bind(func)
-            pending = [(co, None)]
-            while pending:
-                co, next_answer = pending.pop()
-                try:
-                    co.switch()
-                except Fail:
-                    pass
-                except Success, e:
-                    return e.args[0]
-                else:
-                    # zero_or_one() called, clone the coroutine
-                    co2 = co.clone()
-                    pending.append((co2, 1))
-                    pending.append((co, 0))
-            raise Fail("no solution")
-
-        pending = []
-        main = _stackless.clonable.getcurrent()
-
-        def zero_or_one():
-            main.switch()
-            return next_answer
-
-        # ____________________________________________________________
-
-        invalid_prefixes = {
-            (0, 0): True,
-            (0, 1, 0): True,
-            (0, 1, 1): True,
-            (1, 0): True,
-            (1, 1, 0, 0): True,
-            }
-
-        def example():
-            test = []
-            for n in range(5):
-                test.append(zero_or_one())
-                if tuple(test) in invalid_prefixes:
-                    raise Fail
-            raise Success(test)
-
-        res = first_solution(example)
-        assert res == [1, 1, 0, 1, 0]
-
-
-    def test_myself_may_not_be_me_any_more(self):
-        import gc
-        from _stackless import clonable
-
-        counter = [0]
-
-        def runner():
-            while 1:
-                assert clonable.getcurrent() is coro
-                counter[0] += 1
-                main.switch()
-
-        main = clonable.getcurrent()
-        coro = clonable()
-        coro.bind(runner)
-
-        coro.switch()
-        assert counter == [1]
-
-        assert clonable.getcurrent() is main
-        coro1 = coro.clone()
-        assert counter == [1]
-        assert clonable.getcurrent() is main
-        coro.switch()
-        assert counter == [2]
-        coro.switch()
-        assert counter == [3]
-        assert clonable.getcurrent() is main
-        del coro1
-        gc.collect()
-        #print "collected!"
-        assert clonable.getcurrent() is main
-        assert counter == [3]
-        coro.switch()
-        assert clonable.getcurrent() is main
-        assert counter == [4]
-
-
-    def test_fork(self):
-        import _stackless
-
-        class Fail(Exception):
-            pass
-
-        class Success(Exception):
-            pass
-
-        def first_solution(func):
-            global next_answer
-            co = _stackless.clonable()
-            co.bind(func)
-            try:
-                co.switch()
-            except Success, e:
-                return e.args[0]
-
-        def zero_or_one():
-            sub = _stackless.fork()
-            if sub is not None:
-                # in the parent: run the child first
-                try:
-                    sub.switch()
-                except Fail:
-                    pass
-                # then proceed with answer '1'
-                return 1
-            else:
-                # in the child: answer '0'
-                return 0
-
-        # ____________________________________________________________
-
-        invalid_prefixes = {
-            (0, 0): True,
-            (0, 1, 0): True,
-            (0, 1, 1): True,
-            (1, 0): True,
-            (1, 1, 0, 0): True,
-            }
-
-        def example():
-            test = []
-            for n in range(5):
-                test.append(zero_or_one())
-                if tuple(test) in invalid_prefixes:
-                    raise Fail
-            raise Success(test)
-
-        res = first_solution(example)
-        assert res == [1, 1, 0, 1, 0]
-
-    def test_clone_before_start(self):
-        """Tests that a clonable coroutine can be
-        cloned before it is started
-        (this used to fail with a segmentation fault)
-        """
-        import _stackless
-
-        counter = [0]
-        def simple_coro():
-            print "hello"
-            counter[0] += 1
-
-        s = _stackless.clonable()
-        s.bind(simple_coro)
-        t = s.clone()
-        s.switch()
-        t.switch()
-        assert counter[0] == 2
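first_solution() in test_solver and test_fork implements backtracking by snapshotting the whole execution state: each clone (or fork) stands for the untried half of a binary choice. The same depth-first search written with an explicit stack of partial answer lists, which makes the expected result [1, 1, 0, 1, 0] easy to check by hand (illustration only):

    invalid_prefixes = {
        (0, 0): True, (0, 1, 0): True, (0, 1, 1): True,
        (1, 0): True, (1, 1, 0, 0): True,
    }

    def first_solution(length=5):
        pending = [[]]
        while pending:
            test = pending.pop()
            if tuple(test) in invalid_prefixes:
                continue                   # corresponds to raising Fail
            if len(test) == length:
                return test                # corresponds to raising Success
            pending.append(test + [1])     # pushed first, explored second
            pending.append(test + [0])     # popped first: try 0 before 1
        return None

    assert first_solution() == [1, 1, 0, 1, 0]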
diff --git a/pypy/module/_stackless/test/test_composable_coroutine.py b/pypy/module/_stackless/test/test_composable_coroutine.py
deleted file mode 100644
--- a/pypy/module/_stackless/test/test_composable_coroutine.py
+++ /dev/null
@@ -1,133 +0,0 @@
-""" a faith is the connection between past and future that divides the
-    application into switch-compatible chunks.
-    -- stakkars
-"""
-from pypy.conftest import gettestobjspace
-from py.test import skip
-
-class AppTest_ComposableCoroutine:
-
-    def setup_class(cls):
-        space = gettestobjspace(usemodules=('_stackless',))
-        cls.space = space
-
-        cls.w_generator_ = space.appexec([], """():
-            import _stackless
-
-            generators_costate = _stackless.usercostate()
-            main = generators_costate.getcurrent()
-
-            class generator_iterator(_stackless.coroutine):
-
-                def __iter__(self):
-                    return self
-
-                def next(self):
-                    if self.gi_answer is not None:
-                        raise ValueError('stackless-generator'
-                                         ' already executing')
-                    self.gi_answer = []
-                    self.gi_caller = generators_costate.getcurrent()
-                    self.switch()
-                    answer = self.gi_answer
-                    self.gi_answer = None
-                    if answer:
-                        return answer[0]
-                    else:
-                        raise StopIteration
-
-            def generator(f):
-                def myfunc(*args, **kwds):
-                    g = generators_costate.spawn(generator_iterator)
-                    g.gi_answer = None
-                    g.bind(f, *args, **kwds)
-                    return g
-                return myfunc
-
-            def Yield(value):
-                g = generators_costate.getcurrent()
-                if g is main:
-                    raise ValueError('Yield() outside any stackless-generator')
-                assert isinstance(g, generator_iterator)
-                assert g.gi_answer == []
-                g.gi_answer.append(value)
-                g.gi_caller.switch()
-
-            generator.Yield = Yield
-            generator._costate = generators_costate
-            return (generator,)
-        """)
-
-    def test_simple_costate(self):
-        import _stackless
-        costate = _stackless.usercostate()
-        main = costate.getcurrent()
-
-        result = []
-        def f():
-            result.append(costate.getcurrent())
-        co = costate.spawn()
-        co.bind(f)
-        co.switch()
-        assert result == [co]
-
-    def test_generator(self):
-        generator, = self.generator_
-
-        def squares(n):
-            for i in range(n):
-                generator.Yield(i*i)
-        squares = generator(squares)
-
-        lst1 = [i*i for i in range(10)]
-        for got in squares(10):
-            expected = lst1.pop(0)
-            assert got == expected
-        assert lst1 == []
-
-    def test_multiple_costates(self):
-        """Test that two independent costates mix transparently:
-
-        - compute_costate, used for a coroutine that fills a list with
-                           some more items each time it is switched to
-
-        - generators_costate, used internally by self.generator (see above)
-        """
-
-        import _stackless
-        generator, = self.generator_
-
-        # you can see how it fails if we don't have two different costates
-        # by setting compute_costate to generator._costate instead
-        compute_costate = _stackless.usercostate()
-        compute_main = compute_costate.getcurrent()
-        lst = []
-
-        def filler():     # -> 0, 1, 2, 100, 101, 102, 200, 201, 202, 300 ...
-            for k in range(5):
-                for j in range(3):
-                    lst.append(100 * k + j)
-                compute_main.switch()
-
-        filler_co = compute_costate.spawn()
-        filler_co.bind(filler)
-
-        def grab_next_value():
-            while not lst:
-                #print 'filling more...'
-                filler_co.switch()
-                #print 'now lst =', lst
-            #print 'grabbing', lst[0]
-            return lst.pop(0)
-
-        def squares(n):
-            for i in range(n):
-                #print 'square:', i
-                generator.Yield(i*grab_next_value())
-        squares = generator(squares)
-
-        lst1 = [0, 1, 4,  300, 404, 510,  1200, 1407, 1616,  2700]
-        for got in squares(10):
-            expected = lst1.pop(0)
-            assert got == expected
-        assert lst1 == []
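The generator decorator built in setup_class re-creates ordinary generator semantics on top of a private coroutine costate: Yield() stores a value and switches back to the caller, and next() switches in and picks it up. For comparison, the squares example in test_generator behaves exactly like the native generator below (illustration only):

    def squares(n):
        for i in range(n):
            yield i * i

    assert list(squares(10)) == [i * i for i in range(10)]

The costate-based version exists to show that several independent coroutine "worlds" (generators_costate, compute_costate) can be composed without interfering, which is what test_multiple_costates exercises.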
diff --git a/pypy/module/_stackless/test/test_coroutine.py b/pypy/module/_stackless/test/test_coroutine.py
deleted file mode 100644
--- a/pypy/module/_stackless/test/test_coroutine.py
+++ /dev/null
@@ -1,168 +0,0 @@
-from pypy.conftest import gettestobjspace, option
-from py.test import skip
-
-
-class AppTest_Coroutine:
-
-    def setup_class(cls):
-        space = gettestobjspace(usemodules=('_stackless',))
-        cls.space = space
-
-    def test_raise_propagate(self):
-        import _stackless as stackless
-        co = stackless.coroutine()
-        def f():
-            return 1/0
-        co.bind(f)
-        try:
-            co.switch()
-        except ZeroDivisionError:
-            pass
-        else:
-            raise AssertionError("exception not propagated")
-
-    def test_strange_test(self):
-        from _stackless import coroutine
-        def f():
-            print "in new coro"
-            return 42
-        def create():
-            b = coroutine()
-            b.bind(f)
-            print "bound"
-            b.switch()
-            print "switched"
-            return b
-        a = coroutine()
-        a.bind(create)
-        b = a.switch()
-        # now b.parent = a
-        def nothing():
-            pass
-        a.bind(nothing)
-        def kill():
-            # this sets a.parent = b
-            a.kill()
-        b.bind(kill)
-        b.switch()
-
-    def test_kill(self):
-        import _stackless as stackless
-        co = stackless.coroutine()
-        def f():
-            pass
-        co.bind(f)
-        assert co.is_alive
-        co.kill()
-        assert not co.is_alive
-
-    def test_kill_running(self):
-        coroutineexit = []
-        import _stackless as stackless
-        main = stackless.coroutine.getcurrent()
-        result = []
-        co = stackless.coroutine()
-        def f():
-            x = 2
-            try:
-                result.append(1)
-                main.switch()
-                x = 3
-            except CoroutineExit:
-                coroutineexit.append(True)
-                raise
-            finally:
-                result.append(x)
-            result.append(4)
-        co.bind(f)
-        assert co.is_alive
-        co.switch()
-        assert co.is_alive
-        assert result == [1]
-        co.kill()
-        assert not co.is_alive
-        assert result == [1, 2]
-        assert coroutineexit == [True]
-


More information about the pypy-commit mailing list