[pypy-commit] pypy concurrent-marksweep: hg merge default

arigo noreply at buildbot.pypy.org
Mon Dec 26 13:51:26 CET 2011


Author: Armin Rigo <arigo at tunes.org>
Branch: concurrent-marksweep
Changeset: r50871:a0048b726e62
Date: 2011-12-26 13:49 +0100
http://bitbucket.org/pypy/pypy/changeset/a0048b726e62/

Log:	hg merge default

diff too long, truncating to 10000 out of 56002 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -1,3 +1,4 @@
 b590cf6de4190623aad9aa698694c22e614d67b9 release-1.5
 b48df0bf4e75b81d98f19ce89d4a7dc3e1dab5e5 benchmarked
 d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6
+ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7
diff --git a/lib-python/2.7/test/test_os.py b/lib-python/2.7/test/test_os.py
--- a/lib-python/2.7/test/test_os.py
+++ b/lib-python/2.7/test/test_os.py
@@ -74,7 +74,8 @@
         self.assertFalse(os.path.exists(name),
                     "file already exists for temporary file")
         # make sure we can create the file
-        open(name, "w")
+        f = open(name, "w")
+        f.close()
         self.files.append(name)
 
     def test_tempnam(self):
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -201,7 +201,7 @@
     RegrTest('test_difflib.py'),
     RegrTest('test_dircache.py', core=True),
     RegrTest('test_dis.py'),
-    RegrTest('test_distutils.py'),
+    RegrTest('test_distutils.py', skip=True),
     RegrTest('test_dl.py', skip=True),
     RegrTest('test_doctest.py', usemodules="thread"),
     RegrTest('test_doctest2.py'),
diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py
--- a/lib-python/modified-2.7/ctypes/__init__.py
+++ b/lib-python/modified-2.7/ctypes/__init__.py
@@ -351,7 +351,7 @@
         self._FuncPtr = _FuncPtr
 
         if handle is None:
-            self._handle = _ffi.CDLL(name)
+            self._handle = _ffi.CDLL(name, mode)
         else:
             self._handle = handle
 
diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py
--- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py
+++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py
@@ -1,5 +1,6 @@
 import unittest
 from ctypes import *
+from ctypes.test import xfail
 import _ctypes_test
 
 class Callbacks(unittest.TestCase):
@@ -98,6 +99,7 @@
 ##        self.check_type(c_char_p, "abc")
 ##        self.check_type(c_char_p, "def")
 
+    @xfail
     def test_pyobject(self):
         o = ()
         from sys import getrefcount as grc
diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py
--- a/lib-python/modified-2.7/ctypes/test/test_libc.py
+++ b/lib-python/modified-2.7/ctypes/test/test_libc.py
@@ -25,7 +25,10 @@
         lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort))
         self.assertEqual(chars.raw, "   ,,aaaadmmmnpppsss\x00")
 
-    def test_no_more_xfail(self):
+    def SKIPPED_test_no_more_xfail(self):
+        # We decided to not explicitly support the whole ctypes-2.7
+        # and instead go for a case-by-case, demand-driven approach.
+        # So this test is skipped instead of failing.
         import socket
         import ctypes.test
         self.assertTrue(not hasattr(ctypes.test, 'xfail'),
diff --git a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py
--- a/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py
+++ b/lib-python/modified-2.7/ctypes/test/test_simplesubclasses.py
@@ -1,6 +1,5 @@
 import unittest
 from ctypes import *
-from ctypes.test import xfail
 
 class MyInt(c_int):
     def __cmp__(self, other):
@@ -27,7 +26,6 @@
         self.assertEqual(None, cb())
 
 
-    @xfail
     def test_int_callback(self):
         args = []
         def func(arg):
diff --git a/lib-python/modified-2.7/heapq.py b/lib-python/modified-2.7/heapq.py
new file mode 100644
--- /dev/null
+++ b/lib-python/modified-2.7/heapq.py
@@ -0,0 +1,442 @@
+# -*- coding: latin-1 -*-
+
+"""Heap queue algorithm (a.k.a. priority queue).
+
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0.  For the sake of comparison,
+non-existing elements are considered to be infinite.  The interesting
+property of a heap is that a[0] is always its smallest element.
+
+Usage:
+
+heap = []            # creates an empty heap
+heappush(heap, item) # pushes a new item on the heap
+item = heappop(heap) # pops the smallest item from the heap
+item = heap[0]       # smallest item on the heap without popping it
+heapify(x)           # transforms list into a heap, in-place, in linear time
+item = heapreplace(heap, item) # pops and returns smallest item, and adds
+                               # new item; the heap size is unchanged
+
+Our API differs from textbook heap algorithms as follows:
+
+- We use 0-based indexing.  This makes the relationship between the
+  index for a node and the indexes for its children slightly less
+  obvious, but is more suitable since Python uses 0-based indexing.
+
+- Our heappop() method returns the smallest item, not the largest.
+
+These two make it possible to view the heap as a regular Python list
+without surprises: heap[0] is the smallest item, and heap.sort()
+maintains the heap invariant!
+"""
+
+# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
+
+__about__ = """Heap queues
+
+[explanation by François Pinard]
+
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0.  For the sake of comparison,
+non-existing elements are considered to be infinite.  The interesting
+property of a heap is that a[0] is always its smallest element.
+
+The strange invariant above is meant to be an efficient memory
+representation for a tournament.  The numbers below are `k', not a[k]:
+
+                                   0
+
+                  1                                 2
+
+          3               4                5               6
+
+      7       8       9       10      11      12      13      14
+
+    15 16   17 18   19 20   21 22   23 24   25 26   27 28   29 30
+
+
+In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'.  In
+an usual binary tournament we see in sports, each cell is the winner
+over the two cells it tops, and we can trace the winner down the tree
+to see all opponents s/he had.  However, in many computer applications
+of such tournaments, we do not need to trace the history of a winner.
+To be more memory efficient, when a winner is promoted, we try to
+replace it by something else at a lower level, and the rule becomes
+that a cell and the two cells it tops contain three different items,
+but the top cell "wins" over the two topped cells.
+
+If this heap invariant is protected at all times, index 0 is clearly
+the overall winner.  The simplest algorithmic way to remove it and
+find the "next" winner is to move some loser (let's say cell 30 in the
+diagram above) into the 0 position, and then percolate this new 0 down
+the tree, exchanging values, until the invariant is re-established.
+This is clearly logarithmic on the total number of items in the tree.
+By iterating over all items, you get an O(n ln n) sort.
+
+A nice feature of this sort is that you can efficiently insert new
+items while the sort is going on, provided that the inserted items are
+not "better" than the last 0'th element you extracted.  This is
+especially useful in simulation contexts, where the tree holds all
+incoming events, and the "win" condition means the smallest scheduled
+time.  When an event schedules other events for execution, they are
+scheduled into the future, so they can easily go into the heap.  So, a
+heap is a good structure for implementing schedulers (this is what I
+used for my MIDI sequencer :-).
+
+Various structures for implementing schedulers have been extensively
+studied, and heaps are good for this, as they are reasonably speedy,
+the speed is almost constant, and the worst case is not much different
+than the average case.  However, there are other representations which
+are more efficient overall, yet the worst cases might be terrible.
+
+Heaps are also very useful in big disk sorts.  You most probably all
+know that a big sort implies producing "runs" (which are pre-sorted
+sequences, whose size is usually related to the amount of CPU memory),
+followed by merging passes for these runs, which are often
+very cleverly organised[1].  It is very important that the initial
+sort produces the longest runs possible.  Tournaments are a good way
+to achieve that.  If, using all the memory available to hold a tournament, you
+replace and percolate items that happen to fit the current run, you'll
+produce runs which are twice the size of the memory for random input,
+and much better for input fuzzily ordered.
+
+Moreover, if you output the 0'th item on disk and get an input which
+may not fit in the current tournament (because the value "wins" over
+the last output value), it cannot fit in the heap, so the size of the
+heap decreases.  The freed memory could be cleverly reused immediately
+for progressively building a second heap, which grows at exactly the
+same rate the first heap is melting.  When the first heap completely
+vanishes, you switch heaps and start a new run.  Clever and quite
+effective!
+
+In a word, heaps are useful memory structures to know.  I use them in
+a few applications, and I think it is good to keep a `heap' module
+around. :-)
+
+--------------------
+[1] The disk balancing algorithms which are current, nowadays, are
+more annoying than clever, and this is a consequence of the seeking
+capabilities of the disks.  On devices which cannot seek, like big
+tape drives, the story was quite different, and one had to be very
+clever to ensure (far in advance) that each tape movement will be the
+most effective possible (that is, will best participate at
+"progressing" the merge).  Some tapes were even able to read
+backwards, and this was also used to avoid the rewinding time.
+Believe me, real good tape sorts were quite spectacular to watch!
+From all times, sorting has always been a Great Art! :-)
+"""
+
+__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
+           'nlargest', 'nsmallest', 'heappushpop']
+
+from itertools import islice, repeat, count, imap, izip, tee, chain
+from operator import itemgetter
+import bisect
+
+def heappush(heap, item):
+    """Push item onto heap, maintaining the heap invariant."""
+    heap.append(item)
+    _siftdown(heap, 0, len(heap)-1)
+
+def heappop(heap):
+    """Pop the smallest item off the heap, maintaining the heap invariant."""
+    lastelt = heap.pop()    # raises appropriate IndexError if heap is empty
+    if heap:
+        returnitem = heap[0]
+        heap[0] = lastelt
+        _siftup(heap, 0)
+    else:
+        returnitem = lastelt
+    return returnitem
+
+def heapreplace(heap, item):
+    """Pop and return the current smallest value, and add the new item.
+
+    This is more efficient than heappop() followed by heappush(), and can be
+    more appropriate when using a fixed-size heap.  Note that the value
+    returned may be larger than item!  That constrains reasonable uses of
+    this routine unless written as part of a conditional replacement:
+
+        if item > heap[0]:
+            item = heapreplace(heap, item)
+    """
+    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
+    heap[0] = item
+    _siftup(heap, 0)
+    return returnitem
+
+def heappushpop(heap, item):
+    """Fast version of a heappush followed by a heappop."""
+    if heap and heap[0] < item:
+        item, heap[0] = heap[0], item
+        _siftup(heap, 0)
+    return item
+
+def heapify(x):
+    """Transform list into a heap, in-place, in O(len(heap)) time."""
+    n = len(x)
+    # Transform bottom-up.  The largest index there's any point to looking at
+    # is the largest with a child index in-range, so must have 2*i + 1 < n,
+    # or i < (n-1)/2.  If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
+    # j-1 is the largest, which is n//2 - 1.  If n is odd = 2*j+1, this is
+    # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
+    for i in reversed(xrange(n//2)):
+        _siftup(x, i)
+
+def nlargest(n, iterable):
+    """Find the n largest elements in a dataset.
+
+    Equivalent to:  sorted(iterable, reverse=True)[:n]
+    """
+    if n < 0: # for consistency with the c impl
+        return []
+    it = iter(iterable)
+    result = list(islice(it, n))
+    if not result:
+        return result
+    heapify(result)
+    _heappushpop = heappushpop
+    for elem in it:
+        _heappushpop(result, elem)
+    result.sort(reverse=True)
+    return result
+
+def nsmallest(n, iterable):
+    """Find the n smallest elements in a dataset.
+
+    Equivalent to:  sorted(iterable)[:n]
+    """
+    if n < 0: # for consistency with the c impl
+        return []
+    if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
+        # For smaller values of n, the bisect method is faster than a minheap.
+        # It is also memory efficient, consuming only n elements of space.
+        it = iter(iterable)
+        result = sorted(islice(it, 0, n))
+        if not result:
+            return result
+        insort = bisect.insort
+        pop = result.pop
+        los = result[-1]    # los --> Largest of the nsmallest
+        for elem in it:
+            if los <= elem:
+                continue
+            insort(result, elem)
+            pop()
+            los = result[-1]
+        return result
+    # An alternative approach manifests the whole iterable in memory but
+    # saves comparisons by heapifying all at once.  Also, saves time
+    # over bisect.insort() which has O(n) data movement time for every
+    # insertion.  Finding the n smallest of an m length iterable requires
+    #    O(m) + O(n log m) comparisons.
+    h = list(iterable)
+    heapify(h)
+    return map(heappop, repeat(h, min(n, len(h))))
+
+# 'heap' is a heap at all indices >= startpos, except possibly for pos.  pos
+# is the index of a leaf with a possibly out-of-order value.  Restore the
+# heap invariant.
+def _siftdown(heap, startpos, pos):
+    newitem = heap[pos]
+    # Follow the path to the root, moving parents down until finding a place
+    # newitem fits.
+    while pos > startpos:
+        parentpos = (pos - 1) >> 1
+        parent = heap[parentpos]
+        if newitem < parent:
+            heap[pos] = parent
+            pos = parentpos
+            continue
+        break
+    heap[pos] = newitem
+
+# The child indices of heap index pos are already heaps, and we want to make
+# a heap at index pos too.  We do this by bubbling the smaller child of
+# pos up (and so on with that child's children, etc) until hitting a leaf,
+# then using _siftdown to move the oddball originally at index pos into place.
+#
+# We *could* break out of the loop as soon as we find a pos where newitem <=
+# both its children, but turns out that's not a good idea, and despite that
+# many books write the algorithm that way.  During a heap pop, the last array
+# element is sifted in, and that tends to be large, so that comparing it
+# against values starting from the root usually doesn't pay (= usually doesn't
+# get us out of the loop early).  See Knuth, Volume 3, where this is
+# explained and quantified in an exercise.
+#
+# Cutting the # of comparisons is important, since these routines have no
+# way to extract "the priority" from an array element, so that intelligence
+# is likely to be hiding in custom __cmp__ methods, or in array elements
+# storing (priority, record) tuples.  Comparisons are thus potentially
+# expensive.
+#
+# On random arrays of length 1000, making this change cut the number of
+# comparisons made by heapify() a little, and those made by exhaustive
+# heappop() a lot, in accord with theory.  Here are typical results from 3
+# runs (3 just to demonstrate how small the variance is):
+#
+# Compares needed by heapify     Compares needed by 1000 heappops
+# --------------------------     --------------------------------
+# 1837 cut to 1663               14996 cut to 8680
+# 1855 cut to 1659               14966 cut to 8678
+# 1847 cut to 1660               15024 cut to 8703
+#
+# Building the heap by using heappush() 1000 times instead required
+# 2198, 2148, and 2219 compares:  heapify() is more efficient, when
+# you can use it.
+#
+# The total compares needed by list.sort() on the same lists were 8627,
+# 8627, and 8632 (this should be compared to the sum of heapify() and
+# heappop() compares):  list.sort() is (unsurprisingly!) more efficient
+# for sorting.
+
+def _siftup(heap, pos):
+    endpos = len(heap)
+    startpos = pos
+    newitem = heap[pos]
+    # Bubble up the smaller child until hitting a leaf.
+    childpos = 2*pos + 1    # leftmost child position
+    while childpos < endpos:
+        # Set childpos to index of smaller child.
+        rightpos = childpos + 1
+        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
+            childpos = rightpos
+        # Move the smaller child up.
+        heap[pos] = heap[childpos]
+        pos = childpos
+        childpos = 2*pos + 1
+    # The leaf at pos is empty now.  Put newitem there, and bubble it up
+    # to its final resting place (by sifting its parents down).
+    heap[pos] = newitem
+    _siftdown(heap, startpos, pos)
+
+# If available, use C implementation
+try:
+    from _heapq import *
+except ImportError:
+    pass
+
+def merge(*iterables):
+    '''Merge multiple sorted inputs into a single sorted output.
+
+    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
+    does not pull the data into memory all at once, and assumes that each of
+    the input streams is already sorted (smallest to largest).
+
+    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
+    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
+
+    '''
+    _heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
+
+    h = []
+    h_append = h.append
+    for itnum, it in enumerate(map(iter, iterables)):
+        try:
+            next = it.next
+            h_append([next(), itnum, next])
+        except _StopIteration:
+            pass
+    heapify(h)
+
+    while 1:
+        try:
+            while 1:
+                v, itnum, next = s = h[0]   # raises IndexError when h is empty
+                yield v
+                s[0] = next()               # raises StopIteration when exhausted
+                _heapreplace(h, s)          # restore heap condition
+        except _StopIteration:
+            _heappop(h)                     # remove empty iterator
+        except IndexError:
+            return
+
+# Extend the implementations of nsmallest and nlargest to use a key= argument
+_nsmallest = nsmallest
+def nsmallest(n, iterable, key=None):
+    """Find the n smallest elements in a dataset.
+
+    Equivalent to:  sorted(iterable, key=key)[:n]
+    """
+    # Short-cut for n==1 is to use min() when len(iterable)>0
+    if n == 1:
+        it = iter(iterable)
+        head = list(islice(it, 1))
+        if not head:
+            return []
+        if key is None:
+            return [min(chain(head, it))]
+        return [min(chain(head, it), key=key)]
+
+    # When n>=size, it's faster to use sort()
+    try:
+        size = len(iterable)
+    except (TypeError, AttributeError):
+        pass
+    else:
+        if n >= size:
+            return sorted(iterable, key=key)[:n]
+
+    # When key is none, use simpler decoration
+    if key is None:
+        it = izip(iterable, count())                        # decorate
+        result = _nsmallest(n, it)
+        return map(itemgetter(0), result)                   # undecorate
+
+    # General case, slowest method
+    in1, in2 = tee(iterable)
+    it = izip(imap(key, in1), count(), in2)                 # decorate
+    result = _nsmallest(n, it)
+    return map(itemgetter(2), result)                       # undecorate
+
+_nlargest = nlargest
+def nlargest(n, iterable, key=None):
+    """Find the n largest elements in a dataset.
+
+    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]
+    """
+
+    # Short-cut for n==1 is to use max() when len(iterable)>0
+    if n == 1:
+        it = iter(iterable)
+        head = list(islice(it, 1))
+        if not head:
+            return []
+        if key is None:
+            return [max(chain(head, it))]
+        return [max(chain(head, it), key=key)]
+
+    # When n>=size, it's faster to use sort()
+    try:
+        size = len(iterable)
+    except (TypeError, AttributeError):
+        pass
+    else:
+        if n >= size:
+            return sorted(iterable, key=key, reverse=True)[:n]
+
+    # When key is none, use simpler decoration
+    if key is None:
+        it = izip(iterable, count(0,-1))                    # decorate
+        result = _nlargest(n, it)
+        return map(itemgetter(0), result)                   # undecorate
+
+    # General case, slowest method
+    in1, in2 = tee(iterable)
+    it = izip(imap(key, in1), count(0,-1), in2)             # decorate
+    result = _nlargest(n, it)
+    return map(itemgetter(2), result)                       # undecorate
+
+if __name__ == "__main__":
+    # Simple sanity test
+    heap = []
+    data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
+    for item in data:
+        heappush(heap, item)
+    sort = []
+    while heap:
+        sort.append(heappop(heap))
+    print sort
+
+    import doctest
+    doctest.testmod()
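
(A quick orientation for the new heapq.py above: a minimal usage sketch of the
API described in its docstring.  The functions and signatures are the ones
defined in the file; the data values are made up.)

    import heapq

    events = [(5, 'e5'), (1, 'e1'), (3, 'e3')]
    heapq.heapify(events)                  # in-place, linear time; events[0] is now the smallest pair
    heapq.heappush(events, (2, 'e2'))      # push while keeping the heap invariant
    first = heapq.heappop(events)          # (1, 'e1') -- always pops the smallest item
    smallest = heapq.nsmallest(2, [9, 4, 7, 1])       # [1, 4], same result as sorted(...)[:2]
    merged = list(heapq.merge([1, 3, 5], [2, 4, 6]))  # [1, 2, 3, 4, 5, 6]; inputs must each be sorted
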
diff --git a/lib-python/2.7/pkgutil.py b/lib-python/modified-2.7/pkgutil.py
copy from lib-python/2.7/pkgutil.py
copy to lib-python/modified-2.7/pkgutil.py
--- a/lib-python/2.7/pkgutil.py
+++ b/lib-python/modified-2.7/pkgutil.py
@@ -244,7 +244,8 @@
         return mod
 
     def get_data(self, pathname):
-        return open(pathname, "rb").read()
+        with open(pathname, "rb") as f:
+            return f.read()
 
     def _reopen(self):
         if self.file and self.file.closed:
diff --git a/lib-python/modified-2.7/test/test_heapq.py b/lib-python/modified-2.7/test/test_heapq.py
--- a/lib-python/modified-2.7/test/test_heapq.py
+++ b/lib-python/modified-2.7/test/test_heapq.py
@@ -186,6 +186,11 @@
         self.assertFalse(sys.modules['heapq'] is self.module)
         self.assertTrue(hasattr(self.module.heapify, 'func_code'))
 
+    def test_islice_protection(self):
+        m = self.module
+        self.assertFalse(m.nsmallest(-1, [1]))
+        self.assertFalse(m.nlargest(-1, [1]))
+
 
 class TestHeapC(TestHeap):
     module = c_heapq
diff --git a/lib-python/modified-2.7/test/test_import.py b/lib-python/modified-2.7/test/test_import.py
--- a/lib-python/modified-2.7/test/test_import.py
+++ b/lib-python/modified-2.7/test/test_import.py
@@ -64,6 +64,7 @@
             except ImportError, err:
                 self.fail("import from %s failed: %s" % (ext, err))
             else:
+                # XXX importing .pyw is missing on Windows
                 self.assertEqual(mod.a, a,
                     "module loaded (%s) but contents invalid" % mod)
                 self.assertEqual(mod.b, b,
diff --git a/lib-python/modified-2.7/test/test_repr.py b/lib-python/modified-2.7/test/test_repr.py
--- a/lib-python/modified-2.7/test/test_repr.py
+++ b/lib-python/modified-2.7/test/test_repr.py
@@ -254,8 +254,14 @@
         eq = self.assertEqual
         touch(os.path.join(self.subpkgname, self.pkgname + os.extsep + 'py'))
         from areallylongpackageandmodulenametotestreprtruncation.areallylongpackageandmodulenametotestreprtruncation import areallylongpackageandmodulenametotestreprtruncation
-        eq(repr(areallylongpackageandmodulenametotestreprtruncation),
-           "<module '%s' from '%s'>" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__))
+        # On PyPy, we use %r to format the file name; on CPython it is done
+        # with '%s'.  It seems to me that %r is safer <arigo>.
+        if '__pypy__' in sys.builtin_module_names:
+            eq(repr(areallylongpackageandmodulenametotestreprtruncation),
+               "<module %r from %r>" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__))
+        else:
+            eq(repr(areallylongpackageandmodulenametotestreprtruncation),
+               "<module '%s' from '%s'>" % (areallylongpackageandmodulenametotestreprtruncation.__name__, areallylongpackageandmodulenametotestreprtruncation.__file__))
         eq(repr(sys), "<module 'sys' (built-in)>")
 
     def test_type(self):
diff --git a/lib-python/2.7/test/test_subprocess.py b/lib-python/modified-2.7/test/test_subprocess.py
copy from lib-python/2.7/test/test_subprocess.py
copy to lib-python/modified-2.7/test/test_subprocess.py
--- a/lib-python/2.7/test/test_subprocess.py
+++ b/lib-python/modified-2.7/test/test_subprocess.py
@@ -16,11 +16,11 @@
 # Depends on the following external programs: Python
 #
 
-if mswindows:
-    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
-                                                'os.O_BINARY);')
-else:
-    SETBINARY = ''
+#if mswindows:
+#    SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
+#                                                'os.O_BINARY);')
+#else:
+#    SETBINARY = ''
 
 
 try:
@@ -420,8 +420,9 @@
         self.assertStderrEqual(stderr, "")
 
     def test_universal_newlines(self):
-        p = subprocess.Popen([sys.executable, "-c",
-                          'import sys,os;' + SETBINARY +
+        # NB. replaced SETBINARY with the -u flag
+        p = subprocess.Popen([sys.executable, "-u", "-c",
+                          'import sys,os;' + #SETBINARY +
                           'sys.stdout.write("line1\\n");'
                           'sys.stdout.flush();'
                           'sys.stdout.write("line2\\r");'
@@ -448,8 +449,9 @@
 
     def test_universal_newlines_communicate(self):
         # universal newlines through communicate()
-        p = subprocess.Popen([sys.executable, "-c",
-                          'import sys,os;' + SETBINARY +
+        # NB. replaced SETBINARY with the -u flag
+        p = subprocess.Popen([sys.executable, "-u", "-c",
+                          'import sys,os;' + #SETBINARY +
                           'sys.stdout.write("line1\\n");'
                           'sys.stdout.flush();'
                           'sys.stdout.write("line2\\r");'
diff --git a/lib-python/modified-2.7/urllib2.py b/lib-python/modified-2.7/urllib2.py
--- a/lib-python/modified-2.7/urllib2.py
+++ b/lib-python/modified-2.7/urllib2.py
@@ -395,11 +395,7 @@
         meth_name = protocol+"_response"
         for processor in self.process_response.get(protocol, []):
             meth = getattr(processor, meth_name)
-            try:
-                response = meth(req, response)
-            except:
-                response.close()
-                raise
+            response = meth(req, response)
 
         return response
 
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -379,12 +379,14 @@
 class defaultdict(dict):
     
     def __init__(self, *args, **kwds):
-        self.default_factory = None
-        if 'default_factory' in kwds:
-            self.default_factory = kwds.pop('default_factory')
-        elif len(args) > 0 and (callable(args[0]) or args[0] is None):
-            self.default_factory = args[0]
+        if len(args) > 0:
+            default_factory = args[0]
             args = args[1:]
+            if not callable(default_factory) and default_factory is not None:
+                raise TypeError("first argument must be callable")
+        else:
+            default_factory = None
+        self.default_factory = default_factory
         super(defaultdict, self).__init__(*args, **kwds)
  
     def __missing__(self, key):
@@ -404,7 +406,7 @@
             recurse.remove(id(self))
 
     def copy(self):
-        return type(self)(self, default_factory=self.default_factory)
+        return type(self)(self.default_factory, self)
     
     def __copy__(self):
         return self.copy()
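
(Side note on the defaultdict hunk just above: the constructor now takes the
default factory as the first positional argument, rejects non-callable
factories, and copy() passes the factory first, i.e. type(d)(d.default_factory, d).
A small sketch of that behaviour, using collections.defaultdict, which the
patched pure-Python class is meant to match; the keys and values are arbitrary.)

    from collections import defaultdict

    d = defaultdict(list)          # first positional argument is the default_factory
    d['missing'].append(1)         # absent key -> default_factory() == [], then append
    d2 = d.copy()                  # same factory and same items as d
    assert d2.default_factory is list and d2['missing'] == [1]
    try:
        defaultdict(42)            # non-callable, non-None factory raises TypeError
    except TypeError:
        pass
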
diff --git a/lib_pypy/_ctypes/pointer.py b/lib_pypy/_ctypes/pointer.py
--- a/lib_pypy/_ctypes/pointer.py
+++ b/lib_pypy/_ctypes/pointer.py
@@ -124,7 +124,8 @@
     # for now, we always allow types.pointer, else a lot of tests
     # break. We need to rethink how pointers are represented, though
     if my_ffitype is not ffitype and ffitype is not _ffi.types.void_p:
-        raise ArgumentError, "expected %s instance, got %s" % (type(value), ffitype)
+        raise ArgumentError("expected %s instance, got %s" % (type(value),
+                                                              ffitype))
     return value._get_buffer_value()
 
 def _cast_addr(obj, _, tp):
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -17,7 +17,7 @@
         if len(f) == 3:
             if (not hasattr(tp, '_type_')
                 or not isinstance(tp._type_, str)
-                or tp._type_ not in "iIhHbBlL"):
+                or tp._type_ not in "iIhHbBlLqQ"):
                 #XXX: are those all types?
                 #     we just dont get the type name
                 #     in the interp levle thrown TypeError
diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py
--- a/lib_pypy/_pypy_irc_topic.py
+++ b/lib_pypy/_pypy_irc_topic.py
@@ -1,117 +1,6 @@
-"""qvfgbcvna naq hgbcvna punvef
-qlfgbcvna naq hgbcvna punvef
-V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur png nf jryy?
-V'z fbeel, pbhyq lbh cyrnfr abg nterr jvgu gur punve nf jryy?
-jr cnffrq gur RH erivrj
-cbfg RhebClguba fcevag fgnegf 12.IVV.2007, 10nz
-RhebClguba raqrq
-n Pyrna Ragrecevfrf cebqhpgvba
-npnqrzl vf n pbzcyvpngrq ebyr tnzr
-npnqrzvn vf n pbzcyvpngrq ebyr tnzr
-jbexvat pbqr vf crn fbhc
-abg lbhe snhyg, zber yvxr vg'f n zbivat gnetrg
-guvf fragrapr vf snyfr
-abguvat vf gehr
-Yncfnat Fbhpubat
-Oenpunzhgnaqn
-fbeel, V'yy grnpu gur pnpghf ubj gb fjvz yngre
-Jul fb znal znal znal znal znal ivbyvaf?
-Jul fb znal znal znal znal znal bowrpgf?
-"eha njnl naq yvir ba n snez" nccebnpu gb fbsgjner qrirybczrag
-"va snpg, lbh zvtug xabj zber nobhg gur genafyngvba gbbypunva nsgre znfgrevat eclguba guna fbzr angvir fcrnxre xabjf nobhg uvf zbgure gbathr" - kbeNkNk
-"jurer qvq nyy gur ivbyvaf tb?"
-- ClCl fgnghf oybt: uggc://zberclcl.oybtfcbg.pbz/
-uggc://kxpq.pbz/353/
-pnfhnyvgl ivbyngvbaf naq sylvat
-wrgmg abpu fpubxbynqvtre
-R09 2X @PNN:85?
-vs lbh'er gelvat gb oybj hc fghss, jub pnerf?
-vs fghss oybjf hc, lbh pner
-2008 jvyy or gur lrne bs clcl ba gur qrfxgbc
-2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl
-2008 jvyy or gur lrne bs gur qrfxgbc ba #clcl, Wnahnel jvyy or gur zbagu bs gur nyc gbcf
-lrf, ohg jung'g gur frafr bs 0 < "qhena qhena"
-eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF
+"""eclguba: flagnk naq frznagvpf bs clguba, fcrrq bs p, erfgevpgvbaf bs wnin naq pbzcvyre reebe zrffntrf nf crargenoyr nf ZHZCF
 pglcrf unf n fcva bs 1/3
 ' ' vf n fcnpr gbb
-2009 jvyy or gur lrne bs WVG ba gur qrfxgbc
-N ynathntr vf n qvnyrpg jvgu na nezl naq anil
-gbcvpf ner sbe gur srroyr zvaqrq
-2009 vf gur lrne bs ersyrpgvba ba gur qrfxgbc
-gur tybor vf bhe cbal, gur pbfzbf bhe erny ubefr
-jub nz V naq vs lrf, ubj znal?
-cebtenzzvat va orq vf n cresrpgyl svar npgvivgl
-zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja
-EClguba: jr hfr vg fb lbh qba'g unir gb
-Zbber'f ynj vf n qeht jvgu gur jbefg pbzr qbja. EClguba: haqrpvqrq.
-guvatf jvyy or avpr naq fghss
-qba'g cbfg yvaxf gb cngragf urer
-Abg lbhe hfhny nanylfrf.
-Gur Neg bs gur Punaary
-Clguba 300
-V fhccbfr ZO bs UGZY cre frpbaq vf abg gur hfhny fcrrq zrnfher crbcyr jbhyq rkcrpg sbe n wvg
-gur fha arire frgf ba gur ClCl rzcver
-ghegyrf ner snfgre guna lbh guvax
-cebtenzzvat vf na nrfgrguvp raqrnibhe
-P vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner
-trezna vf tbbq sbe fbzrguvat, whfg abg sbe jevgvat fbsgjner
-trezna vf tbbq sbe artngvbaf, whfg abg sbe jevgvat fbsgjner
-# nffreg qvq abg penfu
-lbh fubhyq fgneg n cresrpg fbsgjner zbirzrag
-lbh fubhyq fgneg n cresrpg punaary gbcvp zbirzrag
-guvf vf n cresrpg punaary gbcvp
-guvf vf n frys-ersreragvny punaary gbcvp
-crrcubcr bcgvzvmngvbaf ner jung n Fhssvpvragyl Fzneg Pbzcvyre hfrf
-"crrcubcr" bcgvzvmngvbaf ner jung na bcgvzvfgvp Pbzcvyre hfrf
-pubbfr lbhe unpx
-gur 'fhcre' xrljbeq vf abg gung uhttnoyr
-wlguba cngpurf ner abg rabhtu sbe clcl
-- qb lbh xabj oreyva? - nyy bs vg? - jryy, whfg oreyva
-- ubj jvyy gur snpg gung gurl ner hfrq va bhe ercy punatr bhe gbcvpf?
-- ubj pna vg rire unir jbexrq?
-- jurer fubhyq gur unpx or fgberq?
-- Vg'f uneq gb fnl rknpgyl jung pbafgvghgrf erfrnepu va gur pbzchgre jbeyq, ohg nf n svefg nccebkvzngvba, vg'f fbsgjner gung qbrfa'g unir hfref.
-- Cebtenzzvat vf nyy nobhg xabjvat jura gb obvy gur benatr fcbatr qbaxrl npebff gur cuvyyvcvarf
-- Jul fb znal, znal, znal, znal, znal, znal qhpxyvatf?
-- ab qrgnvy vf bofpher rabhtu gb abg unir fbzr pbqr qrcraqvat ba vg.
-- jung V trarenyyl jnag vf serr fcrrqhcf
-- nyy bs ClCl vf kv-dhnyvgl
-"lbh pna nyjnlf xvyy -9 be bf._rkvg() vs lbh'er va n uheel"
-Ohernhpengf ohvyq npnqrzvp rzcverf juvpu puhea bhg zrnavatyrff fbyhgvbaf gb veeryrinag ceboyrzf.
-vg'f abg n unpx, vg'f n jbexnebhaq
-ClCl qbrfa'g unir pbcbylinevnqvp qrcraqragyl-zbabzbecurq ulcresyhknqf
-ClCl qbrfa'g punatr gur shaqnzragny culfvpf pbafgnagf
-Qnapr bs gur Fhtnecyhz Snvel
-Wnin vf whfg tbbq rabhtu gb or cenpgvpny, ohg abg tbbq rabhtu gb or hfnoyr.
-RhebClguba vf unccravat, qba'g rkcrpg nal dhvpx erfcbafr gvzrf.
-"V jbhyq yvxr gb fgnl njnl sebz ernyvgl gura"
-"gung'f jul gur 'be' vf ernyyl na 'naq' "
-jvgu nyy nccebcevngr pbagrkghnyvfngvbavat
-qba'g gevc ba gur cbjre pbeq
-vzcyrzragvat YBTB va YBTB: "ghegyrf nyy gur jnl qbja"
-gur ohooyrfbeg jbhyq or gur jebat jnl gb tb
-gur cevapvcyr bs pbafreingvba bs zrff
-gb fnir n gerr, rng n ornire
-Qre Ovore znpugf evpugvt: Antg nyyrf xnchgg.
-"Nal jbeyqivrj gung vfag jenpxrq ol frys-qbhog naq pbashfvba bire vgf bja vqragvgl vf abg n jbeyqivrj sbe zr." - Fpbgg Nnebafba
-jr oryvrir va cnapnxrf, znlor
-jr oryvrir va ghegyrf, znlor
-jr qrsvavgryl oryvrir va zrgn
-gur zngevk unf lbh
-"Yvsr vf uneq, gura lbh anc" - n png
-Vf Nezva ubzr jura gur havirefr prnfrf gb rkvfg?
-Qhrffryqbes fcevag fgnegrq
-frys.nobeeg("pnaabg ybnq negvpyrf")
-QRAGVFGEL FLZOBY YVTUG IREGVPNY NAQ JNIR
-"Gur UUH pnzchf vf n tbbq Dhnxr yriry" - Nezva
-"Gur UUH pnzchf jbhyq or n greevoyr dhnxr yriry - lbh'q arire unir n pyhr jurer lbh ner" - zvpunry
-N enqvbnpgvir png unf 18 unys-yvirf.
-<rfp> : j  [fvtu] <onpxfcnpr> <onpxfcnpr> <pgey>-f
-pbybe-pbqrq oyhrf
-"Neebtnapr va pbzchgre fpvrapr vf zrnfherq va anab-Qvwxfgenf."
-ClCl arrqf n Whfg-va-Gvzr WVG
-"Lbh pna'g gvzr geniry whfg ol frggvat lbhe pybpxf jebat"
-Gjb guernqf jnyx vagb n one. Gur onexrrcre ybbxf hc naq lryyf, "url, V jnag qba'g nal pbaqvgvbaf enpr yvxr gvzr ynfg!"
 Clguba 2.k rfg cerfdhr zbeg, ivir Clguba!
 Clguba 2.k vf abg qrnq
 Riregvzr fbzrbar nethrf jvgu "Fznyygnyx unf nyjnlf qbar K", vg vf  nyjnlf n tbbq uvag gung fbzrguvat arrqf gb or punatrq snfg. - Znephf Qraxre
@@ -119,7 +8,6 @@
 __kkk__ naq __ekkk__ if bcrengvba fybgf: cnegvpyr dhnaghz fhcrecbfvgvba xvaq bs sha
 ClCl vf na rkpvgvat grpuabybtl gung yrgf lbh gb jevgr snfg, cbegnoyr, zhygv-cyngsbez vagrecergref jvgu yrff rssbeg
 Nezva: "Cebybt vf n zrff.", PS: "Ab, vg'f irel pbby!", Nezva: "Vfa'g guvf jung V fnvq?"
-<nevtngb> tbbq, grfgf ner hfrshy fbzrgvzrf :-)
 ClCl vf yvxr nofheq gurngre
 jr unir ab nagv-vzcbffvoyr fgvpx gung znxrf fher gung nyy lbhe cebtenzf unyg
 clcl vf n enpr orgjrra crbcyr funivat lnxf naq gur havirefr cebqhpvat zber orneqrq lnxf. Fb sne, gur havirefr vf jvaavat
@@ -136,14 +24,14 @@
 ClCl 1.1.0orgn eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy
 "gurer fubhyq or bar naq bayl bar boivbhf jnl gb qb vg". ClCl inevnag: "gurer pna or A unys-ohttl jnlf gb qb vg"
 1.1 svany eryrnfrq: uggc://pbqrfcrnx.arg/clcl/qvfg/clcl/qbp/eryrnfr-1.1.0.ugzy
-1.1 svany eryrnfrq | <svwny> nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba
+<svwny> nzq64 naq ccp ner bayl ninvynoyr va ragrecevfr irefvba
 Vf gurer n clcl gvzr? - vs lbh pna srry vg (?) gura gurer vf
 <nevtngb> ab, abezny jbex vf fhpu zhpu yrff gvevat guna inpngvbaf
 <nevtngb> ab, abezny jbex vf fb zhpu yrff gvevat guna inpngvbaf
-SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva.
+-SVEFG gurl vtaber lbh, gura gurl ynhtu ng lbh, gura gurl svtug lbh, gura lbh jva.-
 vg'f Fhaqnl, znlor
 vg'f Fhaqnl, ntnva
-"3 + 3 = 8"  Nagb va gur WVG gnyx
+"3 + 3 = 8" - Nagb va gur WVG gnyx
 RPBBC vf unccravat
 RPBBC vf svavfurq
 cflpb rngf bar oenva cre vapu bs cebterff
@@ -175,10 +63,108 @@
 "nu, whfg va gvzr qbphzragngvba" (__nc__)
 ClCl vf abg n erny IZ: ab frtsnhyg unaqyref gb qb gur ener pnfrf
 lbh pna'g unir obgu pbairavrapr naq fcrrq
-gur WVG qbrfa'g jbex ba BF/K (abi'09)
-ab fhccbeg sbe BF/K evtug abj! (abi'09)
 fyvccref urvtug pna or zrnfherq va k86 ertvfgref
 clcl vf n enpr orgjrra gur vaqhfgel gelvat gb ohvyq znpuvarf jvgu zber naq zber erfbheprf, naq gur clcl qrirybcref gelvat gb rng nyy bs gurz. Fb sne, gur jvaare vf fgvyy hapyrne
+"znl pbagnva ahgf naq/be lbhat cbvagref"
+vg'f nyy irel fvzcyr, yvxr gur ubyvqnlf
+unccl ClCl'f lrne 2010!
+fnzhryr fnlf gung jr ybfg n enmbe. fb jr pna'g funir lnxf
+"yrg'f abg or bofpher, hayrff jr ernyyl arrq gb"
+<nevtngb> (abg guernq-fnsr, ohg jryy, abguvat vf)
+clcl unf znal ceboyrzf, ohg rnpu bar unf znal fbyhgvbaf
+whfg nabgure vgrz (1.333...) ba bhe erny-ahzorerq gbqb yvfg
+ClCl vf Fuveg Bevtnzv erfrnepu
+<svwny> nafjrevat n dhrfgvba: "ab -- sbe ng yrnfg bar cbffvoyr vagrecergngvba bs lbhe fragrapr"
+eryrnfr 1.2 hcpbzvat
+ClCl 1.2 eryrnfrq - uggc://clcl.bet/
+AB IPF QVFPHFFVBAF
+EClguba vf n svar pnzry unve oehfu
+ClCl vf n npghnyyl n ivfhnyvmngvba cebwrpg, jr whfg ohvyq vagrecergref gb unir vagrerfgvat qngn gb ivfhnyvmr
+clcl vf yvxr fnhfntrf
+naq abj sbe fbzrguvat pbzcyrgryl qvssrerag
+n 10gu bs sberire vf 1u45
+pbeerpg pbqr qbrfag arrq nal grfgf <qhpx>
+cbfgfgehpghenyvfz rgp.
+clcl UVG trarengbe
+gur arj clcl fcbeg vf gb cnff clcl ohtf nf pclguba ohtf
+jr unir zhpu zber vagrecergref guna hfref
+ClCl 1.3 njnvgvat eryrnfr
+ClCl 1.3 eryrnfrq
+vg frrzf gb zr gung bapr lbh frggyr ba na rkrphgvba / bowrpg zbqry naq / be olgrpbqr sbezng, lbh'ir nyernql qrpvqrq jung ynathntrf (jurer gur 'f' frrzf fhcresyhbhf) fhccbeg vf tbvat gb or svefg pynff sbe
+"Nyy ceboyrzf va ClCl pna or fbyirq ol nabgure yriry bs vagrecergngvba"
+ClCl 1.3 eryrnfrq (jvaqbjf ovanevrf vapyhqrq)
+jul qvq lbh thlf unir gb znxr gur ohvygva sbeghar zber vagrerfgvat guna npghny jbex? v whfg pngpurq zlfrys erfgnegvat clcl 20 gvzrf
+"jr hfrq gb unir n zrff jvgu na bofpher vagresnpr, abj jr unir zrff urer naq bofpher vagresnpr gurer. cebterff" crqebavf ba n clcl fcevag
+"phcf bs pbssrr ner yvxr nanybtvrf va gung V'z znxvat bar evtug abj"
+"vg'f nyjnlf hc gb hf, va n jnl be gur bgure"
+ClCl vf infg, naq pbagnvaf zhygvghqrf
+qravny vf eneryl n tbbq qrohttvat grpuavdhr
+"Yrg'f tb." - "Jr pna'g" - "Jul abg?" - "Jr'er jnvgvat sbe n Genafyngvba." - (qrfcnvevatyl) "Nu!"
+'gung'f qrsvavgryl n pnfr bs "hu????"'
+va gurbel gurer vf gur Ybbc, va cenpgvpr gurer ner oevqtrf
+gur uneqqevir - pbafgnag qngn cvytevzntr
+ClCl vf n gbby gb xrrc bgurejvfr qnatrebhf zvaqf fnsryl bpphcvrq.
+jr ner n trareny senzrjbex ohvyg ba pbafvfgrag nccyvpngvba bs nqubp-arff
+gur jnl gb nibvq n jbexnebhaq vf gb vagebqhpr n fgebatre jbexnebhaq fbzrjurer ryfr
+pnyyvat gur genafyngvba gbby punva n 'fpevcg' vf xvaq bs bssrafvir
+ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq pbafhzr nyy gur zrzbel ng nal gvzr
+ehaavat clcl-p genafyngr.cl vf n ovg yvxr jngpuvat n guevyyre zbivr, vg pbhyq qvr ng nal gvzr orpnhfr bs gur 32-ovg 4TO yvzvg bs ENZ
+Qh jvefg rora tranh qnf reervpura, jbena xrvare tynhog
+vs fjvgmreynaq jrer jurer terrpr vf (ba vfynaqf) jbhyq gurl nyy or pbaarpgrq ol oevqtrf?
+genafyngvat clcl jvgu pclguba vf fbbbbbb fybj
+ClCl 1.4 eryrnfrq!
+Jr ner abg urebrf, whfg irel cngvrag.
+QBAR zrnaf vg'f qbar
+jul gurer vf ab "ClCl 1.4 eryrnfrq" va gbcvp nal zber?
+fabj! fabj!
+svanyyl, zrephevny zvtengvba vf unccravat!
+Gur zvtengvba gb zrephevny vf pbzcyrgrq! uggc://ovgohpxrg.bet/clcl/clcl
+fabj! fabj! (gre)
+unccl arj lrne
+naq anaanaw gb lbh nf jryy
+Frrvat nf gur ynjf bs culfvpf ner ntnvafg lbh, lbh unir gb pnershyyl pbafvqre lbhe fpbcr fb gung lbhe tbnyf ner ernfbanoyr.
+nf hfhny va clcl, gur fbyhgvba nccrnef pbzcyrgryl qvfcebcbegvbangr gb gur ceboyrz naq vafgrnq jr'yy tb sbe n pbzcyrgryl qvssrerag fvzcyre nccebnpu gb gur bevtvany ceboyrz
+fabj, fabj!
+va clcl lbh ner nyjnlf ng gur jebat yriry, va bar jnl be gur bgure
+jryy, vg'f jebat ohg abg fb "irel jebat" nf vg ybbxrq
+<svwny> V ybir clcl
+ynmvarff vzcngvrapr naq uhoevf
+fabj, fabj
+EClguba: guvatf lbh jbhyqa'g qb va Clguba, naq pna'g qb va P.
+vg vf gur rkcrpgrq orunivbe, rkprcg jura lbh qba'g rkcrpg vg
+erqrsvavat lryybj frrzf yvxr n orggre vqrn
+"gung'f ubjrire whfg ratvarrevat" (svwny)
+"[vg] whfg fubjf ntnva gung eclguba vf bofpher" (psobym)
+"naljnl, clguba vf n infg ynathntr" (svwny)
+bhg-bs-yvr-thneqf
+"gurer ner qnlf ba juvpu lbh ybbx nebhaq naq abguvat fubhyq unir rire jbexrq" (svwny)
+clcl vf n orggre xvaq bs sbbyvfuarff - ynp
+ehaavat grfgf vf rffragvny sbe qrirybcvat clcl -- hu? qvq V oernx gur grfg? (svwny)
+V'ir tbg guvf sybbe jnk gung'f nyfb n TERNG qrffreg gbccvat!!
+rknexha: "gur cneg gung V gubhtug jnf tbvat gb or uneq jnf gevivny, fb abj V whfg unir guvf cneg gung V qvqa'g rira guvax bs gung vf uneq"
+V fhccbfr jr pna yvir jvgu gur bofphevgl, nf ybat nf gurer vf n pbzzrag znxvat vg yvtugre
+V nz n ovt oryvrire va ernfbaf. ohg gur nccnerag xvaq ner zl snibevgr.
+clcl: trg n WVG sbe serr (jryy gur svefg qnl lbh jba'g znantr naq vg jvyy or irel sehfgengvat)
+<Nyrk_Tnlabe> thgjbegu: bu, jr fubhyq znxr gur WVG zntvpnyyl orggre, jvgu qrpbengbef naq fghss
+vg'f n pbzcyrgr unpx, ohg n irel zvavzny bar (nevtngb)
+svefg gurl ynhtu ng lbh, gura gurl vtaber lbh, gura gurl svtug lbh, gura lbh jva
+ClCl vf snzvyl sevraqyl
+jr yvxr pbzcynvagf
+gbqnl jr'er snfgre guna lrfgreqnl (hfhnyyl)
+ClCl naq PClguba: gurl ner zbegny rarzvrf vagrag ba xvyyvat rnpu bgure
+nethnoyl, rirelguvat vf n avpur
+clcl unf ynlref yvxr bavbaf: crryvat gurz onpx jvyy znxr lbh pel
+EClguba zntvpnyyl znxrf lbh evpu naq snzbhf (fnlf fb ba gur gva)
+Vf evtbobg nebhaq jura gur havirefr prnfrf gb rkvfg?
+ClCl vf gbb pbby sbe dhrelfgevatf.
+< nevtngb> gura jung bpphef? < svwny> tbbq fghss V oryvrir
+ClCl 1.6 eryrnfrq!
+<psobym> jurer ner gur grfgf?
+uggc://gjvgcvp.pbz/52nr8s
+N enaqbz dhbgr
+Nyy rkprcgoybpxf frrz fnar.
+N cvax tyvggrel ebgngvat ynzoqn
+"vg'f yvxryl grzcbenel hagvy sberire" nevtb
 """
 
 def some_topic():
diff --git a/lib_pypy/_sha.py b/lib_pypy/_sha.py
--- a/lib_pypy/_sha.py
+++ b/lib_pypy/_sha.py
@@ -1,5 +1,5 @@
 #!/usr/bin/env python
-# -*- coding: iso-8859-1
+# -*- coding: iso-8859-1 -*-
 
 # Note that PyPy contains also a built-in module 'sha' which will hide
 # this one if compiled in.
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -231,6 +231,11 @@
 sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p]
 sqlite.sqlite3_result_text.restype = None
 
+HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension")
+if HAS_LOAD_EXTENSION:
+    sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int]
+    sqlite.sqlite3_enable_load_extension.restype = c_int
+
 ##########################################
 # END Wrapped SQLite C API and constants
 ##########################################
@@ -705,6 +710,15 @@
         from sqlite3.dump import _iterdump
         return _iterdump(self)
 
+    if HAS_LOAD_EXTENSION:
+        def enable_load_extension(self, enabled):
+            self._check_thread()
+            self._check_closed()
+
+            rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled))
+            if rc != SQLITE_OK:
+                raise OperationalError("Error enabling load extension")
+
 DML, DQL, DDL = range(3)
 
 class Cursor(object):
diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py
--- a/lib_pypy/distributed/socklayer.py
+++ b/lib_pypy/distributed/socklayer.py
@@ -2,7 +2,7 @@
 import py
 from socket import socket
 
-XXX needs import adaptation as 'green' is removed from py lib for years 
+raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years")
 from py.impl.green.msgstruct import decodemessage, message
 from socket import socket, AF_INET, SOCK_STREAM
 import marshal
diff --git a/lib_pypy/itertools.py b/lib_pypy/itertools.py
--- a/lib_pypy/itertools.py
+++ b/lib_pypy/itertools.py
@@ -25,7 +25,7 @@
 
 __all__ = ['chain', 'count', 'cycle', 'dropwhile', 'groupby', 'ifilter',
            'ifilterfalse', 'imap', 'islice', 'izip', 'repeat', 'starmap',
-           'takewhile', 'tee']
+           'takewhile', 'tee', 'compress', 'product']
 
 try: from __pypy__ import builtinify
 except ImportError: builtinify = lambda f: f
diff --git a/lib_pypy/pyrepl/commands.py b/lib_pypy/pyrepl/commands.py
--- a/lib_pypy/pyrepl/commands.py
+++ b/lib_pypy/pyrepl/commands.py
@@ -33,10 +33,9 @@
 class Command(object):
     finish = 0
     kills_digit_arg = 1
-    def __init__(self, reader, (event_name, event)):
+    def __init__(self, reader, cmd):
         self.reader = reader
-        self.event = event
-        self.event_name = event_name
+        self.event_name, self.event = cmd
     def do(self):
         pass
 
diff --git a/lib_pypy/pyrepl/pygame_console.py b/lib_pypy/pyrepl/pygame_console.py
--- a/lib_pypy/pyrepl/pygame_console.py
+++ b/lib_pypy/pyrepl/pygame_console.py
@@ -130,7 +130,7 @@
         s.fill(c, [0, 600 - bmargin, 800, bmargin])
         s.fill(c, [800 - rmargin, 0, lmargin, 600])
 
-    def refresh(self, screen, (cx, cy)):
+    def refresh(self, screen, cxy):
         self.screen = screen
         self.pygame_screen.fill(colors.bg,
                                 [0, tmargin + self.cur_top + self.scroll,
@@ -139,8 +139,8 @@
 
         line_top = self.cur_top
         width, height = self.fontsize
-        self.cxy = (cx, cy)
-        cp = self.char_pos(cx, cy)
+        self.cxy = cxy
+        cp = self.char_pos(*cxy)
         if cp[1] < tmargin:
             self.scroll = - (cy*self.fh + self.cur_top)
             self.repaint()
@@ -148,7 +148,7 @@
             self.scroll += (600 - bmargin) - (cp[1] + self.fh)
             self.repaint()
         if self.curs_vis:
-            self.pygame_screen.blit(self.cursor, self.char_pos(cx, cy))
+            self.pygame_screen.blit(self.cursor, self.char_pos(*cxy))
         for line in screen:
             if 0 <= line_top + self.scroll <= (600 - bmargin - tmargin - self.fh):
                 if line:
diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py
--- a/lib_pypy/pyrepl/readline.py
+++ b/lib_pypy/pyrepl/readline.py
@@ -231,7 +231,11 @@
         return ''.join(chars)
 
     def _histline(self, line):
-        return unicode(line.rstrip('\n'), ENCODING)
+        line = line.rstrip('\n')
+        try:
+            return unicode(line, ENCODING)
+        except UnicodeDecodeError:   # bah, silently fall back...
+            return unicode(line, 'utf-8')
 
     def get_history_length(self):
         return self.saved_history_length
@@ -268,7 +272,10 @@
         f = open(os.path.expanduser(filename), 'w')
         for entry in history:
             if isinstance(entry, unicode):
-                entry = entry.encode(ENCODING)
+                try:
+                    entry = entry.encode(ENCODING)
+                except UnicodeEncodeError:   # bah, silently fall back...
+                    entry = entry.encode('utf-8')
             entry = entry.replace('\n', '\r\n')   # multiline history support
             f.write(entry + '\n')
         f.close()
diff --git a/lib_pypy/pyrepl/unix_console.py b/lib_pypy/pyrepl/unix_console.py
--- a/lib_pypy/pyrepl/unix_console.py
+++ b/lib_pypy/pyrepl/unix_console.py
@@ -163,7 +163,7 @@
     def change_encoding(self, encoding):
         self.encoding = encoding
     
-    def refresh(self, screen, (cx, cy)):
+    def refresh(self, screen, cxy):
         # this function is still too long (over 90 lines)
 
         if not self.__gone_tall:
@@ -198,6 +198,7 @@
 
         # we make sure the cursor is on the screen, and that we're
         # using all of the screen if we can
+        cx, cy = cxy
         if cy < offset:
             offset = cy
         elif cy >= offset + height:
@@ -411,7 +412,12 @@
                    e.args[4] == 'unexpected end of data':
                 pass
             else:
-                raise
+                # was: "raise".  But it crashes pyrepl, and by extension the
+                # pypy currently running, in which we are e.g. in the middle
+                # of some debugging session.  Argh.  Instead just print an
+                # error message to stderr and continue running, for now.
+                self.partial_char = ''
+                sys.stderr.write('\n%s: %s\n' % (e.__class__.__name__, e))
         else:
             self.partial_char = ''
             self.event_queue.push(c)
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -38,9 +38,27 @@
 _setlogmask.argtypes = (c_int,)
 _setlogmask.restype = c_int
 
+_S_log_open = False
+_S_ident_o = None
+
+def _get_argv():
+    try:
+        import sys
+        script = sys.argv[0]
+        if isinstance(script, str):
+            return script[script.rfind('/')+1:] or None
+    except Exception:
+        pass
+    return None
+
 @builtinify
-def openlog(ident, option, facility):
-    _openlog(ident, option, facility)
+def openlog(ident=None, logoption=0, facility=LOG_USER):
+    global _S_ident_o, _S_log_open
+    if ident is None:
+        ident = _get_argv()
+    _S_ident_o = c_char_p(ident)    # keepalive
+    _openlog(_S_ident_o, logoption, facility)
+    _S_log_open = True
 
 @builtinify
 def syslog(arg1, arg2=None):
@@ -48,11 +66,18 @@
         priority, message = arg1, arg2
     else:
         priority, message = LOG_INFO, arg1
+    # if log is not opened, open it now
+    if not _S_log_open:
+        openlog()
     _syslog(priority, "%s", message)
 
 @builtinify
 def closelog():
-    _closelog()
+    global _S_log_open, S_ident_o
+    if _S_log_open:
+        _closelog()
+        _S_log_open = False
+        _S_ident_o = None
 
 @builtinify
 def setlogmask(mask):
diff --git a/py/_code/code.py b/py/_code/code.py
--- a/py/_code/code.py
+++ b/py/_code/code.py
@@ -164,6 +164,7 @@
         #   if something:  # assume this causes a NameError
         #      # _this_ lines and the one
                #        below we don't want from entry.getsource()
+        end = min(end, len(source))
         for i in range(self.lineno, end):
             if source[i].rstrip().endswith(':'):
                 end = i + 1
@@ -307,7 +308,7 @@
                     self._striptext = 'AssertionError: '
         self._excinfo = tup
         self.type, self.value, tb = self._excinfo
-        self.typename = self.type.__name__
+        self.typename = getattr(self.type, "__name__", "???")
         self.traceback = py.code.Traceback(tb)
 
     def __repr__(self):
diff --git a/pypy/annotation/binaryop.py b/pypy/annotation/binaryop.py
--- a/pypy/annotation/binaryop.py
+++ b/pypy/annotation/binaryop.py
@@ -252,7 +252,26 @@
     # unsignedness is considered a rare and contagious disease
 
     def union((int1, int2)):
-        knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype)
+        if int1.unsigned == int2.unsigned:
+            knowntype = rarithmetic.compute_restype(int1.knowntype, int2.knowntype)
+        else:
+            t1 = int1.knowntype
+            if t1 is bool:
+                t1 = int
+            t2 = int2.knowntype
+            if t2 is bool:
+                t2 = int
+
+            if t2 is int:
+                if int2.nonneg == False:
+                    raise UnionError, "Merging %s and a possibly negative int is not allowed" % t1
+                knowntype = t1
+            elif t1 is int:
+                if int1.nonneg == False:
+                    raise UnionError, "Merging %s and a possibly negative int is not allowed" % t2
+                knowntype = t2
+            else:
+                raise UnionError, "Merging these types (%s, %s) is not supported" % (t1, t2)
         return SomeInteger(nonneg=int1.nonneg and int2.nonneg,
                            knowntype=knowntype)
 
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py
--- a/pypy/annotation/description.py
+++ b/pypy/annotation/description.py
@@ -180,7 +180,12 @@
         if name is None:
             name = pyobj.func_name
         if signature is None:
-            signature = cpython_code_signature(pyobj.func_code)
+            if hasattr(pyobj, '_generator_next_method_of_'):
+                from pypy.interpreter.argument import Signature
+                signature = Signature(['entry'])     # haaaaaack
+                defaults = ()
+            else:
+                signature = cpython_code_signature(pyobj.func_code)
         if defaults is None:
             defaults = pyobj.func_defaults
         self.name = name
diff --git a/pypy/annotation/model.py b/pypy/annotation/model.py
--- a/pypy/annotation/model.py
+++ b/pypy/annotation/model.py
@@ -591,13 +591,11 @@
     immutable = True
     def __init__(self, method):
         self.method = method
-        
-NUMBER = object()
+
 annotation_to_ll_map = [
     (SomeSingleFloat(), lltype.SingleFloat),
     (s_None, lltype.Void),   # also matches SomeImpossibleValue()
     (s_Bool, lltype.Bool),
-    (SomeInteger(knowntype=r_ulonglong), NUMBER),    
     (SomeFloat(), lltype.Float),
     (SomeLongFloat(), lltype.LongFloat),
     (SomeChar(), lltype.Char),
@@ -623,10 +621,11 @@
             return lltype.Ptr(p.PARENTTYPE)
     if isinstance(s_val, SomePtr):
         return s_val.ll_ptrtype
+    if type(s_val) is SomeInteger:
+        return lltype.build_number(None, s_val.knowntype)
+
     for witness, T in annotation_to_ll_map:
         if witness.contains(s_val):
-            if T is NUMBER:
-                return lltype.build_number(None, s_val.knowntype)
             return T
     if info is None:
         info = ''
@@ -635,7 +634,7 @@
     raise ValueError("%sshould return a low-level type,\ngot instead %r" % (
         info, s_val))
 
-ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map if ll is not NUMBER])
+ll_to_annotation_map = dict([(ll, ann) for ann, ll in annotation_to_ll_map])
 
 def lltype_to_annotation(T):
     try:
diff --git a/pypy/annotation/specialize.py b/pypy/annotation/specialize.py
--- a/pypy/annotation/specialize.py
+++ b/pypy/annotation/specialize.py
@@ -36,9 +36,7 @@
             newtup = SpaceOperation('newtuple', starargs, argscopy[-1])
             newstartblock.operations.append(newtup)
             newstartblock.closeblock(Link(argscopy, graph.startblock))
-            graph.startblock.isstartblock = False
             graph.startblock = newstartblock
-            newstartblock.isstartblock = True
             argnames = argnames + ['.star%d' % i for i in range(nb_extra_args)]
             graph.signature = Signature(argnames)
             # note that we can mostly ignore defaults: if nb_extra_args > 0, 
diff --git a/pypy/annotation/test/test_annrpython.py b/pypy/annotation/test/test_annrpython.py
--- a/pypy/annotation/test/test_annrpython.py
+++ b/pypy/annotation/test/test_annrpython.py
@@ -856,6 +856,46 @@
         py.test.raises(Exception, a.build_types, f, [])
         # if you want to get a r_uint, you have to be explicit about it
 
+    def test_add_different_ints(self):
+        def f(a, b):
+            return a + b
+        a = self.RPythonAnnotator()
+        py.test.raises(Exception, a.build_types, f, [r_uint, int])
+
+    def test_merge_different_ints(self):
+        def f(a, b):
+            if a:
+                c = a
+            else:
+                c = b
+            return c
+        a = self.RPythonAnnotator()
+        py.test.raises(Exception, a.build_types, f, [r_uint, int])
+
+    def test_merge_ruint_zero(self):
+        def f(a):
+            if a:
+                c = a
+            else:
+                c = 0
+            return c
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [r_uint])
+        assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
+
+    def test_merge_ruint_nonneg_signed(self):
+        def f(a, b):
+            if a:
+                c = a
+            else:
+                assert b >= 0
+                c = b
+            return c
+        a = self.RPythonAnnotator()
+        s = a.build_types(f, [r_uint, int])
+        assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
+
+
     def test_prebuilt_long_that_is_not_too_long(self):
         small_constant = 12L
         def f():
@@ -3029,7 +3069,7 @@
             if g(x, y):
                 g(x, r_uint(y))
         a = self.RPythonAnnotator()
-        a.build_types(f, [int, int])
+        py.test.raises(Exception, a.build_types, f, [int, int])
 
     def test_compare_with_zero(self):
         def g():
diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py
--- a/pypy/bin/checkmodule.py
+++ b/pypy/bin/checkmodule.py
@@ -1,43 +1,45 @@
 #! /usr/bin/env python
 """
-Usage:  checkmodule.py [-b backend] <module-name>
+Usage:  checkmodule.py <module-name>
 
-Compiles the PyPy extension module from pypy/module/<module-name>/
-into a fake program which does nothing. Useful for testing whether a
-modules compiles without doing a full translation. Default backend is cli.
-
-WARNING: this is still incomplete: there are chances that the
-compilation fails with strange errors not due to the module. If a
-module is known to compile during a translation but don't pass
-checkmodule.py, please report the bug (or, better, correct it :-).
+Check annotation and rtyping of the PyPy extension module from
+pypy/module/<module-name>/.  Useful for testing whether a
+module compiles without doing a full translation.
 """
 import autopath
-import sys
+import sys, os
 
 from pypy.objspace.fake.checkmodule import checkmodule
 
 def main(argv):
-    try:
-        assert len(argv) in (2, 4)
-        if len(argv) == 2:
-            backend = 'cli'
-            modname = argv[1]
-            if modname in ('-h', '--help'):
-                print >> sys.stderr, __doc__
-                sys.exit(0)
-            if modname.startswith('-'):
-                print >> sys.stderr, "Bad command line"
-                print >> sys.stderr, __doc__
-                sys.exit(1)
-        else:
-            _, b, backend, modname = argv
-            assert b == '-b'
-    except AssertionError:
+    if len(argv) != 2:
         print >> sys.stderr, __doc__
         sys.exit(2)
+    modname = argv[1]
+    if modname in ('-h', '--help'):
+        print >> sys.stderr, __doc__
+        sys.exit(0)
+    if modname.startswith('-'):
+        print >> sys.stderr, "Bad command line"
+        print >> sys.stderr, __doc__
+        sys.exit(1)
+    if os.path.sep in modname:
+        if os.path.basename(modname) == '':
+            modname = os.path.dirname(modname)
+        if os.path.basename(os.path.dirname(modname)) != 'module':
+            print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'."
+            sys.exit(1)
+        modname = os.path.basename(modname)
+    try:
+        checkmodule(modname)
+    except Exception, e:
+        import traceback, pdb
+        traceback.print_exc()
+        pdb.post_mortem(sys.exc_info()[2])
+        return 1
     else:
-        checkmodule(modname, backend, interactive=True)
-        print 'Module compiled succesfully'
+        print 'Passed.'
+        return 0
 
 if __name__ == '__main__':
-    main(sys.argv)
+    sys.exit(main(sys.argv))
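
For reference, a minimal sketch of what the reworked checkmodule.py now does for a
single module, assuming a PyPy source checkout is importable; the module name
'_collections' is only an example of a directory under pypy/module/::

    import sys
    from pypy.objspace.fake.checkmodule import checkmodule

    # Annotate and rtype one extension module, as checkmodule.py does.
    try:
        checkmodule('_collections')
    except Exception:
        import traceback
        traceback.print_exc()
        sys.exit(1)
    print 'Passed.'
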
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -92,7 +92,7 @@
 
 module_import_dependencies = {
     # no _rawffi if importing pypy.rlib.clibffi raises ImportError
-    # or CompilationError
+    # or CompilationError or py.test.skip.Exception
     "_rawffi"   : ["pypy.rlib.clibffi"],
     "_ffi"      : ["pypy.rlib.clibffi"],
 
@@ -113,7 +113,7 @@
             try:
                 for name in modlist:
                     __import__(name)
-            except (ImportError, CompilationError), e:
+            except (ImportError, CompilationError, py.test.skip.Exception), e:
                 errcls = e.__class__.__name__
                 config.add_warning(
                     "The module %r is disabled\n" % (modname,) +
@@ -252,6 +252,10 @@
                    "use small tuples",
                    default=False),
 
+        BoolOption("withspecialisedtuple",
+                   "use specialised tuples",
+                   default=False),
+
         BoolOption("withrope", "use ropes as the string implementation",
                    default=False,
                    requires=[("objspace.std.withstrslice", False),
@@ -281,6 +285,9 @@
                    "actually create the full list until the resulting "
                    "list is mutated",
                    default=False),
+        BoolOption("withliststrategies",
+                   "enable optimized ways to store lists of primitives ",
+                   default=True),
 
         BoolOption("withtypeversion",
                    "version type objects when changing them",
@@ -362,6 +369,7 @@
         config.objspace.std.suggest(optimized_list_getitem=True)
         config.objspace.std.suggest(getattributeshortcut=True)
         config.objspace.std.suggest(newshortcut=True)
+        config.objspace.std.suggest(withspecialisedtuple=True)
         #if not IS_64_BITS:
         #    config.objspace.std.suggest(withsmalllong=True)
 
diff --git a/pypy/config/test/test_translationoption.py b/pypy/config/test/test_translationoption.py
new file mode 100644
--- /dev/null
+++ b/pypy/config/test/test_translationoption.py
@@ -0,0 +1,10 @@
+import py
+from pypy.config.translationoption import get_combined_translation_config
+from pypy.config.translationoption import set_opt_level
+from pypy.config.config import ConflictConfigError
+
+
+def test_no_gcrootfinder_with_boehm():
+    config = get_combined_translation_config()
+    config.translation.gcrootfinder = "shadowstack"
+    py.test.raises(ConflictConfigError, set_opt_level, config, '0')
diff --git a/pypy/config/translationoption.py b/pypy/config/translationoption.py
--- a/pypy/config/translationoption.py
+++ b/pypy/config/translationoption.py
@@ -70,8 +70,8 @@
                      "statistics": [("translation.gctransformer", "framework")],
                      "generation": [("translation.gctransformer", "framework")],
                      "hybrid": [("translation.gctransformer", "framework")],
-                     "boehm": [("translation.gctransformer", "boehm"),
-                               ("translation.continuation", False)],  # breaks
+                     "boehm": [("translation.continuation", False),  # breaks
+                               ("translation.gctransformer", "boehm")],
                      "markcompact": [("translation.gctransformer", "framework")],
                      "minimark": [("translation.gctransformer", "framework")],
                      "concurrentms": [("translation.gctransformer", "framework")],
@@ -403,6 +403,10 @@
     # make_sure_not_resized often relies on it, so we always enable them
     config.translation.suggest(list_comprehension_operations=True)
 
+    # finally, make the choice of the gc definitive.  This will fail
+    # if we have specified strange inconsistent settings.
+    config.translation.gc = config.translation.gc
+
 # ----------------------------------------------------------------
 
 def set_platform(config):
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -496,6 +496,17 @@
     def setup(self):
         super(AppClassCollector, self).setup()
         cls = self.obj
+        #
+        # <hack>
+        for name in dir(cls):
+            if name.startswith('test_'):
+                func = getattr(cls, name, None)
+                code = getattr(func, 'func_code', None)
+                if code and code.co_flags & 32:
+                    raise AssertionError("unsupported: %r is a generator "
+                                         "app-level test method" % (name,))
+        # </hack>
+        #
         space = cls.space
         clsname = cls.__name__
         if self.config.option.runappdirect:
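
The ``co_flags & 32`` test in the hack above relies on the CO_GENERATOR flag; here is
a small sketch of the same check outside py.test, using only plain Python 2 semantics::

    # CO_GENERATOR == 0x20: set on the code object of any generator function.
    def gen_test():
        yield 1

    def plain_test():
        return 1

    assert gen_test.func_code.co_flags & 32          # generator: flag set
    assert not (plain_test.func_code.co_flags & 32)  # normal function: flag clear
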
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -270,7 +270,12 @@
   - *slicing*:
     the slice start must be within bounds. The stop doesn't need to, but it must
     not be smaller than the start.  All negative indexes are disallowed, except for
-    the [:-1] special case.  No step.
+    the [:-1] special case.  No step.  Slice deletion follows the same rules.
+    
+  - *slice assignment*:
+    only supports ``lst[x:y] = sublist`` if ``len(sublist) == y - x``.
+    In other words, slice assignment cannot change the total length of the
+    list; it can only replace items in place.
 
   - *other operators*:
     ``+``, ``+=``, ``in``, ``*``, ``*=``, ``==``, ``!=`` work as expected.
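
A tiny RPython-level illustration of the slice-assignment rule documented above
(the function and values are made up for the example)::

    def f(lst):
        lst[1:3] = [20, 30]     # ok: two items replaced by two items
        # lst[1:3] = [20]       # NOT RPython: would change the list's length
        return lst
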
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -45,9 +45,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '1.6'
+version = '1.7'
 # The full version, including alpha/beta/rc tags.
-release = '1.6'
+release = '1.7'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pypy/doc/config/objspace.std.withliststrategies.txt b/pypy/doc/config/objspace.std.withliststrategies.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.std.withliststrategies.txt
@@ -0,0 +1,2 @@
+Enable list strategies: Use specialized representations for lists of primitive
+objects, such as ints.
diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt
@@ -0,0 +1,3 @@
+Use "specialized tuples", a custom implementation for some common kinds
+of tuples.  Currently limited to tuples of length 2, in three variants:
+(int, int), (float, float), and a generic (object, object).
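
For experimenting with the two new options added above, they can be toggled on a
config object; a hedged sketch using the usual helper from pypy.config.pypyoption
(get_pypy_config and the exact option paths are assumed from this codebase)::

    from pypy.config.pypyoption import get_pypy_config

    config = get_pypy_config(translating=True)
    config.objspace.std.withspecialisedtuple = True
    config.objspace.std.withliststrategies = True
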
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -262,6 +262,26 @@
 documented as such (as e.g. for hasattr()), in most cases PyPy
 lets the exception propagate instead.
 
+Object Identity of Primitive Values, ``is`` and ``id``
+-------------------------------------------------------
+
+Object identity of primitive values works by value equality, not by identity of
+the wrapper. This means that ``x + 1 is x + 1`` is always true, for arbitrary
+integers ``x``. The rule applies for the following types:
+
+ - ``int``
+
+ - ``float``
+
+ - ``long``
+
+ - ``complex``
+
+This change requires some changes to ``id`` as well. ``id`` fulfills the
+following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the
+above types will return a value that is computed from the argument, and can
+thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long).
+
 
 Miscellaneous
 -------------
@@ -284,14 +304,14 @@
   never a dictionary as it sometimes is in CPython. Assigning to
   ``__builtins__`` has no effect.
 
-* Do not compare immutable objects with ``is``.  For example on CPython
-  it is true that ``x is 0`` works, i.e. does the same as ``type(x) is
-  int and x == 0``, but it is so by accident.  If you do instead
-  ``x is 1000``, then it stops working, because 1000 is too large and
-  doesn't come from the internal cache.  In PyPy it fails to work in
-  both cases, because we have no need for a cache at all.
+* Directly calling the internal magic methods of a few built-in types
+  with invalid arguments may have a slightly different result.  For
+  example, ``[].__add__(None)`` and ``(2).__add__(None)`` both return
+  ``NotImplemented`` on PyPy; on CPython, only the latter does, and the
+  former raises ``TypeError``.  (Of course, ``[]+None`` and ``2+None``
+  both raise ``TypeError`` everywhere.)  This difference is an
+  implementation detail that shows up because of internal C-level slots
+  that PyPy does not have.
 
-* Also, object identity of immutable keys in dictionaries is not necessarily
-  preserved.
 
 .. include:: _ref.txt
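
To make the two behaviour differences documented above concrete, a short
illustrative sketch of what the text says you should see on PyPy (not a test)::

    # Identity of primitive values is by value on PyPy:
    x = 123456789
    assert (x + 1) is (x + 1)          # always True on PyPy for ints
    assert id(2.0) == id(1.0 + 1.0)    # id() is computed from the value

    # Magic methods called directly with invalid arguments:
    print [].__add__(None)             # NotImplemented on PyPy
                                       # (CPython raises TypeError here)
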
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -112,10 +112,32 @@
 You might be interested in our `benchmarking site`_ and our 
 `jit documentation`_.
 
+Note that the JIT has a very high warm-up cost, meaning that the
+programs are slow at the beginning.  If you want to compare the timings
+with CPython, even relatively simple programs need to run *at least* one
+second, preferably at least a few seconds.  Large, complicated programs
+need even more time to warm up the JIT.
+
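
As a concrete illustration of the warm-up advice above, a minimal timing sketch
(the workload is made up; the point is only that each measured run lasts long
enough for the JIT to warm up)::

    import time

    def work():
        # a toy workload that runs for a noticeable amount of time
        total = 0
        for i in xrange(10 ** 7):
            total += i * i
        return total

    for attempt in range(3):
        t0 = time.time()
        work()
        print 'run %d: %.2fs' % (attempt, time.time() - t0)
    # the first run includes JIT warm-up; later runs show steady-state speed
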
 .. _`benchmarking site`: http://speed.pypy.org
 
 .. _`jit documentation`: jit/index.html
 
+---------------------------------------------------------------
+Couldn't the JIT dump and reload already-compiled machine code?
+---------------------------------------------------------------
+
+No, we found no way of doing that.  The JIT generates machine code
+containing a large number of constant addresses --- constant at the time
+the machine code is written.  The vast majority of these addresses are not
+constants that you would find in the executable under a nice link name.  E.g.
+the addresses of Python classes are used all the time, but Python
+classes don't come statically from the executable; they are created anew
+every time you restart your program.  This makes saving and reloading
+machine code completely impossible without some very advanced way of
+mapping addresses in the old (now-dead) process to addresses in the new
+process, including checking that all the previous assumptions about the
+(now-dead) object are still true about the new object.
+
 
 .. _`prolog and javascript`:
 
diff --git a/pypy/doc/how-to-release.rst b/pypy/doc/how-to-release.rst
--- a/pypy/doc/how-to-release.rst
+++ b/pypy/doc/how-to-release.rst
@@ -1,6 +1,3 @@
-.. include:: needswork.txt
-
-.. needs work, it talks about svn. also, it is not really user documentation
 
 Making a PyPy Release
 =======================
@@ -12,11 +9,8 @@
 forgetting things. A set of todo files may also work.
 
 Check and prioritize all issues for the release, postpone some if necessary,
-create new  issues also as necessary. A meeting (or meetings) should be
-organized to decide what things are priorities, should go in and work for
-the release. 
-
-An important thing is to get the documentation into an up-to-date state!
+create new issues as necessary. An important thing is to get
+the documentation into an up-to-date state!
 
 Release Steps
 ----------------
diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
--- a/pypy/doc/index.rst
+++ b/pypy/doc/index.rst
@@ -15,7 +15,7 @@
 
 * `FAQ`_: some frequently asked questions.
 
-* `Release 1.6`_: the latest official release
+* `Release 1.7`_: the latest official release
 
 * `PyPy Blog`_: news and status info about PyPy 
 
@@ -75,7 +75,7 @@
 .. _`Getting Started`: getting-started.html
 .. _`Papers`: extradoc.html
 .. _`Videos`: video-index.html
-.. _`Release 1.6`: http://pypy.org/download.html
+.. _`Release 1.7`: http://pypy.org/download.html
 .. _`speed.pypy.org`: http://speed.pypy.org
 .. _`RPython toolchain`: translation.html
 .. _`potential project ideas`: project-ideas.html
@@ -120,9 +120,9 @@
 Windows, on top of .NET, and on top of Java.
 To dig into PyPy it is recommended to try out the current
 Mercurial default branch, which is always working or mostly working,
-instead of the latest release, which is `1.6`__.
+instead of the latest release, which is `1.7`__.
 
-.. __: release-1.6.0.html
+.. __: release-1.7.0.html
 
 PyPy is mainly developed on Linux and Mac OS X.  Windows is supported,
 but platform-specific bugs tend to take longer before we notice and fix
diff --git a/pypy/doc/project-ideas.rst b/pypy/doc/project-ideas.rst
--- a/pypy/doc/project-ideas.rst
+++ b/pypy/doc/project-ideas.rst
@@ -17,17 +17,26 @@
 projects, or anything else in PyPy, pop up on IRC or write to us on the
 `mailing list`_.
 
+Make big integers faster
+-------------------------
+
+PyPy's implementation of the Python ``long`` type is slower than CPython's.
+Find out why and optimize it.
+
+Make bytearray type fast
+------------------------
+
+PyPy's bytearray type is very inefficient. It would be an interesting
+task to look into possible optimizations.
+
 Numpy improvements
 ------------------
 
-This is more of a project-container than a single project. Possible ideas:
+NumPy support is progressing rapidly in PyPy, so feel free to come to IRC and
+ask for proposed topics. A not necessarily up-to-date `list of topics`_
+is also available.
 
-* experiment with auto-vectorization using SSE or implement vectorization
-  without automatically detecting it for array operations.
-
-* improve numpy, for example implement memory views.
-
-* interface with fortran/C libraries.
+.. _`list of topics`: https://bitbucket.org/pypy/extradoc/src/extradoc/planning/micronumpy.txt
 
 Improving the jitviewer
 ------------------------
diff --git a/pypy/doc/release-1.7.0.rst b/pypy/doc/release-1.7.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-1.7.0.rst
@@ -0,0 +1,94 @@
+==================================
+PyPy 1.7 - widening the sweet spot
+==================================
+
+We're pleased to announce the 1.7 release of PyPy. As has become a habit, this
+release brings a lot of bugfixes and performance improvements over the 1.6
+release. However, unlike the previous releases, the focus has been on widening
+the "sweet spot" of PyPy.  That is, the range of Python code that PyPy can
+greatly speed up should be vastly extended with this release. You can download
+the 1.7 release here:
+
+    http://pypy.org/download.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy 1.7 and cpython 2.7.1`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 32/64 or
+Windows 32. Windows 64 work is ongoing, but not yet natively supported.
+
+The main topic of this release is widening the range of code which PyPy
+can greatly speed up. On average on
+our benchmark suite, PyPy 1.7 is around **30%** faster than PyPy 1.6 and up
+to **20 times** faster on some benchmarks.
+
+.. _`pypy 1.7 and cpython 2.7.1`: http://speed.pypy.org
+
+
+Highlights
+==========
+
+* Numerous performance improvements. There are too many Python constructs
+  that now behave faster to list them all here.
+
+* Bugfixes and compatibility fixes with CPython.
+
+* Windows fixes.
+
+* PyPy now comes with stackless features enabled by default. However,
+  any loop using stackless features will interrupt the JIT for now, so no real
+  performance improvement for stackless-based programs. Contact pypy-dev for
+  info on how to help remove this restriction.
+
+* NumPy effort in PyPy was renamed numpypy. In order to try using it, simply
+  write::
+
+    import numpypy as numpy
+
+  at the beginning of your program. There has been huge progress on numpy in
+  PyPy since 1.6, the main new feature being the implementation of dtypes.
+
+* The JSON encoder (but not the decoder) has been replaced with a new one.
+  This one is written in pure Python, but is known to outperform CPython's C
+  extension by up to **2 times** in some cases. It's about **20 times** faster
+  than the one that we had in 1.6.
+
+* The memory footprint of some of our RPython modules has been drastically
+  improved. This should benefit any application that uses, for example,
+  cryptography, such as tornado.
+
+* There was some progress in exposing even more of the CPython C API via cpyext.
+
+Things that didn't make it, expect them in 1.8 soon
+=====================================================
+
+There is ongoing work which, while it didn't make it into this release, is
+probably worth mentioning here. This is what you should probably expect in
+1.8 some time soon:
+
+* Specialized list implementation. There is a branch that implements lists of
+  integers/floats/strings as compactly as array.array. This should drastically
+  improve the performance and memory usage of some applications.
+
+* NumPy effort is progressing forward, with multi-dimensional arrays coming
+  soon.
+
+* There are two brand new JIT assembler backends, namely for the PowerPC and
+  ARM processors.
+
+Fundraising
+===========
+
+It is perhaps worth mentioning that we're running fundraising campaigns for
+the NumPy effort in PyPy and for Python 3 in PyPy. In case you want to see any
+of those happen faster, we urge you to donate to the `numpy proposal`_ or
+the `py3k proposal`_. In case you want PyPy to progress, but you trust us with
+the general direction, you can always donate to the `general pot`_.
+
+.. _`numpy proposal`: http://pypy.org/numpydonate.html
+.. _`py3k proposal`: http://pypy.org/py3donate.html
+.. _`general pot`: http://pypy.org
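
Regarding the numpypy note in the release announcement above, a hedged usage sketch;
the exact set of dtypes and functions supported in 1.7 is not spelled out here, so
treat this as illustrative only::

    # The renamed module is imported under its usual name, as the release
    # notes suggest; array creation and elementwise arithmetic are the kind
    # of thing the 1.7 numpy work targets.
    import numpypy as numpy

    a = numpy.array([1.0, 2.0, 3.0])
    print a + a
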
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -51,6 +51,24 @@
             space.setattr(self, w_name,
                           space.getitem(w_state, w_name))
 
+    def missing_field(self, space, required, host):
+        "Find which required field is missing."
+        state = self.initialization_state
+        for i in range(len(required)):
+            if (state >> i) & 1:
+                continue  # field is present
+            missing = required[i]
+            if missing is None:
+                continue  # field is optional
+            w_obj = self.getdictvalue(space, missing)
+            if w_obj is None:
+                err = "required field \"%s\" missing from %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+            else:
+                err = "incorrect type for field \"%s\" in %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+        raise AssertionError("should not reach here")
+
 
 class NodeVisitorNotImplemented(Exception):
     pass
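
A worked sketch of the bitmask convention that the new missing_field() method above
relies on: bit i of initialization_state records whether field required[i] was
supplied, and a None entry marks an optional slot (names and values below are made
up for the example)::

    # e.g. for a node with fields ['lineno', 'col_offset', 'value']:
    required = ['lineno', 'col_offset', 'value']
    state = 0b011          # lineno and col_offset set, 'value' missing

    for i in range(len(required)):
        if (state >> i) & 1:
            continue                   # bit set: field is present
        if required[i] is None:
            continue                   # optional field: no error
        print 'required field %r missing' % (required[i],)   # -> 'value'
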
@@ -94,17 +112,6 @@
 )
 
 
-def missing_field(space, state, required, host):
-    "Find which required field is missing."
-    for i in range(len(required)):
-        if not (state >> i) & 1:
-            missing = required[i]
-            if missing is not None:
-                 err = "required field \"%s\" missing from %s"
-                 err = err % (missing, host)
-                 w_err = space.wrap(err)
-                 raise OperationError(space.w_TypeError, w_err)
-    raise AssertionError("should not reach here")
 
 
 class mod(AST):
@@ -112,7 +119,6 @@
 
 class Module(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.w_body = None
@@ -128,7 +134,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Module')
+            self.missing_field(space, ['body'], 'Module')
         else:
             pass
         w_list = self.w_body
@@ -145,7 +151,6 @@
 
 class Interactive(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.w_body = None
@@ -161,7 +166,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Interactive')
+            self.missing_field(space, ['body'], 'Interactive')
         else:
             pass
         w_list = self.w_body
@@ -178,7 +183,6 @@
 
 class Expression(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.initialization_state = 1
@@ -192,7 +196,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Expression')
+            self.missing_field(space, ['body'], 'Expression')
         else:
             pass
         self.body.sync_app_attrs(space)
@@ -200,7 +204,6 @@
 
 class Suite(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.w_body = None
@@ -216,7 +219,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Suite')
+            self.missing_field(space, ['body'], 'Suite')
         else:
             pass
         w_list = self.w_body
@@ -232,15 +235,13 @@
 
 
 class stmt(AST):
+
     def __init__(self, lineno, col_offset):
         self.lineno = lineno
         self.col_offset = col_offset
 
 class FunctionDef(stmt):
 
-    _lineno_mask = 16
-    _col_offset_mask = 32
-
     def __init__(self, name, args, body, decorator_list, lineno, col_offset):
         self.name = name
         self.args = args
@@ -264,7 +265,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 63:
-            missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef')
+            self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef')
         else:
             pass
         self.args.sync_app_attrs(space)
@@ -292,9 +293,6 @@
 
 class ClassDef(stmt):
 
-    _lineno_mask = 16
-    _col_offset_mask = 32
-
     def __init__(self, name, bases, body, decorator_list, lineno, col_offset):
         self.name = name
         self.bases = bases
@@ -320,7 +318,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 63:
-            missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef')
+            self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef')
         else:
             pass
         w_list = self.w_bases
@@ -357,9 +355,6 @@
 
 class Return(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         stmt.__init__(self, lineno, col_offset)
@@ -374,10 +369,10 @@
         return visitor.visit_Return(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~1) ^ 6:
-            missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return')
+        if (self.initialization_state & ~4) ^ 3:
+            self.missing_field(space, ['lineno', 'col_offset', None], 'Return')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.value = None
         if self.value:
             self.value.sync_app_attrs(space)
@@ -385,9 +380,6 @@
 
 class Delete(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, targets, lineno, col_offset):
         self.targets = targets
         self.w_targets = None
@@ -404,7 +396,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete')
+            self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete')
         else:
             pass
         w_list = self.w_targets
@@ -421,9 +413,6 @@
 
 class Assign(stmt):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, targets, value, lineno, col_offset):
         self.targets = targets
         self.w_targets = None
@@ -442,7 +431,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign')
+            self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign')
         else:
             pass
         w_list = self.w_targets
@@ -460,9 +449,6 @@
 
 class AugAssign(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, target, op, value, lineno, col_offset):
         self.target = target
         self.op = op
@@ -480,7 +466,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign')
+            self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign')
         else:
             pass
         self.target.sync_app_attrs(space)
@@ -489,9 +475,6 @@
 
 class Print(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, dest, values, nl, lineno, col_offset):
         self.dest = dest
         self.values = values
@@ -511,10 +494,10 @@
         return visitor.visit_Print(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~1) ^ 30:
-            missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print')
+        if (self.initialization_state & ~4) ^ 27:
+            self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.dest = None
         if self.dest:
             self.dest.sync_app_attrs(space)
@@ -532,9 +515,6 @@
 
 class For(stmt):
 
-    _lineno_mask = 16
-    _col_offset_mask = 32
-
     def __init__(self, target, iter, body, orelse, lineno, col_offset):
         self.target = target
         self.iter = iter
@@ -559,7 +539,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 63:
-            missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For')
+            self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For')
         else:
             pass
         self.target.sync_app_attrs(space)
@@ -588,9 +568,6 @@
 
 class While(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, test, body, orelse, lineno, col_offset):
         self.test = test
         self.body = body
@@ -613,7 +590,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While')
+            self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While')
         else:
             pass
         self.test.sync_app_attrs(space)
@@ -641,9 +618,6 @@
 
 class If(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, test, body, orelse, lineno, col_offset):
         self.test = test
         self.body = body
@@ -666,7 +640,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If')
+            self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If')
         else:
             pass
         self.test.sync_app_attrs(space)
@@ -694,9 +668,6 @@
 
 class With(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, context_expr, optional_vars, body, lineno, col_offset):
         self.context_expr = context_expr
         self.optional_vars = optional_vars
@@ -717,10 +688,10 @@
         return visitor.visit_With(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~2) ^ 29:
-            missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With')
+        if (self.initialization_state & ~8) ^ 23:
+            self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With')
         else:
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.optional_vars = None
         self.context_expr.sync_app_attrs(space)
         if self.optional_vars:
@@ -739,9 +710,6 @@
 
 class Raise(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, type, inst, tback, lineno, col_offset):
         self.type = type
         self.inst = inst
@@ -762,14 +730,14 @@
         return visitor.visit_Raise(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~7) ^ 24:
-            missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise')
+        if (self.initialization_state & ~28) ^ 3:
+            self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.type = None
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.inst = None
-            if not self.initialization_state & 4:
+            if not self.initialization_state & 16:
                 self.tback = None
         if self.type:
             self.type.sync_app_attrs(space)
@@ -781,9 +749,6 @@
 
 class TryExcept(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, body, handlers, orelse, lineno, col_offset):
         self.body = body
         self.w_body = None
@@ -808,7 +773,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept')
+            self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept')
         else:
             pass
         w_list = self.w_body
@@ -845,9 +810,6 @@
 
 class TryFinally(stmt):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, body, finalbody, lineno, col_offset):
         self.body = body
         self.w_body = None
@@ -868,7 +830,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally')
+            self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally')
         else:
             pass
         w_list = self.w_body
@@ -895,9 +857,6 @@
 
 class Assert(stmt):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, test, msg, lineno, col_offset):
         self.test = test
         self.msg = msg
@@ -914,10 +873,10 @@
         return visitor.visit_Assert(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~2) ^ 13:
-            missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert')
+        if (self.initialization_state & ~8) ^ 7:
+            self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert')
         else:
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.msg = None
         self.test.sync_app_attrs(space)
         if self.msg:
@@ -926,9 +885,6 @@
 
 class Import(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, names, lineno, col_offset):
         self.names = names
         self.w_names = None
@@ -945,7 +901,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import')
+            self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import')
         else:
             pass
         w_list = self.w_names
@@ -962,9 +918,6 @@
 
 class ImportFrom(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, module, names, level, lineno, col_offset):
         self.module = module
         self.names = names
@@ -982,12 +935,12 @@
         return visitor.visit_ImportFrom(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~5) ^ 26:
-            missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom')
+        if (self.initialization_state & ~20) ^ 11:
+            self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.module = None
-            if not self.initialization_state & 4:
+            if not self.initialization_state & 16:
                 self.level = 0
         w_list = self.w_names
         if w_list is not None:
@@ -1003,9 +956,6 @@
 
 class Exec(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, body, globals, locals, lineno, col_offset):
         self.body = body
         self.globals = globals
@@ -1025,12 +975,12 @@
         return visitor.visit_Exec(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~6) ^ 25:
-            missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec')
+        if (self.initialization_state & ~24) ^ 7:
+            self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec')
         else:
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.globals = None
-            if not self.initialization_state & 4:
+            if not self.initialization_state & 16:
                 self.locals = None
         self.body.sync_app_attrs(space)
         if self.globals:
@@ -1041,9 +991,6 @@
 
 class Global(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, names, lineno, col_offset):
         self.names = names
         self.w_names = None
@@ -1058,7 +1005,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global')
+            self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global')
         else:
             pass
         w_list = self.w_names
@@ -1072,9 +1019,6 @@
 
 class Expr(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         stmt.__init__(self, lineno, col_offset)
@@ -1089,7 +1033,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr')
+            self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1097,9 +1041,6 @@
 
 class Pass(stmt):
 
-    _lineno_mask = 1
-    _col_offset_mask = 2
-
     def __init__(self, lineno, col_offset):
         stmt.__init__(self, lineno, col_offset)
         self.initialization_state = 3
@@ -1112,16 +1053,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass')
+            self.missing_field(space, ['lineno', 'col_offset'], 'Pass')
         else:
             pass
 
 
 class Break(stmt):
 
-    _lineno_mask = 1
-    _col_offset_mask = 2
-
     def __init__(self, lineno, col_offset):
         stmt.__init__(self, lineno, col_offset)
         self.initialization_state = 3
@@ -1134,16 +1072,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break')
+            self.missing_field(space, ['lineno', 'col_offset'], 'Break')
         else:
             pass
 
 
 class Continue(stmt):
 
-    _lineno_mask = 1
-    _col_offset_mask = 2
-
     def __init__(self, lineno, col_offset):
         stmt.__init__(self, lineno, col_offset)
         self.initialization_state = 3
@@ -1156,21 +1091,19 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue')
+            self.missing_field(space, ['lineno', 'col_offset'], 'Continue')
         else:
             pass
 
 
 class expr(AST):
+
     def __init__(self, lineno, col_offset):
         self.lineno = lineno
         self.col_offset = col_offset
 
 class BoolOp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, op, values, lineno, col_offset):
         self.op = op
         self.values = values
@@ -1188,7 +1121,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp')
+            self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp')
         else:
             pass
         w_list = self.w_values
@@ -1205,9 +1138,6 @@
 
 class BinOp(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, left, op, right, lineno, col_offset):
         self.left = left
         self.op = op
@@ -1225,7 +1155,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp')
+            self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp')
         else:
             pass
         self.left.sync_app_attrs(space)
@@ -1234,9 +1164,6 @@
 
 class UnaryOp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, op, operand, lineno, col_offset):
         self.op = op
         self.operand = operand
@@ -1252,7 +1179,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp')
+            self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp')
         else:
             pass
         self.operand.sync_app_attrs(space)
@@ -1260,9 +1187,6 @@
 
 class Lambda(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, args, body, lineno, col_offset):
         self.args = args
         self.body = body
@@ -1279,7 +1203,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda')
+            self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda')
         else:
             pass
         self.args.sync_app_attrs(space)
@@ -1288,9 +1212,6 @@
 
 class IfExp(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, test, body, orelse, lineno, col_offset):
         self.test = test
         self.body = body
@@ -1309,7 +1230,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp')
+            self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp')
         else:
             pass
         self.test.sync_app_attrs(space)
@@ -1319,9 +1240,6 @@
 
 class Dict(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, keys, values, lineno, col_offset):
         self.keys = keys
         self.w_keys = None
@@ -1342,7 +1260,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict')
+            self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict')
         else:
             pass
         w_list = self.w_keys
@@ -1369,9 +1287,6 @@
 
 class Set(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, elts, lineno, col_offset):
         self.elts = elts
         self.w_elts = None
@@ -1388,7 +1303,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set')
+            self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set')
         else:
             pass
         w_list = self.w_elts
@@ -1405,9 +1320,6 @@
 
 class ListComp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elt, generators, lineno, col_offset):
         self.elt = elt
         self.generators = generators
@@ -1426,7 +1338,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp')
+            self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp')
         else:
             pass
         self.elt.sync_app_attrs(space)
@@ -1444,9 +1356,6 @@
 
 class SetComp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elt, generators, lineno, col_offset):
         self.elt = elt
         self.generators = generators
@@ -1465,7 +1374,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp')
+            self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp')
         else:
             pass
         self.elt.sync_app_attrs(space)
@@ -1483,9 +1392,6 @@
 
 class DictComp(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, key, value, generators, lineno, col_offset):
         self.key = key
         self.value = value
@@ -1506,7 +1412,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp')
+            self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp')
         else:
             pass
         self.key.sync_app_attrs(space)
@@ -1525,9 +1431,6 @@
 
 class GeneratorExp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elt, generators, lineno, col_offset):
         self.elt = elt
         self.generators = generators
@@ -1546,7 +1449,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp')
+            self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp')
         else:
             pass
         self.elt.sync_app_attrs(space)
@@ -1564,9 +1467,6 @@
 
 class Yield(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         expr.__init__(self, lineno, col_offset)
@@ -1581,10 +1481,10 @@
         return visitor.visit_Yield(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~1) ^ 6:
-            missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield')
+        if (self.initialization_state & ~4) ^ 3:
+            self.missing_field(space, ['lineno', 'col_offset', None], 'Yield')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.value = None
         if self.value:
             self.value.sync_app_attrs(space)
@@ -1592,9 +1492,6 @@
 
 class Compare(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, left, ops, comparators, lineno, col_offset):
         self.left = left
         self.ops = ops
@@ -1615,7 +1512,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare')
+            self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare')
         else:
             pass
         self.left.sync_app_attrs(space)
@@ -1640,9 +1537,6 @@
 
 class Call(expr):
 
-    _lineno_mask = 32
-    _col_offset_mask = 64
-
     def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset):
         self.func = func
         self.args = args
@@ -1670,12 +1564,12 @@
         return visitor.visit_Call(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~24) ^ 103:
-            missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call')
+        if (self.initialization_state & ~96) ^ 31:
+            self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call')
         else:
-            if not self.initialization_state & 8:
+            if not self.initialization_state & 32:
                 self.starargs = None
-            if not self.initialization_state & 16:
+            if not self.initialization_state & 64:
                 self.kwargs = None
         self.func.sync_app_attrs(space)
         w_list = self.w_args
@@ -1706,9 +1600,6 @@
 
 class Repr(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         expr.__init__(self, lineno, col_offset)
@@ -1723,7 +1614,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr')
+            self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1731,9 +1622,6 @@
 
 class Num(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, n, lineno, col_offset):
         self.n = n
         expr.__init__(self, lineno, col_offset)
@@ -1747,16 +1635,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num')
+            self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num')
         else:
             pass
 
 
 class Str(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, s, lineno, col_offset):
         self.s = s
         expr.__init__(self, lineno, col_offset)
@@ -1770,16 +1655,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str')
+            self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str')
         else:
             pass
 
 
 class Attribute(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, value, attr, ctx, lineno, col_offset):
         self.value = value
         self.attr = attr
@@ -1796,7 +1678,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute')
+            self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1804,9 +1686,6 @@
 
 class Subscript(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, value, slice, ctx, lineno, col_offset):
         self.value = value
         self.slice = slice
@@ -1824,7 +1703,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript')
+            self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1833,9 +1712,6 @@
 
 class Name(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, id, ctx, lineno, col_offset):
         self.id = id
         self.ctx = ctx
@@ -1850,16 +1726,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name')
+            self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name')
         else:
             pass
 
 
 class List(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elts, ctx, lineno, col_offset):
         self.elts = elts
         self.w_elts = None
@@ -1877,7 +1750,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List')
+            self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List')
         else:
             pass
         w_list = self.w_elts
@@ -1894,9 +1767,6 @@
 
 class Tuple(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elts, ctx, lineno, col_offset):
         self.elts = elts
         self.w_elts = None
@@ -1914,7 +1784,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple')
+            self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple')
         else:
             pass
         w_list = self.w_elts
@@ -1931,9 +1801,6 @@
 
 class Const(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         expr.__init__(self, lineno, col_offset)
@@ -1947,7 +1814,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const')
+            self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const')
         else:
             pass
 
@@ -2009,7 +1876,6 @@
 
 class Ellipsis(slice):
 
-
     def __init__(self):
         self.initialization_state = 0
 
@@ -2021,14 +1887,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 0:
-            missing_field(space, self.initialization_state, [], 'Ellipsis')
+            self.missing_field(space, [], 'Ellipsis')
         else:
             pass
 
 
 class Slice(slice):
 
-
     def __init__(self, lower, upper, step):
         self.lower = lower
         self.upper = upper
@@ -2049,7 +1914,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~7) ^ 0:
-            missing_field(space, self.initialization_state, [None, None, None], 'Slice')
+            self.missing_field(space, [None, None, None], 'Slice')
         else:
             if not self.initialization_state & 1:
                 self.lower = None
@@ -2067,7 +1932,6 @@
 
 class ExtSlice(slice):
 
-
     def __init__(self, dims):
         self.dims = dims
         self.w_dims = None
@@ -2083,7 +1947,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['dims'], 'ExtSlice')
+            self.missing_field(space, ['dims'], 'ExtSlice')
         else:
             pass
         w_list = self.w_dims
@@ -2100,7 +1964,6 @@
 
 class Index(slice):
 
-
     def __init__(self, value):
         self.value = value
         self.initialization_state = 1
@@ -2114,7 +1977,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['value'], 'Index')
+            self.missing_field(space, ['value'], 'Index')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -2377,7 +2240,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension')
+            self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension')
         else:
             pass
         self.target.sync_app_attrs(space)
@@ -2394,15 +2257,13 @@
                 node.sync_app_attrs(space)
 
 class excepthandler(AST):
+
     def __init__(self, lineno, col_offset):
         self.lineno = lineno
         self.col_offset = col_offset
 
 class ExceptHandler(excepthandler):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, type, name, body, lineno, col_offset):
         self.type = type
         self.name = name
@@ -2424,12 +2285,12 @@
         return visitor.visit_ExceptHandler(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~3) ^ 28:
-            missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler')
+        if (self.initialization_state & ~12) ^ 19:
+            self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.type = None
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.name = None
         if self.type:
             self.type.sync_app_attrs(space)
@@ -2470,7 +2331,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~6) ^ 9:
-            missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments')
+            self.missing_field(space, ['args', None, None, 'defaults'], 'arguments')
         else:
             if not self.initialization_state & 2:
                 self.vararg = None
@@ -2513,7 +2374,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword')
+            self.missing_field(space, ['arg', 'value'], 'keyword')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -2533,7 +2394,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~2) ^ 1:
-            missing_field(space, self.initialization_state, ['name', None], 'alias')
+            self.missing_field(space, ['name', None], 'alias')
         else:
             if not self.initialization_state & 2:
                 self.asname = None
@@ -3019,6 +2880,8 @@
 def Expression_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -3098,7 +2961,7 @@
         w_obj = w_self.getdictvalue(space, 'lineno')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._lineno_mask:
+    if not w_self.initialization_state & 1:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno')
     return space.wrap(w_self.lineno)
@@ -3112,14 +2975,14 @@
         w_self.setdictvalue(space, 'lineno', w_new_value)
         return
     w_self.deldictvalue(space, 'lineno')
-    w_self.initialization_state |= w_self._lineno_mask
+    w_self.initialization_state |= 1
 
 def stmt_get_col_offset(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'col_offset')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._col_offset_mask:
+    if not w_self.initialization_state & 2:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset')
     return space.wrap(w_self.col_offset)
@@ -3133,7 +2996,7 @@
         w_self.setdictvalue(space, 'col_offset', w_new_value)
         return
     w_self.deldictvalue(space, 'col_offset')
-    w_self.initialization_state |= w_self._col_offset_mask
+    w_self.initialization_state |= 2
 
 stmt.typedef = typedef.TypeDef("stmt",
     AST.typedef,
@@ -3149,7 +3012,7 @@
         w_obj = w_self.getdictvalue(space, 'name')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name')
     return space.wrap(w_self.name)
@@ -3163,14 +3026,14 @@
         w_self.setdictvalue(space, 'name', w_new_value)
         return
     w_self.deldictvalue(space, 'name')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def FunctionDef_get_args(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'args')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args')
     return space.wrap(w_self.args)
@@ -3184,10 +3047,10 @@
         w_self.setdictvalue(space, 'args', w_new_value)
         return
     w_self.deldictvalue(space, 'args')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def FunctionDef_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3201,10 +3064,10 @@
 
 def FunctionDef_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def FunctionDef_get_decorator_list(space, w_self):
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list')
     if w_self.w_decorator_list is None:
@@ -3218,7 +3081,7 @@
 
 def FunctionDef_set_decorator_list(space, w_self, w_new_value):
     w_self.w_decorator_list = w_new_value
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list'])
 def FunctionDef_init(space, w_self, __args__):
@@ -3254,7 +3117,7 @@
         w_obj = w_self.getdictvalue(space, 'name')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name')
     return space.wrap(w_self.name)
@@ -3268,10 +3131,10 @@
         w_self.setdictvalue(space, 'name', w_new_value)
         return
     w_self.deldictvalue(space, 'name')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ClassDef_get_bases(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases')
     if w_self.w_bases is None:
@@ -3285,10 +3148,10 @@
 
 def ClassDef_set_bases(space, w_self, w_new_value):
     w_self.w_bases = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def ClassDef_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3302,10 +3165,10 @@
 
 def ClassDef_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def ClassDef_get_decorator_list(space, w_self):
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list')
     if w_self.w_decorator_list is None:
@@ -3319,7 +3182,7 @@
 
 def ClassDef_set_decorator_list(space, w_self, w_new_value):
     w_self.w_decorator_list = w_new_value
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list'])
 def ClassDef_init(space, w_self, __args__):
@@ -3356,7 +3219,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -3364,13 +3227,15 @@
 def Return_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, True)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Return_field_unroller = unrolling_iterable(['value'])
 def Return_init(space, w_self, __args__):
@@ -3397,7 +3262,7 @@
 )
 
 def Delete_get_targets(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets')
     if w_self.w_targets is None:
@@ -3411,7 +3276,7 @@
 
 def Delete_set_targets(space, w_self, w_new_value):
     w_self.w_targets = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Delete_field_unroller = unrolling_iterable(['targets'])
 def Delete_init(space, w_self, __args__):
@@ -3439,7 +3304,7 @@
 )
 
 def Assign_get_targets(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets')
     if w_self.w_targets is None:
@@ -3453,14 +3318,14 @@
 
 def Assign_set_targets(space, w_self, w_new_value):
     w_self.w_targets = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Assign_get_value(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -3468,13 +3333,15 @@
 def Assign_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Assign_field_unroller = unrolling_iterable(['targets', 'value'])
 def Assign_init(space, w_self, __args__):
@@ -3507,7 +3374,7 @@
         w_obj = w_self.getdictvalue(space, 'target')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target')
     return space.wrap(w_self.target)
@@ -3515,20 +3382,22 @@
 def AugAssign_set_target(space, w_self, w_new_value):
     try:
         w_self.target = space.interp_w(expr, w_new_value, False)
+        if type(w_self.target) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'target', w_new_value)
         return
     w_self.deldictvalue(space, 'target')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def AugAssign_get_op(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return operator_to_class[w_self.op - 1]()
@@ -3544,14 +3413,14 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def AugAssign_get_value(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -3559,13 +3428,15 @@
 def AugAssign_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value'])
 def AugAssign_init(space, w_self, __args__):
@@ -3598,7 +3469,7 @@
         w_obj = w_self.getdictvalue(space, 'dest')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest')
     return space.wrap(w_self.dest)
@@ -3606,16 +3477,18 @@
 def Print_set_dest(space, w_self, w_new_value):
     try:
         w_self.dest = space.interp_w(expr, w_new_value, True)
+        if type(w_self.dest) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'dest', w_new_value)
         return
     w_self.deldictvalue(space, 'dest')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Print_get_values(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values')
     if w_self.w_values is None:
@@ -3629,14 +3502,14 @@
 
 def Print_set_values(space, w_self, w_new_value):
     w_self.w_values = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Print_get_nl(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'nl')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl')
     return space.wrap(w_self.nl)
@@ -3650,7 +3523,7 @@
         w_self.setdictvalue(space, 'nl', w_new_value)
         return
     w_self.deldictvalue(space, 'nl')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl'])
 def Print_init(space, w_self, __args__):
@@ -3684,7 +3557,7 @@
         w_obj = w_self.getdictvalue(space, 'target')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target')
     return space.wrap(w_self.target)
@@ -3692,20 +3565,22 @@
 def For_set_target(space, w_self, w_new_value):
     try:
         w_self.target = space.interp_w(expr, w_new_value, False)
+        if type(w_self.target) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'target', w_new_value)
         return
     w_self.deldictvalue(space, 'target')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def For_get_iter(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'iter')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter')
     return space.wrap(w_self.iter)
@@ -3713,16 +3588,18 @@
 def For_set_iter(space, w_self, w_new_value):
     try:
         w_self.iter = space.interp_w(expr, w_new_value, False)
+        if type(w_self.iter) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'iter', w_new_value)
         return
     w_self.deldictvalue(space, 'iter')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def For_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3736,10 +3613,10 @@
 
 def For_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def For_get_orelse(space, w_self):
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -3753,7 +3630,7 @@
 
 def For_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse'])
 def For_init(space, w_self, __args__):
@@ -3789,7 +3666,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -3797,16 +3674,18 @@
 def While_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def While_get_body(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3820,10 +3699,10 @@
 
 def While_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def While_get_orelse(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -3837,7 +3716,7 @@
 
 def While_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse'])
 def While_init(space, w_self, __args__):
@@ -3872,7 +3751,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -3880,16 +3759,18 @@
 def If_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def If_get_body(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3903,10 +3784,10 @@
 
 def If_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def If_get_orelse(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -3920,7 +3801,7 @@
 
 def If_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse'])
 def If_init(space, w_self, __args__):
@@ -3955,7 +3836,7 @@
         w_obj = w_self.getdictvalue(space, 'context_expr')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr')
     return space.wrap(w_self.context_expr)
@@ -3963,20 +3844,22 @@
 def With_set_context_expr(space, w_self, w_new_value):
     try:
         w_self.context_expr = space.interp_w(expr, w_new_value, False)
+        if type(w_self.context_expr) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'context_expr', w_new_value)
         return
     w_self.deldictvalue(space, 'context_expr')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def With_get_optional_vars(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'optional_vars')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars')
     return space.wrap(w_self.optional_vars)
@@ -3984,16 +3867,18 @@
 def With_set_optional_vars(space, w_self, w_new_value):
     try:
         w_self.optional_vars = space.interp_w(expr, w_new_value, True)
+        if type(w_self.optional_vars) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'optional_vars', w_new_value)
         return
     w_self.deldictvalue(space, 'optional_vars')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def With_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -4007,7 +3892,7 @@
 
 def With_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body'])
 def With_init(space, w_self, __args__):
@@ -4041,7 +3926,7 @@
         w_obj = w_self.getdictvalue(space, 'type')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type')
     return space.wrap(w_self.type)
@@ -4049,20 +3934,22 @@
 def Raise_set_type(space, w_self, w_new_value):
     try:
         w_self.type = space.interp_w(expr, w_new_value, True)
+        if type(w_self.type) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'type', w_new_value)
         return
     w_self.deldictvalue(space, 'type')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Raise_get_inst(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'inst')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst')
     return space.wrap(w_self.inst)
@@ -4070,20 +3957,22 @@
 def Raise_set_inst(space, w_self, w_new_value):
     try:
         w_self.inst = space.interp_w(expr, w_new_value, True)
+        if type(w_self.inst) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'inst', w_new_value)
         return
     w_self.deldictvalue(space, 'inst')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Raise_get_tback(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'tback')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback')
     return space.wrap(w_self.tback)
@@ -4091,13 +3980,15 @@
 def Raise_set_tback(space, w_self, w_new_value):
     try:
         w_self.tback = space.interp_w(expr, w_new_value, True)
+        if type(w_self.tback) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'tback', w_new_value)
         return
     w_self.deldictvalue(space, 'tback')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback'])
 def Raise_init(space, w_self, __args__):
@@ -4126,7 +4017,7 @@
 )
 
 def TryExcept_get_body(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -4140,10 +4031,10 @@
 
 def TryExcept_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def TryExcept_get_handlers(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers')
     if w_self.w_handlers is None:
@@ -4157,10 +4048,10 @@
 
 def TryExcept_set_handlers(space, w_self, w_new_value):
     w_self.w_handlers = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def TryExcept_get_orelse(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -4174,7 +4065,7 @@
 
 def TryExcept_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse'])
 def TryExcept_init(space, w_self, __args__):
@@ -4206,7 +4097,7 @@
 )
 
 def TryFinally_get_body(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -4220,10 +4111,10 @@
 
 def TryFinally_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def TryFinally_get_finalbody(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody')
     if w_self.w_finalbody is None:
@@ -4237,7 +4128,7 @@
 
 def TryFinally_set_finalbody(space, w_self, w_new_value):
     w_self.w_finalbody = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody'])
 def TryFinally_init(space, w_self, __args__):
@@ -4271,7 +4162,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -4279,20 +4170,22 @@
 def Assert_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Assert_get_msg(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'msg')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg')
     return space.wrap(w_self.msg)
@@ -4300,13 +4193,15 @@
 def Assert_set_msg(space, w_self, w_new_value):
     try:
         w_self.msg = space.interp_w(expr, w_new_value, True)
+        if type(w_self.msg) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'msg', w_new_value)
         return
     w_self.deldictvalue(space, 'msg')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Assert_field_unroller = unrolling_iterable(['test', 'msg'])
 def Assert_init(space, w_self, __args__):
@@ -4334,7 +4229,7 @@
 )
 
 def Import_get_names(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names')
     if w_self.w_names is None:
@@ -4348,7 +4243,7 @@
 
 def Import_set_names(space, w_self, w_new_value):
     w_self.w_names = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Import_field_unroller = unrolling_iterable(['names'])
 def Import_init(space, w_self, __args__):
@@ -4380,7 +4275,7 @@
         w_obj = w_self.getdictvalue(space, 'module')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module')
     return space.wrap(w_self.module)
@@ -4397,10 +4292,10 @@
         w_self.setdictvalue(space, 'module', w_new_value)
         return
     w_self.deldictvalue(space, 'module')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ImportFrom_get_names(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names')
     if w_self.w_names is None:
@@ -4414,14 +4309,14 @@
 
 def ImportFrom_set_names(space, w_self, w_new_value):
     w_self.w_names = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def ImportFrom_get_level(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'level')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level')
     return space.wrap(w_self.level)
@@ -4435,7 +4330,7 @@
         w_self.setdictvalue(space, 'level', w_new_value)
         return
     w_self.deldictvalue(space, 'level')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level'])
 def ImportFrom_init(space, w_self, __args__):
@@ -4469,7 +4364,7 @@
         w_obj = w_self.getdictvalue(space, 'body')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     return space.wrap(w_self.body)
@@ -4477,20 +4372,22 @@
 def Exec_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'body', w_new_value)
         return
     w_self.deldictvalue(space, 'body')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Exec_get_globals(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'globals')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals')
     return space.wrap(w_self.globals)
@@ -4498,20 +4395,22 @@
 def Exec_set_globals(space, w_self, w_new_value):
     try:
         w_self.globals = space.interp_w(expr, w_new_value, True)
+        if type(w_self.globals) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'globals', w_new_value)
         return
     w_self.deldictvalue(space, 'globals')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Exec_get_locals(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'locals')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals')
     return space.wrap(w_self.locals)
@@ -4519,13 +4418,15 @@
 def Exec_set_locals(space, w_self, w_new_value):
     try:
         w_self.locals = space.interp_w(expr, w_new_value, True)
+        if type(w_self.locals) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'locals', w_new_value)
         return
     w_self.deldictvalue(space, 'locals')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals'])
 def Exec_init(space, w_self, __args__):
@@ -4554,7 +4455,7 @@
 )
 
 def Global_get_names(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names')
     if w_self.w_names is None:
@@ -4568,7 +4469,7 @@
 
 def Global_set_names(space, w_self, w_new_value):
     w_self.w_names = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Global_field_unroller = unrolling_iterable(['names'])
 def Global_init(space, w_self, __args__):
@@ -4600,7 +4501,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -4608,13 +4509,15 @@
 def Expr_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Expr_field_unroller = unrolling_iterable(['value'])
 def Expr_init(space, w_self, __args__):
@@ -4696,7 +4599,7 @@
         w_obj = w_self.getdictvalue(space, 'lineno')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._lineno_mask:
+    if not w_self.initialization_state & 1:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno')
     return space.wrap(w_self.lineno)
@@ -4710,14 +4613,14 @@
         w_self.setdictvalue(space, 'lineno', w_new_value)
         return
     w_self.deldictvalue(space, 'lineno')
-    w_self.initialization_state |= w_self._lineno_mask
+    w_self.initialization_state |= 1
 
 def expr_get_col_offset(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'col_offset')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._col_offset_mask:
+    if not w_self.initialization_state & 2:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset')
     return space.wrap(w_self.col_offset)
@@ -4731,7 +4634,7 @@
         w_self.setdictvalue(space, 'col_offset', w_new_value)
         return
     w_self.deldictvalue(space, 'col_offset')
-    w_self.initialization_state |= w_self._col_offset_mask
+    w_self.initialization_state |= 2
 
 expr.typedef = typedef.TypeDef("expr",
     AST.typedef,
@@ -4747,7 +4650,7 @@
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return boolop_to_class[w_self.op - 1]()
@@ -4763,10 +4666,10 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def BoolOp_get_values(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values')
     if w_self.w_values is None:
@@ -4780,7 +4683,7 @@
 
 def BoolOp_set_values(space, w_self, w_new_value):
     w_self.w_values = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _BoolOp_field_unroller = unrolling_iterable(['op', 'values'])
 def BoolOp_init(space, w_self, __args__):
@@ -4813,7 +4716,7 @@
         w_obj = w_self.getdictvalue(space, 'left')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left')
     return space.wrap(w_self.left)
@@ -4821,20 +4724,22 @@
 def BinOp_set_left(space, w_self, w_new_value):
     try:
         w_self.left = space.interp_w(expr, w_new_value, False)
+        if type(w_self.left) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'left', w_new_value)
         return
     w_self.deldictvalue(space, 'left')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def BinOp_get_op(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return operator_to_class[w_self.op - 1]()
@@ -4850,14 +4755,14 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def BinOp_get_right(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'right')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right')
     return space.wrap(w_self.right)
@@ -4865,13 +4770,15 @@
 def BinOp_set_right(space, w_self, w_new_value):
     try:
         w_self.right = space.interp_w(expr, w_new_value, False)
+        if type(w_self.right) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'right', w_new_value)
         return
     w_self.deldictvalue(space, 'right')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right'])
 def BinOp_init(space, w_self, __args__):
@@ -4904,7 +4811,7 @@
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return unaryop_to_class[w_self.op - 1]()
@@ -4920,14 +4827,14 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def UnaryOp_get_operand(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'operand')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand')
     return space.wrap(w_self.operand)
@@ -4935,13 +4842,15 @@
 def UnaryOp_set_operand(space, w_self, w_new_value):
     try:
         w_self.operand = space.interp_w(expr, w_new_value, False)
+        if type(w_self.operand) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'operand', w_new_value)
         return
     w_self.deldictvalue(space, 'operand')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand'])
 def UnaryOp_init(space, w_self, __args__):
@@ -4973,7 +4882,7 @@
         w_obj = w_self.getdictvalue(space, 'args')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args')
     return space.wrap(w_self.args)
@@ -4987,14 +4896,14 @@
         w_self.setdictvalue(space, 'args', w_new_value)
         return
     w_self.deldictvalue(space, 'args')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Lambda_get_body(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'body')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     return space.wrap(w_self.body)
@@ -5002,13 +4911,15 @@
 def Lambda_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'body', w_new_value)
         return
     w_self.deldictvalue(space, 'body')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Lambda_field_unroller = unrolling_iterable(['args', 'body'])
 def Lambda_init(space, w_self, __args__):
@@ -5040,7 +4951,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -5048,20 +4959,22 @@
 def IfExp_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def IfExp_get_body(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'body')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     return space.wrap(w_self.body)
@@ -5069,20 +4982,22 @@
 def IfExp_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'body', w_new_value)
         return
     w_self.deldictvalue(space, 'body')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def IfExp_get_orelse(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'orelse')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     return space.wrap(w_self.orelse)
@@ -5090,13 +5005,15 @@
 def IfExp_set_orelse(space, w_self, w_new_value):
     try:
         w_self.orelse = space.interp_w(expr, w_new_value, False)
+        if type(w_self.orelse) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'orelse', w_new_value)
         return
     w_self.deldictvalue(space, 'orelse')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse'])
 def IfExp_init(space, w_self, __args__):
@@ -5125,7 +5042,7 @@
 )
 
 def Dict_get_keys(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys')
     if w_self.w_keys is None:
@@ -5139,10 +5056,10 @@
 
 def Dict_set_keys(space, w_self, w_new_value):
     w_self.w_keys = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Dict_get_values(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values')
     if w_self.w_values is None:
@@ -5156,7 +5073,7 @@
 
 def Dict_set_values(space, w_self, w_new_value):
     w_self.w_values = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Dict_field_unroller = unrolling_iterable(['keys', 'values'])
 def Dict_init(space, w_self, __args__):
@@ -5186,7 +5103,7 @@
 )
 
 def Set_get_elts(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts')
     if w_self.w_elts is None:
@@ -5200,7 +5117,7 @@
 
 def Set_set_elts(space, w_self, w_new_value):
     w_self.w_elts = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Set_field_unroller = unrolling_iterable(['elts'])
 def Set_init(space, w_self, __args__):
@@ -5232,7 +5149,7 @@
         w_obj = w_self.getdictvalue(space, 'elt')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt')
     return space.wrap(w_self.elt)
@@ -5240,16 +5157,18 @@
 def ListComp_set_elt(space, w_self, w_new_value):
     try:
         w_self.elt = space.interp_w(expr, w_new_value, False)
+        if type(w_self.elt) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'elt', w_new_value)
         return
     w_self.deldictvalue(space, 'elt')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ListComp_get_generators(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5263,7 +5182,7 @@
 
 def ListComp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators'])
 def ListComp_init(space, w_self, __args__):
@@ -5296,7 +5215,7 @@
         w_obj = w_self.getdictvalue(space, 'elt')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt')
     return space.wrap(w_self.elt)
@@ -5304,16 +5223,18 @@
 def SetComp_set_elt(space, w_self, w_new_value):
     try:
         w_self.elt = space.interp_w(expr, w_new_value, False)
+        if type(w_self.elt) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'elt', w_new_value)
         return
     w_self.deldictvalue(space, 'elt')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def SetComp_get_generators(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5327,7 +5248,7 @@
 
 def SetComp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators'])
 def SetComp_init(space, w_self, __args__):
@@ -5360,7 +5281,7 @@
         w_obj = w_self.getdictvalue(space, 'key')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key')
     return space.wrap(w_self.key)
@@ -5368,20 +5289,22 @@
 def DictComp_set_key(space, w_self, w_new_value):
     try:
         w_self.key = space.interp_w(expr, w_new_value, False)
+        if type(w_self.key) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'key', w_new_value)
         return
     w_self.deldictvalue(space, 'key')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def DictComp_get_value(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5389,16 +5312,18 @@
 def DictComp_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def DictComp_get_generators(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5412,7 +5337,7 @@
 
 def DictComp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators'])
 def DictComp_init(space, w_self, __args__):
@@ -5446,7 +5371,7 @@
         w_obj = w_self.getdictvalue(space, 'elt')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt')
     return space.wrap(w_self.elt)
@@ -5454,16 +5379,18 @@
 def GeneratorExp_set_elt(space, w_self, w_new_value):
     try:
         w_self.elt = space.interp_w(expr, w_new_value, False)
+        if type(w_self.elt) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'elt', w_new_value)
         return
     w_self.deldictvalue(space, 'elt')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def GeneratorExp_get_generators(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5477,7 +5404,7 @@
 
 def GeneratorExp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators'])
 def GeneratorExp_init(space, w_self, __args__):
@@ -5510,7 +5437,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5518,13 +5445,15 @@
 def Yield_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, True)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Yield_field_unroller = unrolling_iterable(['value'])
 def Yield_init(space, w_self, __args__):
@@ -5555,7 +5484,7 @@
         w_obj = w_self.getdictvalue(space, 'left')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left')
     return space.wrap(w_self.left)
@@ -5563,16 +5492,18 @@
 def Compare_set_left(space, w_self, w_new_value):
     try:
         w_self.left = space.interp_w(expr, w_new_value, False)
+        if type(w_self.left) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'left', w_new_value)
         return
     w_self.deldictvalue(space, 'left')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Compare_get_ops(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops')
     if w_self.w_ops is None:
@@ -5586,10 +5517,10 @@
 
 def Compare_set_ops(space, w_self, w_new_value):
     w_self.w_ops = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Compare_get_comparators(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators')
     if w_self.w_comparators is None:
@@ -5603,7 +5534,7 @@
 
 def Compare_set_comparators(space, w_self, w_new_value):
     w_self.w_comparators = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators'])
 def Compare_init(space, w_self, __args__):
@@ -5638,7 +5569,7 @@
         w_obj = w_self.getdictvalue(space, 'func')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func')
     return space.wrap(w_self.func)
@@ -5646,16 +5577,18 @@
 def Call_set_func(space, w_self, w_new_value):
     try:
         w_self.func = space.interp_w(expr, w_new_value, False)
+        if type(w_self.func) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'func', w_new_value)
         return
     w_self.deldictvalue(space, 'func')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Call_get_args(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args')
     if w_self.w_args is None:
@@ -5669,10 +5602,10 @@
 
 def Call_set_args(space, w_self, w_new_value):
     w_self.w_args = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Call_get_keywords(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords')
     if w_self.w_keywords is None:
@@ -5686,14 +5619,14 @@
 
 def Call_set_keywords(space, w_self, w_new_value):
     w_self.w_keywords = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def Call_get_starargs(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'starargs')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs')
     return space.wrap(w_self.starargs)
@@ -5701,20 +5634,22 @@
 def Call_set_starargs(space, w_self, w_new_value):
     try:
         w_self.starargs = space.interp_w(expr, w_new_value, True)
+        if type(w_self.starargs) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'starargs', w_new_value)
         return
     w_self.deldictvalue(space, 'starargs')
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 def Call_get_kwargs(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'kwargs')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 16:
+    if not w_self.initialization_state & 64:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs')
     return space.wrap(w_self.kwargs)
@@ -5722,13 +5657,15 @@
 def Call_set_kwargs(space, w_self, w_new_value):
     try:
         w_self.kwargs = space.interp_w(expr, w_new_value, True)
+        if type(w_self.kwargs) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'kwargs', w_new_value)
         return
     w_self.deldictvalue(space, 'kwargs')
-    w_self.initialization_state |= 16
+    w_self.initialization_state |= 64
 
 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs'])
 def Call_init(space, w_self, __args__):
@@ -5765,7 +5702,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5773,13 +5710,15 @@
 def Repr_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Repr_field_unroller = unrolling_iterable(['value'])
 def Repr_init(space, w_self, __args__):
@@ -5810,7 +5749,7 @@
         w_obj = w_self.getdictvalue(space, 'n')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n')
     return w_self.n
@@ -5824,7 +5763,7 @@
         w_self.setdictvalue(space, 'n', w_new_value)
         return
     w_self.deldictvalue(space, 'n')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Num_field_unroller = unrolling_iterable(['n'])
 def Num_init(space, w_self, __args__):
@@ -5855,7 +5794,7 @@
         w_obj = w_self.getdictvalue(space, 's')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's')
     return w_self.s
@@ -5869,7 +5808,7 @@
         w_self.setdictvalue(space, 's', w_new_value)
         return
     w_self.deldictvalue(space, 's')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Str_field_unroller = unrolling_iterable(['s'])
 def Str_init(space, w_self, __args__):
@@ -5900,7 +5839,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5908,20 +5847,22 @@
 def Attribute_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Attribute_get_attr(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'attr')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr')
     return space.wrap(w_self.attr)
@@ -5935,14 +5876,14 @@
         w_self.setdictvalue(space, 'attr', w_new_value)
         return
     w_self.deldictvalue(space, 'attr')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Attribute_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -5958,7 +5899,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx'])
 def Attribute_init(space, w_self, __args__):
@@ -5991,7 +5932,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5999,20 +5940,22 @@
 def Subscript_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Subscript_get_slice(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'slice')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice')
     return space.wrap(w_self.slice)
@@ -6020,20 +5963,22 @@
 def Subscript_set_slice(space, w_self, w_new_value):
     try:
         w_self.slice = space.interp_w(slice, w_new_value, False)
+        if type(w_self.slice) is slice:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'slice', w_new_value)
         return
     w_self.deldictvalue(space, 'slice')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Subscript_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6049,7 +5994,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx'])
 def Subscript_init(space, w_self, __args__):
@@ -6082,7 +6027,7 @@
         w_obj = w_self.getdictvalue(space, 'id')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id')
     return space.wrap(w_self.id)
@@ -6096,14 +6041,14 @@
         w_self.setdictvalue(space, 'id', w_new_value)
         return
     w_self.deldictvalue(space, 'id')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Name_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6119,7 +6064,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Name_field_unroller = unrolling_iterable(['id', 'ctx'])
 def Name_init(space, w_self, __args__):
@@ -6147,7 +6092,7 @@
 )
 
 def List_get_elts(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts')
     if w_self.w_elts is None:
@@ -6161,14 +6106,14 @@
 
 def List_set_elts(space, w_self, w_new_value):
     w_self.w_elts = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def List_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6184,7 +6129,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _List_field_unroller = unrolling_iterable(['elts', 'ctx'])
 def List_init(space, w_self, __args__):
@@ -6213,7 +6158,7 @@
 )
 
 def Tuple_get_elts(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts')
     if w_self.w_elts is None:
@@ -6227,14 +6172,14 @@
 
 def Tuple_set_elts(space, w_self, w_new_value):
     w_self.w_elts = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Tuple_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6250,7 +6195,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx'])
 def Tuple_init(space, w_self, __args__):
@@ -6283,7 +6228,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return w_self.value
@@ -6297,7 +6242,7 @@
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Const_field_unroller = unrolling_iterable(['value'])
 def Const_init(space, w_self, __args__):
@@ -6409,6 +6354,8 @@
 def Slice_set_lower(space, w_self, w_new_value):
     try:
         w_self.lower = space.interp_w(expr, w_new_value, True)
+        if type(w_self.lower) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6430,6 +6377,8 @@
 def Slice_set_upper(space, w_self, w_new_value):
     try:
         w_self.upper = space.interp_w(expr, w_new_value, True)
+        if type(w_self.upper) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6451,6 +6400,8 @@
 def Slice_set_step(space, w_self, w_new_value):
     try:
         w_self.step = space.interp_w(expr, w_new_value, True)
+        if type(w_self.step) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6540,6 +6491,8 @@
 def Index_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6809,6 +6762,8 @@
 def comprehension_set_target(space, w_self, w_new_value):
     try:
         w_self.target = space.interp_w(expr, w_new_value, False)
+        if type(w_self.target) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6830,6 +6785,8 @@
 def comprehension_set_iter(space, w_self, w_new_value):
     try:
         w_self.iter = space.interp_w(expr, w_new_value, False)
+        if type(w_self.iter) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6887,7 +6844,7 @@
         w_obj = w_self.getdictvalue(space, 'lineno')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._lineno_mask:
+    if not w_self.initialization_state & 1:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno')
     return space.wrap(w_self.lineno)
@@ -6901,14 +6858,14 @@
         w_self.setdictvalue(space, 'lineno', w_new_value)
         return
     w_self.deldictvalue(space, 'lineno')
-    w_self.initialization_state |= w_self._lineno_mask
+    w_self.initialization_state |= 1
 
 def excepthandler_get_col_offset(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'col_offset')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._col_offset_mask:
+    if not w_self.initialization_state & 2:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset')
     return space.wrap(w_self.col_offset)
@@ -6922,7 +6879,7 @@
         w_self.setdictvalue(space, 'col_offset', w_new_value)
         return
     w_self.deldictvalue(space, 'col_offset')
-    w_self.initialization_state |= w_self._col_offset_mask
+    w_self.initialization_state |= 2
 
 excepthandler.typedef = typedef.TypeDef("excepthandler",
     AST.typedef,
@@ -6938,7 +6895,7 @@
         w_obj = w_self.getdictvalue(space, 'type')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type')
     return space.wrap(w_self.type)
@@ -6946,20 +6903,22 @@
 def ExceptHandler_set_type(space, w_self, w_new_value):
     try:
         w_self.type = space.interp_w(expr, w_new_value, True)
+        if type(w_self.type) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'type', w_new_value)
         return
     w_self.deldictvalue(space, 'type')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ExceptHandler_get_name(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'name')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name')
     return space.wrap(w_self.name)
@@ -6967,16 +6926,18 @@
 def ExceptHandler_set_name(space, w_self, w_new_value):
     try:
         w_self.name = space.interp_w(expr, w_new_value, True)
+        if type(w_self.name) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'name', w_new_value)
         return
     w_self.deldictvalue(space, 'name')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def ExceptHandler_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -6990,7 +6951,7 @@
 
 def ExceptHandler_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body'])
 def ExceptHandler_init(space, w_self, __args__):
@@ -7164,6 +7125,8 @@
 def keyword_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
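
The hunks above all belong to the generated pypy/interpreter/astcompiler/ast.py and follow two mechanical patterns. First, every per-field flag in initialization_state shifts left by two (1 becomes 4, 2 becomes 8, 4 becomes 16, ...), because the lineno and col_offset attributes now uniformly occupy bit values 1 and 2 on every node; that is also why the per-class _lineno_mask/_col_offset_mask attributes disappear from asdl_py.py further down. Second, each setter for an expr-typed (or slice-typed) field gains a check that rejects an instance of the abstract base class itself, so only concrete subclasses are stored. A minimal hand-written sketch of both conventions (illustrative only, not part of the patch):

    class expr(object):          # stands in for the generated AST base class
        pass

    class Name(expr):            # one of its concrete subclasses
        pass

    LINENO_BIT     = 1           # attribute 'lineno'
    COL_OFFSET_BIT = 2           # attribute 'col_offset'
    FIELD_BITS     = [4, 8, 16]  # real fields now start at bit value 4 (was 1)

    def field_is_set(initialization_state, bit):
        # mirrors "if not w_self.initialization_state & <bit>: raise ..."
        return bool(initialization_state & bit)

    def acceptable_expr(value):
        # mirrors "if type(w_self.body) is expr: raise OperationError(...)"
        return isinstance(value, expr) and type(value) is not expr

    assert acceptable_expr(Name()) and not acceptable_expr(expr())
    assert field_is_set(LINENO_BIT | FIELD_BITS[0], FIELD_BITS[0])
    assert not field_is_set(LINENO_BIT, FIELD_BITS[1])
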
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -1,6 +1,5 @@
 """codegen helpers and AST constant folding."""
 import sys
-import itertools
 
 from pypy.interpreter.astcompiler import ast, consts, misc
 from pypy.tool import stdlib_opcode as ops
@@ -146,8 +145,7 @@
 }
 unrolling_unary_folders = unrolling_iterable(unary_folders.items())
 
-for folder in itertools.chain(binary_folders.itervalues(),
-                              unary_folders.itervalues()):
+for folder in binary_folders.values() + unary_folders.values():
     folder._always_inline_ = True
 del folder
 
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -79,6 +79,7 @@
         else:
             self.emit("class %s(AST):" % (base,))
             if sum.attributes:
+                self.emit("")
                 args = ", ".join(attr.name.value for attr in sum.attributes)
                 self.emit("def __init__(self, %s):" % (args,), 1)
                 for attr in sum.attributes:
@@ -114,7 +115,7 @@
             else:
                 names.append(repr(field.name.value))
         sub = (", ".join(names), name.value)
-        self.emit("missing_field(space, self.initialization_state, [%s], %r)"
+        self.emit("self.missing_field(space, [%s], %r)"
                   % sub, 3)
         self.emit("else:", 2)
         # Fill in all the default fields.
@@ -195,17 +196,13 @@
     def visitConstructor(self, cons, base, extra_attributes):
         self.emit("class %s(%s):" % (cons.name, base))
         self.emit("")
-        for field in self.data.cons_attributes[cons]:
-            subst = (field.name, self.data.field_masks[field])
-            self.emit("_%s_mask = %i" % subst, 1)
-        self.emit("")
         self.make_constructor(cons.fields, cons, extra_attributes, base)
         self.emit("")
         self.emit("def walkabout(self, visitor):", 1)
         self.emit("visitor.visit_%s(self)" % (cons.name,), 2)
         self.emit("")
         self.make_mutate_over(cons, cons.name)
-        self.make_var_syncer(cons.fields + self.data.cons_attributes[cons],
+        self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields,
                              cons, cons.name)
 
     def visitField(self, field):
@@ -324,7 +321,7 @@
 
     def visitSum(self, sum, name):
         for field in sum.attributes:
-            self.make_property(field, name, True)
+            self.make_property(field, name)
         self.make_typedef(name, "AST", sum.attributes,
                           fields_name="_attributes")
         if not is_simple_sum(sum):
@@ -400,13 +397,10 @@
     def visitField(self, field, name):
         self.make_property(field, name)
 
-    def make_property(self, field, name, different_masks=False):
+    def make_property(self, field, name):
         func = "def %s_get_%s(space, w_self):" % (name, field.name)
         self.emit(func)
-        if different_masks:
-            flag = "w_self._%s_mask" % (field.name,)
-        else:
-            flag = self.data.field_masks[field]
+        flag = self.data.field_masks[field]
         if not field.seq:
             self.emit("if w_self.w_dict is not None:", 1)
             self.emit("    w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1)
@@ -458,6 +452,11 @@
                     config = (field.name, field.type, repr(field.opt))
                     self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" %
                               config, 2)
+                    if field.type.value not in self.data.prod_simple:
+                        self.emit("if type(w_self.%s) is %s:" % (
+                                field.name, field.type), 2)
+                        self.emit("raise OperationError(space.w_TypeError, "
+                                  "space.w_None)", 3)
             else:
                 level = 2
                 if field.opt and field.type.value != "int":
@@ -505,7 +504,10 @@
             optional_mask = 0
             for i, field in enumerate(fields):
                 flag = 1 << i
-                field_masks[field] = flag
+                if field not in field_masks:
+                    field_masks[field] = flag
+                else:
+                    assert field_masks[field] == flag
                 if field.opt:
                     optional_mask |= flag
                 else:
@@ -518,9 +520,9 @@
                 if is_simple_sum(sum):
                     simple_types.add(tp.name.value)
                 else:
+                    attrs = [field for field in sum.attributes]
                     for cons in sum.types:
-                        attrs = [copy_field(field) for field in sum.attributes]
-                        add_masks(cons.fields + attrs, cons)
+                        add_masks(attrs + cons.fields, cons)
                         cons_attributes[cons] = attrs
             else:
                 prod = tp.value
@@ -588,6 +590,24 @@
             space.setattr(self, w_name,
                           space.getitem(w_state, w_name))
 
+    def missing_field(self, space, required, host):
+        "Find which required field is missing."
+        state = self.initialization_state
+        for i in range(len(required)):
+            if (state >> i) & 1:
+                continue  # field is present
+            missing = required[i]
+            if missing is None:
+                continue  # field is optional
+            w_obj = self.getdictvalue(space, missing)
+            if w_obj is None:
+                err = "required field \\"%s\\" missing from %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+            else:
+                err = "incorrect type for field \\"%s\\" in %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+        raise AssertionError("should not reach here")
+
 
 class NodeVisitorNotImplemented(Exception):
     pass
@@ -631,15 +651,6 @@
 )
 
 
-def missing_field(space, state, required, host):
-    "Find which required field is missing."
-    for i in range(len(required)):
-        if not (state >> i) & 1:
-            missing = required[i]
-            if missing is not None:
-                 err = "required field \\"%s\\" missing from %s"
-                 raise operationerrfmt(space.w_TypeError, err, missing, host)
-    raise AssertionError("should not reach here")
 
 
 """
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1,4 +1,3 @@
-import itertools
 import pypy
 from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag
 from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction
@@ -188,6 +187,12 @@
 
     # -------------------------------------------------------------------
 
+    def is_w(self, space, w_other):
+        return self is w_other
+
+    def immutable_unique_id(self, space):
+        return None
+
     def str_w(self, space):
         w_msg = typed_unwrap_error_msg(space, "string", self)
         raise OperationError(space.w_TypeError, w_msg)
@@ -482,6 +487,16 @@
         'parser', 'fcntl', '_codecs', 'binascii'
     ]
 
+    # These modules are treated like CPython treats built-in modules,
+    # i.e. they always shadow any xx.py.  The other modules are treated
+    # like CPython treats extension modules, and are loaded in sys.path
+    # order by the fake entry '.../lib_pypy/__extensions__'.
+    MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([
+        '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings',
+        '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal',
+        'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport',
+    ], None)
+
     def make_builtins(self):
         "NOT_RPYTHON: only for initializing the space."
 
@@ -513,8 +528,8 @@
         exception_types_w = self.export_builtin_exceptions()
 
         # initialize with "bootstrap types" from objspace  (e.g. w_None)
-        types_w = itertools.chain(self.get_builtin_types().iteritems(),
-                                  exception_types_w.iteritems())
+        types_w = (self.get_builtin_types().items() +
+                   exception_types_w.items())
         for name, w_type in types_w:
             self.setitem(self.builtin.w_dict, self.wrap(name), w_type)
 
@@ -681,9 +696,20 @@
         """shortcut for space.is_true(space.eq(w_obj1, w_obj2))"""
         return self.is_w(w_obj1, w_obj2) or self.is_true(self.eq(w_obj1, w_obj2))
 
-    def is_w(self, w_obj1, w_obj2):
-        """shortcut for space.is_true(space.is_(w_obj1, w_obj2))"""
-        return self.is_true(self.is_(w_obj1, w_obj2))
+    def is_(self, w_one, w_two):
+        return self.newbool(self.is_w(w_one, w_two))
+
+    def is_w(self, w_one, w_two):
+        # done by a method call on w_two (and not on w_one, because of the
+        # expected programming style where we say "if x is None" or
+        # "if x is object").
+        return w_two.is_w(self, w_one)
+
+    def id(self, w_obj):
+        w_result = w_obj.immutable_unique_id(self)
+        if w_result is None:
+            w_result = self.wrap(compute_unique_id(w_obj))
+        return w_result
 
     def hash_w(self, w_obj):
         """shortcut for space.int_w(space.hash(w_obj))"""
@@ -777,22 +803,63 @@
         """Unpack an iterable object into a real (interpreter-level) list.
         Raise an OperationError(w_ValueError) if the length is wrong."""
         w_iterator = self.iter(w_iterable)
-        # If we know the expected length we can preallocate.
         if expected_length == -1:
+            # xxx special hack for speed
+            from pypy.interpreter.generator import GeneratorIterator
+            if isinstance(w_iterator, GeneratorIterator):
+                lst_w = []
+                w_iterator.unpack_into(lst_w)
+                return lst_w
+            # /xxx
+            return self._unpackiterable_unknown_length(w_iterator, w_iterable)
+        else:
+            lst_w = self._unpackiterable_known_length(w_iterator,
+                                                      expected_length)
+            return lst_w[:]     # make the resulting list resizable
+
+    @jit.dont_look_inside
+    def _unpackiterable_unknown_length(self, w_iterator, w_iterable):
+        # Unpack a variable-size list of unknown length.
+        # The JIT does not look inside this function because it
+        # contains a loop (made explicit with the decorator above).
+        #
+        # If we can guess the expected length we can preallocate.
+        try:
+            lgt_estimate = self.len_w(w_iterable)
+        except OperationError, o:
+            if (not o.match(self, self.w_AttributeError) and
+                not o.match(self, self.w_TypeError)):
+                raise
+            items = []
+        else:
             try:
-                lgt_estimate = self.len_w(w_iterable)
-            except OperationError, o:
-                if (not o.match(self, self.w_AttributeError) and
-                    not o.match(self, self.w_TypeError)):
+                items = newlist(lgt_estimate)
+            except MemoryError:
+                items = [] # it might have lied
+        #
+        while True:
+            try:
+                w_item = self.next(w_iterator)
+            except OperationError, e:
+                if not e.match(self, self.w_StopIteration):
                     raise
-                items = []
-            else:
-                try:
-                    items = newlist(lgt_estimate)
-                except MemoryError:
-                    items = [] # it might have lied
-        else:
-            items = [None] * expected_length
+                break  # done
+            items.append(w_item)
+        #
+        return items
+
+    @jit.dont_look_inside
+    def _unpackiterable_known_length(self, w_iterator, expected_length):
+        # Unpack a known length list, without letting the JIT look inside.
+        # Implemented by just calling the @jit.unroll_safe version, but
+        # the JIT stopped looking inside already.
+        return self._unpackiterable_known_length_jitlook(w_iterator,
+                                                         expected_length)
+
+    @jit.unroll_safe
+    def _unpackiterable_known_length_jitlook(self, w_iterator,
+                                             expected_length):
+        items = [None] * expected_length
         idx = 0
         while True:
             try:
@@ -801,26 +868,29 @@
                 if not e.match(self, self.w_StopIteration):
                     raise
                 break  # done
-            if expected_length != -1 and idx == expected_length:
+            if idx == expected_length:
                 raise OperationError(self.w_ValueError,
-                                     self.wrap("too many values to unpack"))
-            if expected_length == -1:
-                items.append(w_item)
-            else:
-                items[idx] = w_item
+                                    self.wrap("too many values to unpack"))
+            items[idx] = w_item
             idx += 1
-        if expected_length != -1 and idx < expected_length:
+        if idx < expected_length:
             if idx == 1:
                 plural = ""
             else:
                 plural = "s"
-            raise OperationError(self.w_ValueError,
-                      self.wrap("need more than %d value%s to unpack" %
-                                (idx, plural)))
+            raise operationerrfmt(self.w_ValueError,
+                                  "need more than %d value%s to unpack",
+                                  idx, plural)
         return items
 
-    unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable,
-                                            'unpackiterable_unroll'))
+    def unpackiterable_unroll(self, w_iterable, expected_length):
+        # Like unpackiterable(), but for the cases where we have
+        # an expected_length and want to unroll when JITted.
+        # Returns a fixed-size list.
+        w_iterator = self.iter(w_iterable)
+        assert expected_length != -1
+        return self._unpackiterable_known_length_jitlook(w_iterator,
+                                                         expected_length)
 
     def fixedview(self, w_iterable, expected_length=-1):
         """ A fixed list view of w_iterable. Don't modify the result
@@ -835,6 +905,16 @@
         """
         return self.unpackiterable(w_iterable, expected_length)
 
+    def listview_str(self, w_list):
+        """ Return a list of unwrapped strings out of a list of strings. If the
+        argument is not a list or does not contain only strings, return None.
+        May return None anyway.
+        """
+        return None
+
+    def newlist_str(self, list_s):
+        return self.newlist([self.wrap(s) for s in list_s])
+
     @jit.unroll_safe
     def exception_match(self, w_exc_type, w_check_class):
         """Checks if the given exception type matches 'w_check_class'."""
@@ -969,9 +1049,6 @@
     def isinstance_w(self, w_obj, w_type):
         return self.is_true(self.isinstance(w_obj, w_type))
 
-    def id(self, w_obj):
-        return self.wrap(compute_unique_id(w_obj))
-
     # The code below only works
     # for the simple case (new-style instance).
     # These methods are patched with the full logic by the __builtin__
@@ -1543,6 +1620,8 @@
     'UnicodeError',
     'ValueError',
     'ZeroDivisionError',
+    'UnicodeEncodeError',
+    'UnicodeDecodeError',
     ]
 
 ## Irregular part of the interface:
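
The baseobjspace.py changes above invert the old relationship between is_() and is_w(): identity is now answered by a method call on the right-hand operand (with a plain "self is w_other" default), and id() first asks the object for an immutable_unique_id() before falling back to compute_unique_id(). A hypothetical override, only to show the shape of the two hooks (W_Flavour and its field are invented for this sketch):

    from pypy.interpreter.baseobjspace import Wrappable

    class W_Flavour(Wrappable):
        """Invented example: a wrapped value for which 'x is y' should mean
        value identity and id(x) should be stable across re-wrapping."""
        def __init__(self, value):
            self.value = value

        def is_w(self, space, w_other):
            # space.is_w(w_other, self) dispatches here as self.is_w(space, w_other)
            return isinstance(w_other, W_Flavour) and w_other.value == self.value

        def immutable_unique_id(self, space):
            # a non-None result short-circuits compute_unique_id() in space.id()
            return space.wrap(self.value)
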
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -1,14 +1,15 @@
+from pypy.interpreter.baseobjspace import Wrappable
 from pypy.interpreter.error import OperationError
-from pypy.interpreter.baseobjspace import Wrappable
 from pypy.interpreter.gateway import NoneNotWrapped
+from pypy.interpreter.pyopcode import LoopBlock
 from pypy.rlib import jit
-from pypy.interpreter.pyopcode import LoopBlock
+from pypy.rlib.objectmodel import specialize
 
 
 class GeneratorIterator(Wrappable):
     "An iterator created by a generator."
     _immutable_fields_ = ['pycode']
-    
+
     def __init__(self, frame):
         self.space = frame.space
         self.frame = frame     # turned into None when frame_finished_execution
@@ -81,7 +82,7 @@
             # if the frame is now marked as finished, it was RETURNed from
             if frame.frame_finished_execution:
                 self.frame = None
-                raise OperationError(space.w_StopIteration, space.w_None) 
+                raise OperationError(space.w_StopIteration, space.w_None)
             else:
                 return w_result     # YIELDed
         finally:
@@ -97,21 +98,21 @@
     def throw(self, w_type, w_val, w_tb):
         from pypy.interpreter.pytraceback import check_traceback
         space = self.space
-        
+
         msg = "throw() third argument must be a traceback object"
         if space.is_w(w_tb, space.w_None):
             tb = None
         else:
             tb = check_traceback(space, w_tb, msg)
-       
+
         operr = OperationError(w_type, w_val, tb)
         operr.normalize_exception(space)
         return self.send_ex(space.w_None, operr)
-             
+
     def descr_next(self):
         """x.next() -> the next value, or raise StopIteration"""
         return self.send_ex(self.space.w_None)
- 
+
     def descr_close(self):
         """x.close(arg) -> raise GeneratorExit inside generator."""
         assert isinstance(self, GeneratorIterator)
@@ -124,7 +125,7 @@
                     e.match(space, space.w_GeneratorExit):
                 return space.w_None
             raise
-        
+
         if w_retval is not None:
             msg = "generator ignored GeneratorExit"
             raise OperationError(space.w_RuntimeError, space.wrap(msg))
@@ -155,3 +156,44 @@
                                                  "interrupting generator of ")
                     break
                 block = block.previous
+
+    # Results can be either an RPython list of W_Root, or it can be an
+    # app-level W_ListObject, which also has an append() method, that's why we
+    # generate 2 versions of the function and 2 jit drivers.
+    def _create_unpack_into():
+        jitdriver = jit.JitDriver(greens=['pycode'],
+                                  reds=['self', 'frame', 'results'])
+        def unpack_into(self, results):
+            """This is a hack for performance: runs the generator and collects
+            all produced items in a list."""
+            # XXX copied and simplified version of send_ex()
+            space = self.space
+            if self.running:
+                raise OperationError(space.w_ValueError,
+                                     space.wrap('generator already executing'))
+            frame = self.frame
+            if frame is None:    # already finished
+                return
+            self.running = True
+            try:
+                pycode = self.pycode
+                while True:
+                    jitdriver.jit_merge_point(self=self, frame=frame,
+                                              results=results, pycode=pycode)
+                    try:
+                        w_result = frame.execute_frame(space.w_None)
+                    except OperationError, e:
+                        if not e.match(space, space.w_StopIteration):
+                            raise
+                        break
+                    # if the frame is now marked as finished, it was RETURNed from
+                    if frame.frame_finished_execution:
+                        break
+                    results.append(w_result)     # YIELDed
+            finally:
+                frame.f_backref = jit.vref_None
+                self.running = False
+                self.frame = None
+        return unpack_into
+    unpack_into = _create_unpack_into()
+    unpack_into_w = _create_unpack_into()
\ No newline at end of file
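
The new unpack_into()/unpack_into_w() pair above is what the generator fast path added to ObjSpace.unpackiterable() (earlier baseobjspace.py hunk) relies on: the caller passes in the list to fill and the generator is run to exhaustion. The call-site shape, mirroring that hunk rather than introducing a new API:

    from pypy.interpreter.generator import GeneratorIterator

    def drain_if_generator(w_iterator):
        # mirrors the "xxx special hack for speed" branch in unpackiterable()
        if isinstance(w_iterator, GeneratorIterator):
            lst_w = []                     # interp-level list of W_Root
            w_iterator.unpack_into(lst_w)  # appends every YIELDed value
            return lst_w
        return None
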
diff --git a/pypy/interpreter/pyframe.py b/pypy/interpreter/pyframe.py
--- a/pypy/interpreter/pyframe.py
+++ b/pypy/interpreter/pyframe.py
@@ -10,7 +10,7 @@
 from pypy.rlib.objectmodel import we_are_translated, instantiate
 from pypy.rlib.jit import hint
 from pypy.rlib.debug import make_sure_not_resized, check_nonneg
-from pypy.rlib.rarithmetic import intmask
+from pypy.rlib.rarithmetic import intmask, r_uint
 from pypy.rlib import jit
 from pypy.tool import stdlib_opcode
 from pypy.tool.stdlib_opcode import host_bytecode_spec
@@ -167,7 +167,7 @@
                 # Execution starts just after the last_instr.  Initially,
                 # last_instr is -1.  After a generator suspends it points to
                 # the YIELD_VALUE instruction.
-                next_instr = self.last_instr + 1
+                next_instr = r_uint(self.last_instr + 1)
                 if next_instr != 0:
                     self.pushvalue(w_inputvalue)
             #
@@ -691,6 +691,7 @@
     handlerposition = space.int_w(w_handlerposition)
     valuestackdepth = space.int_w(w_valuestackdepth)
     assert valuestackdepth >= 0
+    assert handlerposition >= 0
     blk = instantiate(get_block_class(opname))
     blk.handlerposition = handlerposition
     blk.valuestackdepth = valuestackdepth
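
A small plain-Python sketch of the resume bookkeeping the pyframe.py hunk above is about (the helper name is invented; the real code additionally wraps the result in r_uint() so the translator and JIT can treat the program counter as non-negative):

    def resume_position(last_instr):
        # Execution starts just after last_instr.  A fresh frame has
        # last_instr == -1 and resumes at instruction 0; a suspended
        # generator has last_instr pointing at its YIELD_VALUE opcode.
        next_instr = last_instr + 1         # wrapped in r_uint() in the real code
        pushes_sent_value = (next_instr != 0)
        return next_instr, pushes_sent_value

    assert resume_position(-1) == (0, False)    # brand-new frame
    assert resume_position(41) == (42, True)    # resuming after a yield at 41
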
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -837,6 +837,7 @@
         raise Yield
 
     def jump_absolute(self, jumpto, next_instr, ec):
+        check_nonneg(jumpto)
         return jumpto
 
     def JUMP_FORWARD(self, jumpby, next_instr):
@@ -1278,7 +1279,7 @@
 
     def handle(self, frame, unroller):
         next_instr = self.really_handle(frame, unroller)   # JIT hack
-        return next_instr
+        return r_uint(next_instr)
 
     def really_handle(self, frame, unroller):
         """ Purely abstract method
diff --git a/pypy/interpreter/test/test_executioncontext.py b/pypy/interpreter/test/test_executioncontext.py
--- a/pypy/interpreter/test/test_executioncontext.py
+++ b/pypy/interpreter/test/test_executioncontext.py
@@ -292,7 +292,7 @@
         import os, sys
         print sys.executable, self.tmpfile
         if sys.platform == "win32":
-            cmdformat = '""%s" "%s""'    # excellent! tons of "!
+            cmdformat = '"%s" "%s"'
         else:
             cmdformat = "'%s' '%s'"
         g = os.popen(cmdformat % (sys.executable, self.tmpfile), 'r')
diff --git a/pypy/interpreter/test/test_function.py b/pypy/interpreter/test/test_function.py
--- a/pypy/interpreter/test/test_function.py
+++ b/pypy/interpreter/test/test_function.py
@@ -587,7 +587,7 @@
         assert isinstance(meth2, Method)
         assert meth2.call_args(args) == obj1
         # Check method returned from unbound_method.__get__()
-        w_meth3 = descr_function_get(space, func, None, space.type(obj2))
+        w_meth3 = descr_function_get(space, func, space.w_None, space.type(obj2))
         meth3 = space.unwrap(w_meth3)
         w_meth4 = meth3.descr_method_get(obj2, space.w_None)
         meth4 = space.unwrap(w_meth4)
diff --git a/pypy/interpreter/test/test_generator.py b/pypy/interpreter/test/test_generator.py
--- a/pypy/interpreter/test/test_generator.py
+++ b/pypy/interpreter/test/test_generator.py
@@ -117,7 +117,7 @@
         g = f()
         raises(NameError, g.throw, NameError, "Error", None)
 
-    
+
     def test_throw_fail(self):
         def f():
             yield 1
@@ -129,7 +129,7 @@
             yield 1
         g = f()
         raises(TypeError, g.throw, list())
- 
+
     def test_throw_fail3(self):
         def f():
             yield 1
@@ -188,7 +188,7 @@
         g = f()
         g.next()
         raises(NameError, g.close)
- 
+
     def test_close_fail(self):
         def f():
             try:
@@ -267,3 +267,15 @@
         assert r.startswith("<generator object myFunc at 0x")
         assert list(g) == [1]
         assert repr(g) == r
+
+    def test_unpackiterable_gen(self):
+        g = (i*i for i in range(-5, 3))
+        assert set(g) == set([0, 1, 4, 9, 16, 25])
+        assert set(g) == set()
+        assert set(i for i in range(0)) == set()
+
+    def test_explicit_stop_iteration_unpackiterable(self):
+        def f():
+            yield 1
+            raise StopIteration
+        assert tuple(f()) == (1,)
\ No newline at end of file
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -63,10 +63,13 @@
     def test_unpackiterable(self):
         space = self.space
         w = space.wrap
-        l = [w(1), w(2), w(3), w(4)]
+        l = [space.newlist([]) for l in range(4)]
         w_l = space.newlist(l)
-        assert space.unpackiterable(w_l) == l
-        assert space.unpackiterable(w_l, 4) == l
+        l1 = space.unpackiterable(w_l)
+        l2 = space.unpackiterable(w_l, 4)
+        for i in range(4):
+            assert space.is_w(l1[i], l[i])
+            assert space.is_w(l2[i], l[i])
         err = raises(OperationError, space.unpackiterable, w_l, 3)
         assert err.value.match(space, space.w_ValueError)
         err = raises(OperationError, space.unpackiterable, w_l, 5)
diff --git a/pypy/interpreter/test/test_typedef.py b/pypy/interpreter/test/test_typedef.py
--- a/pypy/interpreter/test/test_typedef.py
+++ b/pypy/interpreter/test/test_typedef.py
@@ -2,7 +2,7 @@
 from pypy.interpreter import typedef
 from pypy.tool.udir import udir
 from pypy.interpreter.baseobjspace import Wrappable
-from pypy.interpreter.gateway import ObjSpace
+from pypy.interpreter.gateway import ObjSpace, interp2app
 
 # this test isn't so much to test that the objspace interface *works*
 # -- it's more to test that it's *there*
@@ -260,6 +260,50 @@
         gc.collect(); gc.collect()
         assert space.unwrap(w_seen) == [6, 2]
 
+    def test_multiple_inheritance(self):
+        class W_A(Wrappable):
+            a = 1
+            b = 2
+        class W_C(W_A):
+            b = 3
+        W_A.typedef = typedef.TypeDef("A",
+            a = typedef.interp_attrproperty("a", cls=W_A),
+            b = typedef.interp_attrproperty("b", cls=W_A),
+        )
+        class W_B(Wrappable):
+            pass
+        def standalone_method(space, w_obj):
+            if isinstance(w_obj, W_A):
+                return space.w_True
+            else:
+                return space.w_False
+        W_B.typedef = typedef.TypeDef("B",
+            c = interp2app(standalone_method)
+        )
+        W_C.typedef = typedef.TypeDef("C", (W_A.typedef, W_B.typedef,))
+
+        w_o1 = self.space.wrap(W_C())
+        w_o2 = self.space.wrap(W_B())
+        w_c = self.space.gettypefor(W_C)
+        w_b = self.space.gettypefor(W_B)
+        w_a = self.space.gettypefor(W_A)
+        assert w_c.mro_w == [
+            w_c,
+            w_a,
+            w_b,
+            self.space.w_object,
+        ]
+        for w_tp in w_c.mro_w:
+            assert self.space.isinstance_w(w_o1, w_tp)
+        def assert_attr(w_obj, name, value):
+            assert self.space.unwrap(self.space.getattr(w_obj, self.space.wrap(name))) == value
+        def assert_method(w_obj, name, value):
+            assert self.space.unwrap(self.space.call_method(w_obj, name)) == value
+        assert_attr(w_o1, "a", 1)
+        assert_attr(w_o1, "b", 3)
+        assert_method(w_o1, "c", True)
+        assert_method(w_o2, "c", False)
+
 
 class AppTestTypeDef:
 
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -15,13 +15,19 @@
     def __init__(self, __name, __base=None, **rawdict):
         "NOT_RPYTHON: initialization-time only"
         self.name = __name
-        self.base = __base
+        if __base is None:
+            bases = []
+        elif isinstance(__base, tuple):
+            bases = list(__base)
+        else:
+            bases = [__base]
+        self.bases = bases
         self.hasdict = '__dict__' in rawdict
         self.weakrefable = '__weakref__' in rawdict
         self.doc = rawdict.pop('__doc__', None)
-        if __base is not None:
-            self.hasdict     |= __base.hasdict
-            self.weakrefable |= __base.weakrefable
+        for base in bases:
+            self.hasdict     |= base.hasdict
+            self.weakrefable |= base.weakrefable
         self.rawdict = {}
         self.acceptable_as_base_class = '__new__' in rawdict
         self.applevel_subclasses_base = None
@@ -48,7 +54,11 @@
 #  Hash support
 
 def default_identity_hash(space, w_obj):
-    return space.wrap(compute_identity_hash(w_obj))
+    w_unique_id = w_obj.immutable_unique_id(space)
+    if w_unique_id is None:     # common case
+        return space.wrap(compute_identity_hash(w_obj))
+    else:
+        return space.hash(w_unique_id)
 
 # ____________________________________________________________
 #
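
A minimal plain-Python sketch of the dispatch added to default_identity_hash() above (the example classes are invented; only the shape of the immutable_unique_id() protocol mirrors the interp-level code): objects that report an immutable unique id hash like that id, everything else keeps the plain identity hash.

    class Plain(object):
        def immutable_unique_id(self):
            return None                     # common case: no value-based identity

    class SmallIntLike(object):
        def __init__(self, value):
            self.value = value
        def immutable_unique_id(self):
            return ('int', self.value)      # some immutable, value-based id

    def default_identity_hash(obj):
        unique_id = obj.immutable_unique_id()
        if unique_id is None:               # common case
            return object.__hash__(obj)
        return hash(unique_id)

    assert (default_identity_hash(SmallIntLike(7)) ==
            default_identity_hash(SmallIntLike(7)))
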
diff --git a/pypy/jit/backend/conftest.py b/pypy/jit/backend/conftest.py
--- a/pypy/jit/backend/conftest.py
+++ b/pypy/jit/backend/conftest.py
@@ -12,7 +12,7 @@
                     help="choose a fixed random seed")
     group.addoption('--backend', action="store",
                     default='llgraph',
-                    choices=['llgraph', 'x86'],
+                    choices=['llgraph', 'cpu'],
                     dest="backend",
                     help="select the backend to run the functions with")
     group.addoption('--block-length', action="store", type="int",
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
--- a/pypy/jit/backend/llgraph/llimpl.py
+++ b/pypy/jit/backend/llgraph/llimpl.py
@@ -8,6 +8,7 @@
 from pypy.objspace.flow.model import Variable, Constant
 from pypy.annotation import model as annmodel
 from pypy.jit.metainterp.history import REF, INT, FLOAT
+from pypy.jit.metainterp import history
 from pypy.jit.codewriter import heaptracker
 from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi
 from pypy.rpython.ootypesystem import ootype
@@ -20,6 +21,7 @@
 from pypy.jit.backend.llgraph import symbolic
 from pypy.jit.codewriter import longlong
 
+from pypy.rlib import libffi, clibffi
 from pypy.rlib.objectmodel import ComputedIntSymbolic, we_are_translated
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rlib.rarithmetic import r_longlong, r_ulonglong, r_uint
@@ -47,6 +49,11 @@
         value._the_opaque_pointer = op
         return op
 
+def _normalize(value):
+    if isinstance(value, lltype._ptr):
+        value = lltype.top_container(value._obj)
+    return value
+
 def from_opaque_string(s):
     if isinstance(s, str):
         return s
@@ -321,16 +328,24 @@
     _variables.append(v)
     return r
 
+def compile_started_vars(clt):
+    if not hasattr(clt, '_debug_argtypes'):    # only when compiling the loop
+        argtypes = [v.concretetype for v in _variables]
+        try:
+            clt._debug_argtypes = argtypes
+        except AttributeError:    # when 'clt' is actually a translated
+            pass                  # GcStruct
+
 def compile_add(loop, opnum):
     loop = _from_opaque(loop)
     loop.operations.append(Operation(opnum))
 
-def compile_add_descr(loop, ofs, type, arg_types):
+def compile_add_descr(loop, ofs, type, arg_types, extrainfo, width):
     from pypy.jit.backend.llgraph.runner import Descr
     loop = _from_opaque(loop)
     op = loop.operations[-1]
     assert isinstance(type, str) and len(type) == 1
-    op.descr = Descr(ofs, type, arg_types=arg_types)
+    op.descr = Descr(ofs, type, arg_types=arg_types, extrainfo=extrainfo, width=width)
 
 def compile_add_descr_arg(loop, ofs, type, arg_types):
     from pypy.jit.backend.llgraph.runner import Descr
@@ -346,6 +361,16 @@
     op = loop.operations[-1]
     op.descr = weakref.ref(descr)
 
+TARGET_TOKENS = weakref.WeakKeyDictionary()
+
+def compile_add_target_token(loop, descr, clt):
+    # here, 'clt' is the compiled_loop_token of the original loop that
+    # we are compiling
+    loop = _from_opaque(loop)
+    op = loop.operations[-1]
+    descrobj = _normalize(descr)
+    TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt
+
 def compile_add_var(loop, intvar):
     loop = _from_opaque(loop)
     op = loop.operations[-1]
@@ -380,13 +405,25 @@
     _variables.append(v)
     return r
 
-def compile_add_jump_target(loop, loop_target):
+def compile_add_jump_target(loop, targettoken, source_clt):
     loop = _from_opaque(loop)
-    loop_target = _from_opaque(loop_target)
+    descrobj = _normalize(targettoken)
+    (loop_target, target_opindex, target_inputargs, target_clt
+        ) = TARGET_TOKENS[descrobj]
+    #
+    try:
+        assert source_clt._debug_argtypes == target_clt._debug_argtypes
+    except AttributeError:   # when translated
+        pass
+    #
     op = loop.operations[-1]
     op.jump_target = loop_target
+    op.jump_target_opindex = target_opindex
+    op.jump_target_inputargs = target_inputargs
     assert op.opnum == rop.JUMP
-    assert len(op.args) == len(loop_target.inputargs)
+    assert [v.concretetype for v in op.args] == (
+           [v.concretetype for v in target_inputargs])
+    #
     if loop_target == loop:
         log.info("compiling new loop")
     else:
@@ -520,10 +557,11 @@
                 self.opindex += 1
                 continue
             if op.opnum == rop.JUMP:
-                assert len(op.jump_target.inputargs) == len(args)
-                self.env = dict(zip(op.jump_target.inputargs, args))
+                inputargs = op.jump_target_inputargs
+                assert len(inputargs) == len(args)
+                self.env = dict(zip(inputargs, args))
                 self.loop = op.jump_target
-                self.opindex = 0
+                self.opindex = op.jump_target_opindex
                 _stats.exec_jumps += 1
             elif op.opnum == rop.FINISH:
                 if self.verbose:
@@ -616,6 +654,15 @@
         #
         return _op_default_implementation
 
+    def op_label(self, _, *args):
+        op = self.loop.operations[self.opindex]
+        assert op.opnum == rop.LABEL
+        assert len(op.args) == len(args)
+        newenv = {}
+        for v, value in zip(op.args, args):
+            newenv[v] = value
+        self.env = newenv
+
     def op_debug_merge_point(self, _, *args):
         from pypy.jit.metainterp.warmspot import get_stats
         try:
@@ -825,6 +872,16 @@
         else:
             raise NotImplementedError
 
+    def op_getinteriorfield_raw(self, descr, array, index):
+        if descr.typeinfo == REF:
+            return do_getinteriorfield_raw_ptr(array, index, descr.width, descr.ofs)
+        elif descr.typeinfo == INT:
+            return do_getinteriorfield_raw_int(array, index, descr.width, descr.ofs)
+        elif descr.typeinfo == FLOAT:
+            return do_getinteriorfield_raw_float(array, index, descr.width, descr.ofs)
+        else:
+            raise NotImplementedError
+
     def op_setinteriorfield_gc(self, descr, array, index, newvalue):
         if descr.typeinfo == REF:
             return do_setinteriorfield_gc_ptr(array, index, descr.ofs,
@@ -838,6 +895,16 @@
         else:
             raise NotImplementedError
 
+    def op_setinteriorfield_raw(self, descr, array, index, newvalue):
+        if descr.typeinfo == REF:
+            return do_setinteriorfield_raw_ptr(array, index, newvalue, descr.width, descr.ofs)
+        elif descr.typeinfo == INT:
+            return do_setinteriorfield_raw_int(array, index, newvalue, descr.width, descr.ofs)
+        elif descr.typeinfo == FLOAT:
+            return do_setinteriorfield_raw_float(array, index, newvalue, descr.width, descr.ofs)
+        else:
+            raise NotImplementedError
+
     def op_setfield_gc(self, fielddescr, struct, newvalue):
         if fielddescr.typeinfo == REF:
             do_setfield_gc_ptr(struct, fielddescr.ofs, newvalue)
@@ -938,6 +1005,7 @@
         self._may_force = self.opindex
         try:
             inpargs = _from_opaque(ctl.compiled_version).inputargs
+            assert len(inpargs) == len(args)
             for i, inparg in enumerate(inpargs):
                 TYPE = inparg.concretetype
                 if TYPE is lltype.Signed:
@@ -1114,7 +1182,9 @@
     if isinstance(TYPE, lltype.Ptr):
         if isinstance(x, (int, long, llmemory.AddressAsInt)):
             x = llmemory.cast_int_to_adr(x)
-        if TYPE is rffi.VOIDP or TYPE.TO._hints.get("uncast_on_llgraph"):
+        if TYPE is rffi.VOIDP or (
+                hasattr(TYPE.TO, '_hints') and
+                TYPE.TO._hints.get("uncast_on_llgraph")):
             # assume that we want a "C-style" cast, without typechecking the value
             return rffi.cast(TYPE, x)
         return llmemory.cast_adr_to_ptr(x, TYPE)
@@ -1401,6 +1471,18 @@
     struct = array._obj.container.getitem(index)
     return cast_to_ptr(_getinteriorfield_gc(struct, fieldnum))
 
+def _getinteriorfield_raw(ffitype, array, index, width, ofs):
+    addr = rffi.cast(rffi.VOIDP, array)
+    return libffi.array_getitem(ffitype, width, addr, index, ofs)
+
+def do_getinteriorfield_raw_int(array, index, width, ofs):
+    res = _getinteriorfield_raw(libffi.types.slong, array, index, width, ofs)
+    return res
+
+def do_getinteriorfield_raw_float(array, index, width, ofs):
+    res = _getinteriorfield_raw(libffi.types.double, array, index, width, ofs)
+    return res
+
 def _getfield_raw(struct, fieldnum):
     STRUCT, fieldname = symbolic.TokenToField[fieldnum]
     ptr = cast_from_int(lltype.Ptr(STRUCT), struct)
@@ -1477,7 +1559,19 @@
     return do_setinteriorfield_gc
 do_setinteriorfield_gc_int = new_setinteriorfield_gc(cast_from_int)
 do_setinteriorfield_gc_float = new_setinteriorfield_gc(cast_from_floatstorage)
-do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr)        
+do_setinteriorfield_gc_ptr = new_setinteriorfield_gc(cast_from_ptr)
+
+def new_setinteriorfield_raw(cast_func, ffitype):
+    def do_setinteriorfield_raw(array, index, newvalue, width, ofs):
+        addr = rffi.cast(rffi.VOIDP, array)
+        for TYPE, ffitype2 in clibffi.ffitype_map:
+            if ffitype2 is ffitype:
+                newvalue = cast_func(TYPE, newvalue)
+                break
+        return libffi.array_setitem(ffitype, width, addr, index, ofs, newvalue)
+    return do_setinteriorfield_raw
+do_setinteriorfield_raw_int = new_setinteriorfield_raw(cast_from_int, libffi.types.slong)
+do_setinteriorfield_raw_float = new_setinteriorfield_raw(cast_from_floatstorage, libffi.types.double)
 
 def do_setfield_raw_int(struct, fieldnum, newvalue):
     STRUCT, fieldname = symbolic.TokenToField[fieldnum]
@@ -1741,9 +1835,11 @@
 setannotation(compile_start_int_var, annmodel.SomeInteger())
 setannotation(compile_start_ref_var, annmodel.SomeInteger())
 setannotation(compile_start_float_var, annmodel.SomeInteger())
+setannotation(compile_started_vars, annmodel.s_None)
 setannotation(compile_add, annmodel.s_None)
 setannotation(compile_add_descr, annmodel.s_None)
 setannotation(compile_add_descr_arg, annmodel.s_None)
+setannotation(compile_add_target_token, annmodel.s_None)
 setannotation(compile_add_var, annmodel.s_None)
 setannotation(compile_add_int_const, annmodel.s_None)
 setannotation(compile_add_ref_const, annmodel.s_None)
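
A toy, plain-Python model of the LABEL/JUMP plumbing introduced in llimpl.py above (all names and the mini instruction set are invented): a jump no longer restarts the target loop at operation 0, it resolves a target token to a recorded operation index and rebinds the environment to the label's input variables.

    TARGET_TOKENS = {}                      # token -> (opindex, input variable names)

    def add_target_token(token, opindex, inputvars):
        TARGET_TOKENS[token] = (opindex, inputvars)

    def execute(ops, env):
        opindex = 0
        while True:
            op = ops[opindex]
            if op[0] == 'label':            # ('label',): env already rebound by the jump
                pass
            elif op[0] == 'inc':            # ('inc', var)
                env[op[1]] += 1
            elif op[0] == 'jump_if_lt':     # ('jump_if_lt', var, limit, token, argvars)
                _, var, limit, token, argvars = op
                if env[var] < limit:
                    opindex, inputvars = TARGET_TOKENS[token]
                    env = dict(zip(inputvars, [env[v] for v in argvars]))
                    continue
            elif op[0] == 'finish':         # ('finish', var)
                return env[op[1]]
            opindex += 1

    ops = [('label',), ('inc', 'i'),
           ('jump_if_lt', 'i', 5, 'tok', ['i']),
           ('finish', 'i')]
    add_target_token('tok', 0, ['i'])       # jumping to 'tok' resumes at op 0
    assert execute(ops, {'i': 0}) == 5
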
diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py
--- a/pypy/jit/backend/llgraph/runner.py
+++ b/pypy/jit/backend/llgraph/runner.py
@@ -23,8 +23,10 @@
 class Descr(history.AbstractDescr):
 
     def __init__(self, ofs, typeinfo, extrainfo=None, name=None,
-                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0):
+                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0, width=-1):
+
         self.ofs = ofs
+        self.width = width
         self.typeinfo = typeinfo
         self.extrainfo = extrainfo
         self.name = name
@@ -35,7 +37,7 @@
     def get_arg_types(self):
         return self.arg_types
 
-    def get_return_type(self):
+    def get_result_type(self):
         return self.typeinfo
 
     def get_extra_info(self):
@@ -119,14 +121,14 @@
         return False
 
     def getdescr(self, ofs, typeinfo='?', extrainfo=None, name=None,
-                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0):
+                 arg_types=None, count_fields_if_immut=-1, ffi_flags=0, width=-1):
         key = (ofs, typeinfo, extrainfo, name, arg_types,
-               count_fields_if_immut, ffi_flags)
+               count_fields_if_immut, ffi_flags, width)
         try:
             return self._descrs[key]
         except KeyError:
             descr = Descr(ofs, typeinfo, extrainfo, name, arg_types,
-                          count_fields_if_immut, ffi_flags)
+                          count_fields_if_immut, ffi_flags, width)
             self._descrs[key] = descr
             return descr
 
@@ -136,29 +138,30 @@
         clt = original_loop_token.compiled_loop_token
         clt.loop_and_bridges.append(c)
         clt.compiling_a_bridge()
-        self._compile_loop_or_bridge(c, inputargs, operations)
+        self._compile_loop_or_bridge(c, inputargs, operations, clt)
         old, oldindex = faildescr._compiled_fail
         llimpl.compile_redirect_fail(old, oldindex, c)
 
-    def compile_loop(self, inputargs, operations, looptoken, log=True, name=''):
+    def compile_loop(self, inputargs, operations, jitcell_token,
+                     log=True, name=''):
         """In a real assembler backend, this should assemble the given
         list of operations.  Here we just generate a similar CompiledLoop
         instance.  The code here is RPython, whereas the code in llimpl
         is not.
         """
         c = llimpl.compile_start()
-        clt = model.CompiledLoopToken(self, looptoken.number)
+        clt = model.CompiledLoopToken(self, jitcell_token.number)
         clt.loop_and_bridges = [c]
         clt.compiled_version = c
-        looptoken.compiled_loop_token = clt
-        self._compile_loop_or_bridge(c, inputargs, operations)
+        jitcell_token.compiled_loop_token = clt
+        self._compile_loop_or_bridge(c, inputargs, operations, clt)
 
     def free_loop_and_bridges(self, compiled_loop_token):
         for c in compiled_loop_token.loop_and_bridges:
             llimpl.mark_as_free(c)
         model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token)
 
-    def _compile_loop_or_bridge(self, c, inputargs, operations):
+    def _compile_loop_or_bridge(self, c, inputargs, operations, clt):
         var2index = {}
         for box in inputargs:
             if isinstance(box, history.BoxInt):
@@ -170,19 +173,23 @@
                 var2index[box] = llimpl.compile_start_float_var(c)
             else:
                 raise Exception("box is: %r" % (box,))
-        self._compile_operations(c, operations, var2index)
+        llimpl.compile_started_vars(clt)
+        self._compile_operations(c, operations, var2index, clt)
         return c
 
-    def _compile_operations(self, c, operations, var2index):
+    def _compile_operations(self, c, operations, var2index, clt):
         for op in operations:
             llimpl.compile_add(c, op.getopnum())
             descr = op.getdescr()
             if isinstance(descr, Descr):
                 llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo,
-                                         descr.arg_types)
-            if (isinstance(descr, history.LoopToken) and
-                op.getopnum() != rop.JUMP):
+                                         descr.arg_types, descr.extrainfo,
+                                         descr.width)
+            if isinstance(descr, history.JitCellToken):
+                assert op.getopnum() != rop.JUMP
                 llimpl.compile_add_loop_token(c, descr)
+            if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL:
+                llimpl.compile_add_target_token(c, descr, clt)
             if self.is_oo and isinstance(descr, (OODescr, MethDescr)):
                 # hack hack, not rpython
                 c._obj.externalobj.operations[-1].setdescr(descr)
@@ -236,9 +243,7 @@
         assert op.is_final()
         if op.getopnum() == rop.JUMP:
             targettoken = op.getdescr()
-            assert isinstance(targettoken, history.LoopToken)
-            compiled_version = targettoken.compiled_loop_token.compiled_version
-            llimpl.compile_add_jump_target(c, compiled_version)
+            llimpl.compile_add_jump_target(c, targettoken, clt)
         elif op.getopnum() == rop.FINISH:
             faildescr = op.getdescr()
             index = self.get_fail_descr_number(faildescr)
@@ -257,21 +262,28 @@
         self.latest_frame = frame
         return fail_index
 
-    def execute_token(self, loop_token):
-        """Calls the assembler generated for the given loop.
-        Returns the ResOperation that failed, of type rop.FAIL.
-        """
-        fail_index = self._execute_token(loop_token)
-        return self.get_fail_descr_from_number(fail_index)
-
-    def set_future_value_int(self, index, intvalue):
-        llimpl.set_future_value_int(index, intvalue)
-
-    def set_future_value_ref(self, index, objvalue):
-        llimpl.set_future_value_ref(index, objvalue)
-
-    def set_future_value_float(self, index, floatvalue):
-        llimpl.set_future_value_float(index, floatvalue)
+    def make_execute_token(self, *argtypes):
+        nb_args = len(argtypes)
+        unroll_argtypes = unrolling_iterable(list(enumerate(argtypes)))
+        #
+        def execute_token(loop_token, *args):
+            assert len(args) == nb_args
+            for index, TYPE in unroll_argtypes:
+                x = args[index]
+                assert TYPE == lltype.typeOf(x)
+                if TYPE == lltype.Signed:
+                    llimpl.set_future_value_int(index, x)
+                elif TYPE == llmemory.GCREF:
+                    llimpl.set_future_value_ref(index, x)
+                elif TYPE == longlong.FLOATSTORAGE:
+                    llimpl.set_future_value_float(index, x)
+                else:
+                    assert 0
+            #
+            fail_index = self._execute_token(loop_token)
+            return self.get_fail_descr_from_number(fail_index)
+        #
+        return execute_token
 
     def get_latest_value_int(self, index):
         return llimpl.frame_int_getvalue(self.latest_frame, index)
@@ -324,10 +336,22 @@
 
     def interiorfielddescrof(self, A, fieldname):
         S = A.OF
-        ofs2 = symbolic.get_size(A)
+        width = symbolic.get_size(A)
         ofs, size = symbolic.get_field_token(S, fieldname)
         token = history.getkind(getattr(S, fieldname))
-        return self.getdescr(ofs, token[0], name=fieldname, extrainfo=ofs2)
+        return self.getdescr(ofs, token[0], name=fieldname, width=width)
+
+    def interiorfielddescrof_dynamic(self, offset, width, fieldsize,
+        is_pointer, is_float, is_signed):
+
+        if is_pointer:
+            typeinfo = REF
+        elif is_float:
+            typeinfo = FLOAT
+        else:
+            typeinfo = INT
+        # we abuse the arg_types field to distinguish dynamic and static descrs
+        return Descr(offset, typeinfo, arg_types='dynamic', name='<dynamic interior field>', width=width)
 
     def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
         arg_types = []
@@ -365,6 +389,7 @@
 
     def arraydescrof(self, A):
         assert A.OF != lltype.Void
+        assert isinstance(A, lltype.GcArray) or A._hints.get('nolength', False)
         size = symbolic.get_size(A)
         if isinstance(A.OF, lltype.Ptr) or isinstance(A.OF, lltype.Primitive):
             token = history.getkind(A.OF)[0]
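
A plain-Python sketch of the make_execute_token() shape added to the llgraph runner above (the backend object and its setters are stand-ins): the CPU builds one specialized execute_token() per tuple of argument types, so the per-argument type dispatch happens once when the closure is built instead of on every call.

    def make_execute_token(backend, *argtypes):
        setters = []
        for index, tp in enumerate(argtypes):       # resolved once, up front
            if tp is int:
                setters.append((index, backend.set_future_value_int))
            elif tp is float:
                setters.append((index, backend.set_future_value_float))
            else:
                setters.append((index, backend.set_future_value_ref))

        def execute_token(loop_token, *args):
            assert len(args) == len(argtypes)
            for index, setter in setters:
                setter(index, args[index])
            return backend.run(loop_token)          # stand-in for _execute_token()
        return execute_token

    class FakeBackend(object):
        def __init__(self):
            self.values = {}
        def set_future_value_int(self, i, x):
            self.values[i] = ('int', x)
        def set_future_value_float(self, i, x):
            self.values[i] = ('float', x)
        def set_future_value_ref(self, i, x):
            self.values[i] = ('ref', x)
        def run(self, loop_token):
            return dict(self.values)

    execute = make_execute_token(FakeBackend(), int, float)
    assert execute('token', 3, 4.5) == {0: ('int', 3), 1: ('float', 4.5)}
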
diff --git a/pypy/jit/backend/llsupport/asmmemmgr.py b/pypy/jit/backend/llsupport/asmmemmgr.py
--- a/pypy/jit/backend/llsupport/asmmemmgr.py
+++ b/pypy/jit/backend/llsupport/asmmemmgr.py
@@ -37,25 +37,25 @@
             self._add_free_block(smaller_stop, stop)
             stop = smaller_stop
             result = (start, stop)
-        self.total_mallocs += stop - start
+        self.total_mallocs += r_uint(stop - start)
         return result   # pair (start, stop)
 
     def free(self, start, stop):
         """Free a block (start, stop) returned by a previous malloc()."""
-        self.total_mallocs -= (stop - start)
+        self.total_mallocs -= r_uint(stop - start)
         self._add_free_block(start, stop)
 
     def open_malloc(self, minsize):
         """Allocate at least minsize bytes.  Returns (start, stop)."""
         result = self._allocate_block(minsize)
         (start, stop) = result
-        self.total_mallocs += stop - start
+        self.total_mallocs += r_uint(stop - start)
         return result
 
     def open_free(self, middle, stop):
         """Used for freeing the end of an open-allocated block of memory."""
         if stop - middle >= self.min_fragment:
-            self.total_mallocs -= (stop - middle)
+            self.total_mallocs -= r_uint(stop - middle)
             self._add_free_block(middle, stop)
             return True
         else:
@@ -77,7 +77,7 @@
                 # Hack to make sure that mcs are not within 32-bits of one
                 # another for testing purposes
                 rmmap.hint.pos += 0x80000000 - size
-        self.total_memory_allocated += size
+        self.total_memory_allocated += r_uint(size)
         data = rffi.cast(lltype.Signed, data)
         return self._add_free_block(data, data + size)
 
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py
--- a/pypy/jit/backend/llsupport/descr.py
+++ b/pypy/jit/backend/llsupport/descr.py
@@ -5,11 +5,7 @@
 from pypy.jit.metainterp.history import AbstractDescr, getkind
 from pypy.jit.metainterp import history
 from pypy.jit.codewriter import heaptracker, longlong
-
-# The point of the class organization in this file is to make instances
-# as compact as possible.  This is done by not storing the field size or
-# the 'is_pointer_field' flag in the instance itself but in the class
-# (in methods actually) using a few classes instead of just one.
+from pypy.jit.codewriter.longlong import is_longlong
 
 
 class GcCache(object):
@@ -19,6 +15,7 @@
         self._cache_size = {}
         self._cache_field = {}
         self._cache_array = {}
+        self._cache_arraylen = {}
         self._cache_call = {}
         self._cache_interiorfield = {}
 
@@ -26,24 +23,15 @@
         assert isinstance(STRUCT, lltype.GcStruct)
 
     def init_array_descr(self, ARRAY, arraydescr):
-        assert isinstance(ARRAY, lltype.GcArray)
+        assert (isinstance(ARRAY, lltype.GcArray) or
+                isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld)
 
 
-if lltype.SignedLongLong is lltype.Signed:
-    def is_longlong(TYPE):
-        return False
-else:
-    assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float)
-    def is_longlong(TYPE):
-        return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong)
-
 # ____________________________________________________________
 # SizeDescrs
 
 class SizeDescr(AbstractDescr):
     size = 0      # help translation
-    is_immutable = False
-
     tid = llop.combine_ushort(lltype.Signed, 0, 0)
 
     def __init__(self, size, count_fields_if_immut=-1):
@@ -77,204 +65,158 @@
         cache[STRUCT] = sizedescr
         return sizedescr
 
+
 # ____________________________________________________________
 # FieldDescrs
 
-class BaseFieldDescr(AbstractDescr):
+FLAG_POINTER  = 'P'
+FLAG_FLOAT    = 'F'
+FLAG_UNSIGNED = 'U'
+FLAG_SIGNED   = 'S'
+FLAG_STRUCT   = 'X'
+FLAG_VOID     = 'V'
+
+class FieldDescr(AbstractDescr):
+    name = ''
     offset = 0      # help translation
-    name = ''
-    _clsname = ''
+    field_size = 0
+    flag = '\x00'
 
-    def __init__(self, name, offset):
+    def __init__(self, name, offset, field_size, flag):
         self.name = name
         self.offset = offset
+        self.field_size = field_size
+        self.flag = flag
+
+    def is_pointer_field(self):
+        return self.flag == FLAG_POINTER
+
+    def is_float_field(self):
+        return self.flag == FLAG_FLOAT
+
+    def is_field_signed(self):
+        return self.flag == FLAG_SIGNED
 
     def sort_key(self):
         return self.offset
 
-    def get_field_size(self, translate_support_code):
-        raise NotImplementedError
+    def repr_of_descr(self):
+        return '<Field%s %s %s>' % (self.flag, self.name, self.offset)
 
-    _is_pointer_field = False   # unless overridden by GcPtrFieldDescr
-    _is_float_field = False     # unless overridden by FloatFieldDescr
-    _is_field_signed = False    # unless overridden by XxxFieldDescr
-
-    def is_pointer_field(self):
-        return self._is_pointer_field
-
-    def is_float_field(self):
-        return self._is_float_field
-
-    def is_field_signed(self):
-        return self._is_field_signed
-
-    def repr_of_descr(self):
-        return '<%s %s %s>' % (self._clsname, self.name, self.offset)
-
-
-class NonGcPtrFieldDescr(BaseFieldDescr):
-    _clsname = 'NonGcPtrFieldDescr'
-    def get_field_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class GcPtrFieldDescr(NonGcPtrFieldDescr):
-    _clsname = 'GcPtrFieldDescr'
-    _is_pointer_field = True
-
-def getFieldDescrClass(TYPE):
-    return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr,
-                         NonGcPtrFieldDescr, 'Field', 'get_field_size',
-                         '_is_float_field', '_is_field_signed')
 
 def get_field_descr(gccache, STRUCT, fieldname):
     cache = gccache._cache_field
     try:
         return cache[STRUCT][fieldname]
     except KeyError:
-        offset, _ = symbolic.get_field_token(STRUCT, fieldname,
-                                             gccache.translate_support_code)
+        offset, size = symbolic.get_field_token(STRUCT, fieldname,
+                                                gccache.translate_support_code)
         FIELDTYPE = getattr(STRUCT, fieldname)
+        flag = get_type_flag(FIELDTYPE)
         name = '%s.%s' % (STRUCT._name, fieldname)
-        fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset)
+        fielddescr = FieldDescr(name, offset, size, flag)
         cachedict = cache.setdefault(STRUCT, {})
         cachedict[fieldname] = fielddescr
         return fielddescr
 
+def get_type_flag(TYPE):
+    if isinstance(TYPE, lltype.Ptr):
+        if TYPE.TO._gckind == 'gc':
+            return FLAG_POINTER
+        else:
+            return FLAG_UNSIGNED
+    if isinstance(TYPE, lltype.Struct):
+        return FLAG_STRUCT
+    if TYPE is lltype.Float or is_longlong(TYPE):
+        return FLAG_FLOAT
+    if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and
+           rffi.cast(TYPE, -1) == -1):
+        return FLAG_SIGNED
+    return FLAG_UNSIGNED
+
+def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT):
+    cache = gccache._cache_arraylen
+    try:
+        return cache[ARRAY_OR_STRUCT]
+    except KeyError:
+        tsc = gccache.translate_support_code
+        (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
+        size = symbolic.get_size(lltype.Signed, tsc)
+        result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed))
+        cache[ARRAY_OR_STRUCT] = result
+        return result
+
+
 # ____________________________________________________________
 # ArrayDescrs
 
-_A = lltype.GcArray(lltype.Signed)     # a random gcarray
-_AF = lltype.GcArray(lltype.Float)     # an array of C doubles
+class ArrayDescr(AbstractDescr):
+    tid = 0
+    basesize = 0       # workaround for the annotator
+    itemsize = 0
+    lendescr = None
+    flag = '\x00'
 
-
-class BaseArrayDescr(AbstractDescr):
-    _clsname = ''
-    tid = llop.combine_ushort(lltype.Signed, 0, 0)
-
-    def get_base_size(self, translate_support_code):
-        basesize, _, _ = symbolic.get_array_token(_A, translate_support_code)
-        return basesize
-
-    def get_ofs_length(self, translate_support_code):
-        _, _, ofslength = symbolic.get_array_token(_A, translate_support_code)
-        return ofslength
-
-    def get_item_size(self, translate_support_code):
-        raise NotImplementedError
-
-    _is_array_of_pointers = False      # unless overridden by GcPtrArrayDescr
-    _is_array_of_floats   = False      # unless overridden by FloatArrayDescr
-    _is_array_of_structs  = False      # unless overridden by StructArrayDescr
-    _is_item_signed       = False      # unless overridden by XxxArrayDescr
+    def __init__(self, basesize, itemsize, lendescr, flag):
+        self.basesize = basesize
+        self.itemsize = itemsize
+        self.lendescr = lendescr    # or None, if no length
+        self.flag = flag
 
     def is_array_of_pointers(self):
-        return self._is_array_of_pointers
+        return self.flag == FLAG_POINTER
 
     def is_array_of_floats(self):
-        return self._is_array_of_floats
+        return self.flag == FLAG_FLOAT
+
+    def is_item_signed(self):
+        return self.flag == FLAG_SIGNED
 
     def is_array_of_structs(self):
-        return self._is_array_of_structs
-
-    def is_item_signed(self):
-        return self._is_item_signed
+        return self.flag == FLAG_STRUCT
 
     def repr_of_descr(self):
-        return '<%s>' % self._clsname
+        return '<Array%s %s>' % (self.flag, self.itemsize)
 
-class NonGcPtrArrayDescr(BaseArrayDescr):
-    _clsname = 'NonGcPtrArrayDescr'
-    def get_item_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
 
-class GcPtrArrayDescr(NonGcPtrArrayDescr):
-    _clsname = 'GcPtrArrayDescr'
-    _is_array_of_pointers = True
-
-class FloatArrayDescr(BaseArrayDescr):
-    _clsname = 'FloatArrayDescr'
-    _is_array_of_floats = True
-    def get_base_size(self, translate_support_code):
-        basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code)
-        return basesize
-    def get_item_size(self, translate_support_code):
-        return symbolic.get_size(lltype.Float, translate_support_code)
-
-class StructArrayDescr(BaseArrayDescr):
-    _clsname = 'StructArrayDescr'
-    _is_array_of_structs = True
-
-class BaseArrayNoLengthDescr(BaseArrayDescr):
-    def get_base_size(self, translate_support_code):
-        return 0
-
-    def get_ofs_length(self, translate_support_code):
-        return -1
-
-class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr):
-    _clsname = 'NonGcPtrArrayNoLengthDescr'
-    def get_item_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr):
-    _clsname = 'GcPtrArrayNoLengthDescr'
-    _is_array_of_pointers = True
-
-def getArrayDescrClass(ARRAY):
-    if ARRAY.OF is lltype.Float:
-        return FloatArrayDescr
-    elif isinstance(ARRAY.OF, lltype.Struct):
-        class Descr(StructArrayDescr):
-            _clsname = '%sArrayDescr' % ARRAY.OF._name
-            def get_item_size(self, translate_support_code):
-                return symbolic.get_size(ARRAY.OF, translate_support_code)
-        Descr.__name__ = Descr._clsname
-        return Descr
-    return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr,
-                         NonGcPtrArrayDescr, 'Array', 'get_item_size',
-                         '_is_array_of_floats', '_is_item_signed')
-
-def getArrayNoLengthDescrClass(ARRAY):
-    return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr,
-                         NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size',
-                         '_is_array_of_floats', '_is_item_signed')
-
-def get_array_descr(gccache, ARRAY):
+def get_array_descr(gccache, ARRAY_OR_STRUCT):
     cache = gccache._cache_array
     try:
-        return cache[ARRAY]
+        return cache[ARRAY_OR_STRUCT]
     except KeyError:
-        # we only support Arrays that are either GcArrays, or raw no-length
-        # non-gc Arrays.
-        if ARRAY._hints.get('nolength', False):
-            assert not isinstance(ARRAY, lltype.GcArray)
-            arraydescr = getArrayNoLengthDescrClass(ARRAY)()
+        tsc = gccache.translate_support_code
+        basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
+        if isinstance(ARRAY_OR_STRUCT, lltype.Array):
+            ARRAY_INSIDE = ARRAY_OR_STRUCT
         else:
-            assert isinstance(ARRAY, lltype.GcArray)
-            arraydescr = getArrayDescrClass(ARRAY)()
-        # verify basic assumption that all arrays' basesize and ofslength
-        # are equal
-        basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False)
-        assert basesize == arraydescr.get_base_size(False)
-        assert itemsize == arraydescr.get_item_size(False)
-        if not ARRAY._hints.get('nolength', False):
-            assert ofslength == arraydescr.get_ofs_length(False)
-        if isinstance(ARRAY, lltype.GcArray):
-            gccache.init_array_descr(ARRAY, arraydescr)
-        cache[ARRAY] = arraydescr
+            ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld]
+        if ARRAY_INSIDE._hints.get('nolength', False):
+            lendescr = None
+        else:
+            lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT)
+        flag = get_type_flag(ARRAY_INSIDE.OF)
+        arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag)
+        if ARRAY_OR_STRUCT._gckind == 'gc':
+            gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr)
+        cache[ARRAY_OR_STRUCT] = arraydescr
         return arraydescr
 
+
 # ____________________________________________________________
 # InteriorFieldDescr
 
 class InteriorFieldDescr(AbstractDescr):
-    arraydescr = BaseArrayDescr()     # workaround for the annotator
-    fielddescr = BaseFieldDescr('', 0)
+    arraydescr = ArrayDescr(0, 0, None, '\x00')  # workaround for the annotator
+    fielddescr = FieldDescr('', 0, 0, '\x00')
 
     def __init__(self, arraydescr, fielddescr):
+        assert arraydescr.flag == FLAG_STRUCT
         self.arraydescr = arraydescr
         self.fielddescr = fielddescr
 
+    def sort_key(self):
+        return self.fielddescr.sort_key()
+
     def is_pointer_field(self):
         return self.fielddescr.is_pointer_field()
 
@@ -284,33 +226,86 @@
     def repr_of_descr(self):
         return '<InteriorFieldDescr %s>' % self.fielddescr.repr_of_descr()
 
-def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name):
+def get_interiorfield_descr(gc_ll_descr, ARRAY, name):
     cache = gc_ll_descr._cache_interiorfield
     try:
-        return cache[(ARRAY, FIELDTP, name)]
+        return cache[(ARRAY, name)]
     except KeyError:
         arraydescr = get_array_descr(gc_ll_descr, ARRAY)
-        fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name)
+        fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name)
         descr = InteriorFieldDescr(arraydescr, fielddescr)
-        cache[(ARRAY, FIELDTP, name)] = descr
+        cache[(ARRAY, name)] = descr
         return descr
 
+def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize,
+                                    is_pointer, is_float, is_signed):
+    arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT)
+    if is_pointer:
+        assert not is_float
+        flag = FLAG_POINTER
+    elif is_float:
+        flag = FLAG_FLOAT
+    elif is_signed:
+        flag = FLAG_SIGNED
+    else:
+        flag = FLAG_UNSIGNED
+    fielddescr = FieldDescr('dynamic', offset, fieldsize, flag)
+    return InteriorFieldDescr(arraydescr, fielddescr)
+
+
 # ____________________________________________________________
 # CallDescrs
 
-class BaseCallDescr(AbstractDescr):
-    _clsname = ''
-    loop_token = None
+class CallDescr(AbstractDescr):
     arg_classes = ''     # <-- annotation hack
-    ffi_flags = 0
+    result_type = '\x00'
+    result_flag = '\x00'
+    ffi_flags = 1
+    call_stub_i = staticmethod(lambda func, args_i, args_r, args_f:
+                               0)
+    call_stub_r = staticmethod(lambda func, args_i, args_r, args_f:
+                               lltype.nullptr(llmemory.GCREF.TO))
+    call_stub_f = staticmethod(lambda func,args_i,args_r,args_f:
+                               longlong.ZEROF)
 
-    def __init__(self, arg_classes, extrainfo=None, ffi_flags=0):
-        self.arg_classes = arg_classes    # string of "r" and "i" (ref/int)
+    def __init__(self, arg_classes, result_type, result_signed, result_size,
+                 extrainfo=None, ffi_flags=1):
+        """
+            'arg_classes' is a string of characters, one per argument:
+                'i', 'r', 'f', 'L', 'S'
+
+            'result_type' is one character from the same list or 'v'
+
+            'result_signed' is a boolean True/False
+        """
+        self.arg_classes = arg_classes
+        self.result_type = result_type
+        self.result_size = result_size
         self.extrainfo = extrainfo
         self.ffi_flags = ffi_flags
+        # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which
+        # makes sense on Windows as it's the one for all the C functions
+        # we are compiling together with the JIT.  On non-Windows platforms
+        # it is just ignored anyway.
+        if result_type == 'v':
+            result_flag = FLAG_VOID
+        elif result_type == 'i':
+            if result_signed:
+                result_flag = FLAG_SIGNED
+            else:
+                result_flag = FLAG_UNSIGNED
+        elif result_type == history.REF:
+            result_flag = FLAG_POINTER
+        elif result_type == history.FLOAT or result_type == 'L':
+            result_flag = FLAG_FLOAT
+        elif result_type == 'S':
+            result_flag = FLAG_UNSIGNED
+        else:
+            raise NotImplementedError("result_type = '%s'" % (result_type,))
+        self.result_flag = result_flag
 
     def __repr__(self):
-        res = '%s(%s)' % (self.__class__.__name__, self.arg_classes)
+        res = 'CallDescr(%s)' % (self.arg_classes,)
         extraeffect = getattr(self.extrainfo, 'extraeffect', None)
         if extraeffect is not None:
             res += ' EF=%r' % extraeffect
@@ -338,16 +333,20 @@
     def get_arg_types(self):
         return self.arg_classes
 
-    def get_return_type(self):
-        return self._return_type
+    def get_result_type(self):
+        return self.result_type
 
-    def get_result_size(self, translate_support_code):
-        raise NotImplementedError
+    def get_result_size(self):
+        return self.result_size
 
     def is_result_signed(self):
-        return False    # unless overridden
+        return self.result_flag == FLAG_SIGNED
 
     def create_call_stub(self, rtyper, RESULT):
+        from pypy.rlib.clibffi import FFI_DEFAULT_ABI
+        assert self.get_call_conv() == FFI_DEFAULT_ABI, (
+            "%r: create_call_stub() with a non-default call ABI" % (self,))
+
         def process(c):
             if c == 'L':
                 assert longlong.supports_longlong
@@ -379,18 +378,26 @@
         seen = {'i': 0, 'r': 0, 'f': 0}
         args = ", ".join([process(c) for c in self.arg_classes])
 
-        if self.get_return_type() == history.INT:
+        result_type = self.get_result_type()
+        if result_type == history.INT:
             result = 'rffi.cast(lltype.Signed, res)'
-        elif self.get_return_type() == history.REF:
+            category = 'i'
+        elif result_type == history.REF:
+            assert RESULT == llmemory.GCREF   # should be ensured by the caller
             result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)'
-        elif self.get_return_type() == history.FLOAT:
+            category = 'r'
+        elif result_type == history.FLOAT:
             result = 'longlong.getfloatstorage(res)'
-        elif self.get_return_type() == 'L':
+            category = 'f'
+        elif result_type == 'L':
             result = 'rffi.cast(lltype.SignedLongLong, res)'
-        elif self.get_return_type() == history.VOID:
-            result = 'None'
-        elif self.get_return_type() == 'S':
+            category = 'f'
+        elif result_type == history.VOID:
+            result = '0'
+            category = 'i'
+        elif result_type == 'S':
             result = 'longlong.singlefloat2int(res)'
+            category = 'i'
         else:
             assert 0
         source = py.code.Source("""
@@ -404,10 +411,13 @@
         d = globals().copy()
         d.update(locals())
         exec source.compile() in d
-        self.call_stub = d['call_stub']
+        call_stub = d['call_stub']
+        # store the function into one of three attributes, to preserve
+        # type-correctness of the return value
+        setattr(self, 'call_stub_%s' % category, call_stub)
 
     def verify_types(self, args_i, args_r, args_f, return_type):
-        assert self._return_type in return_type
+        assert self.result_type in return_type
         assert (self.arg_classes.count('i') +
                 self.arg_classes.count('S')) == len(args_i or ())
         assert self.arg_classes.count('r') == len(args_r or ())
@@ -415,161 +425,56 @@
                 self.arg_classes.count('L')) == len(args_f or ())
 
     def repr_of_descr(self):
-        return '<%s>' % self._clsname
+        res = 'Call%s %d' % (self.result_type, self.result_size)
+        if self.arg_classes:
+            res += ' ' + self.arg_classes
+        if self.extrainfo:
+            res += ' EF=%d' % self.extrainfo.extraeffect
+            oopspecindex = self.extrainfo.oopspecindex
+            if oopspecindex:
+                res += ' OS=%d' % oopspecindex
+        return '<%s>' % res
 
 
-class BaseIntCallDescr(BaseCallDescr):
-    # Base class of the various subclasses of descrs corresponding to
-    # calls having a return kind of 'int' (including non-gc pointers).
-    # The inheritance hierarchy is a bit different than with other Descr
-    # classes because of the 'call_stub' attribute, which is of type
-    #
-    #     lambda func, args_i, args_r, args_f --> int/ref/float/void
-    #
-    # The purpose of BaseIntCallDescr is to be the parent of all classes
-    # in which 'call_stub' has a return kind of 'int'.
-    _return_type = history.INT
-    call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0)
-
-    _is_result_signed = False      # can be overridden in XxxCallDescr
-    def is_result_signed(self):
-        return self._is_result_signed
-
-class DynamicIntCallDescr(BaseIntCallDescr):
-    """
-    calldescr that works for every integer type, by explicitly passing it the
-    size of the result. Used only by get_call_descr_dynamic
-    """
-    _clsname = 'DynamicIntCallDescr'
-
-    def __init__(self, arg_classes, result_size, result_sign, extrainfo=None, ffi_flags=0):
-        BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags)
-        assert isinstance(result_sign, bool)
-        self._result_size = chr(result_size)
-        self._result_sign = result_sign
-
-    def get_result_size(self, translate_support_code):
-        return ord(self._result_size)
-
-    def is_result_signed(self):
-        return self._result_sign
-
-
-class NonGcPtrCallDescr(BaseIntCallDescr):
-    _clsname = 'NonGcPtrCallDescr'
-    def get_result_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class GcPtrCallDescr(BaseCallDescr):
-    _clsname = 'GcPtrCallDescr'
-    _return_type = history.REF
-    call_stub = staticmethod(lambda func, args_i, args_r, args_f:
-                             lltype.nullptr(llmemory.GCREF.TO))
-    def get_result_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class FloatCallDescr(BaseCallDescr):
-    _clsname = 'FloatCallDescr'
-    _return_type = history.FLOAT
-    call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF)
-    def get_result_size(self, translate_support_code):
-        return symbolic.get_size(lltype.Float, translate_support_code)
-
-class LongLongCallDescr(FloatCallDescr):
-    _clsname = 'LongLongCallDescr'
-    _return_type = 'L'
-
-class VoidCallDescr(BaseCallDescr):
-    _clsname = 'VoidCallDescr'
-    _return_type = history.VOID
-    call_stub = staticmethod(lambda func, args_i, args_r, args_f: None)
-    def get_result_size(self, translate_support_code):
-        return 0
-
-_SingleFloatCallDescr = None   # built lazily
-
-def getCallDescrClass(RESULT):
-    if RESULT is lltype.Void:
-        return VoidCallDescr
-    if RESULT is lltype.Float:
-        return FloatCallDescr
-    if RESULT is lltype.SingleFloat:
-        global _SingleFloatCallDescr
-        if _SingleFloatCallDescr is None:
-            assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT)
-            class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)):
-                _clsname = 'SingleFloatCallDescr'
-                _return_type = 'S'
-            _SingleFloatCallDescr = SingleFloatCallDescr
-        return _SingleFloatCallDescr
-    if is_longlong(RESULT):
-        return LongLongCallDescr
-    return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr,
-                         NonGcPtrCallDescr, 'Call', 'get_result_size',
-                         Ellipsis,  # <= floatattrname should not be used here
-                         '_is_result_signed')
-getCallDescrClass._annspecialcase_ = 'specialize:memo'
+def map_type_to_argclass(ARG, accept_void=False):
+    kind = getkind(ARG)
+    if   kind == 'int':
+        if ARG is lltype.SingleFloat: return 'S'
+        else:                         return 'i'
+    elif kind == 'ref':               return 'r'
+    elif kind == 'float':
+        if is_longlong(ARG):          return 'L'
+        else:                         return 'f'
+    elif kind == 'void':
+        if accept_void:               return 'v'
+    raise NotImplementedError('ARG = %r' % (ARG,))
 
 def get_call_descr(gccache, ARGS, RESULT, extrainfo=None):
-    arg_classes = []
-    for ARG in ARGS:
-        kind = getkind(ARG)
-        if   kind == 'int':
-            if ARG is lltype.SingleFloat:
-                arg_classes.append('S')
+    arg_classes = map(map_type_to_argclass, ARGS)
+    arg_classes = ''.join(arg_classes)
+    result_type = map_type_to_argclass(RESULT, accept_void=True)
+    RESULT_ERASED = RESULT
+    if RESULT is lltype.Void:
+        result_size = 0
+        result_signed = False
+    else:
+        if isinstance(RESULT, lltype.Ptr):
+            # avoid too many CallDescrs
+            if result_type == 'r':
+                RESULT_ERASED = llmemory.GCREF
             else:
-                arg_classes.append('i')
-        elif kind == 'ref': arg_classes.append('r')
-        elif kind == 'float':
-            if is_longlong(ARG):
-                arg_classes.append('L')
-            else:
-                arg_classes.append('f')
-        else:
-            raise NotImplementedError('ARG = %r' % (ARG,))
-    arg_classes = ''.join(arg_classes)
-    cls = getCallDescrClass(RESULT)
-    key = (cls, arg_classes, extrainfo)
+                RESULT_ERASED = llmemory.Address
+        result_size = symbolic.get_size(RESULT_ERASED,
+                                        gccache.translate_support_code)
+        result_signed = get_type_flag(RESULT) == FLAG_SIGNED
+    key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo)
     cache = gccache._cache_call
     try:
-        return cache[key]
+        calldescr = cache[key]
     except KeyError:
-        calldescr = cls(arg_classes, extrainfo)
-        calldescr.create_call_stub(gccache.rtyper, RESULT)
+        calldescr = CallDescr(arg_classes, result_type, result_signed,
+                              result_size, extrainfo)
+        calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED)
         cache[key] = calldescr
-        return calldescr
-
-
-# ____________________________________________________________
-
-def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr,
-                  nameprefix, methodname, floatattrname, signedattrname,
-                  _cache={}):
-    if isinstance(TYPE, lltype.Ptr):
-        if TYPE.TO._gckind == 'gc':
-            return GcPtrDescr
-        else:
-            return NonGcPtrDescr
-    if TYPE is lltype.SingleFloat:
-        assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE)
-        TYPE = rffi.UINT
-    try:
-        return _cache[nameprefix, TYPE]
-    except KeyError:
-        #
-        class Descr(BaseDescr):
-            _clsname = '%s%sDescr' % (TYPE._name, nameprefix)
-        Descr.__name__ = Descr._clsname
-        #
-        def method(self, translate_support_code):
-            return symbolic.get_size(TYPE, translate_support_code)
-        setattr(Descr, methodname, method)
-        #
-        if TYPE is lltype.Float or is_longlong(TYPE):
-            setattr(Descr, floatattrname, True)
-        elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and
-              rffi.cast(TYPE, -1) == -1):
-            setattr(Descr, signedattrname, True)
-        #
-        _cache[nameprefix, TYPE] = Descr
-        return Descr
+    assert repr(calldescr.result_size) == repr(result_size)
+    return calldescr
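
In the hunk above, the old family of per-result-type call descriptor classes is replaced by a single CallDescr that is memoized on the tuple (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo) in gccache._cache_call.  A minimal, self-contained sketch of the same memoize-by-key pattern; the names here are illustrative stand-ins, not the real PyPy classes:

    class CallDescrSketch(object):
        # one parameterized class instead of one subclass per result type
        def __init__(self, arg_classes, result_type, result_signed, result_size):
            self.arg_classes = arg_classes
            self.result_type = result_type
            self.result_signed = result_signed
            self.result_size = result_size

    _cache_call = {}

    def get_call_descr_sketch(arg_classes, result_type,
                              result_signed, result_size):
        # calls with identical signatures share one descriptor object
        key = (arg_classes, result_type, result_signed, result_size)
        try:
            return _cache_call[key]
        except KeyError:
            descr = CallDescrSketch(arg_classes, result_type,
                                    result_signed, result_size)
            _cache_call[key] = descr
            return descr

    assert (get_call_descr_sketch('ii', 'i', True, 4) is
            get_call_descr_sketch('ii', 'i', True, 4))
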
diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py
--- a/pypy/jit/backend/llsupport/ffisupport.py
+++ b/pypy/jit/backend/llsupport/ffisupport.py
@@ -1,14 +1,12 @@
 from pypy.rlib.rarithmetic import intmask
 from pypy.jit.metainterp import history
 from pypy.rpython.lltypesystem import rffi
-from pypy.jit.backend.llsupport.descr import (
-    DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr,
-    LongLongCallDescr, getCallDescrClass)
+from pypy.jit.backend.llsupport.descr import CallDescr
 
 class UnsupportedKind(Exception):
     pass
 
-def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo=None, ffi_flags=0):
+def get_call_descr_dynamic(cpu, ffi_args, ffi_result, extrainfo, ffi_flags):
     """Get a call descr: the types of result and args are represented by
     rlib.libffi.types.*"""
     try:
@@ -16,29 +14,13 @@
         argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args]
     except UnsupportedKind:
         return None
-    arg_classes = ''.join(argkinds)
-    if reskind == history.INT:
-        size = intmask(ffi_result.c_size)
-        signed = is_ffi_type_signed(ffi_result)
-        return DynamicIntCallDescr(arg_classes, size, signed, extrainfo,
-                                   ffi_flags=ffi_flags)
-    elif reskind == history.REF:
-        return  NonGcPtrCallDescr(arg_classes, extrainfo,
-                                  ffi_flags=ffi_flags)
-    elif reskind == history.FLOAT:
-        return FloatCallDescr(arg_classes, extrainfo,
-                              ffi_flags=ffi_flags)
-    elif reskind == history.VOID:
-        return VoidCallDescr(arg_classes, extrainfo,
-                             ffi_flags=ffi_flags)
-    elif reskind == 'L':
-        return LongLongCallDescr(arg_classes, extrainfo,
-                                 ffi_flags=ffi_flags)
-    elif reskind == 'S':
-        SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT)
-        return SingleFloatCallDescr(arg_classes, extrainfo,
-                                    ffi_flags=ffi_flags)
-    assert False
+    if reskind == history.VOID:
+        result_size = 0
+    else:
+        result_size = intmask(ffi_result.c_size)
+    argkinds = ''.join(argkinds)
+    return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result),
+                     result_size, extrainfo, ffi_flags=ffi_flags)
 
 def get_ffi_type_kind(cpu, ffi_type):
     from pypy.rlib.libffi import types
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
--- a/pypy/jit/backend/llsupport/gc.py
+++ b/pypy/jit/backend/llsupport/gc.py
@@ -1,6 +1,6 @@
 import os
 from pypy.rlib import rgc
-from pypy.rlib.objectmodel import we_are_translated
+from pypy.rlib.objectmodel import we_are_translated, specialize
 from pypy.rlib.debug import fatalerror
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr
@@ -8,52 +8,93 @@
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rpython.annlowlevel import llhelper
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
-from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr
-from pypy.jit.metainterp.history import AbstractDescr
+from pypy.jit.codewriter import heaptracker
+from pypy.jit.metainterp.history import ConstPtr, AbstractDescr
 from pypy.jit.metainterp.resoperation import ResOperation, rop
 from pypy.jit.backend.llsupport import symbolic
 from pypy.jit.backend.llsupport.symbolic import WORD
-from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr
+from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr
 from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr
-from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr
+from pypy.jit.backend.llsupport.descr import get_array_descr
 from pypy.jit.backend.llsupport.descr import get_call_descr
+from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler
 from pypy.rpython.memory.gctransform import asmgcroot
 
 # ____________________________________________________________
 
 class GcLLDescription(GcCache):
-    minimal_size_in_nursery = 0
-    get_malloc_slowpath_addr = None
 
     def __init__(self, gcdescr, translator=None, rtyper=None):
         GcCache.__init__(self, translator is not None, rtyper)
         self.gcdescr = gcdescr
+        if translator and translator.config.translation.gcremovetypeptr:
+            self.fielddescr_vtable = None
+        else:
+            self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT,
+                                                     'typeptr')
+        self._generated_functions = []
+
+    def _setup_str(self):
+        self.str_descr     = get_array_descr(self, rstr.STR)
+        self.unicode_descr = get_array_descr(self, rstr.UNICODE)
+
+    def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF):
+        """Generates a variant of malloc with the given name and the given
+        arguments.  It should return NULL if out of memory.  If it raises
+        anything, it may only be a MemoryError.
+        """
+        FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT))
+        descr = get_call_descr(self, ARGS, RESULT)
+        setattr(self, funcname, func)
+        setattr(self, funcname + '_FUNCPTR', FUNCPTR)
+        setattr(self, funcname + '_descr', descr)
+        self._generated_functions.append(funcname)
+
+    @specialize.arg(1)
+    def get_malloc_fn(self, funcname):
+        func = getattr(self, funcname)
+        FUNC = getattr(self, funcname + '_FUNCPTR')
+        return llhelper(FUNC, func)
+
+    @specialize.arg(1)
+    def get_malloc_fn_addr(self, funcname):
+        ll_func = self.get_malloc_fn(funcname)
+        return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func))
+
     def _freeze_(self):
         return True
     def initialize(self):
         pass
     def do_write_barrier(self, gcref_struct, gcref_newptr):
         pass
-    def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
-        return operations
-    def can_inline_malloc(self, descr):
-        return False
-    def can_inline_malloc_varsize(self, descr, num_elem):
+    def can_use_nursery_malloc(self, size):
         return False
     def has_write_barrier_class(self):
         return None
     def freeing_block(self, start, stop):
         pass
+    def get_nursery_free_addr(self):
+        raise NotImplementedError
+    def get_nursery_top_addr(self):
+        raise NotImplementedError
 
-    def get_funcptr_for_newarray(self):
-        return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array)
-    def get_funcptr_for_newstr(self):
-        return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str)
-    def get_funcptr_for_newunicode(self):
-        return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode)
+    def gc_malloc(self, sizedescr):
+        """Blackhole: do a 'bh_new'.  Also used for 'bh_new_with_vtable',
+        with the vtable pointer set manually afterwards."""
+        assert isinstance(sizedescr, SizeDescr)
+        return self._bh_malloc(sizedescr)
 
+    def gc_malloc_array(self, arraydescr, num_elem):
+        assert isinstance(arraydescr, ArrayDescr)
+        return self._bh_malloc_array(arraydescr, num_elem)
 
-    def record_constptrs(self, op, gcrefs_output_list):
+    def gc_malloc_str(self, num_elem):
+        return self._bh_malloc_array(self.str_descr, num_elem)
+
+    def gc_malloc_unicode(self, num_elem):
+        return self._bh_malloc_array(self.unicode_descr, num_elem)
+
+    def _record_constptrs(self, op, gcrefs_output_list):
         for i in range(op.numargs()):
             v = op.getarg(i)
             if isinstance(v, ConstPtr) and bool(v.value):
@@ -61,11 +102,27 @@
                 rgc._make_sure_does_not_move(p)
                 gcrefs_output_list.append(p)
 
+    def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
+        rewriter = GcRewriterAssembler(self, cpu)
+        newops = rewriter.rewrite(operations)
+        # record all GCREFs, because the GC (or Boehm) cannot see them and
+        # keep them alive if they end up as constants in the assembler
+        for op in newops:
+            self._record_constptrs(op, gcrefs_output_list)
+        return newops
+
 # ____________________________________________________________
 
 class GcLLDescr_boehm(GcLLDescription):
-    moving_gc = False
-    gcrootmap = None
+    kind                  = 'boehm'
+    moving_gc             = False
+    round_up              = False
+    gcrootmap             = None
+    write_barrier_descr   = None
+    fielddescr_tid        = None
+    str_type_id           = 0
+    unicode_type_id       = 0
+    get_malloc_slowpath_addr = None
 
     @classmethod
     def configure_boehm_once(cls):
@@ -76,6 +133,16 @@
         from pypy.rpython.tool import rffi_platform
         compilation_info = rffi_platform.configure_boehm()
 
+        # on some platforms GC_init is required before any other
+        # GC_* functions, call it here for the benefit of tests
+        # XXX move this to tests
+        init_fn_ptr = rffi.llexternal("GC_init",
+                                      [], lltype.Void,
+                                      compilation_info=compilation_info,
+                                      sandboxsafe=True,
+                                      _nowrapper=True)
+        init_fn_ptr()
+
         # Versions 6.x of libgc needs to use GC_local_malloc().
         # Versions 7.x of libgc removed this function; GC_malloc() has
         # the same behavior if libgc was compiled with
@@ -95,96 +162,42 @@
                                         sandboxsafe=True,
                                         _nowrapper=True)
         cls.malloc_fn_ptr = malloc_fn_ptr
-        cls.compilation_info = compilation_info
         return malloc_fn_ptr
 
     def __init__(self, gcdescr, translator, rtyper):
         GcLLDescription.__init__(self, gcdescr, translator, rtyper)
         # grab a pointer to the Boehm 'malloc' function
-        malloc_fn_ptr = self.configure_boehm_once()
-        self.funcptr_for_new = malloc_fn_ptr
+        self.malloc_fn_ptr = self.configure_boehm_once()
+        self._setup_str()
+        self._make_functions()
 
-        def malloc_array(basesize, itemsize, ofs_length, num_elem):
+    def _make_functions(self):
+
+        def malloc_fixedsize(size):
+            return self.malloc_fn_ptr(size)
+        self.generate_function('malloc_fixedsize', malloc_fixedsize,
+                               [lltype.Signed])
+
+        def malloc_array(basesize, num_elem, itemsize, ofs_length):
             try:
-                size = ovfcheck(basesize + ovfcheck(itemsize * num_elem))
+                totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem))
             except OverflowError:
                 return lltype.nullptr(llmemory.GCREF.TO)
-            res = self.funcptr_for_new(size)
-            if not res:
-                return res
-            rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem
+            res = self.malloc_fn_ptr(totalsize)
+            if res:
+                arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res)
+                arrayptr[ofs_length/WORD] = num_elem
             return res
-        self.malloc_array = malloc_array
-        self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed] * 4, llmemory.GCREF))
+        self.generate_function('malloc_array', malloc_array,
+                               [lltype.Signed] * 4)
 
+    def _bh_malloc(self, sizedescr):
+        return self.malloc_fixedsize(sizedescr.size)
 
-        (str_basesize, str_itemsize, str_ofs_length
-         ) = symbolic.get_array_token(rstr.STR, self.translate_support_code)
-        (unicode_basesize, unicode_itemsize, unicode_ofs_length
-         ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code)
-        def malloc_str(length):
-            return self.malloc_array(
-                str_basesize, str_itemsize, str_ofs_length, length
-            )
-        def malloc_unicode(length):
-            return self.malloc_array(
-                unicode_basesize, unicode_itemsize, unicode_ofs_length, length
-            )
-        self.malloc_str = malloc_str
-        self.malloc_unicode = malloc_unicode
-        self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed], llmemory.GCREF))
-
-
-        # on some platform GC_init is required before any other
-        # GC_* functions, call it here for the benefit of tests
-        # XXX move this to tests
-        init_fn_ptr = rffi.llexternal("GC_init",
-                                      [], lltype.Void,
-                                      compilation_info=self.compilation_info,
-                                      sandboxsafe=True,
-                                      _nowrapper=True)
-
-        init_fn_ptr()
-
-    def gc_malloc(self, sizedescr):
-        assert isinstance(sizedescr, BaseSizeDescr)
-        return self.funcptr_for_new(sizedescr.size)
-
-    def gc_malloc_array(self, arraydescr, num_elem):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        ofs_length = arraydescr.get_ofs_length(self.translate_support_code)
-        basesize = arraydescr.get_base_size(self.translate_support_code)
-        itemsize = arraydescr.get_item_size(self.translate_support_code)
-        return self.malloc_array(basesize, itemsize, ofs_length, num_elem)
-
-    def gc_malloc_str(self, num_elem):
-        return self.malloc_str(num_elem)
-
-    def gc_malloc_unicode(self, num_elem):
-        return self.malloc_unicode(num_elem)
-
-    def args_for_new(self, sizedescr):
-        assert isinstance(sizedescr, BaseSizeDescr)
-        return [sizedescr.size]
-
-    def args_for_new_array(self, arraydescr):
-        ofs_length = arraydescr.get_ofs_length(self.translate_support_code)
-        basesize = arraydescr.get_base_size(self.translate_support_code)
-        itemsize = arraydescr.get_item_size(self.translate_support_code)
-        return [basesize, itemsize, ofs_length]
-
-    def get_funcptr_for_new(self):
-        return self.funcptr_for_new
-
-    def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
-        # record all GCREFs too, because Boehm cannot see them and keep them
-        # alive if they end up as constants in the assembler
-        for op in operations:
-            self.record_constptrs(op, gcrefs_output_list)
-        return GcLLDescription.rewrite_assembler(self, cpu, operations,
-                                                 gcrefs_output_list)
+    def _bh_malloc_array(self, arraydescr, num_elem):
+        return self.malloc_array(arraydescr.basesize, num_elem,
+                                 arraydescr.itemsize,
+                                 arraydescr.lendescr.offset)
 
 
 # ____________________________________________________________
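
The Boehm descriptor above is the first user of the new generate_function() protocol: each malloc helper is registered once, and the attributes <name>, <name>_FUNCPTR and <name>_descr are then consumed either directly by the blackhole helpers (_bh_malloc, _bh_malloc_array) or through get_malloc_fn_addr() when the rewriter emits a CALL_MALLOC_GC.  A stripped-down sketch of that registration pattern, in plain Python with no lltype or llhelper involved:

    class MallocHelpers(object):
        def __init__(self):
            self._generated_functions = []

        def generate_function(self, funcname, func, arg_types):
            # register the helper plus the metadata the backend will need
            setattr(self, funcname, func)
            setattr(self, funcname + '_arg_types', arg_types)
            self._generated_functions.append(funcname)

    helpers = MallocHelpers()
    helpers.generate_function('malloc_fixedsize',
                              lambda size: bytearray(size), ['Signed'])
    buf = helpers.malloc_fixedsize(16)     # direct, blackhole-style use
    assert len(buf) == 16
    assert helpers._generated_functions == ['malloc_fixedsize']
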
@@ -554,12 +567,14 @@
 
 class WriteBarrierDescr(AbstractDescr):
     def __init__(self, gc_ll_descr):
-        GCClass = gc_ll_descr.GCClass
         self.llop1 = gc_ll_descr.llop1
         self.WB_FUNCPTR = gc_ll_descr.WB_FUNCPTR
         self.WB_ARRAY_FUNCPTR = gc_ll_descr.WB_ARRAY_FUNCPTR
-        self.fielddescr_tid = get_field_descr(gc_ll_descr, GCClass.HDR, 'tid')
+        self.fielddescr_tid = gc_ll_descr.fielddescr_tid
         #
+        GCClass = gc_ll_descr.GCClass
+        if GCClass is None:     # for tests
+            return
         self.jit_wb_if_flag = GCClass.JIT_WB_IF_FLAG
         self.jit_wb_if_flag_byteofs, self.jit_wb_if_flag_singlebyte = (
             self.extract_flag_byte(self.jit_wb_if_flag))
@@ -596,48 +611,74 @@
         funcaddr = llmemory.cast_ptr_to_adr(funcptr)
         return cpu.cast_adr_to_int(funcaddr)    # this may return 0
 
+    def has_write_barrier_from_array(self, cpu):
+        return self.get_write_barrier_from_array_fn(cpu) != 0
+
 
 class GcLLDescr_framework(GcLLDescription):
     DEBUG = False    # forced to True by x86/test/test_zrpy_gc.py
+    kind = 'framework'
+    round_up = True
 
-    def __init__(self, gcdescr, translator, rtyper, llop1=llop):
-        from pypy.rpython.memory.gctypelayout import check_typeid
-        from pypy.rpython.memory.gcheader import GCHeaderBuilder
-        from pypy.rpython.memory.gctransform import framework
+    def __init__(self, gcdescr, translator, rtyper, llop1=llop,
+                 really_not_translated=False):
         GcLLDescription.__init__(self, gcdescr, translator, rtyper)
-        assert self.translate_support_code, "required with the framework GC"
         self.translator = translator
         self.llop1 = llop1
+        if really_not_translated:
+            assert not self.translate_support_code  # but half does not work
+            self._initialize_for_tests()
+        else:
+            assert self.translate_support_code,"required with the framework GC"
+            self._check_valid_gc()
+            self._make_gcrootmap()
+            self._make_layoutbuilder()
+            self._setup_gcclass()
+            self._setup_tid()
+        self._setup_write_barrier()
+        self._setup_str()
+        self._make_functions(really_not_translated)
 
+    def _initialize_for_tests(self):
+        self.layoutbuilder = None
+        self.fielddescr_tid = AbstractDescr()
+        self.max_size_of_young_obj = 1000
+        self.GCClass = None
+
+    def _check_valid_gc(self):
         # we need the hybrid or minimark GC for rgc._make_sure_does_not_move()
         # to work
-        if gcdescr.config.translation.gc not in ('hybrid', 'minimark'):
+        if self.gcdescr.config.translation.gc not in ('hybrid', 'minimark'):
             raise NotImplementedError("--gc=%s not implemented with the JIT" %
                                      (self.gcdescr.config.translation.gc,))
 
+    def _make_gcrootmap(self):
         # to find roots in the assembler, make a GcRootMap
-        name = gcdescr.config.translation.gcrootfinder
+        name = self.gcdescr.config.translation.gcrootfinder
         try:
             cls = globals()['GcRootMap_' + name]
         except KeyError:
             raise NotImplementedError("--gcrootfinder=%s not implemented"
                                       " with the JIT" % (name,))
-        gcrootmap = cls(gcdescr)
+        gcrootmap = cls(self.gcdescr)
         self.gcrootmap = gcrootmap
 
+    def _make_layoutbuilder(self):
         # make a TransformerLayoutBuilder and save it on the translator
         # where it can be fished and reused by the FrameworkGCTransformer
+        from pypy.rpython.memory.gctransform import framework
+        translator = self.translator
         self.layoutbuilder = framework.TransformerLayoutBuilder(translator)
         self.layoutbuilder.delay_encoding()
-        self.translator._jit2gc = {'layoutbuilder': self.layoutbuilder}
-        gcrootmap.add_jit2gc_hooks(self.translator._jit2gc)
+        translator._jit2gc = {'layoutbuilder': self.layoutbuilder}
+        self.gcrootmap.add_jit2gc_hooks(translator._jit2gc)
 
+    def _setup_gcclass(self):
+        from pypy.rpython.memory.gcheader import GCHeaderBuilder
         self.GCClass = self.layoutbuilder.GCClass
         self.moving_gc = self.GCClass.moving_gc
         self.HDRPTR = lltype.Ptr(self.GCClass.HDR)
         self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO)
-        (self.array_basesize, _, self.array_length_ofs) = \
-             symbolic.get_array_token(lltype.GcArray(lltype.Signed), True)
         self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj()
         self.minimal_size_in_nursery=self.GCClass.JIT_minimal_size_in_nursery()
 
@@ -645,88 +686,124 @@
         assert self.GCClass.inline_simple_malloc
         assert self.GCClass.inline_simple_malloc_varsize
 
-        # make a malloc function, with two arguments
-        def malloc_basic(size, tid):
-            type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
-            has_finalizer = bool(tid & (1<<llgroup.HALFSHIFT))
-            check_typeid(type_id)
-            res = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
-                                                  type_id, size,
-                                                  has_finalizer, False)
-            # In case the operation above failed, we are returning NULL
-            # from this function to assembler.  There is also an RPython
-            # exception set, typically MemoryError; but it's easier and
-            # faster to check for the NULL return value, as done by
-            # translator/exceptiontransform.py.
-            #llop.debug_print(lltype.Void, "\tmalloc_basic", size, type_id,
-            #                 "-->", res)
-            return res
-        self.malloc_basic = malloc_basic
-        self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed, lltype.Signed], llmemory.GCREF))
+    def _setup_tid(self):
+        self.fielddescr_tid = get_field_descr(self, self.GCClass.HDR, 'tid')
+
+    def _setup_write_barrier(self):
         self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType(
             [llmemory.Address, llmemory.Address], lltype.Void))
         self.WB_ARRAY_FUNCPTR = lltype.Ptr(lltype.FuncType(
             [llmemory.Address, lltype.Signed, llmemory.Address], lltype.Void))
         self.write_barrier_descr = WriteBarrierDescr(self)
-        #
+
+    def _make_functions(self, really_not_translated):
+        from pypy.rpython.memory.gctypelayout import check_typeid
+        llop1 = self.llop1
+        (self.standard_array_basesize, _, self.standard_array_length_ofs) = \
+             symbolic.get_array_token(lltype.GcArray(lltype.Signed),
+                                      not really_not_translated)
+
+        def malloc_nursery_slowpath(size):
+            """Allocate 'size' null bytes out of the nursery.
+            Note that the fast path is typically inlined by the backend."""
+            if self.DEBUG:
+                self._random_usage_of_xmm_registers()
+            type_id = rffi.cast(llgroup.HALFWORD, 0)   # tid is set afterwards
+            return llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
+                                                   type_id, size,
+                                                   False, False, False)
+        self.generate_function('malloc_nursery', malloc_nursery_slowpath,
+                               [lltype.Signed])
+
         def malloc_array(itemsize, tid, num_elem):
+            """Allocate an array with a variable-size num_elem.
+            Only works for standard arrays."""
             type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
             check_typeid(type_id)
             return llop1.do_malloc_varsize_clear(
                 llmemory.GCREF,
-                type_id, num_elem, self.array_basesize, itemsize,
-                self.array_length_ofs)
-        self.malloc_array = malloc_array
-        self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed] * 3, llmemory.GCREF))
-        #
-        (str_basesize, str_itemsize, str_ofs_length
-         ) = symbolic.get_array_token(rstr.STR, True)
-        (unicode_basesize, unicode_itemsize, unicode_ofs_length
-         ) = symbolic.get_array_token(rstr.UNICODE, True)
-        str_type_id = self.layoutbuilder.get_type_id(rstr.STR)
-        unicode_type_id = self.layoutbuilder.get_type_id(rstr.UNICODE)
-        #
+                type_id, num_elem, self.standard_array_basesize, itemsize,
+                self.standard_array_length_ofs)
+        self.generate_function('malloc_array', malloc_array,
+                               [lltype.Signed] * 3)
+
+        def malloc_array_nonstandard(basesize, itemsize, lengthofs, tid,
+                                     num_elem):
+            """For the rare case of non-standard arrays, i.e. arrays where
+            self.standard_array_{basesize,length_ofs} is wrong.  It can
+            occur e.g. with arrays of floats on Win32."""
+            type_id = llop.extract_ushort(llgroup.HALFWORD, tid)
+            check_typeid(type_id)
+            return llop1.do_malloc_varsize_clear(
+                llmemory.GCREF,
+                type_id, num_elem, basesize, itemsize, lengthofs)
+        self.generate_function('malloc_array_nonstandard',
+                               malloc_array_nonstandard,
+                               [lltype.Signed] * 5)
+
+        str_type_id    = self.str_descr.tid
+        str_basesize   = self.str_descr.basesize
+        str_itemsize   = self.str_descr.itemsize
+        str_ofs_length = self.str_descr.lendescr.offset
+        unicode_type_id    = self.unicode_descr.tid
+        unicode_basesize   = self.unicode_descr.basesize
+        unicode_itemsize   = self.unicode_descr.itemsize
+        unicode_ofs_length = self.unicode_descr.lendescr.offset
+
         def malloc_str(length):
             return llop1.do_malloc_varsize_clear(
                 llmemory.GCREF,
                 str_type_id, length, str_basesize, str_itemsize,
                 str_ofs_length)
+        self.generate_function('malloc_str', malloc_str,
+                               [lltype.Signed])
+
         def malloc_unicode(length):
             return llop1.do_malloc_varsize_clear(
                 llmemory.GCREF,
-                unicode_type_id, length, unicode_basesize,unicode_itemsize,
+                unicode_type_id, length, unicode_basesize, unicode_itemsize,
                 unicode_ofs_length)
-        self.malloc_str = malloc_str
-        self.malloc_unicode = malloc_unicode
-        self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed], llmemory.GCREF))
-        #
-        class ForTestOnly:
-            pass
-        for_test_only = ForTestOnly()
-        for_test_only.x = 1.23
-        def random_usage_of_xmm_registers():
-            x0 = for_test_only.x
-            x1 = x0 * 0.1
-            x2 = x0 * 0.2
-            x3 = x0 * 0.3
-            for_test_only.x = x0 + x1 + x2 + x3
-        #
-        def malloc_slowpath(size):
-            if self.DEBUG:
-                random_usage_of_xmm_registers()
-            assert size >= self.minimal_size_in_nursery
-            # NB. although we call do_malloc_fixedsize_clear() here,
-            # it's a bit of a hack because we set tid to 0 and may
-            # also use it to allocate varsized objects.  The tid
-            # and possibly the length are both set afterward.
-            gcref = llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
-                                        0, size, False, False)
-            return rffi.cast(lltype.Signed, gcref)
-        self.malloc_slowpath = malloc_slowpath
-        self.MALLOC_SLOWPATH = lltype.FuncType([lltype.Signed], lltype.Signed)
+        self.generate_function('malloc_unicode', malloc_unicode,
+                               [lltype.Signed])
+
+        # Rarely called: allocate a fixed-size amount of bytes, but
+        # not in the nursery, because it is too big.  Implemented like
+        # malloc_nursery_slowpath() above.
+        self.generate_function('malloc_fixedsize', malloc_nursery_slowpath,
+                               [lltype.Signed])
+
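
malloc_array above only handles "standard" arrays, i.e. those whose base size and length offset match the GcArray(Signed) token computed at the top of _make_functions(); everything else must go through malloc_array_nonstandard, which receives the layout explicitly.  The choice between the two is made by the rewriter when it emits the call (its gen_malloc_array() is referenced below but not shown in this truncated diff).  A hedged, self-contained sketch of that decision, using made-up layout records:

    from collections import namedtuple

    LenDescr = namedtuple('LenDescr', ['offset'])
    ArrayLayout = namedtuple('ArrayLayout', ['basesize', 'itemsize', 'lendescr'])

    def pick_malloc_variant(descr, standard_basesize, standard_length_ofs):
        # standard layout: the 3-argument malloc_array is enough
        if (descr.basesize == standard_basesize and
                descr.lendescr.offset == standard_length_ofs):
            return 'malloc_array'
        # otherwise pass basesize/itemsize/lengthofs explicitly
        return 'malloc_array_nonstandard'

    std = ArrayLayout(basesize=16, itemsize=8, lendescr=LenDescr(offset=8))
    odd = ArrayLayout(basesize=24, itemsize=8, lendescr=LenDescr(offset=16))
    assert pick_malloc_variant(std, 16, 8) == 'malloc_array'
    assert pick_malloc_variant(odd, 16, 8) == 'malloc_array_nonstandard'
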
+    def _bh_malloc(self, sizedescr):
+        from pypy.rpython.memory.gctypelayout import check_typeid
+        llop1 = self.llop1
+        type_id = llop.extract_ushort(llgroup.HALFWORD, sizedescr.tid)
+        check_typeid(type_id)
+        return llop1.do_malloc_fixedsize_clear(llmemory.GCREF,
+                                               type_id, sizedescr.size,
+                                               False, False, False)
+
+    def _bh_malloc_array(self, arraydescr, num_elem):
+        from pypy.rpython.memory.gctypelayout import check_typeid
+        llop1 = self.llop1
+        type_id = llop.extract_ushort(llgroup.HALFWORD, arraydescr.tid)
+        check_typeid(type_id)
+        return llop1.do_malloc_varsize_clear(llmemory.GCREF,
+                                             type_id, num_elem,
+                                             arraydescr.basesize,
+                                             arraydescr.itemsize,
+                                             arraydescr.lendescr.offset)
+
+
+    class ForTestOnly:
+        pass
+    for_test_only = ForTestOnly()
+    for_test_only.x = 1.23
+
+    def _random_usage_of_xmm_registers(self):
+        x0 = self.for_test_only.x
+        x1 = x0 * 0.1
+        x2 = x0 * 0.2
+        x3 = x0 * 0.3
+        self.for_test_only.x = x0 + x1 + x2 + x3
 
     def get_nursery_free_addr(self):
         nurs_addr = llop.gc_adr_of_nursery_free(llmemory.Address)
@@ -736,50 +813,26 @@
         nurs_top_addr = llop.gc_adr_of_nursery_top(llmemory.Address)
         return rffi.cast(lltype.Signed, nurs_top_addr)
 
-    def get_malloc_slowpath_addr(self):
-        fptr = llhelper(lltype.Ptr(self.MALLOC_SLOWPATH), self.malloc_slowpath)
-        return rffi.cast(lltype.Signed, fptr)
-
     def initialize(self):
         self.gcrootmap.initialize()
 
     def init_size_descr(self, S, descr):
-        type_id = self.layoutbuilder.get_type_id(S)
-        assert not self.layoutbuilder.is_weakref_type(S)
-        has_finalizer = bool(self.layoutbuilder.has_finalizer(S))
-        flags = int(has_finalizer) << llgroup.HALFSHIFT
-        descr.tid = llop.combine_ushort(lltype.Signed, type_id, flags)
+        if self.layoutbuilder is not None:
+            type_id = self.layoutbuilder.get_type_id(S)
+            assert not self.layoutbuilder.is_weakref_type(S)
+            assert not self.layoutbuilder.has_finalizer(S)
+            descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0)
 
     def init_array_descr(self, A, descr):
-        type_id = self.layoutbuilder.get_type_id(A)
-        descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0)
+        if self.layoutbuilder is not None:
+            type_id = self.layoutbuilder.get_type_id(A)
+            descr.tid = llop.combine_ushort(lltype.Signed, type_id, 0)
 
-    def gc_malloc(self, sizedescr):
-        assert isinstance(sizedescr, BaseSizeDescr)
-        return self.malloc_basic(sizedescr.size, sizedescr.tid)
-
-    def gc_malloc_array(self, arraydescr, num_elem):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        itemsize = arraydescr.get_item_size(self.translate_support_code)
-        return self.malloc_array(itemsize, arraydescr.tid, num_elem)
-
-    def gc_malloc_str(self, num_elem):
-        return self.malloc_str(num_elem)
-
-    def gc_malloc_unicode(self, num_elem):
-        return self.malloc_unicode(num_elem)
-
-    def args_for_new(self, sizedescr):
-        assert isinstance(sizedescr, BaseSizeDescr)
-        return [sizedescr.size, sizedescr.tid]
-
-    def args_for_new_array(self, arraydescr):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        itemsize = arraydescr.get_item_size(self.translate_support_code)
-        return [itemsize, arraydescr.tid]
-
-    def get_funcptr_for_new(self):
-        return llhelper(self.GC_MALLOC_BASIC, self.malloc_basic)
+    def _set_tid(self, gcptr, tid):
+        hdr_addr = llmemory.cast_ptr_to_adr(gcptr)
+        hdr_addr -= self.gcheaderbuilder.size_gc_header
+        hdr = llmemory.cast_adr_to_ptr(hdr_addr, self.HDRPTR)
+        hdr.tid = tid
 
     def do_write_barrier(self, gcref_struct, gcref_newptr):
         hdr_addr = llmemory.cast_ptr_to_adr(gcref_struct)
@@ -793,99 +846,8 @@
             funcptr(llmemory.cast_ptr_to_adr(gcref_struct),
                     llmemory.cast_ptr_to_adr(gcref_newptr))
 
-    def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
-        # Perform two kinds of rewrites in parallel:
-        #
-        # - Add COND_CALLs to the write barrier before SETFIELD_GC and
-        #   SETARRAYITEM_GC operations.
-        #
-        # - Record the ConstPtrs from the assembler.
-        #
-        newops = []
-        known_lengths = {}
-        # we can only remember one malloc since the next malloc can possibly
-        # collect
-        last_malloc = None
-        for op in operations:
-            if op.getopnum() == rop.DEBUG_MERGE_POINT:
-                continue
-            # ---------- record the ConstPtrs ----------
-            self.record_constptrs(op, gcrefs_output_list)
-            if op.is_malloc():
-                last_malloc = op.result
-            elif op.can_malloc():
-                last_malloc = None
-            # ---------- write barrier for SETFIELD_GC ----------
-            if op.getopnum() == rop.SETFIELD_GC:
-                val = op.getarg(0)
-                # no need for a write barrier in the case of previous malloc
-                if val is not last_malloc:
-                    v = op.getarg(1)
-                    if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
-                                            bool(v.value)): # store a non-NULL
-                        self._gen_write_barrier(newops, op.getarg(0), v)
-                        op = op.copy_and_change(rop.SETFIELD_RAW)
-            # ---------- write barrier for SETARRAYITEM_GC ----------
-            if op.getopnum() == rop.SETARRAYITEM_GC:
-                val = op.getarg(0)
-                # no need for a write barrier in the case of previous malloc
-                if val is not last_malloc:
-                    v = op.getarg(2)
-                    if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
-                                            bool(v.value)): # store a non-NULL
-                        self._gen_write_barrier_array(newops, op.getarg(0),
-                                                      op.getarg(1), v,
-                                                      cpu, known_lengths)
-                        op = op.copy_and_change(rop.SETARRAYITEM_RAW)
-            elif op.getopnum() == rop.NEW_ARRAY:
-                v_length = op.getarg(0)
-                if isinstance(v_length, ConstInt):
-                    known_lengths[op.result] = v_length.getint()
-            # ----------
-            newops.append(op)
-        return newops
-
-    def _gen_write_barrier(self, newops, v_base, v_value):
-        args = [v_base, v_value]
-        newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None,
-                                   descr=self.write_barrier_descr))
-
-    def _gen_write_barrier_array(self, newops, v_base, v_index, v_value,
-                                 cpu, known_lengths):
-        if self.write_barrier_descr.get_write_barrier_from_array_fn(cpu) != 0:
-            # If we know statically the length of 'v', and it is not too
-            # big, then produce a regular write_barrier.  If it's unknown or
-            # too big, produce instead a write_barrier_from_array.
-            LARGE = 130
-            length = known_lengths.get(v_base, LARGE)
-            if length >= LARGE:
-                # unknown or too big: produce a write_barrier_from_array
-                args = [v_base, v_index, v_value]
-                newops.append(ResOperation(rop.COND_CALL_GC_WB_ARRAY, args,
-                                           None,
-                                           descr=self.write_barrier_descr))
-                return
-        # fall-back case: produce a write_barrier
-        self._gen_write_barrier(newops, v_base, v_value)
-
-    def can_inline_malloc(self, descr):
-        assert isinstance(descr, BaseSizeDescr)
-        if descr.size < self.max_size_of_young_obj:
-            has_finalizer = bool(descr.tid & (1<<llgroup.HALFSHIFT))
-            if has_finalizer:
-                return False
-            return True
-        return False
-
-    def can_inline_malloc_varsize(self, arraydescr, num_elem):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        basesize = arraydescr.get_base_size(self.translate_support_code)
-        itemsize = arraydescr.get_item_size(self.translate_support_code)
-        try:
-            size = ovfcheck(basesize + ovfcheck(itemsize * num_elem))
-            return size < self.max_size_of_young_obj
-        except OverflowError:
-            return False
+    def can_use_nursery_malloc(self, size):
+        return size < self.max_size_of_young_obj
 
     def has_write_barrier_class(self):
         return WriteBarrierDescr
@@ -893,6 +855,9 @@
     def freeing_block(self, start, stop):
         self.gcrootmap.freeing_block(start, stop)
 
+    def get_malloc_slowpath_addr(self):
+        return self.get_malloc_fn_addr('malloc_nursery')
+
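
get_nursery_free_addr(), get_nursery_top_addr() and get_malloc_slowpath_addr() together give the backend everything it needs to inline the bump-pointer fast path that malloc_nursery_slowpath()'s docstring refers to.  A rough Python-level sketch of that scheme; this is only an illustration of the idea, not the assembler the backend actually generates:

    class NurserySketch(object):
        def __init__(self, size):
            self.free = 0        # stands in for *nursery_free_addr
            self.top = size      # stands in for *nursery_top_addr

        def slowpath(self, size):
            # real version: trigger a minor collection, then retry
            raise MemoryError("nursery exhausted (sketch)")

        def malloc_nursery(self, size):
            result = self.free
            new_free = result + size
            if new_free > self.top:      # the check the backend inlines
                return self.slowpath(size)
            self.free = new_free         # bump the pointer
            return result

    n = NurserySketch(64)
    assert n.malloc_nursery(16) == 0
    assert n.malloc_nursery(16) == 16
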
 # ____________________________________________________________
 
 def get_ll_description(gcdescr, translator=None, rtyper=None):
diff --git a/pypy/jit/backend/llsupport/llmodel.py b/pypy/jit/backend/llsupport/llmodel.py
--- a/pypy/jit/backend/llsupport/llmodel.py
+++ b/pypy/jit/backend/llsupport/llmodel.py
@@ -8,10 +8,10 @@
 from pypy.jit.backend.model import AbstractCPU
 from pypy.jit.backend.llsupport import symbolic
 from pypy.jit.backend.llsupport.symbolic import WORD, unroll_basic_sizes
-from pypy.jit.backend.llsupport.descr import (get_size_descr,
-     get_field_descr, BaseFieldDescr, get_array_descr, BaseArrayDescr,
-     get_call_descr, BaseIntCallDescr, GcPtrCallDescr, FloatCallDescr,
-     VoidCallDescr, InteriorFieldDescr, get_interiorfield_descr)
+from pypy.jit.backend.llsupport.descr import (
+    get_size_descr, get_field_descr, get_array_descr,
+    get_call_descr, get_interiorfield_descr, get_dynamic_interiorfield_descr,
+    FieldDescr, ArrayDescr, CallDescr, InteriorFieldDescr)
 from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager
 
 
@@ -106,9 +106,15 @@
             _exception_emulator[1] = 0
             self.saved_exc_value = rffi.cast(llmemory.GCREF, v_i)
 
+        def save_exception_memoryerr():
+            save_exception()
+            if not self.saved_exc_value:
+                self.saved_exc_value = "memoryerror!"    # for tests
+
         self.pos_exception = pos_exception
         self.pos_exc_value = pos_exc_value
         self.save_exception = save_exception
+        self.save_exception_memoryerr = save_exception_memoryerr
         self.insert_stack_check = lambda: (0, 0, 0)
 
 
@@ -133,6 +139,15 @@
             # in the assignment to self.saved_exc_value, as needed.
             self.saved_exc_value = exc_value
 
+        def save_exception_memoryerr():
+            from pypy.rpython.annlowlevel import cast_instance_to_base_ptr
+            save_exception()
+            if not self.saved_exc_value:
+                exc = MemoryError()
+                exc = cast_instance_to_base_ptr(exc)
+                exc = lltype.cast_opaque_ptr(llmemory.GCREF, exc)
+                self.saved_exc_value = exc
+
         from pypy.rlib import rstack
         STACK_CHECK_SLOWPATH = lltype.Ptr(lltype.FuncType([lltype.Signed],
                                                           lltype.Void))
@@ -146,16 +161,19 @@
         self.pos_exception = pos_exception
         self.pos_exc_value = pos_exc_value
         self.save_exception = save_exception
+        self.save_exception_memoryerr = save_exception_memoryerr
         self.insert_stack_check = insert_stack_check
 
     def _setup_on_leave_jitted_untranslated(self):
         # assume we don't need a backend leave in this case
         self.on_leave_jitted_save_exc = self.save_exception
+        self.on_leave_jitted_memoryerr = self.save_exception_memoryerr
         self.on_leave_jitted_noexc = lambda : None
 
     def _setup_on_leave_jitted_translated(self):
         on_leave_jitted_hook = self.get_on_leave_jitted_hook()
         save_exception = self.save_exception
+        save_exception_memoryerr = self.save_exception_memoryerr
 
         def on_leave_jitted_noexc():
             on_leave_jitted_hook()
@@ -164,16 +182,24 @@
             save_exception()
             on_leave_jitted_hook()
 
+        def on_leave_jitted_memoryerr():
+            save_exception_memoryerr()
+            on_leave_jitted_hook()
+
         self.on_leave_jitted_noexc = on_leave_jitted_noexc
         self.on_leave_jitted_save_exc = on_leave_jitted_save_exc
+        self.on_leave_jitted_memoryerr = on_leave_jitted_memoryerr
 
     def get_on_leave_jitted_hook(self):
         return lambda : None
 
     _ON_JIT_LEAVE_FUNC = lltype.Ptr(lltype.FuncType([], lltype.Void))
 
-    def get_on_leave_jitted_int(self, save_exception):
-        if save_exception:
+    def get_on_leave_jitted_int(self, save_exception,
+                                default_to_memoryerror=False):
+        if default_to_memoryerror:
+            f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_memoryerr)
+        elif save_exception:
             f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_save_exc)
         else:
             f = llhelper(self._ON_JIT_LEAVE_FUNC, self.on_leave_jitted_noexc)
@@ -220,14 +246,14 @@
         return get_field_descr(self.gc_ll_descr, STRUCT, fieldname)
 
     def unpack_fielddescr(self, fielddescr):
-        assert isinstance(fielddescr, BaseFieldDescr)
+        assert isinstance(fielddescr, FieldDescr)
         return fielddescr.offset
     unpack_fielddescr._always_inline_ = True
 
     def unpack_fielddescr_size(self, fielddescr):
-        assert isinstance(fielddescr, BaseFieldDescr)
+        assert isinstance(fielddescr, FieldDescr)
         ofs = fielddescr.offset
-        size = fielddescr.get_field_size(self.translate_support_code)
+        size = fielddescr.field_size
         sign = fielddescr.is_field_signed()
         return ofs, size, sign
     unpack_fielddescr_size._always_inline_ = True
@@ -236,17 +262,23 @@
         return get_array_descr(self.gc_ll_descr, A)
 
     def interiorfielddescrof(self, A, fieldname):
-        return get_interiorfield_descr(self.gc_ll_descr, A, A.OF, fieldname)
+        return get_interiorfield_descr(self.gc_ll_descr, A, fieldname)
+
+    def interiorfielddescrof_dynamic(self, offset, width, fieldsize,
+                                     is_pointer, is_float, is_signed):
+        return get_dynamic_interiorfield_descr(self.gc_ll_descr,
+                                               offset, width, fieldsize,
+                                               is_pointer, is_float, is_signed)
 
     def unpack_arraydescr(self, arraydescr):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        return arraydescr.get_base_size(self.translate_support_code)
+        assert isinstance(arraydescr, ArrayDescr)
+        return arraydescr.basesize
     unpack_arraydescr._always_inline_ = True
 
     def unpack_arraydescr_size(self, arraydescr):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        ofs = arraydescr.get_base_size(self.translate_support_code)
-        size = arraydescr.get_item_size(self.translate_support_code)
+        assert isinstance(arraydescr, ArrayDescr)
+        ofs = arraydescr.basesize
+        size = arraydescr.itemsize
         sign = arraydescr.is_item_signed()
         return ofs, size, sign
     unpack_arraydescr_size._always_inline_ = True
@@ -274,8 +306,8 @@
     # ____________________________________________________________
 
     def bh_arraylen_gc(self, arraydescr, array):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        ofs = arraydescr.get_ofs_length(self.translate_support_code)
+        assert isinstance(arraydescr, ArrayDescr)
+        ofs = arraydescr.lendescr.offset
         return rffi.cast(rffi.CArrayPtr(lltype.Signed), array)[ofs/WORD]
 
     @specialize.argtype(2)
@@ -360,7 +392,7 @@
         arraydescr = descr.arraydescr
         ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
         ofs += descr.fielddescr.offset
-        fieldsize = descr.fielddescr.get_field_size(self.translate_support_code)
+        fieldsize = descr.fielddescr.field_size
         sign = descr.fielddescr.is_field_signed()
         fullofs = itemindex * size + ofs
         # --- start of GC unsafe code (no GC operation!) ---
@@ -411,7 +443,7 @@
         arraydescr = descr.arraydescr
         ofs, size, _ = self.unpack_arraydescr_size(arraydescr)
         ofs += descr.fielddescr.offset
-        fieldsize = descr.fielddescr.get_field_size(self.translate_support_code)
+        fieldsize = descr.fielddescr.field_size
         ofs = itemindex * size + ofs
         # --- start of GC unsafe code (no GC operation!) ---
         items = rffi.ptradd(rffi.cast(rffi.CCHARP, gcref), ofs)
@@ -597,25 +629,26 @@
         rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
 
     def bh_call_i(self, func, calldescr, args_i, args_r, args_f):
-        assert isinstance(calldescr, BaseIntCallDescr)
+        assert isinstance(calldescr, CallDescr)
         if not we_are_translated():
             calldescr.verify_types(args_i, args_r, args_f, history.INT + 'S')
-        return calldescr.call_stub(func, args_i, args_r, args_f)
+        return calldescr.call_stub_i(func, args_i, args_r, args_f)
 
     def bh_call_r(self, func, calldescr, args_i, args_r, args_f):
-        assert isinstance(calldescr, GcPtrCallDescr)
+        assert isinstance(calldescr, CallDescr)
         if not we_are_translated():
             calldescr.verify_types(args_i, args_r, args_f, history.REF)
-        return calldescr.call_stub(func, args_i, args_r, args_f)
+        return calldescr.call_stub_r(func, args_i, args_r, args_f)
 
     def bh_call_f(self, func, calldescr, args_i, args_r, args_f):
-        assert isinstance(calldescr, FloatCallDescr)  # or LongLongCallDescr
+        assert isinstance(calldescr, CallDescr)
         if not we_are_translated():
             calldescr.verify_types(args_i, args_r, args_f, history.FLOAT + 'L')
-        return calldescr.call_stub(func, args_i, args_r, args_f)
+        return calldescr.call_stub_f(func, args_i, args_r, args_f)
 
     def bh_call_v(self, func, calldescr, args_i, args_r, args_f):
-        assert isinstance(calldescr, VoidCallDescr)
+        assert isinstance(calldescr, CallDescr)
         if not we_are_translated():
             calldescr.verify_types(args_i, args_r, args_f, history.VOID)
-        return calldescr.call_stub(func, args_i, args_r, args_f)
+        # the 'i' return value is ignored (and nonsense anyway)
+        calldescr.call_stub_i(func, args_i, args_r, args_f)
diff --git a/pypy/jit/backend/llsupport/regalloc.py b/pypy/jit/backend/llsupport/regalloc.py
--- a/pypy/jit/backend/llsupport/regalloc.py
+++ b/pypy/jit/backend/llsupport/regalloc.py
@@ -16,32 +16,106 @@
     """ Manage frame positions
     """
     def __init__(self):
-        self.frame_bindings = {}
-        self.frame_depth    = 0
+        self.bindings = {}
+        self.used = []      # list of bools
+        self.hint_frame_locations = {}
+
+    frame_depth = property(lambda:xxx, lambda:xxx)   # XXX kill me
+
+    def get_frame_depth(self):
+        return len(self.used)
 
     def get(self, box):
-        return self.frame_bindings.get(box, None)
+        return self.bindings.get(box, None)
 
     def loc(self, box):
-        res = self.get(box)
-        if res is not None:
-            return res
+        """Return or create the frame location associated with 'box'."""
+        # first check if it's already in the frame_manager
+        try:
+            return self.bindings[box]
+        except KeyError:
+            pass
+        # check if we have a hint for this box
+        if box in self.hint_frame_locations:
+            # if we do, try to reuse the location for this box
+            loc = self.hint_frame_locations[box]
+            if self.try_to_reuse_location(box, loc):
+                return loc
+        # no valid hint.  make up a new free location
+        return self.get_new_loc(box)
+
+    def get_new_loc(self, box):
         size = self.frame_size(box.type)
-        self.frame_depth += ((-self.frame_depth) & (size-1))
-        # ^^^ frame_depth is rounded up to a multiple of 'size', assuming
+        # frame_depth is rounded up to a multiple of 'size', assuming
         # that 'size' is a power of two.  The reason for doing so is to
         # avoid obscure issues in jump.py with stack locations that try
         # to move from position (6,7) to position (7,8).
-        newloc = self.frame_pos(self.frame_depth, box.type)
-        self.frame_bindings[box] = newloc
-        self.frame_depth += size
+        while self.get_frame_depth() & (size - 1):
+            self.used.append(False)
+        #
+        index = self.get_frame_depth()
+        newloc = self.frame_pos(index, box.type)
+        for i in range(size):
+            self.used.append(True)
+        #
+        if not we_are_translated():    # extra testing
+            testindex = self.get_loc_index(newloc)
+            assert testindex == index
+        #
+        self.bindings[box] = newloc
         return newloc
 
+    def set_binding(self, box, loc):
+        self.bindings[box] = loc
+        #
+        index = self.get_loc_index(loc)
+        if index < 0:
+            return
+        endindex = index + self.frame_size(box.type)
+        while len(self.used) < endindex:
+            self.used.append(False)
+        while index < endindex:
+            self.used[index] = True
+            index += 1
+
     def reserve_location_in_frame(self, size):
-        frame_depth = self.frame_depth
-        self.frame_depth += size
+        frame_depth = self.get_frame_depth()
+        for i in range(size):
+            self.used.append(True)
         return frame_depth
 
+    def mark_as_free(self, box):
+        try:
+            loc = self.bindings[box]
+        except KeyError:
+            return    # already gone
+        del self.bindings[box]
+        #
+        size = self.frame_size(box.type)
+        baseindex = self.get_loc_index(loc)
+        if baseindex < 0:
+            return
+        for i in range(size):
+            index = baseindex + i
+            assert 0 <= index < len(self.used)
+            self.used[index] = False
+
+    def try_to_reuse_location(self, box, loc):
+        index = self.get_loc_index(loc)
+        if index < 0:
+            return False
+        size = self.frame_size(box.type)
+        for i in range(size):
+            while (index + i) >= len(self.used):
+                self.used.append(False)
+            if self.used[index + i]:
+                return False    # already in use
+        # good, we can reuse the location
+        for i in range(size):
+            self.used[index + i] = True
+        self.bindings[box] = loc
+        return True
+
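
The FrameManager now tracks the frame with an explicit per-slot 'used' list instead of a monotonically growing frame_depth counter; that is what makes mark_as_free() and the hint_frame_locations reuse above possible.  A toy version of the same bookkeeping, simplified to single-word boxes (frame_size == 1) and plain integer locations:

    class ToyFrameManager(object):
        def __init__(self):
            self.bindings = {}
            self.used = []                     # one bool per frame slot
            self.hint_frame_locations = {}

        def loc(self, box):
            if box in self.bindings:
                return self.bindings[box]
            hint = self.hint_frame_locations.get(box)
            if hint is not None and self.try_to_reuse_location(box, hint):
                return hint
            index = len(self.used)             # no usable hint: grow the frame
            self.used.append(True)
            self.bindings[box] = index
            return index

        def try_to_reuse_location(self, box, index):
            while index >= len(self.used):
                self.used.append(False)
            if self.used[index]:
                return False                   # already occupied
            self.used[index] = True
            self.bindings[box] = index
            return True

        def mark_as_free(self, box):
            index = self.bindings.pop(box, None)
            if index is not None:
                self.used[index] = False       # slot becomes reusable

    fm = ToyFrameManager()
    slot = fm.loc('box_a')
    fm.mark_as_free('box_a')
    fm.hint_frame_locations['box_b'] = slot
    assert fm.loc('box_b') == slot             # hint honored: slot reused
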
     # abstract methods that need to be overwritten for specific assemblers
     @staticmethod
     def frame_pos(loc, type):
@@ -49,6 +123,10 @@
     @staticmethod
     def frame_size(type):
         return 1
+    @staticmethod
+    def get_loc_index(loc):
+        raise NotImplementedError("Purely abstract")
+
 
 class RegisterManager(object):
     """ Class that keeps track of register allocations
@@ -68,7 +146,14 @@
         self.frame_manager = frame_manager
         self.assembler = assembler
 
+    def is_still_alive(self, v):
+        # Check if 'v' is alive at the current position.
+        # Return False if the last usage is strictly before.
+        return self.longevity[v][1] >= self.position
+
     def stays_alive(self, v):
+        # Check if 'v' stays alive after the current position.
+        # Return False if the last usage is before or at position.
         return self.longevity[v][1] > self.position
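
The only difference between is_still_alive() and stays_alive() is the comparison operator, which is easy to get wrong, so here is a tiny worked example with a hand-written longevity map (each entry is (first_use, last_use) in instruction positions):

    longevity = {'v0': (0, 3), 'v1': (2, 5)}
    position = 3

    def is_still_alive(v):
        return longevity[v][1] >= position   # last use not yet passed

    def stays_alive(v):
        return longevity[v][1] > position    # still needed after this op

    assert is_still_alive('v0') and not stays_alive('v0')   # dies right here
    assert is_still_alive('v1') and stays_alive('v1')       # used later too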
 
     def next_instruction(self, incr=1):
@@ -84,11 +169,14 @@
             point for all variables that might be in registers.
         """
         self._check_type(v)
-        if isinstance(v, Const) or v not in self.reg_bindings:
+        if isinstance(v, Const):
             return
         if v not in self.longevity or self.longevity[v][1] <= self.position:
-            self.free_regs.append(self.reg_bindings[v])
-            del self.reg_bindings[v]
+            if v in self.reg_bindings:
+                self.free_regs.append(self.reg_bindings[v])
+                del self.reg_bindings[v]
+            if self.frame_manager is not None:
+                self.frame_manager.mark_as_free(v)
 
     def possibly_free_vars(self, vars):
         """ Same as 'possibly_free_var', but for all v in vars.
diff --git a/pypy/jit/backend/llsupport/rewrite.py b/pypy/jit/backend/llsupport/rewrite.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/llsupport/rewrite.py
@@ -0,0 +1,328 @@
+import sys
+from pypy.rlib.rarithmetic import ovfcheck
+from pypy.jit.metainterp.history import ConstInt, BoxPtr, ConstPtr
+from pypy.jit.metainterp.resoperation import ResOperation, rop
+from pypy.jit.codewriter import heaptracker
+from pypy.jit.backend.llsupport.symbolic import WORD
+from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr
+
+
+class GcRewriterAssembler(object):
+    # This class performs the following rewrites on the list of operations:
+    #
+    # - Remove the DEBUG_MERGE_POINTs.
+    #
+    # - Turn all NEW_xxx to either a CALL_MALLOC_GC, or a CALL_MALLOC_NURSERY
+    #   followed by SETFIELDs in order to initialize their GC fields.  The
+    #   two advantages of CALL_MALLOC_NURSERY is that it inlines the common
+    #   path, and we need only one such operation to allocate several blocks
+    #   of memory at once.
+    #
+    # - Add COND_CALLs to the write barrier before SETFIELD_GC and
+    #   SETARRAYITEM_GC operations.
+
+    _previous_size = -1
+    _op_malloc_nursery = None
+    _v_last_malloced_nursery = None
+    c_zero = ConstInt(0)
+
+    def __init__(self, gc_ll_descr, cpu):
+        self.gc_ll_descr = gc_ll_descr
+        self.cpu = cpu
+        self.newops = []
+        self.known_lengths = {}
+        self.recent_mallocs = {}     # set of variables
+
+    def rewrite(self, operations):
+        # we can only remember one malloc since the next malloc can possibly
+        # collect; but we can try to collapse several known-size mallocs into
+        # one, both for performance and to reduce the number of write
+        # barriers.  We do this on each "basic block" of operations, which in
+        # this case means between CALLs or unknown-size mallocs.
+        #
+        for op in operations:
+            if op.getopnum() == rop.DEBUG_MERGE_POINT:
+                continue
+            # ---------- turn NEWxxx into CALL_MALLOC_xxx ----------
+            if op.is_malloc():
+                self.handle_malloc_operation(op)
+                continue
+            elif op.can_malloc():
+                self.emitting_an_operation_that_can_collect()
+            elif op.getopnum() == rop.LABEL:
+                self.emitting_an_operation_that_can_collect()
+                self.known_lengths.clear()
+            # ---------- write barriers ----------
+            if self.gc_ll_descr.write_barrier_descr is not None:
+                if op.getopnum() == rop.SETFIELD_GC:
+                    self.handle_write_barrier_setfield(op)
+                    continue
+                if op.getopnum() == rop.SETINTERIORFIELD_GC:
+                    self.handle_write_barrier_setinteriorfield(op)
+                    continue
+                if op.getopnum() == rop.SETARRAYITEM_GC:
+                    self.handle_write_barrier_setarrayitem(op)
+                    continue
+            # ----------
+            self.newops.append(op)
+        return self.newops
+
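
To make the comment in rewrite() concrete (collapsing several known-size mallocs into one per "basic block"), here is a standalone toy that performs the same kind of merge on a simplified trace.  It is only an illustration of the idea; the real operations are produced by gen_malloc_nursery() and the other helpers further down in this file:

    def collapse_mallocs(ops):
        # ops is a list of ('new', size) or ('call', name) pairs.  Consecutive
        # 'new's are merged into a single ('malloc_nursery', total, offsets);
        # any operation that can collect closes the current group.
        out, group = [], []

        def flush():
            if group:
                total = sum(group)
                offsets = [sum(group[:i]) for i in range(len(group))]
                out.append(('malloc_nursery', total, offsets))
                del group[:]

        for op in ops:
            if op[0] == 'new':
                group.append(op[1])
            else:
                flush()                  # a call may collect
                out.append(op)
        flush()
        return out

    trace = [('new', 16), ('new', 24), ('call', 'f'), ('new', 8)]
    assert collapse_mallocs(trace) == [
        ('malloc_nursery', 40, [0, 16]),
        ('call', 'f'),
        ('malloc_nursery', 8, [0]),
    ]
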
+    # ----------
+
+    def handle_malloc_operation(self, op):
+        opnum = op.getopnum()
+        if opnum == rop.NEW:
+            self.handle_new_fixedsize(op.getdescr(), op)
+        elif opnum == rop.NEW_WITH_VTABLE:
+            classint = op.getarg(0).getint()
+            descr = heaptracker.vtable2descr(self.cpu, classint)
+            self.handle_new_fixedsize(descr, op)
+            if self.gc_ll_descr.fielddescr_vtable is not None:
+                op = ResOperation(rop.SETFIELD_GC,
+                                  [op.result, ConstInt(classint)], None,
+                                  descr=self.gc_ll_descr.fielddescr_vtable)
+                self.newops.append(op)
+        elif opnum == rop.NEW_ARRAY:
+            descr = op.getdescr()
+            assert isinstance(descr, ArrayDescr)
+            self.handle_new_array(descr, op)
+        elif opnum == rop.NEWSTR:
+            self.handle_new_array(self.gc_ll_descr.str_descr, op)
+        elif opnum == rop.NEWUNICODE:
+            self.handle_new_array(self.gc_ll_descr.unicode_descr, op)
+        else:
+            raise NotImplementedError(op.getopname())
+
+    def handle_new_fixedsize(self, descr, op):
+        assert isinstance(descr, SizeDescr)
+        size = descr.size
+        self.gen_malloc_nursery(size, op.result)
+        self.gen_initialize_tid(op.result, descr.tid)
+
+    def handle_new_array(self, arraydescr, op):
+        v_length = op.getarg(0)
+        total_size = -1
+        if isinstance(v_length, ConstInt):
+            num_elem = v_length.getint()
+            self.known_lengths[op.result] = num_elem
+            try:
+                var_size = ovfcheck(arraydescr.itemsize * num_elem)
+                total_size = ovfcheck(arraydescr.basesize + var_size)
+            except OverflowError:
+                pass    # total_size is still -1
+        elif arraydescr.itemsize == 0:
+            total_size = arraydescr.basesize
+        if 0 <= total_size <= 0xffffff:     # up to 16MB, arbitrarily
+            self.gen_malloc_nursery(total_size, op.result)
+            self.gen_initialize_tid(op.result, arraydescr.tid)
+            self.gen_initialize_len(op.result, v_length, arraydescr.lendescr)
+        elif self.gc_ll_descr.kind == 'boehm':
+            self.gen_boehm_malloc_array(arraydescr, v_length, op.result)
+        else:
+            opnum = op.getopnum()
+            if opnum == rop.NEW_ARRAY:
+                self.gen_malloc_array(arraydescr, v_length, op.result)
+            elif opnum == rop.NEWSTR:
+                self.gen_malloc_str(v_length, op.result)
+            elif opnum == rop.NEWUNICODE:
+                self.gen_malloc_unicode(v_length, op.result)
+            else:
+                raise NotImplementedError(op.getopname())
+
+    # ----------
+
+    def emitting_an_operation_that_can_collect(self):
+        # must be called whenever we emit an operation that can collect:
+        # it forgets the previous MALLOC_NURSERY, if any, and empties the
+        # set 'recent_mallocs', so that future SETFIELDs will generate
+        # a write barrier as usual.
+        self._op_malloc_nursery = None
+        self.recent_mallocs.clear()
+
+    def _gen_call_malloc_gc(self, args, v_result, descr):
+        """Generate a CALL_MALLOC_GC with the given args."""
+        self.emitting_an_operation_that_can_collect()
+        op = ResOperation(rop.CALL_MALLOC_GC, args, v_result, descr)
+        self.newops.append(op)
+        # mark 'v_result' as freshly malloced
+        self.recent_mallocs[v_result] = None
+
+    def gen_malloc_fixedsize(self, size, v_result):
+        """Generate a CALL_MALLOC_GC(malloc_fixedsize_fn, Const(size)).
+        Note that with the framework GC, this should be called very rarely.
+        """
+        addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_fixedsize')
+        self._gen_call_malloc_gc([ConstInt(addr), ConstInt(size)], v_result,
+                                 self.gc_ll_descr.malloc_fixedsize_descr)
+
+    def gen_boehm_malloc_array(self, arraydescr, v_num_elem, v_result):
+        """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) for Boehm."""
+        addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array')
+        self._gen_call_malloc_gc([ConstInt(addr),
+                                  ConstInt(arraydescr.basesize),
+                                  v_num_elem,
+                                  ConstInt(arraydescr.itemsize),
+                                  ConstInt(arraydescr.lendescr.offset)],
+                                 v_result,
+                                 self.gc_ll_descr.malloc_array_descr)
+
+    def gen_malloc_array(self, arraydescr, v_num_elem, v_result):
+        """Generate a CALL_MALLOC_GC(malloc_array_fn, ...) going either
+        to the standard or the nonstandard version of the function."""
+        #
+        if (arraydescr.basesize == self.gc_ll_descr.standard_array_basesize
+            and arraydescr.lendescr.offset ==
+                self.gc_ll_descr.standard_array_length_ofs):
+            # this is a standard-looking array, common case
+            addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_array')
+            args = [ConstInt(addr),
+                    ConstInt(arraydescr.itemsize),
+                    ConstInt(arraydescr.tid),
+                    v_num_elem]
+            calldescr = self.gc_ll_descr.malloc_array_descr
+        else:
+            # rare case, so we don't care too much about the number of arguments
+            addr = self.gc_ll_descr.get_malloc_fn_addr(
+                                              'malloc_array_nonstandard')
+            args = [ConstInt(addr),
+                    ConstInt(arraydescr.basesize),
+                    ConstInt(arraydescr.itemsize),
+                    ConstInt(arraydescr.lendescr.offset),
+                    ConstInt(arraydescr.tid),
+                    v_num_elem]
+            calldescr = self.gc_ll_descr.malloc_array_nonstandard_descr
+        self._gen_call_malloc_gc(args, v_result, calldescr)
+
+    def gen_malloc_str(self, v_num_elem, v_result):
+        """Generate a CALL_MALLOC_GC(malloc_str_fn, ...)."""
+        addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_str')
+        self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result,
+                                 self.gc_ll_descr.malloc_str_descr)
+
+    def gen_malloc_unicode(self, v_num_elem, v_result):
+        """Generate a CALL_MALLOC_GC(malloc_unicode_fn, ...)."""
+        addr = self.gc_ll_descr.get_malloc_fn_addr('malloc_unicode')
+        self._gen_call_malloc_gc([ConstInt(addr), v_num_elem], v_result,
+                                 self.gc_ll_descr.malloc_unicode_descr)
+
+    def gen_malloc_nursery(self, size, v_result):
+        """Try to generate or update a CALL_MALLOC_NURSERY.
+        If that fails, generate a plain CALL_MALLOC_GC instead.
+        """
+        size = self.round_up_for_allocation(size)
+        if not self.gc_ll_descr.can_use_nursery_malloc(size):
+            self.gen_malloc_fixedsize(size, v_result)
+            return
+        #
+        op = None
+        if self._op_malloc_nursery is not None:
+            # already a MALLOC_NURSERY: increment its total size
+            total_size = self._op_malloc_nursery.getarg(0).getint()
+            total_size += size
+            if self.gc_ll_descr.can_use_nursery_malloc(total_size):
+                # if the total size is still reasonable, merge it
+                self._op_malloc_nursery.setarg(0, ConstInt(total_size))
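+                # the new object starts right after the one malloced
+                # previously, so compute its address with an INT_ADD
+                # instead of emitting a second malloc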
+                op = ResOperation(rop.INT_ADD,
+                                  [self._v_last_malloced_nursery,
+                                   ConstInt(self._previous_size)],
+                                  v_result)
+        if op is None:
+            # if we failed to merge with a previous MALLOC_NURSERY, emit one
+            self.emitting_an_operation_that_can_collect()
+            op = ResOperation(rop.CALL_MALLOC_NURSERY,
+                              [ConstInt(size)],
+                              v_result)
+            self._op_malloc_nursery = op
+        #
+        self.newops.append(op)
+        self._previous_size = size
+        self._v_last_malloced_nursery = v_result
+        self.recent_mallocs[v_result] = None
+
+    def gen_initialize_tid(self, v_newgcobj, tid):
+        if self.gc_ll_descr.fielddescr_tid is not None:
+            # produce a SETFIELD to initialize the GC header
+            op = ResOperation(rop.SETFIELD_GC,
+                              [v_newgcobj, ConstInt(tid)], None,
+                              descr=self.gc_ll_descr.fielddescr_tid)
+            self.newops.append(op)
+
+    def gen_initialize_len(self, v_newgcobj, v_length, arraylen_descr):
+        # produce a SETFIELD to initialize the array length
+        op = ResOperation(rop.SETFIELD_GC,
+                          [v_newgcobj, v_length], None,
+                          descr=arraylen_descr)
+        self.newops.append(op)
+
+    # ----------
+
+    def handle_write_barrier_setfield(self, op):
+        val = op.getarg(0)
+        # no need for a write barrier in the case of previous malloc
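+        # (freshly malloced objects are still in the nursery, so the write
+        # barrier would be a no-op for them anyway)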
+        if val not in self.recent_mallocs:
+            v = op.getarg(1)
+            if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
+                                         bool(v.value)): # store a non-NULL
+                self.gen_write_barrier(op.getarg(0), v)
+                op = op.copy_and_change(rop.SETFIELD_RAW)
+        self.newops.append(op)
+
+    def handle_write_barrier_setinteriorfield(self, op):
+        val = op.getarg(0)
+        # no need for a write barrier in the case of previous malloc
+        if val not in self.recent_mallocs:
+            v = op.getarg(2)
+            if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
+                                         bool(v.value)): # store a non-NULL
+                self.gen_write_barrier(op.getarg(0), v)
+                op = op.copy_and_change(rop.SETINTERIORFIELD_RAW)
+        self.newops.append(op)
+
+    def handle_write_barrier_setarrayitem(self, op):
+        val = op.getarg(0)
+        # no need for a write barrier in the case of previous malloc
+        if val not in self.recent_mallocs:
+            v = op.getarg(2)
+            if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
+                                         bool(v.value)): # store a non-NULL
+                self.gen_write_barrier_array(op.getarg(0),
+                                             op.getarg(1), v)
+                op = op.copy_and_change(rop.SETARRAYITEM_RAW)
+        self.newops.append(op)
+
+    def gen_write_barrier(self, v_base, v_value):
+        write_barrier_descr = self.gc_ll_descr.write_barrier_descr
+        args = [v_base, v_value]
+        self.newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None,
+                                        descr=write_barrier_descr))
+
+    def gen_write_barrier_array(self, v_base, v_index, v_value):
+        write_barrier_descr = self.gc_ll_descr.write_barrier_descr
+        if write_barrier_descr.has_write_barrier_from_array(self.cpu):
+            # If we know statically the length of 'v_base', and it is not
+            # too big, then produce a regular write_barrier.  If it's
+            # unknown or too big, produce instead a write_barrier_from_array.
+            LARGE = 130
+            length = self.known_lengths.get(v_base, LARGE)
+            if length >= LARGE:
+                # unknown or too big: produce a write_barrier_from_array
+                args = [v_base, v_index, v_value]
+                self.newops.append(
+                    ResOperation(rop.COND_CALL_GC_WB_ARRAY, args, None,
+                                 descr=write_barrier_descr))
+                return
+        # fall-back case: produce a write_barrier
+        self.gen_write_barrier(v_base, v_value)
+
+    def round_up_for_allocation(self, size):
+        if not self.gc_ll_descr.round_up:
+            return size
+        if self.gc_ll_descr.translate_support_code:
+            from pypy.rpython.lltypesystem import llarena
+            return llarena.round_up_for_allocation(
+                size, self.gc_ll_descr.minimal_size_in_nursery)
+        else:
+            # non-translated: do it manually
+            # assume that "self.gc_ll_descr.minimal_size_in_nursery" is 2 WORDs
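+            # e.g. assuming WORD == 8: a request for 1 byte gives 16 (the
+            # 2-WORD minimum), and a request for 17 bytes gives 24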
+            size = max(size, 2 * WORD)
+            return (size + WORD-1) & ~(WORD-1)     # round up
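
For orientation while reading the tests touched further down, a minimal sketch
of how this class is driven (gc_ll_descr, cpu and operations are assumed to
come from the surrounding test fixtures; cpu may be None where no write
barrier is exercised):

    # hypothetical driver, mirroring test_gc.py / test_rewrite.py below
    rewriter = GcRewriterAssembler(gc_ll_descr, cpu)
    newops = rewriter.rewrite(operations)   # rewritten list of operations
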
diff --git a/pypy/jit/backend/llsupport/test/test_descr.py b/pypy/jit/backend/llsupport/test/test_descr.py
--- a/pypy/jit/backend/llsupport/test/test_descr.py
+++ b/pypy/jit/backend/llsupport/test/test_descr.py
@@ -1,4 +1,4 @@
-from pypy.rpython.lltypesystem import lltype, rffi
+from pypy.rpython.lltypesystem import lltype, rffi, rstr
 from pypy.jit.backend.llsupport.descr import *
 from pypy.jit.backend.llsupport import symbolic
 from pypy.rlib.objectmodel import Symbolic
@@ -53,18 +53,6 @@
                              ('z', lltype.Ptr(U)),
                              ('f', lltype.Float),
                              ('s', lltype.SingleFloat))
-    assert getFieldDescrClass(lltype.Ptr(T)) is GcPtrFieldDescr
-    assert getFieldDescrClass(lltype.Ptr(U)) is NonGcPtrFieldDescr
-    cls = getFieldDescrClass(lltype.Char)
-    assert cls != getFieldDescrClass(lltype.Signed)
-    assert cls == getFieldDescrClass(lltype.Char)
-    clsf = getFieldDescrClass(lltype.Float)
-    assert clsf != cls
-    assert clsf == getFieldDescrClass(lltype.Float)
-    clss = getFieldDescrClass(lltype.SingleFloat)
-    assert clss not in (cls, clsf)
-    assert clss == getFieldDescrClass(lltype.SingleFloat)
-    assert clss == getFieldDescrClass(rffi.UINT)    # for now
     #
     c0 = GcCache(False)
     c1 = GcCache(True)
@@ -77,11 +65,7 @@
         descr_z = get_field_descr(c2, S, 'z')
         descr_f = get_field_descr(c2, S, 'f')
         descr_s = get_field_descr(c2, S, 's')
-        assert descr_x.__class__ is cls
-        assert descr_y.__class__ is GcPtrFieldDescr
-        assert descr_z.__class__ is NonGcPtrFieldDescr
-        assert descr_f.__class__ is clsf
-        assert descr_s.__class__ is clss
+        assert isinstance(descr_x, FieldDescr)
         assert descr_x.name == 'S.x'
         assert descr_y.name == 'S.y'
         assert descr_z.name == 'S.z'
@@ -90,33 +74,27 @@
         if not tsc:
             assert descr_x.offset < descr_y.offset < descr_z.offset
             assert descr_x.sort_key() < descr_y.sort_key() < descr_z.sort_key()
-            assert descr_x.get_field_size(False) == rffi.sizeof(lltype.Char)
-            assert descr_y.get_field_size(False) == rffi.sizeof(lltype.Ptr(T))
-            assert descr_z.get_field_size(False) == rffi.sizeof(lltype.Ptr(U))
-            assert descr_f.get_field_size(False) == rffi.sizeof(lltype.Float)
-            assert descr_s.get_field_size(False) == rffi.sizeof(
-                                                            lltype.SingleFloat)
+            assert descr_x.field_size == rffi.sizeof(lltype.Char)
+            assert descr_y.field_size == rffi.sizeof(lltype.Ptr(T))
+            assert descr_z.field_size == rffi.sizeof(lltype.Ptr(U))
+            assert descr_f.field_size == rffi.sizeof(lltype.Float)
+            assert descr_s.field_size == rffi.sizeof(lltype.SingleFloat)
         else:
             assert isinstance(descr_x.offset, Symbolic)
             assert isinstance(descr_y.offset, Symbolic)
             assert isinstance(descr_z.offset, Symbolic)
             assert isinstance(descr_f.offset, Symbolic)
             assert isinstance(descr_s.offset, Symbolic)
-            assert isinstance(descr_x.get_field_size(True), Symbolic)
-            assert isinstance(descr_y.get_field_size(True), Symbolic)
-            assert isinstance(descr_z.get_field_size(True), Symbolic)
-            assert isinstance(descr_f.get_field_size(True), Symbolic)
-            assert isinstance(descr_s.get_field_size(True), Symbolic)
-        assert not descr_x.is_pointer_field()
-        assert     descr_y.is_pointer_field()
-        assert not descr_z.is_pointer_field()
-        assert not descr_f.is_pointer_field()
-        assert not descr_s.is_pointer_field()
-        assert not descr_x.is_float_field()
-        assert not descr_y.is_float_field()
-        assert not descr_z.is_float_field()
-        assert     descr_f.is_float_field()
-        assert not descr_s.is_float_field()
+            assert isinstance(descr_x.field_size, Symbolic)
+            assert isinstance(descr_y.field_size, Symbolic)
+            assert isinstance(descr_z.field_size, Symbolic)
+            assert isinstance(descr_f.field_size, Symbolic)
+            assert isinstance(descr_s.field_size, Symbolic)
+        assert descr_x.flag == FLAG_UNSIGNED
+        assert descr_y.flag == FLAG_POINTER
+        assert descr_z.flag == FLAG_UNSIGNED
+        assert descr_f.flag == FLAG_FLOAT
+        assert descr_s.flag == FLAG_UNSIGNED
 
 
 def test_get_field_descr_sign():
@@ -128,7 +106,8 @@
         for tsc in [False, True]:
             c2 = GcCache(tsc)
             descr_x = get_field_descr(c2, S, 'x')
-            assert descr_x.is_field_signed() == signed
+            assert descr_x.flag == {False: FLAG_UNSIGNED,
+                                    True:  FLAG_SIGNED  }[signed]
 
 def test_get_field_descr_longlong():
     if sys.maxint > 2147483647:
@@ -136,9 +115,8 @@
     c0 = GcCache(False)
     S = lltype.GcStruct('S', ('y', lltype.UnsignedLongLong))
     descr = get_field_descr(c0, S, 'y')
-    assert not descr.is_pointer_field()
-    assert descr.is_float_field()
-    assert descr.get_field_size(False) == 8
+    assert descr.flag == FLAG_FLOAT
+    assert descr.field_size == 8
 
 
 def test_get_array_descr():
@@ -149,19 +127,8 @@
     A3 = lltype.GcArray(lltype.Ptr(U))
     A4 = lltype.GcArray(lltype.Float)
     A5 = lltype.GcArray(lltype.Struct('x', ('v', lltype.Signed),
-                                      ('k', lltype.Signed)))
+                                           ('k', lltype.Signed)))
     A6 = lltype.GcArray(lltype.SingleFloat)
-    assert getArrayDescrClass(A2) is GcPtrArrayDescr
-    assert getArrayDescrClass(A3) is NonGcPtrArrayDescr
-    cls = getArrayDescrClass(A1)
-    assert cls != getArrayDescrClass(lltype.GcArray(lltype.Signed))
-    assert cls == getArrayDescrClass(lltype.GcArray(lltype.Char))
-    clsf = getArrayDescrClass(A4)
-    assert clsf != cls
-    assert clsf == getArrayDescrClass(lltype.GcArray(lltype.Float))
-    clss = getArrayDescrClass(A6)
-    assert clss not in (clsf, cls)
-    assert clss == getArrayDescrClass(lltype.GcArray(rffi.UINT))
     #
     c0 = GcCache(False)
     descr1 = get_array_descr(c0, A1)
@@ -170,82 +137,61 @@
     descr4 = get_array_descr(c0, A4)
     descr5 = get_array_descr(c0, A5)
     descr6 = get_array_descr(c0, A6)
-    assert descr1.__class__ is cls
-    assert descr2.__class__ is GcPtrArrayDescr
-    assert descr3.__class__ is NonGcPtrArrayDescr
-    assert descr4.__class__ is clsf
-    assert descr6.__class__ is clss
+    assert isinstance(descr1, ArrayDescr)
     assert descr1 == get_array_descr(c0, lltype.GcArray(lltype.Char))
-    assert not descr1.is_array_of_pointers()
-    assert     descr2.is_array_of_pointers()
-    assert not descr3.is_array_of_pointers()
-    assert not descr4.is_array_of_pointers()
-    assert not descr5.is_array_of_pointers()
-    assert not descr1.is_array_of_floats()
-    assert not descr2.is_array_of_floats()
-    assert not descr3.is_array_of_floats()
-    assert     descr4.is_array_of_floats()
-    assert not descr5.is_array_of_floats()
+    assert descr1.flag == FLAG_UNSIGNED
+    assert descr2.flag == FLAG_POINTER
+    assert descr3.flag == FLAG_UNSIGNED
+    assert descr4.flag == FLAG_FLOAT
+    assert descr5.flag == FLAG_STRUCT
+    assert descr6.flag == FLAG_UNSIGNED
     #
     def get_alignment(code):
         # Retrieve default alignment for the compiler/platform
         return struct.calcsize('l' + code) - struct.calcsize(code)
-    assert descr1.get_base_size(False) == get_alignment('c')
-    assert descr2.get_base_size(False) == get_alignment('p')
-    assert descr3.get_base_size(False) == get_alignment('p')
-    assert descr4.get_base_size(False) == get_alignment('d')
-    assert descr5.get_base_size(False) == get_alignment('f')
-    assert descr1.get_ofs_length(False) == 0
-    assert descr2.get_ofs_length(False) == 0
-    assert descr3.get_ofs_length(False) == 0
-    assert descr4.get_ofs_length(False) == 0
-    assert descr5.get_ofs_length(False) == 0
-    assert descr1.get_item_size(False) == rffi.sizeof(lltype.Char)
-    assert descr2.get_item_size(False) == rffi.sizeof(lltype.Ptr(T))
-    assert descr3.get_item_size(False) == rffi.sizeof(lltype.Ptr(U))
-    assert descr4.get_item_size(False) == rffi.sizeof(lltype.Float)
-    assert descr5.get_item_size(False) == rffi.sizeof(lltype.Signed) * 2
-    assert descr6.get_item_size(False) == rffi.sizeof(lltype.SingleFloat)
+    assert descr1.basesize == get_alignment('c')
+    assert descr2.basesize == get_alignment('p')
+    assert descr3.basesize == get_alignment('p')
+    assert descr4.basesize == get_alignment('d')
+    assert descr5.basesize == get_alignment('f')
+    assert descr1.lendescr.offset == 0
+    assert descr2.lendescr.offset == 0
+    assert descr3.lendescr.offset == 0
+    assert descr4.lendescr.offset == 0
+    assert descr5.lendescr.offset == 0
+    assert descr1.itemsize == rffi.sizeof(lltype.Char)
+    assert descr2.itemsize == rffi.sizeof(lltype.Ptr(T))
+    assert descr3.itemsize == rffi.sizeof(lltype.Ptr(U))
+    assert descr4.itemsize == rffi.sizeof(lltype.Float)
+    assert descr5.itemsize == rffi.sizeof(lltype.Signed) * 2
+    assert descr6.itemsize == rffi.sizeof(lltype.SingleFloat)
     #
-    assert isinstance(descr1.get_base_size(True), Symbolic)
-    assert isinstance(descr2.get_base_size(True), Symbolic)
-    assert isinstance(descr3.get_base_size(True), Symbolic)
-    assert isinstance(descr4.get_base_size(True), Symbolic)
-    assert isinstance(descr5.get_base_size(True), Symbolic)
-    assert isinstance(descr1.get_ofs_length(True), Symbolic)
-    assert isinstance(descr2.get_ofs_length(True), Symbolic)
-    assert isinstance(descr3.get_ofs_length(True), Symbolic)
-    assert isinstance(descr4.get_ofs_length(True), Symbolic)
-    assert isinstance(descr5.get_ofs_length(True), Symbolic)
-    assert isinstance(descr1.get_item_size(True), Symbolic)
-    assert isinstance(descr2.get_item_size(True), Symbolic)
-    assert isinstance(descr3.get_item_size(True), Symbolic)
-    assert isinstance(descr4.get_item_size(True), Symbolic)
-    assert isinstance(descr5.get_item_size(True), Symbolic)
     CA = rffi.CArray(lltype.Signed)
     descr = get_array_descr(c0, CA)
-    assert not descr.is_array_of_floats()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_SIGNED
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(lltype.Ptr(lltype.GcStruct('S')))
     descr = get_array_descr(c0, CA)
-    assert descr.is_array_of_pointers()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_POINTER
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(lltype.Ptr(lltype.Struct('S')))
     descr = get_array_descr(c0, CA)
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_UNSIGNED
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(lltype.Float)
     descr = get_array_descr(c0, CA)
-    assert descr.is_array_of_floats()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_FLOAT
+    assert descr.basesize == 0
+    assert descr.lendescr is None
     CA = rffi.CArray(rffi.FLOAT)
     descr = get_array_descr(c0, CA)
-    assert not descr.is_array_of_floats()
-    assert descr.get_base_size(False) == 0
-    assert descr.get_ofs_length(False) == -1
+    assert descr.flag == FLAG_UNSIGNED
+    assert descr.basesize == 0
+    assert descr.itemsize == rffi.sizeof(lltype.SingleFloat)
+    assert descr.lendescr is None
 
 
 def test_get_array_descr_sign():
@@ -257,46 +203,55 @@
         for tsc in [False, True]:
             c2 = GcCache(tsc)
             arraydescr = get_array_descr(c2, A)
-            assert arraydescr.is_item_signed() == signed
+            assert arraydescr.flag == {False: FLAG_UNSIGNED,
+                                       True:  FLAG_SIGNED  }[signed]
         #
         RA = rffi.CArray(RESTYPE)
         for tsc in [False, True]:
             c2 = GcCache(tsc)
             arraydescr = get_array_descr(c2, RA)
-            assert arraydescr.is_item_signed() == signed
+            assert arraydescr.flag == {False: FLAG_UNSIGNED,
+                                       True:  FLAG_SIGNED  }[signed]
+
+
+def test_get_array_descr_str():
+    c0 = GcCache(False)
+    descr1 = get_array_descr(c0, rstr.STR)
+    assert descr1.itemsize == rffi.sizeof(lltype.Char)
+    assert descr1.flag == FLAG_UNSIGNED
 
 
 def test_get_call_descr_not_translated():
     c0 = GcCache(False)
     descr1 = get_call_descr(c0, [lltype.Char, lltype.Signed], lltype.Char)
-    assert descr1.get_result_size(False) == rffi.sizeof(lltype.Char)
-    assert descr1.get_return_type() == history.INT
+    assert descr1.get_result_size() == rffi.sizeof(lltype.Char)
+    assert descr1.get_result_type() == history.INT
     assert descr1.arg_classes == "ii"
     #
     T = lltype.GcStruct('T')
     descr2 = get_call_descr(c0, [lltype.Ptr(T)], lltype.Ptr(T))
-    assert descr2.get_result_size(False) == rffi.sizeof(lltype.Ptr(T))
-    assert descr2.get_return_type() == history.REF
+    assert descr2.get_result_size() == rffi.sizeof(lltype.Ptr(T))
+    assert descr2.get_result_type() == history.REF
     assert descr2.arg_classes == "r"
     #
     U = lltype.GcStruct('U', ('x', lltype.Signed))
     assert descr2 == get_call_descr(c0, [lltype.Ptr(U)], lltype.Ptr(U))
     #
     V = lltype.Struct('V', ('x', lltype.Signed))
-    assert (get_call_descr(c0, [], lltype.Ptr(V)).get_return_type() ==
+    assert (get_call_descr(c0, [], lltype.Ptr(V)).get_result_type() ==
             history.INT)
     #
-    assert (get_call_descr(c0, [], lltype.Void).get_return_type() ==
+    assert (get_call_descr(c0, [], lltype.Void).get_result_type() ==
             history.VOID)
     #
     descr4 = get_call_descr(c0, [lltype.Float, lltype.Float], lltype.Float)
-    assert descr4.get_result_size(False) == rffi.sizeof(lltype.Float)
-    assert descr4.get_return_type() == history.FLOAT
+    assert descr4.get_result_size() == rffi.sizeof(lltype.Float)
+    assert descr4.get_result_type() == history.FLOAT
     assert descr4.arg_classes == "ff"
     #
     descr5 = get_call_descr(c0, [lltype.SingleFloat], lltype.SingleFloat)
-    assert descr5.get_result_size(False) == rffi.sizeof(lltype.SingleFloat)
-    assert descr5.get_return_type() == "S"
+    assert descr5.get_result_size() == rffi.sizeof(lltype.SingleFloat)
+    assert descr5.get_result_type() == "S"
     assert descr5.arg_classes == "S"
 
 def test_get_call_descr_not_translated_longlong():
@@ -305,13 +260,13 @@
     c0 = GcCache(False)
     #
     descr5 = get_call_descr(c0, [lltype.SignedLongLong], lltype.Signed)
-    assert descr5.get_result_size(False) == 4
-    assert descr5.get_return_type() == history.INT
+    assert descr5.get_result_size() == 4
+    assert descr5.get_result_type() == history.INT
     assert descr5.arg_classes == "L"
     #
     descr6 = get_call_descr(c0, [lltype.Signed], lltype.SignedLongLong)
-    assert descr6.get_result_size(False) == 8
-    assert descr6.get_return_type() == "L"
+    assert descr6.get_result_size() == 8
+    assert descr6.get_result_type() == "L"
     assert descr6.arg_classes == "i"
 
 def test_get_call_descr_translated():
@@ -319,18 +274,18 @@
     T = lltype.GcStruct('T')
     U = lltype.GcStruct('U', ('x', lltype.Signed))
     descr3 = get_call_descr(c1, [lltype.Ptr(T)], lltype.Ptr(U))
-    assert isinstance(descr3.get_result_size(True), Symbolic)
-    assert descr3.get_return_type() == history.REF
+    assert isinstance(descr3.get_result_size(), Symbolic)
+    assert descr3.get_result_type() == history.REF
     assert descr3.arg_classes == "r"
     #
     descr4 = get_call_descr(c1, [lltype.Float, lltype.Float], lltype.Float)
-    assert isinstance(descr4.get_result_size(True), Symbolic)
-    assert descr4.get_return_type() == history.FLOAT
+    assert isinstance(descr4.get_result_size(), Symbolic)
+    assert descr4.get_result_type() == history.FLOAT
     assert descr4.arg_classes == "ff"
     #
     descr5 = get_call_descr(c1, [lltype.SingleFloat], lltype.SingleFloat)
-    assert isinstance(descr5.get_result_size(True), Symbolic)
-    assert descr5.get_return_type() == "S"
+    assert isinstance(descr5.get_result_size(), Symbolic)
+    assert descr5.get_result_type() == "S"
     assert descr5.arg_classes == "S"
 
 def test_call_descr_extra_info():
@@ -358,6 +313,10 @@
 
 
 def test_repr_of_descr():
+    def repr_of_descr(descr):
+        s = descr.repr_of_descr()
+        assert ',' not in s  # makes life easier for pypy.tool.jitlogparser
+        return s
     c0 = GcCache(False)
     T = lltype.GcStruct('T')
     S = lltype.GcStruct('S', ('x', lltype.Char),
@@ -365,33 +324,34 @@
                              ('z', lltype.Ptr(T)))
     descr1 = get_size_descr(c0, S)
     s = symbolic.get_size(S, False)
-    assert descr1.repr_of_descr() == '<SizeDescr %d>' % s
+    assert repr_of_descr(descr1) == '<SizeDescr %d>' % s
     #
     descr2 = get_field_descr(c0, S, 'y')
     o, _ = symbolic.get_field_token(S, 'y', False)
-    assert descr2.repr_of_descr() == '<GcPtrFieldDescr S.y %d>' % o
+    assert repr_of_descr(descr2) == '<FieldP S.y %d>' % o
     #
     descr2i = get_field_descr(c0, S, 'x')
     o, _ = symbolic.get_field_token(S, 'x', False)
-    assert descr2i.repr_of_descr() == '<CharFieldDescr S.x %d>' % o
+    assert repr_of_descr(descr2i) == '<FieldU S.x %d>' % o
     #
     descr3 = get_array_descr(c0, lltype.GcArray(lltype.Ptr(S)))
-    assert descr3.repr_of_descr() == '<GcPtrArrayDescr>'
+    o = symbolic.get_size(lltype.Ptr(S), False)
+    assert repr_of_descr(descr3) == '<ArrayP %d>' % o
     #
     descr3i = get_array_descr(c0, lltype.GcArray(lltype.Char))
-    assert descr3i.repr_of_descr() == '<CharArrayDescr>'
+    assert repr_of_descr(descr3i) == '<ArrayU 1>'
     #
     descr4 = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Ptr(S))
-    assert 'GcPtrCallDescr' in descr4.repr_of_descr()
+    assert repr_of_descr(descr4) == '<Callr %d ir>' % o
     #
     descr4i = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Char)
-    assert 'CharCallDescr' in descr4i.repr_of_descr()
+    assert repr_of_descr(descr4i) == '<Calli 1 ir>'
     #
     descr4f = get_call_descr(c0, [lltype.Char, lltype.Ptr(S)], lltype.Float)
-    assert 'FloatCallDescr' in descr4f.repr_of_descr()
+    assert repr_of_descr(descr4f) == '<Callf 8 ir>'
     #
     descr5f = get_call_descr(c0, [lltype.Char], lltype.SingleFloat)
-    assert 'SingleFloatCallDescr' in descr5f.repr_of_descr()
+    assert repr_of_descr(descr5f) == '<CallS 4 i>'
 
 def test_call_stubs_1():
     c0 = GcCache(False)
@@ -401,10 +361,10 @@
     def f(a, b):
         return 'c'
 
-    call_stub = descr1.call_stub
     fnptr = llhelper(lltype.Ptr(lltype.FuncType(ARGS, RES)), f)
 
-    res = call_stub(rffi.cast(lltype.Signed, fnptr), [1, 2], None, None)
+    res = descr1.call_stub_i(rffi.cast(lltype.Signed, fnptr),
+                             [1, 2], None, None)
     assert res == ord('c')
 
 def test_call_stubs_2():
@@ -421,8 +381,8 @@
     a = lltype.malloc(ARRAY, 3)
     opaquea = lltype.cast_opaque_ptr(llmemory.GCREF, a)
     a[0] = 1
-    res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr),
-                           [], [opaquea], [longlong.getfloatstorage(3.5)])
+    res = descr2.call_stub_f(rffi.cast(lltype.Signed, fnptr),
+                             [], [opaquea], [longlong.getfloatstorage(3.5)])
     assert longlong.getrealfloat(res) == 4.5
 
 def test_call_stubs_single_float():
@@ -445,6 +405,22 @@
     a = intmask(singlefloat2uint(r_singlefloat(-10.0)))
     b = intmask(singlefloat2uint(r_singlefloat(3.0)))
     c = intmask(singlefloat2uint(r_singlefloat(2.0)))
-    res = descr2.call_stub(rffi.cast(lltype.Signed, fnptr),
-                           [a, b, c], [], [])
+    res = descr2.call_stub_i(rffi.cast(lltype.Signed, fnptr),
+                             [a, b, c], [], [])
     assert float(uint2singlefloat(rffi.r_uint(res))) == -11.5
+
+def test_field_arraylen_descr():
+    c0 = GcCache(True)
+    A1 = lltype.GcArray(lltype.Signed)
+    fielddescr = get_field_arraylen_descr(c0, A1)
+    assert isinstance(fielddescr, FieldDescr)
+    ofs = fielddescr.offset
+    assert repr(ofs) == '< ArrayLengthOffset <GcArray of Signed > >'
+    #
+    fielddescr = get_field_arraylen_descr(c0, rstr.STR)
+    ofs = fielddescr.offset
+    assert repr(ofs) == ("< <FieldOffset <GcStruct rpy_string { hash, chars }>"
+                         " 'chars'> + < ArrayLengthOffset"
+                         " <Array of Char > > >")
+    # caching:
+    assert fielddescr is get_field_arraylen_descr(c0, rstr.STR)
diff --git a/pypy/jit/backend/llsupport/test/test_ffisupport.py b/pypy/jit/backend/llsupport/test/test_ffisupport.py
--- a/pypy/jit/backend/llsupport/test/test_ffisupport.py
+++ b/pypy/jit/backend/llsupport/test/test_ffisupport.py
@@ -1,5 +1,6 @@
 from pypy.rlib.libffi import types
 from pypy.jit.codewriter.longlong import is_64_bit
+from pypy.jit.backend.llsupport.descr import *
 from pypy.jit.backend.llsupport.ffisupport import *
 
 
@@ -13,44 +14,52 @@
 
 def test_call_descr_dynamic():
     args = [types.sint, types.pointer]
-    descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, ffi_flags=42)
-    assert isinstance(descr, DynamicIntCallDescr)
+    descr = get_call_descr_dynamic(FakeCPU(), args, types.sint, None,
+                                   ffi_flags=42)
+    assert isinstance(descr, CallDescr)
+    assert descr.result_type == 'i'
+    assert descr.result_flag == FLAG_SIGNED
     assert descr.arg_classes == 'ii'
     assert descr.get_ffi_flags() == 42
 
     args = [types.sint, types.double, types.pointer]
-    descr = get_call_descr_dynamic(FakeCPU(), args, types.void)
+    descr = get_call_descr_dynamic(FakeCPU(), args, types.void, None, 42)
     assert descr is None    # missing floats
     descr = get_call_descr_dynamic(FakeCPU(supports_floats=True),
-                                   args, types.void, ffi_flags=43)
-    assert isinstance(descr, VoidCallDescr)
+                                   args, types.void, None, ffi_flags=43)
+    assert descr.result_type == 'v'
+    assert descr.result_flag == FLAG_VOID
     assert descr.arg_classes == 'ifi'
     assert descr.get_ffi_flags() == 43
 
-    descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8)
-    assert isinstance(descr, DynamicIntCallDescr)
-    assert descr.get_result_size(False) == 1
+    descr = get_call_descr_dynamic(FakeCPU(), [], types.sint8, None, 42)
+    assert descr.get_result_size() == 1
+    assert descr.result_flag == FLAG_SIGNED
     assert descr.is_result_signed() == True
 
-    descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8)
-    assert isinstance(descr, DynamicIntCallDescr)
-    assert descr.get_result_size(False) == 1
+    descr = get_call_descr_dynamic(FakeCPU(), [], types.uint8, None, 42)
+    assert isinstance(descr, CallDescr)
+    assert descr.get_result_size() == 1
+    assert descr.result_flag == FLAG_UNSIGNED
     assert descr.is_result_signed() == False
 
     if not is_64_bit:
-        descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong)
+        descr = get_call_descr_dynamic(FakeCPU(), [], types.slonglong,
+                                       None, 42)
         assert descr is None   # missing longlongs
         descr = get_call_descr_dynamic(FakeCPU(supports_longlong=True),
-                                       [], types.slonglong, ffi_flags=43)
-        assert isinstance(descr, LongLongCallDescr)
+                                       [], types.slonglong, None, ffi_flags=43)
+        assert isinstance(descr, CallDescr)
+        assert descr.result_flag == FLAG_FLOAT
+        assert descr.result_type == 'L'
         assert descr.get_ffi_flags() == 43
     else:
         assert types.slonglong is types.slong
 
-    descr = get_call_descr_dynamic(FakeCPU(), [], types.float)
+    descr = get_call_descr_dynamic(FakeCPU(), [], types.float, None, 42)
     assert descr is None   # missing singlefloats
     descr = get_call_descr_dynamic(FakeCPU(supports_singlefloats=True),
-                                   [], types.float, ffi_flags=44)
-    SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT)
-    assert isinstance(descr, SingleFloatCallDescr)
+                                   [], types.float, None, ffi_flags=44)
+    assert descr.result_flag == FLAG_UNSIGNED
+    assert descr.result_type == 'S'
     assert descr.get_ffi_flags() == 44
diff --git a/pypy/jit/backend/llsupport/test/test_gc.py b/pypy/jit/backend/llsupport/test/test_gc.py
--- a/pypy/jit/backend/llsupport/test/test_gc.py
+++ b/pypy/jit/backend/llsupport/test/test_gc.py
@@ -6,6 +6,7 @@
 from pypy.jit.backend.llsupport.gc import *
 from pypy.jit.backend.llsupport import symbolic
 from pypy.jit.metainterp.gc import get_description
+from pypy.jit.metainterp.history import BoxPtr, BoxInt, ConstPtr
 from pypy.jit.metainterp.resoperation import get_deep_immutable_oplist
 from pypy.jit.tool.oparser import parse
 from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE
@@ -15,12 +16,12 @@
     gc_ll_descr = GcLLDescr_boehm(None, None, None)
     #
     record = []
-    prev_funcptr_for_new = gc_ll_descr.funcptr_for_new
-    def my_funcptr_for_new(size):
-        p = prev_funcptr_for_new(size)
+    prev_malloc_fn_ptr = gc_ll_descr.malloc_fn_ptr
+    def my_malloc_fn_ptr(size):
+        p = prev_malloc_fn_ptr(size)
         record.append((size, p))
         return p
-    gc_ll_descr.funcptr_for_new = my_funcptr_for_new
+    gc_ll_descr.malloc_fn_ptr = my_malloc_fn_ptr
     #
     # ---------- gc_malloc ----------
     S = lltype.GcStruct('S', ('x', lltype.Signed))
@@ -32,8 +33,8 @@
     A = lltype.GcArray(lltype.Signed)
     arraydescr = get_array_descr(gc_ll_descr, A)
     p = gc_ll_descr.gc_malloc_array(arraydescr, 10)
-    assert record == [(arraydescr.get_base_size(False) +
-                       10 * arraydescr.get_item_size(False), p)]
+    assert record == [(arraydescr.basesize +
+                       10 * arraydescr.itemsize, p)]
     del record[:]
     # ---------- gc_malloc_str ----------
     p = gc_ll_descr.gc_malloc_str(10)
@@ -246,22 +247,28 @@
     def __init__(self):
         self.record = []
 
+    def _malloc(self, type_id, size):
+        tid = llop.combine_ushort(lltype.Signed, type_id, 0)
+        x = llmemory.raw_malloc(self.gcheaderbuilder.size_gc_header + size)
+        x += self.gcheaderbuilder.size_gc_header
+        return x, tid
+
     def do_malloc_fixedsize_clear(self, RESTYPE, type_id, size,
-                                  has_finalizer, contains_weakptr):
+                                  has_finalizer, has_light_finalizer,
+                                  contains_weakptr):
         assert not contains_weakptr
-        p = llmemory.raw_malloc(size)
+        assert not has_finalizer
+        assert not has_light_finalizer
+        p, tid = self._malloc(type_id, size)
         p = llmemory.cast_adr_to_ptr(p, RESTYPE)
-        flags = int(has_finalizer) << 16
-        tid = llop.combine_ushort(lltype.Signed, type_id, flags)
         self.record.append(("fixedsize", repr(size), tid, p))
         return p
 
     def do_malloc_varsize_clear(self, RESTYPE, type_id, length, size,
                                 itemsize, offset_to_length):
-        p = llmemory.raw_malloc(size + itemsize * length)
+        p, tid = self._malloc(type_id, size + itemsize * length)
         (p + offset_to_length).signed[0] = length
         p = llmemory.cast_adr_to_ptr(p, RESTYPE)
-        tid = llop.combine_ushort(lltype.Signed, type_id, 0)
         self.record.append(("varsize", tid, length,
                             repr(size), repr(itemsize),
                             repr(offset_to_length), p))
@@ -320,43 +327,40 @@
         gc_ll_descr = GcLLDescr_framework(gcdescr, FakeTranslator(), None,
                                           llop1)
         gc_ll_descr.initialize()
+        llop1.gcheaderbuilder = gc_ll_descr.gcheaderbuilder
         self.llop1 = llop1
         self.gc_ll_descr = gc_ll_descr
         self.fake_cpu = FakeCPU()
 
-    def test_args_for_new(self):
-        S = lltype.GcStruct('S', ('x', lltype.Signed))
-        sizedescr = get_size_descr(self.gc_ll_descr, S)
-        args = self.gc_ll_descr.args_for_new(sizedescr)
-        for x in args:
-            assert lltype.typeOf(x) == lltype.Signed
-        A = lltype.GcArray(lltype.Signed)
-        arraydescr = get_array_descr(self.gc_ll_descr, A)
-        args = self.gc_ll_descr.args_for_new(sizedescr)
-        for x in args:
-            assert lltype.typeOf(x) == lltype.Signed
+##    def test_args_for_new(self):
+##        S = lltype.GcStruct('S', ('x', lltype.Signed))
+##        sizedescr = get_size_descr(self.gc_ll_descr, S)
+##        args = self.gc_ll_descr.args_for_new(sizedescr)
+##        for x in args:
+##            assert lltype.typeOf(x) == lltype.Signed
+##        A = lltype.GcArray(lltype.Signed)
+##        arraydescr = get_array_descr(self.gc_ll_descr, A)
+##        args = self.gc_ll_descr.args_for_new(sizedescr)
+##        for x in args:
+##            assert lltype.typeOf(x) == lltype.Signed
 
     def test_gc_malloc(self):
         S = lltype.GcStruct('S', ('x', lltype.Signed))
         sizedescr = get_size_descr(self.gc_ll_descr, S)
         p = self.gc_ll_descr.gc_malloc(sizedescr)
-        assert self.llop1.record == [("fixedsize",
-                                      repr(sizedescr.size),
+        assert lltype.typeOf(p) == llmemory.GCREF
+        assert self.llop1.record == [("fixedsize", repr(sizedescr.size),
                                       sizedescr.tid, p)]
-        assert repr(self.gc_ll_descr.args_for_new(sizedescr)) == repr(
-            [sizedescr.size, sizedescr.tid])
 
     def test_gc_malloc_array(self):
         A = lltype.GcArray(lltype.Signed)
         arraydescr = get_array_descr(self.gc_ll_descr, A)
         p = self.gc_ll_descr.gc_malloc_array(arraydescr, 10)
         assert self.llop1.record == [("varsize", arraydescr.tid, 10,
-                                      repr(arraydescr.get_base_size(True)),
-                                      repr(arraydescr.get_item_size(True)),
-                                      repr(arraydescr.get_ofs_length(True)),
+                                      repr(arraydescr.basesize),
+                                      repr(arraydescr.itemsize),
+                                      repr(arraydescr.lendescr.offset),
                                       p)]
-        assert repr(self.gc_ll_descr.args_for_new_array(arraydescr)) == repr(
-            [arraydescr.get_item_size(True), arraydescr.tid])
 
     def test_gc_malloc_str(self):
         p = self.gc_ll_descr.gc_malloc_str(10)
@@ -402,10 +406,11 @@
         gc_ll_descr = self.gc_ll_descr
         llop1 = self.llop1
         #
-        newops = []
+        rewriter = GcRewriterAssembler(gc_ll_descr, None)
+        newops = rewriter.newops
         v_base = BoxPtr()
         v_value = BoxPtr()
-        gc_ll_descr._gen_write_barrier(newops, v_base, v_value)
+        rewriter.gen_write_barrier(v_base, v_value)
         assert llop1.record == []
         assert len(newops) == 1
         assert newops[0].getopnum() == rop.COND_CALL_GC_WB
@@ -425,8 +430,7 @@
         operations = gc_ll_descr.rewrite_assembler(None, operations, [])
         assert len(operations) == 0
 
-    def test_rewrite_assembler_1(self):
-        # check recording of ConstPtrs
+    def test_record_constptrs(self):
         class MyFakeCPU(object):
             def cast_adr_to_int(self, adr):
                 assert adr == "some fake address"
@@ -453,189 +457,6 @@
         assert operations2 == operations
         assert gcrefs == [s_gcref]
 
-    def test_rewrite_assembler_2(self):
-        # check write barriers before SETFIELD_GC
-        v_base = BoxPtr()
-        v_value = BoxPtr()
-        field_descr = AbstractDescr()
-        operations = [
-            ResOperation(rop.SETFIELD_GC, [v_base, v_value], None,
-                         descr=field_descr),
-            ]
-        gc_ll_descr = self.gc_ll_descr
-        operations = get_deep_immutable_oplist(operations)
-        operations = gc_ll_descr.rewrite_assembler(self.fake_cpu, operations,
-                                                   [])
-        assert len(operations) == 2
-        #
-        assert operations[0].getopnum() == rop.COND_CALL_GC_WB
-        assert operations[0].getarg(0) == v_base
-        assert operations[0].getarg(1) == v_value
-        assert operations[0].result is None
-        #
-        assert operations[1].getopnum() == rop.SETFIELD_RAW
-        assert operations[1].getarg(0) == v_base
-        assert operations[1].getarg(1) == v_value
-        assert operations[1].getdescr() == field_descr
-
-    def test_rewrite_assembler_3(self):
-        # check write barriers before SETARRAYITEM_GC
-        for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()):
-            v_base = BoxPtr()
-            v_index = BoxInt()
-            v_value = BoxPtr()
-            array_descr = AbstractDescr()
-            operations = [
-                ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value],
-                             None, descr=array_descr),
-                ]
-            if v_new_length is not None:
-                operations.insert(0, ResOperation(rop.NEW_ARRAY,
-                                                  [v_new_length], v_base,
-                                                  descr=array_descr))
-                # we need to insert another, unrelated NEW_ARRAY here
-                # to prevent the initialization_store optimization
-                operations.insert(1, ResOperation(rop.NEW_ARRAY,
-                                                  [ConstInt(12)], BoxPtr(),
-                                                  descr=array_descr))
-            gc_ll_descr = self.gc_ll_descr
-            operations = get_deep_immutable_oplist(operations)
-            operations = gc_ll_descr.rewrite_assembler(self.fake_cpu,
-                                                       operations, [])
-            if v_new_length is not None:
-                assert operations[0].getopnum() == rop.NEW_ARRAY
-                assert operations[1].getopnum() == rop.NEW_ARRAY
-                del operations[:2]
-            assert len(operations) == 2
-            #
-            assert operations[0].getopnum() == rop.COND_CALL_GC_WB
-            assert operations[0].getarg(0) == v_base
-            assert operations[0].getarg(1) == v_value
-            assert operations[0].result is None
-            #
-            assert operations[1].getopnum() == rop.SETARRAYITEM_RAW
-            assert operations[1].getarg(0) == v_base
-            assert operations[1].getarg(1) == v_index
-            assert operations[1].getarg(2) == v_value
-            assert operations[1].getdescr() == array_descr
-
-    def test_rewrite_assembler_4(self):
-        # check write barriers before SETARRAYITEM_GC,
-        # if we have actually a write_barrier_from_array.
-        self.llop1._have_wb_from_array = True
-        for v_new_length in (None, ConstInt(5), ConstInt(5000), BoxInt()):
-            v_base = BoxPtr()
-            v_index = BoxInt()
-            v_value = BoxPtr()
-            array_descr = AbstractDescr()
-            operations = [
-                ResOperation(rop.SETARRAYITEM_GC, [v_base, v_index, v_value],
-                             None, descr=array_descr),
-                ]
-            if v_new_length is not None:
-                operations.insert(0, ResOperation(rop.NEW_ARRAY,
-                                                  [v_new_length], v_base,
-                                                  descr=array_descr))
-                # we need to insert another, unrelated NEW_ARRAY here
-                # to prevent the initialization_store optimization
-                operations.insert(1, ResOperation(rop.NEW_ARRAY,
-                                                  [ConstInt(12)], BoxPtr(),
-                                                  descr=array_descr))
-            gc_ll_descr = self.gc_ll_descr
-            operations = get_deep_immutable_oplist(operations)
-            operations = gc_ll_descr.rewrite_assembler(self.fake_cpu,
-                                                       operations, [])
-            if v_new_length is not None:
-                assert operations[0].getopnum() == rop.NEW_ARRAY
-                assert operations[1].getopnum() == rop.NEW_ARRAY
-                del operations[:2]
-            assert len(operations) == 2
-            #
-            if isinstance(v_new_length, ConstInt) and v_new_length.value < 130:
-                assert operations[0].getopnum() == rop.COND_CALL_GC_WB
-                assert operations[0].getarg(0) == v_base
-                assert operations[0].getarg(1) == v_value
-            else:
-                assert operations[0].getopnum() == rop.COND_CALL_GC_WB_ARRAY
-                assert operations[0].getarg(0) == v_base
-                assert operations[0].getarg(1) == v_index
-                assert operations[0].getarg(2) == v_value
-            assert operations[0].result is None
-            #
-            assert operations[1].getopnum() == rop.SETARRAYITEM_RAW
-            assert operations[1].getarg(0) == v_base
-            assert operations[1].getarg(1) == v_index
-            assert operations[1].getarg(2) == v_value
-            assert operations[1].getdescr() == array_descr
-
-    def test_rewrite_assembler_initialization_store(self):
-        S = lltype.GcStruct('S', ('parent', OBJECT),
-                            ('x', lltype.Signed))
-        s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
-        xdescr = get_field_descr(self.gc_ll_descr, S, 'x')
-        ops = parse("""
-        [p1]
-        p0 = new_with_vtable(ConstClass(s_vtable))
-        setfield_gc(p0, p1, descr=xdescr)
-        jump()
-        """, namespace=locals())
-        expected = parse("""
-        [p1]
-        p0 = new_with_vtable(ConstClass(s_vtable))
-        # no write barrier
-        setfield_gc(p0, p1, descr=xdescr)
-        jump()
-        """, namespace=locals())
-        operations = get_deep_immutable_oplist(ops.operations)
-        operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu,
-                                                        operations, [])
-        equaloplists(operations, expected.operations)
-
-    def test_rewrite_assembler_initialization_store_2(self):
-        S = lltype.GcStruct('S', ('parent', OBJECT),
-                            ('x', lltype.Signed))
-        s_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
-        wbdescr = self.gc_ll_descr.write_barrier_descr
-        xdescr = get_field_descr(self.gc_ll_descr, S, 'x')
-        ops = parse("""
-        [p1]
-        p0 = new_with_vtable(ConstClass(s_vtable))
-        p3 = new_with_vtable(ConstClass(s_vtable))
-        setfield_gc(p0, p1, descr=xdescr)
-        jump()
-        """, namespace=locals())
-        expected = parse("""
-        [p1]
-        p0 = new_with_vtable(ConstClass(s_vtable))
-        p3 = new_with_vtable(ConstClass(s_vtable))
-        cond_call_gc_wb(p0, p1, descr=wbdescr)
-        setfield_raw(p0, p1, descr=xdescr)
-        jump()
-        """, namespace=locals())
-        operations = get_deep_immutable_oplist(ops.operations)
-        operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu,
-                                                        operations, [])
-        equaloplists(operations, expected.operations)
-
-    def test_rewrite_assembler_initialization_store_3(self):
-        A = lltype.GcArray(lltype.Ptr(lltype.GcStruct('S')))
-        arraydescr = get_array_descr(self.gc_ll_descr, A)
-        ops = parse("""
-        [p1]
-        p0 = new_array(3, descr=arraydescr)
-        setarrayitem_gc(p0, 0, p1, descr=arraydescr)
-        jump()
-        """, namespace=locals())
-        expected = parse("""
-        [p1]
-        p0 = new_array(3, descr=arraydescr)
-        setarrayitem_gc(p0, 0, p1, descr=arraydescr)
-        jump()
-        """, namespace=locals())
-        operations = get_deep_immutable_oplist(ops.operations)
-        operations = self.gc_ll_descr.rewrite_assembler(self.fake_cpu,
-                                                        operations, [])
-        equaloplists(operations, expected.operations)
 
 class TestFrameworkMiniMark(TestFramework):
     gc = 'minimark'
diff --git a/pypy/jit/backend/llsupport/test/test_regalloc.py b/pypy/jit/backend/llsupport/test/test_regalloc.py
--- a/pypy/jit/backend/llsupport/test/test_regalloc.py
+++ b/pypy/jit/backend/llsupport/test/test_regalloc.py
@@ -2,6 +2,8 @@
 from pypy.jit.metainterp.history import BoxInt, ConstInt, BoxFloat, INT, FLOAT
 from pypy.jit.backend.llsupport.regalloc import FrameManager
 from pypy.jit.backend.llsupport.regalloc import RegisterManager as BaseRegMan
+from pypy.jit.tool.oparser import parse
+from pypy.jit.backend.detect_cpu import getcpuclass
 
 def newboxes(*values):
     return [BoxInt(v) for v in values]
@@ -40,8 +42,13 @@
     def frame_size(self, box_type):
         if box_type == FLOAT:
             return 2
+        elif box_type == INT:
+            return 1
         else:
-            return 1
+            raise ValueError(box_type)
+    def get_loc_index(self, loc):
+        assert isinstance(loc, FakeFramePos)
+        return loc.pos
 
 class MockAsm(object):
     def __init__(self):
@@ -280,7 +287,7 @@
             rm.force_allocate_reg(b)
         rm.before_call()
         assert len(rm.reg_bindings) == 2
-        assert fm.frame_depth == 2
+        assert fm.get_frame_depth() == 2
         assert len(asm.moves) == 2
         rm._check_invariants()
         rm.after_call(boxes[-1])
@@ -303,7 +310,7 @@
             rm.force_allocate_reg(b)
         rm.before_call(save_all_regs=True)
         assert len(rm.reg_bindings) == 0
-        assert fm.frame_depth == 4
+        assert fm.get_frame_depth() == 4
         assert len(asm.moves) == 4
         rm._check_invariants()
         rm.after_call(boxes[-1])
@@ -325,7 +332,7 @@
         xrm = XRegisterManager(longevity, frame_manager=fm, assembler=asm)
         xrm.loc(f0)
         rm.loc(b0)
-        assert fm.frame_depth == 3
+        assert fm.get_frame_depth() == 3
         
         
 
@@ -346,3 +353,123 @@
         spilled2 = rm.force_allocate_reg(b5)
         assert spilled2 is loc
         rm._check_invariants()
+
+
+    def test_hint_frame_locations_1(self):
+        b0, = newboxes(0)
+        fm = TFrameManager()
+        loc123 = FakeFramePos(123, INT)
+        fm.hint_frame_locations[b0] = loc123
+        assert fm.get_frame_depth() == 0
+        loc = fm.loc(b0)
+        assert loc == loc123
+        assert fm.get_frame_depth() == 124
+
+    def test_hint_frame_locations_2(self):
+        b0, b1, b2 = newboxes(0, 1, 2)
+        longevity = {b0: (0, 1), b1: (0, 2), b2: (0, 2)}
+        fm = TFrameManager()
+        asm = MockAsm()
+        rm = RegisterManager(longevity, frame_manager=fm, assembler=asm)
+        rm.force_allocate_reg(b0)
+        rm.force_allocate_reg(b1)
+        rm.force_allocate_reg(b2)
+        rm.force_spill_var(b0)
+        loc = rm.loc(b0)
+        assert isinstance(loc, FakeFramePos)
+        assert fm.get_loc_index(loc) == 0
+        rm.position = 1
+        assert fm.used == [True]
+        rm.possibly_free_var(b0)
+        assert fm.used == [False]
+        #
+        fm.hint_frame_locations[b1] = loc
+        rm.force_spill_var(b1)
+        loc1 = rm.loc(b1)
+        assert loc1 == loc
+        assert fm.used == [True]
+        #
+        fm.hint_frame_locations[b2] = loc
+        rm.force_spill_var(b2)
+        loc2 = rm.loc(b2)
+        assert loc2 != loc1     # because it was not free
+        assert fm.used == [True, True]
+        #
+        rm._check_invariants()
+
+    def test_frame_manager_basic(self):
+        b0, b1 = newboxes(0, 1)
+        fm = TFrameManager()
+        loc0 = fm.loc(b0)
+        assert fm.get_loc_index(loc0) == 0
+        #
+        assert fm.get(b1) is None
+        loc1 = fm.loc(b1)
+        assert fm.get_loc_index(loc1) == 1
+        assert fm.get(b1) == loc1
+        #
+        loc0b = fm.loc(b0)
+        assert loc0b == loc0
+        #
+        fm.loc(BoxInt())
+        assert fm.get_frame_depth() == 3
+        #
+        f0 = BoxFloat()
+        locf0 = fm.loc(f0)
+        assert fm.get_loc_index(locf0) == 4
+        assert fm.get_frame_depth() == 6
+        #
+        f1 = BoxFloat()
+        locf1 = fm.loc(f1)
+        assert fm.get_loc_index(locf1) == 6
+        assert fm.get_frame_depth() == 8
+        assert fm.used == [True, True, True, False, True, True, True, True]
+        #
+        fm.mark_as_free(b0)
+        assert fm.used == [False, True, True, False, True, True, True, True]
+        fm.mark_as_free(b0)
+        assert fm.used == [False, True, True, False, True, True, True, True]
+        fm.mark_as_free(f1)
+        assert fm.used == [False, True, True, False, True, True, False, False]
+        #
+        fm.reserve_location_in_frame(1)
+        assert fm.get_frame_depth() == 9
+        assert fm.used == [False, True, True, False, True, True, False, False, True]
+        #
+        assert b0 not in fm.bindings
+        fm.set_binding(b0, loc0)
+        assert b0 in fm.bindings
+        assert fm.used == [True, True, True, False, True, True, False, False, True]
+        #
+        b3 = BoxInt()
+        assert not fm.try_to_reuse_location(b3, loc0)
+        assert fm.used == [True, True, True, False, True, True, False, False, True]
+        #
+        fm.mark_as_free(b0)
+        assert fm.used == [False, True, True, False, True, True, False, False, True]
+        assert fm.try_to_reuse_location(b3, loc0)
+        assert fm.used == [True, True, True, False, True, True, False, False, True]
+        #
+        fm.mark_as_free(b0)   # already free
+        assert fm.used == [True, True, True, False, True, True, False, False, True]
+        #
+        fm.mark_as_free(b3)
+        assert fm.used == [False, True, True, False, True, True, False, False, True]
+        f3 = BoxFloat()
+        assert not fm.try_to_reuse_location(f3, fm.frame_pos(0, FLOAT))
+        assert not fm.try_to_reuse_location(f3, fm.frame_pos(1, FLOAT))
+        assert not fm.try_to_reuse_location(f3, fm.frame_pos(2, FLOAT))
+        assert not fm.try_to_reuse_location(f3, fm.frame_pos(3, FLOAT))
+        assert not fm.try_to_reuse_location(f3, fm.frame_pos(4, FLOAT))
+        assert not fm.try_to_reuse_location(f3, fm.frame_pos(5, FLOAT))
+        assert fm.used == [False, True, True, False, True, True, False, False, True]
+        assert fm.try_to_reuse_location(f3, fm.frame_pos(6, FLOAT))
+        assert fm.used == [False, True, True, False, True, True, True, True, True]
+        #
+        fm.used = [False]
+        assert fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT))
+        assert fm.used == [True, True]
+        #
+        fm.used = [True]
+        assert not fm.try_to_reuse_location(BoxFloat(), fm.frame_pos(0, FLOAT))
+        assert fm.used == [True]
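
(Taken together, test_frame_manager_basic exercises the frame manager's slot
bookkeeping: `used` holds one boolean per word-sized slot, INT boxes take one slot,
FLOAT boxes take two slots aligned to an even index, freed slots can be handed out
again, and try_to_reuse_location succeeds only when every slot of the requested
location is free.  A compact, self-contained sketch of that bookkeeping; the class
and method names are illustrative, not the code under test:)

    INT, FLOAT = 1, 2      # number of slots per box kind (illustrative)

    class ToySlotMap(object):
        def __init__(self):
            self.used = []                       # True = slot occupied

        def get_frame_depth(self):
            return len(self.used)

        def _grow_to(self, n):
            while len(self.used) < n:
                self.used.append(False)

        def allocate(self, size):
            # find `size` consecutive free slots, aligned to `size`
            i = 0
            while True:
                self._grow_to(i + size)
                if not any(self.used[i:i + size]):
                    break
                i += size
            for j in range(i, i + size):
                self.used[j] = True
            return i

        def free(self, index, size):
            for j in range(index, index + size):
                self.used[j] = False

        def try_to_reuse(self, index, size):
            self._grow_to(index + size)
            if any(self.used[index:index + size]):
                return False                     # some slot still occupied
            for j in range(index, index + size):
                self.used[j] = True
            return True

    m = ToySlotMap()
    assert m.allocate(INT) == 0 and m.allocate(INT) == 1 and m.allocate(INT) == 2
    assert m.allocate(FLOAT) == 4                # skips slot 3 to stay 2-aligned
    assert m.get_frame_depth() == 6
    m.free(0, INT)
    assert m.try_to_reuse(0, INT)                # slot 0 is free again
    assert not m.try_to_reuse(0, INT)            # ...but only once
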
diff --git a/pypy/jit/backend/llsupport/test/test_rewrite.py b/pypy/jit/backend/llsupport/test/test_rewrite.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/llsupport/test/test_rewrite.py
@@ -0,0 +1,668 @@
+from pypy.jit.backend.llsupport.descr import *
+from pypy.jit.backend.llsupport.gc import *
+from pypy.jit.metainterp.gc import get_description
+from pypy.jit.tool.oparser import parse
+from pypy.jit.metainterp.optimizeopt.util import equaloplists
+from pypy.jit.codewriter.heaptracker import register_known_gctype
+
+
+class Evaluator(object):
+    def __init__(self, scope):
+        self.scope = scope
+    def __getitem__(self, key):
+        return eval(key, self.scope)
+
+
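
(The Evaluator wrapper above is what lets the expected traces below embed computed
constants such as %(sdescr.size)d or %(adescr.basesize + 10 * adescr.itemsize)d:
"%"-formatting looks the key up in the mapping, and __getitem__ evaluates that key as
a Python expression in the test's namespace.  A standalone demonstration of the same
trick; FakeDescr and its values are made up for the example:)

    class Evaluator(object):               # same five lines as above, repeated
        def __init__(self, scope):         # only so the example runs on its own
            self.scope = scope
        def __getitem__(self, key):
            return eval(key, self.scope)

    class FakeDescr(object):
        basesize = 8
        itemsize = 4

    ns = {'adescr': FakeDescr()}
    template = "p0 = call_malloc_gc(f, %(adescr.basesize + 10 * adescr.itemsize)d)"
    print(template % Evaluator(ns))
    # prints: p0 = call_malloc_gc(f, 48)
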
+class RewriteTests(object):
+    def check_rewrite(self, frm_operations, to_operations, **namespace):
+        S = lltype.GcStruct('S', ('x', lltype.Signed),
+                                 ('y', lltype.Signed))
+        sdescr = get_size_descr(self.gc_ll_descr, S)
+        sdescr.tid = 1234
+        #
+        T = lltype.GcStruct('T', ('y', lltype.Signed),
+                                 ('z', lltype.Ptr(S)),
+                                 ('t', lltype.Signed))
+        tdescr = get_size_descr(self.gc_ll_descr, T)
+        tdescr.tid = 5678
+        tzdescr = get_field_descr(self.gc_ll_descr, T, 'z')
+        #
+        A = lltype.GcArray(lltype.Signed)
+        adescr = get_array_descr(self.gc_ll_descr, A)
+        adescr.tid = 4321
+        alendescr = adescr.lendescr
+        #
+        B = lltype.GcArray(lltype.Char)
+        bdescr = get_array_descr(self.gc_ll_descr, B)
+        bdescr.tid = 8765
+        blendescr = bdescr.lendescr
+        #
+        C = lltype.GcArray(lltype.Ptr(S))
+        cdescr = get_array_descr(self.gc_ll_descr, C)
+        cdescr.tid = 8111
+        clendescr = cdescr.lendescr
+        #
+        E = lltype.GcStruct('Empty')
+        edescr = get_size_descr(self.gc_ll_descr, E)
+        edescr.tid = 9000
+        #
+        vtable_descr = self.gc_ll_descr.fielddescr_vtable
+        O = lltype.GcStruct('O', ('parent', rclass.OBJECT),
+                                 ('x', lltype.Signed))
+        o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)
+        register_known_gctype(self.cpu, o_vtable, O)
+        #
+        tiddescr = self.gc_ll_descr.fielddescr_tid
+        wbdescr = self.gc_ll_descr.write_barrier_descr
+        WORD = globals()['WORD']
+        #
+        strdescr     = self.gc_ll_descr.str_descr
+        unicodedescr = self.gc_ll_descr.unicode_descr
+        strlendescr     = strdescr.lendescr
+        unicodelendescr = unicodedescr.lendescr
+        #
+        namespace.update(locals())
+        #
+        for funcname in self.gc_ll_descr._generated_functions:
+            namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname)
+            namespace[funcname + '_descr'] = getattr(self.gc_ll_descr,
+                                                     '%s_descr' % funcname)
+        #
+        ops = parse(frm_operations, namespace=namespace)
+        expected = parse(to_operations % Evaluator(namespace),
+                         namespace=namespace)
+        operations = self.gc_ll_descr.rewrite_assembler(self.cpu,
+                                                        ops.operations,
+                                                        [])
+        equaloplists(operations, expected.operations)
+
+
+class TestBoehm(RewriteTests):
+    def setup_method(self, meth):
+        class FakeCPU(object):
+            def sizeof(self, STRUCT):
+                return SizeDescrWithVTable(102)
+        self.cpu = FakeCPU()
+        self.gc_ll_descr = GcLLDescr_boehm(None, None, None)
+
+    def test_new(self):
+        self.check_rewrite("""
+            []
+            p0 = new(descr=sdescr)
+            jump()
+        """, """
+            [p1]
+            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
+                                descr=malloc_fixedsize_descr)
+            jump()
+        """)
+
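
(The test above pins down the shape of the Boehm rewrite: a front-end `new` is
lowered, before assembly, into an explicit `call_malloc_gc` with the structure's size
folded in as a constant.  A very rough sketch of such a lowering pass; Op, lower_news,
size_of and the operation strings are placeholders, not the actual PyPy classes:)

    class Op(object):
        def __init__(self, opname, args, descr=None):
            self.opname, self.args, self.descr = opname, args, descr
        def __repr__(self):
            return '%s(%s, descr=%r)' % (self.opname, self.args, self.descr)

    def lower_news(operations, size_of, malloc_fn='malloc_fixedsize'):
        newops = []
        for op in operations:
            if op.opname == 'new':
                # replace the GC-aware 'new' with a direct malloc call,
                # passing the precomputed size of the structure
                newops.append(Op('call_malloc_gc',
                                 [malloc_fn, size_of(op.descr)]))
            else:
                newops.append(op)
        return newops

    trace = [Op('new', [], descr='sdescr'), Op('jump', [])]
    print(lower_news(trace, size_of=lambda d: 16))
    # -> [call_malloc_gc(['malloc_fixedsize', 16], descr=None), jump([], descr=None)]
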
+    def test_no_collapsing(self):
+        self.check_rewrite("""
+            []
+            p0 = new(descr=sdescr)
+            p1 = new(descr=sdescr)
+            jump()
+        """, """
+            []
+            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
+                                descr=malloc_fixedsize_descr)
+            p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
+                                descr=malloc_fixedsize_descr)
+            jump()
+        """)
+
+    def test_new_array_fixed(self):
+        self.check_rewrite("""
+            []
+            p0 = new_array(10, descr=adescr)
+            jump()
+        """, """
+            []
+            p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
+                                %(adescr.basesize + 10 * adescr.itemsize)d, \
+                                descr=malloc_fixedsize_descr)
+            setfield_gc(p0, 10, descr=alendescr)
+            jump()
+        """)
+
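
(For a fixed-length new_array the rewrite can compute the whole allocation size at
rewrite time, so a single fixed-size malloc suffices and only the length field still
has to be stored afterwards.  With illustrative numbers, not the real descriptor
values:)

    basesize, itemsize, length = 8, 4, 10    # made-up layout for an array of Signed
    total = basesize + length * itemsize     # constant folded into the trace
    assert total == 48
    # the rewritten trace then does, in effect:
    #   p0 = call_malloc_gc(malloc_fixedsize, 48)
    #   setfield_gc(p0, 10, descr=alendescr)   # the length is stored separately
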
+    def test_new_array_variable(self):
+        self.check_rewrite("""
+            [i1]
+            p0 = new_array(i1, descr=adescr)
+            jump()
+        """, """
+            [i1]

