[pypy-commit] pypy type-specialized-instances: merge default

l.diekmann noreply at buildbot.pypy.org
Wed Jan 25 15:36:18 CET 2012


Author: Lukas Diekmann <lukas.diekmann at uni-duesseldorf.de>
Branch: type-specialized-instances
Changeset: r51752:7636a491c009
Date: 2012-01-25 15:34 +0100
http://bitbucket.org/pypy/pypy/changeset/7636a491c009/

Log:	merge default

diff too long, truncating to 10000 out of 43426 lines

diff --git a/.hgignore b/.hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -2,6 +2,9 @@
 *.py[co]
 *~
 .*.swp
+.idea
+.project
+.pydevproject
 
 syntax: regexp
 ^testresult$
diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -27,7 +27,7 @@
     DEALINGS IN THE SOFTWARE.
 
 
-PyPy Copyright holders 2003-2011
+PyPy Copyright holders 2003-2012
 ----------------------------------- 
 
 Except when otherwise stated (look for LICENSE files or information at
@@ -37,43 +37,47 @@
     Armin Rigo
     Maciej Fijalkowski
     Carl Friedrich Bolz
+    Amaury Forgeot d'Arc
     Antonio Cuni
-    Amaury Forgeot d'Arc
     Samuele Pedroni
     Michael Hudson
     Holger Krekel
-    Benjamin Peterson
+    Alex Gaynor
     Christian Tismer
     Hakan Ardo
-    Alex Gaynor
+    Benjamin Peterson
+    David Schneider
     Eric van Riet Paap
     Anders Chrigstrom
-    David Schneider
     Richard Emslie
     Dan Villiom Podlaski Christiansen
     Alexander Schremmer
+    Lukas Diekmann
     Aurelien Campeas
     Anders Lehmann
     Camillo Bruni
     Niklaus Haldimann
+    Sven Hager
     Leonardo Santagada
     Toon Verwaest
     Seo Sanghyeon
+    Justin Peel
     Lawrence Oluyede
     Bartosz Skowron
     Jakub Gustak
     Guido Wesdorp
     Daniel Roberts
+    Laura Creighton
     Adrien Di Mascio
-    Laura Creighton
     Ludovic Aubry
     Niko Matsakis
+    Wim Lavrijsen
+    Matti Picus
     Jason Creighton
     Jacob Hallen
     Alex Martelli
     Anders Hammarquist
     Jan de Mooij
-    Wim Lavrijsen
     Stephan Diehl
     Michael Foord
     Stefan Schwarzer
@@ -84,34 +88,36 @@
     Alexandre Fayolle
     Marius Gedminas
     Simon Burton
-    Justin Peel
+    David Edelsohn
     Jean-Paul Calderone
     John Witulski
-    Lukas Diekmann
+    Timo Paulssen
     holger krekel
-    Wim Lavrijsen
     Dario Bertini
+    Mark Pearse
     Andreas Stührk
     Jean-Philippe St. Pierre
     Guido van Rossum
     Pavel Vinogradov
     Valentino Volonghi
     Paul deGrandis
+    Ilya Osadchiy
+    Ronny Pfannschmidt
     Adrian Kuhn
     tav
     Georg Brandl
+    Philip Jenvey
     Gerald Klix
     Wanja Saatkamp
-    Ronny Pfannschmidt
     Boris Feigin
     Oscar Nierstrasz
     David Malcolm
     Eugene Oden
     Henry Mason
-    Sven Hager
+    Jeff Terrace
     Lukas Renggli
-    Ilya Osadchiy
     Guenter Jantzen
+    Ned Batchelder
     Bert Freudenberg
     Amit Regmi
     Ben Young
@@ -142,7 +148,6 @@
     Anders Qvist
     Beatrice During
     Alexander Sedov
-    Timo Paulssen
     Corbin Simpson
     Vincent Legoll
     Romain Guillebert
@@ -165,9 +170,10 @@
     Lucio Torre
     Lene Wagner
     Miguel de Val Borro
+    Artur Lisiecki
+    Bruno Gola
     Ignas Mikalajunas
-    Artur Lisiecki
-    Philip Jenvey
+    Stefano Rivera
     Joshua Gilbert
     Godefroid Chappelle
     Yusei Tahara
@@ -179,17 +185,17 @@
     Kristjan Valur Jonsson
     Bobby Impollonia
     Michael Hudson-Doyle
+    Laurence Tratt
+    Yasir Suhail
     Andrew Thompson
     Anders Sigfridsson
     Floris Bruynooghe
     Jacek Generowicz
     Dan Colish
     Zooko Wilcox-O Hearn
-    Dan Villiom Podlaski Christiansen
-    Anders Hammarquist
+    Dan Loewenherz
     Chris Lambacher
     Dinu Gherman
-    Dan Colish
     Brett Cannon
     Daniel Neuhäuser
     Michael Chermside
diff --git a/lib-python/modified-2.7/ctypes/__init__.py b/lib-python/modified-2.7/ctypes/__init__.py
--- a/lib-python/modified-2.7/ctypes/__init__.py
+++ b/lib-python/modified-2.7/ctypes/__init__.py
@@ -351,7 +351,7 @@
         self._FuncPtr = _FuncPtr
 
         if handle is None:
-            self._handle = _ffi.CDLL(name)
+            self._handle = _ffi.CDLL(name, mode)
         else:
             self._handle = handle
 
diff --git a/lib-python/modified-2.7/ctypes/test/test_callbacks.py b/lib-python/modified-2.7/ctypes/test/test_callbacks.py
--- a/lib-python/modified-2.7/ctypes/test/test_callbacks.py
+++ b/lib-python/modified-2.7/ctypes/test/test_callbacks.py
@@ -1,5 +1,6 @@
 import unittest
 from ctypes import *
+from ctypes.test import xfail
 import _ctypes_test
 
 class Callbacks(unittest.TestCase):
@@ -98,6 +99,7 @@
 ##        self.check_type(c_char_p, "abc")
 ##        self.check_type(c_char_p, "def")
 
+    @xfail
     def test_pyobject(self):
         o = ()
         from sys import getrefcount as grc
diff --git a/lib-python/modified-2.7/ctypes/test/test_libc.py b/lib-python/modified-2.7/ctypes/test/test_libc.py
--- a/lib-python/modified-2.7/ctypes/test/test_libc.py
+++ b/lib-python/modified-2.7/ctypes/test/test_libc.py
@@ -25,7 +25,10 @@
         lib.my_qsort(chars, len(chars)-1, sizeof(c_char), comparefunc(sort))
         self.assertEqual(chars.raw, "   ,,aaaadmmmnpppsss\x00")
 
-    def test_no_more_xfail(self):
+    def SKIPPED_test_no_more_xfail(self):
+        # We decided to not explicitly support the whole ctypes-2.7
+        # and instead go for a case-by-case, demand-driven approach.
+        # So this test is skipped instead of failing.
         import socket
         import ctypes.test
         self.assertTrue(not hasattr(ctypes.test, 'xfail'),
diff --git a/lib_pypy/_collections.py b/lib_pypy/_collections.py
--- a/lib_pypy/_collections.py
+++ b/lib_pypy/_collections.py
@@ -406,7 +406,7 @@
             recurse.remove(id(self))
 
     def copy(self):
-        return type(self)(self, default_factory=self.default_factory)
+        return type(self)(self.default_factory, self)
     
     def __copy__(self):
         return self.copy()
diff --git a/lib_pypy/_ctypes/structure.py b/lib_pypy/_ctypes/structure.py
--- a/lib_pypy/_ctypes/structure.py
+++ b/lib_pypy/_ctypes/structure.py
@@ -73,8 +73,12 @@
 
 class Field(object):
     def __init__(self, name, offset, size, ctype, num, is_bitfield):
-        for k in ('name', 'offset', 'size', 'ctype', 'num', 'is_bitfield'):
-            self.__dict__[k] = locals()[k]
+        self.__dict__['name'] = name
+        self.__dict__['offset'] = offset
+        self.__dict__['size'] = size
+        self.__dict__['ctype'] = ctype
+        self.__dict__['num'] = num
+        self.__dict__['is_bitfield'] = is_bitfield
 
     def __setattr__(self, name, value):
         raise AttributeError(name)
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -20,6 +20,8 @@
 # 2. Altered source versions must be plainly marked as such, and must not be
 #    misrepresented as being the original software.
 # 3. This notice may not be removed or altered from any source distribution.
+#
+# Note: This software has been modified for use in PyPy.
 
 from ctypes import c_void_p, c_int, c_double, c_int64, c_char_p, cdll
 from ctypes import POINTER, byref, string_at, CFUNCTYPE, cast
@@ -27,7 +29,6 @@
 from collections import OrderedDict
 import datetime
 import sys
-import time
 import weakref
 from threading import _get_ident as thread_get_ident
 
@@ -231,8 +232,10 @@
 sqlite.sqlite3_result_text.argtypes = [c_void_p, c_char_p, c_int, c_void_p]
 sqlite.sqlite3_result_text.restype = None
 
-sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int]
-sqlite.sqlite3_enable_load_extension.restype = c_int
+HAS_LOAD_EXTENSION = hasattr(sqlite, "sqlite3_enable_load_extension")
+if HAS_LOAD_EXTENSION:
+    sqlite.sqlite3_enable_load_extension.argtypes = [c_void_p, c_int]
+    sqlite.sqlite3_enable_load_extension.restype = c_int
 
 ##########################################
 # END Wrapped SQLite C API and constants
@@ -604,7 +607,7 @@
             def authorizer(userdata, action, arg1, arg2, dbname, source):
                 try:
                     return int(callback(action, arg1, arg2, dbname, source))
-                except Exception, e:
+                except Exception:
                     return SQLITE_DENY
             c_authorizer = AUTHORIZER(authorizer)
 
@@ -651,7 +654,7 @@
                 if not aggregate_ptr[0]:
                     try:
                         aggregate = cls()
-                    except Exception, e:
+                    except Exception:
                         msg = ("user-defined aggregate's '__init__' "
                                "method raised error")
                         sqlite.sqlite3_result_error(context, msg, len(msg))
@@ -665,7 +668,7 @@
                 params = _convert_params(context, argc, c_params)
                 try:
                     aggregate.step(*params)
-                except Exception, e:
+                except Exception:
                     msg = ("user-defined aggregate's 'step' "
                            "method raised error")
                     sqlite.sqlite3_result_error(context, msg, len(msg))
@@ -681,7 +684,7 @@
                     aggregate = self.aggregate_instances[aggregate_ptr[0]]
                     try:
                         val = aggregate.finalize()
-                    except Exception, e:
+                    except Exception:
                         msg = ("user-defined aggregate's 'finalize' "
                                "method raised error")
                         sqlite.sqlite3_result_error(context, msg, len(msg))
@@ -708,13 +711,14 @@
         from sqlite3.dump import _iterdump
         return _iterdump(self)
 
-    def enable_load_extension(self, enabled):
-        self._check_thread()
-        self._check_closed()
+    if HAS_LOAD_EXTENSION:
+        def enable_load_extension(self, enabled):
+            self._check_thread()
+            self._check_closed()
 
-        rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled))
-        if rc != SQLITE_OK:
-            raise OperationalError("Error enabling load extension")
+            rc = sqlite.sqlite3_enable_load_extension(self.db, int(enabled))
+            if rc != SQLITE_OK:
+                raise OperationalError("Error enabling load extension")
 
 DML, DQL, DDL = range(3)
 
@@ -768,7 +772,7 @@
             self.statement.item = None
             self.statement.exhausted = True
 
-        if self.statement.kind == DML or self.statement.kind == DDL:
+        if self.statement.kind == DML:
             self.statement.reset()
 
         self.rowcount = -1
@@ -788,7 +792,7 @@
         if self.statement.kind == DML:
             self.connection._begin()
         else:
-            raise ProgrammingError, "executemany is only for DML statements"
+            raise ProgrammingError("executemany is only for DML statements")
 
         self.rowcount = 0
         for params in many_params:
@@ -858,8 +862,6 @@
         except StopIteration:
             return None
 
-        return nextrow
-
     def fetchmany(self, size=None):
         self._check_closed()
         self._check_reset()
@@ -912,7 +914,7 @@
     def __init__(self, connection, sql):
         self.statement = None
         if not isinstance(sql, str):
-            raise ValueError, "sql must be a string"
+            raise ValueError("sql must be a string")
         self.con = connection
         self.sql = sql # DEBUG ONLY
         first_word = self._statement_kind = sql.lstrip().split(" ")[0].upper()
@@ -941,8 +943,8 @@
             raise self.con._get_exception(ret)
         self.con._remember_statement(self)
         if _check_remaining_sql(next_char.value):
-            raise Warning, "One and only one statement required: %r" % (
-                next_char.value,)
+            raise Warning("One and only one statement required: %r" % (
+                next_char.value,))
         # sql_char should remain alive until here
 
         self._build_row_cast_map()
@@ -1013,7 +1015,7 @@
         elif type(param) is buffer:
             sqlite.sqlite3_bind_blob(self.statement, idx, str(param), len(param), SQLITE_TRANSIENT)
         else:
-            raise InterfaceError, "parameter type %s is not supported" % str(type(param))
+            raise InterfaceError("parameter type %s is not supported" % str(type(param)))
 
     def set_params(self, params):
         ret = sqlite.sqlite3_reset(self.statement)
@@ -1042,11 +1044,11 @@
             for idx in range(1, sqlite.sqlite3_bind_parameter_count(self.statement) + 1):
                 param_name = sqlite.sqlite3_bind_parameter_name(self.statement, idx)
                 if param_name is None:
-                    raise ProgrammingError, "need named parameters"
+                    raise ProgrammingError("need named parameters")
                 param_name = param_name[1:]
                 try:
                     param = params[param_name]
-                except KeyError, e:
+                except KeyError:
                     raise ProgrammingError("missing parameter '%s'" %param)
                 self.set_param(idx, param)
 
@@ -1257,7 +1259,7 @@
     params = _convert_params(context, nargs, c_params)
     try:
         val = real_cb(*params)
-    except Exception, e:
+    except Exception:
         msg = "user-defined function raised exception"
         sqlite.sqlite3_result_error(context, msg, len(msg))
     else:
diff --git a/lib_pypy/datetime.py b/lib_pypy/datetime.py
--- a/lib_pypy/datetime.py
+++ b/lib_pypy/datetime.py
@@ -13,7 +13,7 @@
 Sources for time zone and DST data: http://www.twinsun.com/tz/tz-link.htm
 
 This was originally copied from the sandbox of the CPython CVS repository.
-Thanks to Tim Peters for suggesting using it. 
+Thanks to Tim Peters for suggesting using it.
 """
 
 import time as _time
@@ -271,6 +271,8 @@
     raise ValueError("%s()=%d, must be in -1439..1439" % (name, offset))
 
 def _check_date_fields(year, month, day):
+    if not isinstance(year, (int, long)):
+        raise TypeError('int expected')
     if not MINYEAR <= year <= MAXYEAR:
         raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
     if not 1 <= month <= 12:
@@ -280,6 +282,8 @@
         raise ValueError('day must be in 1..%d' % dim, day)
 
 def _check_time_fields(hour, minute, second, microsecond):
+    if not isinstance(hour, (int, long)):
+        raise TypeError('int expected')
     if not 0 <= hour <= 23:
         raise ValueError('hour must be in 0..23', hour)
     if not 0 <= minute <= 59:
@@ -543,61 +547,76 @@
 
         self = object.__new__(cls)
 
-        self.__days = d
-        self.__seconds = s
-        self.__microseconds = us
+        self._days = d
+        self._seconds = s
+        self._microseconds = us
         if abs(d) > 999999999:
             raise OverflowError("timedelta # of days is too large: %d" % d)
 
         return self
 
     def __repr__(self):
-        if self.__microseconds:
+        if self._microseconds:
             return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
-                                       self.__days,
-                                       self.__seconds,
-                                       self.__microseconds)
-        if self.__seconds:
+                                       self._days,
+                                       self._seconds,
+                                       self._microseconds)
+        if self._seconds:
             return "%s(%d, %d)" % ('datetime.' + self.__class__.__name__,
-                                   self.__days,
-                                   self.__seconds)
-        return "%s(%d)" % ('datetime.' + self.__class__.__name__, self.__days)
+                                   self._days,
+                                   self._seconds)
+        return "%s(%d)" % ('datetime.' + self.__class__.__name__, self._days)
 
     def __str__(self):
-        mm, ss = divmod(self.__seconds, 60)
+        mm, ss = divmod(self._seconds, 60)
         hh, mm = divmod(mm, 60)
         s = "%d:%02d:%02d" % (hh, mm, ss)
-        if self.__days:
+        if self._days:
             def plural(n):
                 return n, abs(n) != 1 and "s" or ""
-            s = ("%d day%s, " % plural(self.__days)) + s
-        if self.__microseconds:
-            s = s + ".%06d" % self.__microseconds
+            s = ("%d day%s, " % plural(self._days)) + s
+        if self._microseconds:
+            s = s + ".%06d" % self._microseconds
         return s
 
-    days = property(lambda self: self.__days, doc="days")
-    seconds = property(lambda self: self.__seconds, doc="seconds")
-    microseconds = property(lambda self: self.__microseconds,
-                            doc="microseconds")
-
     def total_seconds(self):
         return ((self.days * 86400 + self.seconds) * 10**6
                 + self.microseconds) / 1e6
 
+    # Read-only field accessors
+    @property
+    def days(self):
+        """days"""
+        return self._days
+
+    @property
+    def seconds(self):
+        """seconds"""
+        return self._seconds
+
+    @property
+    def microseconds(self):
+        """microseconds"""
+        return self._microseconds
+
     def __add__(self, other):
         if isinstance(other, timedelta):
             # for CPython compatibility, we cannot use
             # our __class__ here, but need a real timedelta
-            return timedelta(self.__days + other.__days,
-                             self.__seconds + other.__seconds,
-                             self.__microseconds + other.__microseconds)
+            return timedelta(self._days + other._days,
+                             self._seconds + other._seconds,
+                             self._microseconds + other._microseconds)
         return NotImplemented
 
     __radd__ = __add__
 
     def __sub__(self, other):
         if isinstance(other, timedelta):
-            return self + -other
+            # for CPython compatibility, we cannot use
+            # our __class__ here, but need a real timedelta
+            return timedelta(self._days - other._days,
+                             self._seconds - other._seconds,
+                             self._microseconds - other._microseconds)
         return NotImplemented
 
     def __rsub__(self, other):
@@ -606,17 +625,17 @@
         return NotImplemented
 
     def __neg__(self):
-            # for CPython compatibility, we cannot use
-            # our __class__ here, but need a real timedelta
-            return timedelta(-self.__days,
-                             -self.__seconds,
-                             -self.__microseconds)
+        # for CPython compatibility, we cannot use
+        # our __class__ here, but need a real timedelta
+        return timedelta(-self._days,
+                         -self._seconds,
+                         -self._microseconds)
 
     def __pos__(self):
         return self
 
     def __abs__(self):
-        if self.__days < 0:
+        if self._days < 0:
             return -self
         else:
             return self
@@ -625,81 +644,81 @@
         if isinstance(other, (int, long)):
             # for CPython compatibility, we cannot use
             # our __class__ here, but need a real timedelta
-            return timedelta(self.__days * other,
-                             self.__seconds * other,
-                             self.__microseconds * other)
+            return timedelta(self._days * other,
+                             self._seconds * other,
+                             self._microseconds * other)
         return NotImplemented
 
     __rmul__ = __mul__
 
     def __div__(self, other):
         if isinstance(other, (int, long)):
-            usec = ((self.__days * (24*3600L) + self.__seconds) * 1000000 +
-                    self.__microseconds)
+            usec = ((self._days * (24*3600L) + self._seconds) * 1000000 +
+                    self._microseconds)
             return timedelta(0, 0, usec // other)
         return NotImplemented
 
     __floordiv__ = __div__
 
-    # Comparisons.
+    # Comparisons of timedelta objects with other.
 
     def __eq__(self, other):
         if isinstance(other, timedelta):
-            return self.__cmp(other) == 0
+            return self._cmp(other) == 0
         else:
             return False
 
     def __ne__(self, other):
         if isinstance(other, timedelta):
-            return self.__cmp(other) != 0
+            return self._cmp(other) != 0
         else:
             return True
 
     def __le__(self, other):
         if isinstance(other, timedelta):
-            return self.__cmp(other) <= 0
+            return self._cmp(other) <= 0
         else:
             _cmperror(self, other)
 
     def __lt__(self, other):
         if isinstance(other, timedelta):
-            return self.__cmp(other) < 0
+            return self._cmp(other) < 0
         else:
             _cmperror(self, other)
 
     def __ge__(self, other):
         if isinstance(other, timedelta):
-            return self.__cmp(other) >= 0
+            return self._cmp(other) >= 0
         else:
             _cmperror(self, other)
 
     def __gt__(self, other):
         if isinstance(other, timedelta):
-            return self.__cmp(other) > 0
+            return self._cmp(other) > 0
         else:
             _cmperror(self, other)
 
-    def __cmp(self, other):
+    def _cmp(self, other):
         assert isinstance(other, timedelta)
-        return cmp(self.__getstate(), other.__getstate())
+        return cmp(self._getstate(), other._getstate())
 
     def __hash__(self):
-        return hash(self.__getstate())
+        return hash(self._getstate())
 
     def __nonzero__(self):
-        return (self.__days != 0 or
-                self.__seconds != 0 or
-                self.__microseconds != 0)
+        return (self._days != 0 or
+                self._seconds != 0 or
+                self._microseconds != 0)
 
     # Pickle support.
 
     __safe_for_unpickling__ = True      # For Python 2.2
 
-    def __getstate(self):
-        return (self.__days, self.__seconds, self.__microseconds)
+    def _getstate(self):
+        return (self._days, self._seconds, self._microseconds)
 
     def __reduce__(self):
-        return (self.__class__, self.__getstate())
+        return (self.__class__, self._getstate())
 
 timedelta.min = timedelta(-999999999)
 timedelta.max = timedelta(days=999999999, hours=23, minutes=59, seconds=59,
@@ -749,25 +768,26 @@
             return self
         _check_date_fields(year, month, day)
         self = object.__new__(cls)
-        self.__year = year
-        self.__month = month
-        self.__day = day
+        self._year = year
+        self._month = month
+        self._day = day
         return self
 
     # Additional constructors
 
+    @classmethod
     def fromtimestamp(cls, t):
         "Construct a date from a POSIX timestamp (like time.time())."
         y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
         return cls(y, m, d)
-    fromtimestamp = classmethod(fromtimestamp)
 
+    @classmethod
     def today(cls):
         "Construct a date from time.time()."
         t = _time.time()
         return cls.fromtimestamp(t)
-    today = classmethod(today)
 
+    @classmethod
     def fromordinal(cls, n):
         """Contruct a date from a proleptic Gregorian ordinal.
 
@@ -776,16 +796,24 @@
         """
         y, m, d = _ord2ymd(n)
         return cls(y, m, d)
-    fromordinal = classmethod(fromordinal)
 
     # Conversions to string
 
     def __repr__(self):
-        "Convert to formal string, for repr()."
+        """Convert to formal string, for repr().
+
+        >>> dt = datetime(2010, 1, 1)
+        >>> repr(dt)
+        'datetime.datetime(2010, 1, 1, 0, 0)'
+
+        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
+        >>> repr(dt)
+        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
+        """
         return "%s(%d, %d, %d)" % ('datetime.' + self.__class__.__name__,
-                                   self.__year,
-                                   self.__month,
-                                   self.__day)
+                                   self._year,
+                                   self._month,
+                                   self._day)
     # XXX These shouldn't depend on time.localtime(), because that
     # clips the usable dates to [1970 .. 2038).  At least ctime() is
     # easily done without using strftime() -- that's better too because
@@ -793,12 +821,20 @@
 
     def ctime(self):
         "Format a la ctime()."
-        return tmxxx(self.__year, self.__month, self.__day).ctime()
+        return tmxxx(self._year, self._month, self._day).ctime()
 
     def strftime(self, fmt):
         "Format using strftime()."
         return _wrap_strftime(self, fmt, self.timetuple())
 
+    def __format__(self, fmt):
+        if not isinstance(fmt, (str, unicode)):
+            raise ValueError("__format__ excepts str or unicode, not %s" %
+                             fmt.__class__.__name__)
+        if len(fmt) != 0:
+            return self.strftime(fmt)
+        return str(self)
+
     def isoformat(self):
         """Return the date formatted according to ISO.
 
@@ -808,29 +844,31 @@
         - http://www.w3.org/TR/NOTE-datetime
         - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
         """
-        return "%04d-%02d-%02d" % (self.__year, self.__month, self.__day)
+        return "%04d-%02d-%02d" % (self._year, self._month, self._day)
 
     __str__ = isoformat
 
-    def __format__(self, format):
-        if not isinstance(format, (str, unicode)):
-            raise ValueError("__format__ excepts str or unicode, not %s" %
-                             format.__class__.__name__)
-        if not format:
-            return str(self)
-        return self.strftime(format)
+    # Read-only field accessors
+    @property
+    def year(self):
+        """year (1-9999)"""
+        return self._year
 
-    # Read-only field accessors
-    year = property(lambda self: self.__year,
-                    doc="year (%d-%d)" % (MINYEAR, MAXYEAR))
-    month = property(lambda self: self.__month, doc="month (1-12)")
-    day = property(lambda self: self.__day, doc="day (1-31)")
+    @property
+    def month(self):
+        """month (1-12)"""
+        return self._month
+
+    @property
+    def day(self):
+        """day (1-31)"""
+        return self._day
 
     # Standard conversions, __cmp__, __hash__ (and helpers)
 
     def timetuple(self):
         "Return local time tuple compatible with time.localtime()."
-        return _build_struct_time(self.__year, self.__month, self.__day,
+        return _build_struct_time(self._year, self._month, self._day,
                                   0, 0, 0, -1)
 
     def toordinal(self):
@@ -839,24 +877,24 @@
         January 1 of year 1 is day 1.  Only the year, month and day values
         contribute to the result.
         """
-        return _ymd2ord(self.__year, self.__month, self.__day)
+        return _ymd2ord(self._year, self._month, self._day)
 
     def replace(self, year=None, month=None, day=None):
         """Return a new date with new values for the specified fields."""
         if year is None:
-            year = self.__year
+            year = self._year
         if month is None:
-            month = self.__month
+            month = self._month
         if day is None:
-            day = self.__day
+            day = self._day
         _check_date_fields(year, month, day)
         return date(year, month, day)
 
-    # Comparisons.
+    # Comparisons of date objects with other.
 
     def __eq__(self, other):
         if isinstance(other, date):
-            return self.__cmp(other) == 0
+            return self._cmp(other) == 0
         elif hasattr(other, "timetuple"):
             return NotImplemented
         else:
@@ -864,7 +902,7 @@
 
     def __ne__(self, other):
         if isinstance(other, date):
-            return self.__cmp(other) != 0
+            return self._cmp(other) != 0
         elif hasattr(other, "timetuple"):
             return NotImplemented
         else:
@@ -872,7 +910,7 @@
 
     def __le__(self, other):
         if isinstance(other, date):
-            return self.__cmp(other) <= 0
+            return self._cmp(other) <= 0
         elif hasattr(other, "timetuple"):
             return NotImplemented
         else:
@@ -880,7 +918,7 @@
 
     def __lt__(self, other):
         if isinstance(other, date):
-            return self.__cmp(other) < 0
+            return self._cmp(other) < 0
         elif hasattr(other, "timetuple"):
             return NotImplemented
         else:
@@ -888,7 +926,7 @@
 
     def __ge__(self, other):
         if isinstance(other, date):
-            return self.__cmp(other) >= 0
+            return self._cmp(other) >= 0
         elif hasattr(other, "timetuple"):
             return NotImplemented
         else:
@@ -896,21 +934,21 @@
 
     def __gt__(self, other):
         if isinstance(other, date):
-            return self.__cmp(other) > 0
+            return self._cmp(other) > 0
         elif hasattr(other, "timetuple"):
             return NotImplemented
         else:
             _cmperror(self, other)
 
-    def __cmp(self, other):
+    def _cmp(self, other):
         assert isinstance(other, date)
-        y, m, d = self.__year, self.__month, self.__day
-        y2, m2, d2 = other.__year, other.__month, other.__day
+        y, m, d = self._year, self._month, self._day
+        y2, m2, d2 = other._year, other._month, other._day
         return cmp((y, m, d), (y2, m2, d2))
 
     def __hash__(self):
         "Hash."
-        return hash(self.__getstate())
+        return hash(self._getstate())
 
     # Computations
 
@@ -922,9 +960,9 @@
     def __add__(self, other):
         "Add a date to a timedelta."
         if isinstance(other, timedelta):
-            t = tmxxx(self.__year,
-                      self.__month,
-                      self.__day + other.days)
+            t = tmxxx(self._year,
+                      self._month,
+                      self._day + other.days)
             self._checkOverflow(t.year)
             result = date(t.year, t.month, t.day)
             return result
@@ -966,9 +1004,9 @@
         ISO calendar algorithm taken from
         http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
         """
-        year = self.__year
+        year = self._year
         week1monday = _isoweek1monday(year)
-        today = _ymd2ord(self.__year, self.__month, self.__day)
+        today = _ymd2ord(self._year, self._month, self._day)
         # Internally, week and day have origin 0
         week, day = divmod(today - week1monday, 7)
         if week < 0:
@@ -985,18 +1023,18 @@
 
     __safe_for_unpickling__ = True      # For Python 2.2
 
-    def __getstate(self):
-        yhi, ylo = divmod(self.__year, 256)
-        return ("%c%c%c%c" % (yhi, ylo, self.__month, self.__day), )
+    def _getstate(self):
+        yhi, ylo = divmod(self._year, 256)
+        return ("%c%c%c%c" % (yhi, ylo, self._month, self._day), )
 
     def __setstate(self, string):
         if len(string) != 4 or not (1 <= ord(string[2]) <= 12):
             raise TypeError("not enough arguments")
-        yhi, ylo, self.__month, self.__day = map(ord, string)
-        self.__year = yhi * 256 + ylo
+        yhi, ylo, self._month, self._day = map(ord, string)
+        self._year = yhi * 256 + ylo
 
     def __reduce__(self):
-        return (self.__class__, self.__getstate())
+        return (self.__class__, self._getstate())
 
 _date_class = date  # so functions w/ args named "date" can get at the class
 
@@ -1118,62 +1156,80 @@
             return self
         _check_tzinfo_arg(tzinfo)
         _check_time_fields(hour, minute, second, microsecond)
-        self.__hour = hour
-        self.__minute = minute
-        self.__second = second
-        self.__microsecond = microsecond
+        self._hour = hour
+        self._minute = minute
+        self._second = second
+        self._microsecond = microsecond
         self._tzinfo = tzinfo
         return self
 
     # Read-only field accessors
-    hour = property(lambda self: self.__hour, doc="hour (0-23)")
-    minute = property(lambda self: self.__minute, doc="minute (0-59)")
-    second = property(lambda self: self.__second, doc="second (0-59)")
-    microsecond = property(lambda self: self.__microsecond,
-                           doc="microsecond (0-999999)")
-    tzinfo = property(lambda self: self._tzinfo, doc="timezone info object")
+    @property
+    def hour(self):
+        """hour (0-23)"""
+        return self._hour
+
+    @property
+    def minute(self):
+        """minute (0-59)"""
+        return self._minute
+
+    @property
+    def second(self):
+        """second (0-59)"""
+        return self._second
+
+    @property
+    def microsecond(self):
+        """microsecond (0-999999)"""
+        return self._microsecond
+
+    @property
+    def tzinfo(self):
+        """timezone info object"""
+        return self._tzinfo
 
     # Standard conversions, __hash__ (and helpers)
 
-    # Comparisons.
+    # Comparisons of time objects with other.
 
     def __eq__(self, other):
         if isinstance(other, time):
-            return self.__cmp(other) == 0
+            return self._cmp(other) == 0
         else:
             return False
 
     def __ne__(self, other):
         if isinstance(other, time):
-            return self.__cmp(other) != 0
+            return self._cmp(other) != 0
         else:
             return True
 
     def __le__(self, other):
         if isinstance(other, time):
-            return self.__cmp(other) <= 0
+            return self._cmp(other) <= 0
         else:
             _cmperror(self, other)
 
     def __lt__(self, other):
         if isinstance(other, time):
-            return self.__cmp(other) < 0
+            return self._cmp(other) < 0
         else:
             _cmperror(self, other)
 
     def __ge__(self, other):
         if isinstance(other, time):
-            return self.__cmp(other) >= 0
+            return self._cmp(other) >= 0
         else:
             _cmperror(self, other)
 
     def __gt__(self, other):
         if isinstance(other, time):
-            return self.__cmp(other) > 0
+            return self._cmp(other) > 0
         else:
             _cmperror(self, other)
 
-    def __cmp(self, other):
+    def _cmp(self, other):
         assert isinstance(other, time)
         mytz = self._tzinfo
         ottz = other._tzinfo
@@ -1187,23 +1243,23 @@
             base_compare = myoff == otoff
 
         if base_compare:
-            return cmp((self.__hour, self.__minute, self.__second,
-                        self.__microsecond),
-                       (other.__hour, other.__minute, other.__second,
-                        other.__microsecond))
+            return cmp((self._hour, self._minute, self._second,
+                        self._microsecond),
+                       (other._hour, other._minute, other._second,
+                        other._microsecond))
         if myoff is None or otoff is None:
             # XXX Buggy in 2.2.2.
             raise TypeError("cannot compare naive and aware times")
-        myhhmm = self.__hour * 60 + self.__minute - myoff
-        othhmm = other.__hour * 60 + other.__minute - otoff
-        return cmp((myhhmm, self.__second, self.__microsecond),
-                   (othhmm, other.__second, other.__microsecond))
+        myhhmm = self._hour * 60 + self._minute - myoff
+        othhmm = other._hour * 60 + other._minute - otoff
+        return cmp((myhhmm, self._second, self._microsecond),
+                   (othhmm, other._second, other._microsecond))
 
     def __hash__(self):
         """Hash."""
         tzoff = self._utcoffset()
         if not tzoff: # zero or None
-            return hash(self.__getstate()[0])
+            return hash(self._getstate()[0])
         h, m = divmod(self.hour * 60 + self.minute - tzoff, 60)
         if 0 <= h < 24:
             return hash(time(h, m, self.second, self.microsecond))
@@ -1227,14 +1283,14 @@
 
     def __repr__(self):
         """Convert to formal string, for repr()."""
-        if self.__microsecond != 0:
-            s = ", %d, %d" % (self.__second, self.__microsecond)
-        elif self.__second != 0:
-            s = ", %d" % self.__second
+        if self._microsecond != 0:
+            s = ", %d, %d" % (self._second, self._microsecond)
+        elif self._second != 0:
+            s = ", %d" % self._second
         else:
             s = ""
         s= "%s(%d, %d%s)" % ('datetime.' + self.__class__.__name__,
-                             self.__hour, self.__minute, s)
+                             self._hour, self._minute, s)
         if self._tzinfo is not None:
             assert s[-1:] == ")"
             s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
@@ -1246,8 +1302,8 @@
         This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
         self.microsecond == 0.
         """
-        s = _format_time(self.__hour, self.__minute, self.__second,
-                         self.__microsecond)
+        s = _format_time(self._hour, self._minute, self._second,
+                         self._microsecond)
         tz = self._tzstr()
         if tz:
             s += tz
@@ -1255,14 +1311,6 @@
 
     __str__ = isoformat
 
-    def __format__(self, format):
-        if not isinstance(format, (str, unicode)):
-            raise ValueError("__format__ excepts str or unicode, not %s" %
-                             format.__class__.__name__)
-        if not format:
-            return str(self)
-        return self.strftime(format)
-
     def strftime(self, fmt):
         """Format using strftime().  The date part of the timestamp passed
         to underlying strftime should not be used.
@@ -1270,10 +1318,18 @@
         # The year must be >= 1900 else Python's strftime implementation
         # can raise a bogus exception.
         timetuple = (1900, 1, 1,
-                     self.__hour, self.__minute, self.__second,
+                     self._hour, self._minute, self._second,
                      0, 1, -1)
         return _wrap_strftime(self, fmt, timetuple)
 
+    def __format__(self, fmt):
+        if not isinstance(fmt, (str, unicode)):
+            raise ValueError("__format__ excepts str or unicode, not %s" %
+                             fmt.__class__.__name__)
+        if len(fmt) != 0:
+            return self.strftime(fmt)
+        return str(self)
+
     # Timezone functions
 
     def utcoffset(self):
@@ -1350,10 +1406,10 @@
 
     __safe_for_unpickling__ = True      # For Python 2.2
 
-    def __getstate(self):
-        us2, us3 = divmod(self.__microsecond, 256)
+    def _getstate(self):
+        us2, us3 = divmod(self._microsecond, 256)
         us1, us2 = divmod(us2, 256)
-        basestate = ("%c" * 6) % (self.__hour, self.__minute, self.__second,
+        basestate = ("%c" * 6) % (self._hour, self._minute, self._second,
                                   us1, us2, us3)
         if self._tzinfo is None:
             return (basestate,)
@@ -1363,13 +1419,13 @@
     def __setstate(self, string, tzinfo):
         if len(string) != 6 or ord(string[0]) >= 24:
             raise TypeError("an integer is required")
-        self.__hour, self.__minute, self.__second, us1, us2, us3 = \
+        self._hour, self._minute, self._second, us1, us2, us3 = \
                                                             map(ord, string)
-        self.__microsecond = (((us1 << 8) | us2) << 8) | us3
+        self._microsecond = (((us1 << 8) | us2) << 8) | us3
         self._tzinfo = tzinfo
 
     def __reduce__(self):
-        return (time, self.__getstate())
+        return (time, self._getstate())
 
 _time_class = time  # so functions w/ args named "time" can get at the class
 
@@ -1378,9 +1434,11 @@
 time.resolution = timedelta(microseconds=1)
 
 class datetime(date):
+    """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])
 
-    # XXX needs docstrings
-    # See http://www.zope.org/Members/fdrake/DateTimeWiki/TimeZoneInfo
+    The year, month and day arguments are required. tzinfo may be None, or an
+    instance of a tzinfo subclass. The remaining arguments may be ints or longs.
+    """
 
     def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                 microsecond=0, tzinfo=None):
@@ -1393,24 +1451,43 @@
         _check_time_fields(hour, minute, second, microsecond)
         self = date.__new__(cls, year, month, day)
         # XXX This duplicates __year, __month, __day for convenience :-(
-        self.__year = year
-        self.__month = month
-        self.__day = day
-        self.__hour = hour
-        self.__minute = minute
-        self.__second = second
-        self.__microsecond = microsecond
+        self._year = year
+        self._month = month
+        self._day = day
+        self._hour = hour
+        self._minute = minute
+        self._second = second
+        self._microsecond = microsecond
         self._tzinfo = tzinfo
         return self
 
     # Read-only field accessors
-    hour = property(lambda self: self.__hour, doc="hour (0-23)")
-    minute = property(lambda self: self.__minute, doc="minute (0-59)")
-    second = property(lambda self: self.__second, doc="second (0-59)")
-    microsecond = property(lambda self: self.__microsecond,
-                           doc="microsecond (0-999999)")
-    tzinfo = property(lambda self: self._tzinfo, doc="timezone info object")
+    @property
+    def hour(self):
+        """hour (0-23)"""
+        return self._hour
 
+    @property
+    def minute(self):
+        """minute (0-59)"""
+        return self._minute
+
+    @property
+    def second(self):
+        """second (0-59)"""
+        return self._second
+
+    @property
+    def microsecond(self):
+        """microsecond (0-999999)"""
+        return self._microsecond
+
+    @property
+    def tzinfo(self):
+        """timezone info object"""
+        return self._tzinfo
+
+    @classmethod
     def fromtimestamp(cls, t, tz=None):
         """Construct a datetime from a POSIX timestamp (like time.time()).
 
@@ -1438,37 +1515,42 @@
         if tz is not None:
             result = tz.fromutc(result)
         return result
-    fromtimestamp = classmethod(fromtimestamp)
 
+    @classmethod
     def utcfromtimestamp(cls, t):
         "Construct a UTC datetime from a POSIX timestamp (like time.time())."
-        if 1 - (t % 1.0) < 0.0000005:
-            t = float(int(t)) + 1
-        if t < 0:
-            t -= 1
+        t, frac = divmod(t, 1.0)
+        us = round(frac * 1e6)
+
+        # If timestamp is less than one microsecond smaller than a
+        # full second, us can be rounded up to 1000000.  In this case,
+        # roll over to seconds, otherwise, ValueError is raised
+        # by the constructor.
+        if us == 1000000:
+            t += 1
+            us = 0
         y, m, d, hh, mm, ss, weekday, jday, dst = _time.gmtime(t)
-        us = int((t % 1.0) * 1000000)
         ss = min(ss, 59)    # clamp out leap seconds if the platform has them
         return cls(y, m, d, hh, mm, ss, us)
-    utcfromtimestamp = classmethod(utcfromtimestamp)
 
     # XXX This is supposed to do better than we *can* do by using time.time(),
     # XXX if the platform supports a more accurate way.  The C implementation
     # XXX uses gettimeofday on platforms that have it, but that isn't
     # XXX available from Python.  So now() may return different results
     # XXX across the implementations.
+    @classmethod
     def now(cls, tz=None):
         "Construct a datetime from time.time() and optional time zone info."
         t = _time.time()
         return cls.fromtimestamp(t, tz)
-    now = classmethod(now)
 
+    @classmethod
     def utcnow(cls):
         "Construct a UTC datetime from time.time()."
         t = _time.time()
         return cls.utcfromtimestamp(t)
-    utcnow = classmethod(utcnow)
 
+    @classmethod
     def combine(cls, date, time):
         "Construct a datetime from a given date and a given time."
         if not isinstance(date, _date_class):
@@ -1478,7 +1560,6 @@
         return cls(date.year, date.month, date.day,
                    time.hour, time.minute, time.second, time.microsecond,
                    time.tzinfo)
-    combine = classmethod(combine)
 
     def timetuple(self):
         "Return local time tuple compatible with time.localtime()."
@@ -1504,7 +1585,7 @@
 
     def date(self):
         "Return the date part."
-        return date(self.__year, self.__month, self.__day)
+        return date(self._year, self._month, self._day)
 
     def time(self):
         "Return the time part, with tzinfo None."
@@ -1564,8 +1645,8 @@
 
     def ctime(self):
         "Format a la ctime()."
-        t = tmxxx(self.__year, self.__month, self.__day, self.__hour,
-                  self.__minute, self.__second)
+        t = tmxxx(self._year, self._month, self._day, self._hour,
+                  self._minute, self._second)
         return t.ctime()
 
     def isoformat(self, sep='T'):
@@ -1580,10 +1661,10 @@
         Optional argument sep specifies the separator between date and
         time, default 'T'.
         """
-        s = ("%04d-%02d-%02d%c" % (self.__year, self.__month, self.__day,
+        s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day,
                                   sep) +
-                _format_time(self.__hour, self.__minute, self.__second,
-                             self.__microsecond))
+                _format_time(self._hour, self._minute, self._second,
+                             self._microsecond))
         off = self._utcoffset()
         if off is not None:
             if off < 0:
@@ -1596,13 +1677,13 @@
         return s
 
     def __repr__(self):
-        "Convert to formal string, for repr()."
-        L = [self.__year, self.__month, self.__day, # These are never zero
-             self.__hour, self.__minute, self.__second, self.__microsecond]
+        """Convert to formal string, for repr()."""
+        L = [self._year, self._month, self._day, # These are never zero
+             self._hour, self._minute, self._second, self._microsecond]
         if L[-1] == 0:
             del L[-1]
         if L[-1] == 0:
-            del L[-1]            
+            del L[-1]
         s = ", ".join(map(str, L))
         s = "%s(%s)" % ('datetime.' + self.__class__.__name__, s)
         if self._tzinfo is not None:
@@ -1675,7 +1756,7 @@
 
     def __eq__(self, other):
         if isinstance(other, datetime):
-            return self.__cmp(other) == 0
+            return self._cmp(other) == 0
         elif hasattr(other, "timetuple") and not isinstance(other, date):
             return NotImplemented
         else:
@@ -1683,7 +1764,7 @@
 
     def __ne__(self, other):
         if isinstance(other, datetime):
-            return self.__cmp(other) != 0
+            return self._cmp(other) != 0
         elif hasattr(other, "timetuple") and not isinstance(other, date):
             return NotImplemented
         else:
@@ -1691,7 +1772,7 @@
 
     def __le__(self, other):
         if isinstance(other, datetime):
-            return self.__cmp(other) <= 0
+            return self._cmp(other) <= 0
         elif hasattr(other, "timetuple") and not isinstance(other, date):
             return NotImplemented
         else:
@@ -1699,7 +1780,7 @@
 
     def __lt__(self, other):
         if isinstance(other, datetime):
-            return self.__cmp(other) < 0
+            return self._cmp(other) < 0
         elif hasattr(other, "timetuple") and not isinstance(other, date):
             return NotImplemented
         else:
@@ -1707,7 +1788,7 @@
 
     def __ge__(self, other):
         if isinstance(other, datetime):
-            return self.__cmp(other) >= 0
+            return self._cmp(other) >= 0
         elif hasattr(other, "timetuple") and not isinstance(other, date):
             return NotImplemented
         else:
@@ -1715,13 +1796,13 @@
 
     def __gt__(self, other):
         if isinstance(other, datetime):
-            return self.__cmp(other) > 0
+            return self._cmp(other) > 0
         elif hasattr(other, "timetuple") and not isinstance(other, date):
             return NotImplemented
         else:
             _cmperror(self, other)
 
-    def __cmp(self, other):
+    def _cmp(self, other):
         assert isinstance(other, datetime)
         mytz = self._tzinfo
         ottz = other._tzinfo
@@ -1737,12 +1818,12 @@
             base_compare = myoff == otoff
 
         if base_compare:
-            return cmp((self.__year, self.__month, self.__day,
-                        self.__hour, self.__minute, self.__second,
-                        self.__microsecond),
-                       (other.__year, other.__month, other.__day,
-                        other.__hour, other.__minute, other.__second,
-                        other.__microsecond))
+            return cmp((self._year, self._month, self._day,
+                        self._hour, self._minute, self._second,
+                        self._microsecond),
+                       (other._year, other._month, other._day,
+                        other._hour, other._minute, other._second,
+                        other._microsecond))
         if myoff is None or otoff is None:
             # XXX Buggy in 2.2.2.
             raise TypeError("cannot compare naive and aware datetimes")
@@ -1756,13 +1837,13 @@
         "Add a datetime and a timedelta."
         if not isinstance(other, timedelta):
             return NotImplemented
-        t = tmxxx(self.__year,
-                  self.__month,
-                  self.__day + other.days,
-                  self.__hour,
-                  self.__minute,
-                  self.__second + other.seconds,
-                  self.__microsecond + other.microseconds)
+        t = tmxxx(self._year,
+                  self._month,
+                  self._day + other.days,
+                  self._hour,
+                  self._minute,
+                  self._second + other.seconds,
+                  self._microsecond + other.microseconds)
         self._checkOverflow(t.year)
         result = datetime(t.year, t.month, t.day,
                                 t.hour, t.minute, t.second,
@@ -1780,11 +1861,11 @@
 
         days1 = self.toordinal()
         days2 = other.toordinal()
-        secs1 = self.__second + self.__minute * 60 + self.__hour * 3600
-        secs2 = other.__second + other.__minute * 60 + other.__hour * 3600
+        secs1 = self._second + self._minute * 60 + self._hour * 3600
+        secs2 = other._second + other._minute * 60 + other._hour * 3600
         base = timedelta(days1 - days2,
                          secs1 - secs2,
-                         self.__microsecond - other.__microsecond)
+                         self._microsecond - other._microsecond)
         if self._tzinfo is other._tzinfo:
             return base
         myoff = self._utcoffset()
@@ -1792,13 +1873,13 @@
         if myoff == otoff:
             return base
         if myoff is None or otoff is None:
-            raise TypeError, "cannot mix naive and timezone-aware time"
+            raise TypeError("cannot mix naive and timezone-aware time")
         return base + timedelta(minutes = otoff-myoff)
 
     def __hash__(self):
         tzoff = self._utcoffset()
         if tzoff is None:
-            return hash(self.__getstate()[0])
+            return hash(self._getstate()[0])
         days = _ymd2ord(self.year, self.month, self.day)
         seconds = self.hour * 3600 + (self.minute - tzoff) * 60 + self.second
         return hash(timedelta(days, seconds, self.microsecond))
@@ -1807,12 +1888,12 @@
 
     __safe_for_unpickling__ = True      # For Python 2.2
 
-    def __getstate(self):
-        yhi, ylo = divmod(self.__year, 256)
-        us2, us3 = divmod(self.__microsecond, 256)
+    def _getstate(self):
+        yhi, ylo = divmod(self._year, 256)
+        us2, us3 = divmod(self._microsecond, 256)
         us1, us2 = divmod(us2, 256)
-        basestate = ("%c" * 10) % (yhi, ylo, self.__month, self.__day,
-                                   self.__hour, self.__minute, self.__second,
+        basestate = ("%c" * 10) % (yhi, ylo, self._month, self._day,
+                                   self._hour, self._minute, self._second,
                                    us1, us2, us3)
         if self._tzinfo is None:
             return (basestate,)
@@ -1820,14 +1901,14 @@
             return (basestate, self._tzinfo)
 
     def __setstate(self, string, tzinfo):
-        (yhi, ylo, self.__month, self.__day, self.__hour,
-         self.__minute, self.__second, us1, us2, us3) = map(ord, string)
-        self.__year = yhi * 256 + ylo
-        self.__microsecond = (((us1 << 8) | us2) << 8) | us3
+        (yhi, ylo, self._month, self._day, self._hour,
+         self._minute, self._second, us1, us2, us3) = map(ord, string)
+        self._year = yhi * 256 + ylo
+        self._microsecond = (((us1 << 8) | us2) << 8) | us3
         self._tzinfo = tzinfo
 
     def __reduce__(self):
-        return (self.__class__, self.__getstate())
+        return (self.__class__, self._getstate())
 
 
 datetime.min = datetime(1, 1, 1)
@@ -2009,7 +2090,7 @@
 
 Because we know z.d said z was in daylight time (else [5] would have held and
 we would have stopped then), and we know z.d != z'.d (else [8] would have held
-and we we have stopped then), and there are only 2 possible values dst() can
+and we have stopped then), and there are only 2 possible values dst() can
 return in Eastern, it follows that z'.d must be 0 (which it is in the example,
 but the reasoning doesn't depend on the example -- it depends on there being
 two possible dst() outcomes, one zero and the other non-zero).  Therefore
diff --git a/lib_pypy/distributed/socklayer.py b/lib_pypy/distributed/socklayer.py
--- a/lib_pypy/distributed/socklayer.py
+++ b/lib_pypy/distributed/socklayer.py
@@ -2,7 +2,7 @@
 import py
 from socket import socket
 
-XXX needs import adaptation as 'green' is removed from py lib for years 
+raise ImportError("XXX needs import adaptation as 'green' is removed from py lib for years")
 from py.impl.green.msgstruct import decodemessage, message
 from socket import socket, AF_INET, SOCK_STREAM
 import marshal
diff --git a/lib_pypy/numpypy/__init__.py b/lib_pypy/numpypy/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/numpypy/__init__.py
@@ -0,0 +1,2 @@
+from _numpypy import *
+from .core import *
diff --git a/lib_pypy/numpypy/core/__init__.py b/lib_pypy/numpypy/core/__init__.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/numpypy/core/__init__.py
@@ -0,0 +1,1 @@
+from .fromnumeric import *
diff --git a/lib_pypy/numpypy/core/fromnumeric.py b/lib_pypy/numpypy/core/fromnumeric.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/numpypy/core/fromnumeric.py
@@ -0,0 +1,2430 @@
+######################################################################    
+# This is a copy of numpy/core/fromnumeric.py modified for numpypy
+######################################################################
+# Each name in __all__ was a function in  'numeric' that is now 
+# a method in 'numpy'.
+# When the corresponding method is added to numpypy BaseArray
+# each function should be added as a module function 
+# at the applevel 
+# This can be as simple as doing the following
+#
+# def func(a, ...):
+#     if not hasattr(a, 'func')
+#         a = numpypy.array(a)
+#     return a.func(...)
+#
+######################################################################
+
+import numpypy
+
+# Module containing non-deprecated functions borrowed from Numeric.
+__docformat__ = "restructuredtext en"
+
+# functions that are now methods
+__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
+           'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
+           'searchsorted', 'alen',
+           'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
+           'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
+           'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
+           'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
+           'amax', 'amin',
+          ]
+          
+def take(a, indices, axis=None, out=None, mode='raise'):
+    """
+    Take elements from an array along an axis.
+
+    This function does the same thing as "fancy" indexing (indexing arrays
+    using arrays); however, it can be easier to use if you need elements
+    along a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        The source array.
+    indices : array_like
+        The indices of the values to extract.
+    axis : int, optional
+        The axis over which to select values. By default, the flattened
+        input array is used.
+    out : ndarray, optional
+        If provided, the result will be placed in this array. It should
+        be of the appropriate shape and dtype.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers.
+
+    Returns
+    -------
+    subarray : ndarray
+        The returned array has the same type as `a`.
+
+    See Also
+    --------
+    ndarray.take : equivalent method
+
+    Examples
+    --------
+    >>> a = [4, 3, 5, 7, 6, 8]
+    >>> indices = [0, 1, 4]
+    >>> np.take(a, indices)
+    array([4, 3, 6])
+
+    In this example if `a` is an ndarray, "fancy" indexing can be used.
+
+    >>> a = np.array(a)
+    >>> a[indices]
+    array([4, 3, 6])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+# not deprecated --- copy if necessary, view otherwise
+def reshape(a, newshape, order='C'):
+    """
+    Gives a new shape to an array without changing its data.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be reshaped.
+    newshape : int or tuple of ints
+        The new shape should be compatible with the original shape. If
+        an integer, then the result will be a 1-D array of that length.
+        One shape dimension can be -1. In this case, the value is inferred
+        from the length of the array and remaining dimensions.
+    order : {'C', 'F', 'A'}, optional
+        Determines whether the array data should be viewed as in C
+        (row-major) order, FORTRAN (column-major) order, or the C/FORTRAN
+        order should be preserved.
+
+    Returns
+    -------
+    reshaped_array : ndarray
+        This will be a new view object if possible; otherwise, it will
+        be a copy.
+
+
+    See Also
+    --------
+    ndarray.reshape : Equivalent method.
+
+    Notes
+    -----
+
+    It is not always possible to change the shape of an array without
+    copying the data. If you want an error to be raised if the data is copied,
+    you should assign the new shape to the shape attribute of the array::
+
+     >>> a = np.zeros((10, 2))
+     # A transpose make the array non-contiguous
+     >>> b = a.T
+     # Taking a view makes it possible to modify the shape without modifying the
+     # initial object.
+     >>> c = b.view()
+     >>> c.shape = (20)
+     AttributeError: incompatible shape for a non-contiguous array
+
+
+    Examples
+    --------
+    >>> a = np.array([[1,2,3], [4,5,6]])
+    >>> np.reshape(a, 6)
+    array([1, 2, 3, 4, 5, 6])
+    >>> np.reshape(a, 6, order='F')
+    array([1, 4, 2, 5, 3, 6])
+
+    >>> np.reshape(a, (3,-1))       # the unspecified value is inferred to be 2
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+
+    """
+    assert order == 'C'
+    if not hasattr(a, 'reshape'):
+        a = numpypy.array(a)
+    return a.reshape(newshape)
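+
+# Editorial usage sketch (assumes a PyPy build that provides the _numpypy
+# module): plain sequences are first coerced with numpypy.array(), and only
+# C order is accepted for now, hence the assert above.
+#
+#     from numpypy import reshape
+#     b = reshape([1, 2, 3, 4, 5, 6], (3, 2))   # list input is coerced
+#     assert b.shape == (3, 2)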
+
+
+def choose(a, choices, out=None, mode='raise'):
+    """
+    Construct an array from an index array and a set of arrays to choose from.
+
+    First of all, if confused or uncertain, definitely look at the Examples -
+    in its full generality, this function is less simple than it might
+    seem from the following code description (below ndi =
+    `numpy.lib.index_tricks`):
+
+    ``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
+
+    But this omits some subtleties.  Here is a fully general summary:
+
+    Given an "index" array (`a`) of integers and a sequence of `n` arrays
+    (`choices`), `a` and each choice array are first broadcast, as necessary,
+    to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
+    0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
+    for each `i`.  Then, a new array with shape ``Ba.shape`` is created as
+    follows:
+
+    * if ``mode=raise`` (the default), then, first of all, each element of
+      `a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
+      `i` (in that range) is the value at the `(j0, j1, ..., jm)` position
+      in `Ba` - then the value at the same position in the new array is the
+      value in `Bchoices[i]` at that same position;
+
+    * if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
+      integer; modular arithmetic is used to map integers outside the range
+      `[0, n-1]` back into that range; and then the new array is constructed
+      as above;
+
+    * if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
+      integer; negative integers are mapped to 0; values greater than `n-1`
+      are mapped to `n-1`; and then the new array is constructed as above.
+
+    Parameters
+    ----------
+    a : int array
+        This array must contain integers in `[0, n-1]`, where `n` is the number
+        of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
+        integers are permissible.
+    choices : sequence of arrays
+        Choice arrays. `a` and all of the choices must be broadcastable to the
+        same shape.  If `choices` is itself an array (not recommended), then
+        its outermost dimension (i.e., the one corresponding to
+        ``choices.shape[0]``) is taken as defining the "sequence".
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+    mode : {'raise' (default), 'wrap', 'clip'}, optional
+        Specifies how indices outside `[0, n-1]` will be treated:
+
+          * 'raise' : an exception is raised
+          * 'wrap' : value becomes value mod `n`
+          * 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
+
+    Returns
+    -------
+    merged_array : array
+        The merged result.
+
+    Raises
+    ------
+    ValueError: shape mismatch
+        If `a` and each choice array are not all broadcastable to the same
+        shape.
+
+    See Also
+    --------
+    ndarray.choose : equivalent method
+
+    Notes
+    -----
+    To reduce the chance of misinterpretation, even though the following
+    "abuse" is nominally supported, `choices` should neither be, nor be
+    thought of as, a single array, i.e., the outermost sequence-like container
+    should be either a list or a tuple.
+
+    Examples
+    --------
+
+    >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
+    ...   [20, 21, 22, 23], [30, 31, 32, 33]]
+    >>> np.choose([2, 3, 1, 0], choices
+    ... # the first element of the result will be the first element of the
+    ... # third (2+1) "array" in choices, namely, 20; the second element
+    ... # will be the second element of the fourth (3+1) choice array, i.e.,
+    ... # 31, etc.
+    ... )
+    array([20, 31, 12,  3])
+    >>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
+    array([20, 31, 12,  3])
+    >>> # because there are 4 choice arrays
+    >>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
+    array([20,  1, 12,  3])
+    >>> # i.e., 0
+
+    A couple examples illustrating how choose broadcasts:
+
+    >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
+    >>> choices = [-10, 10]
+    >>> np.choose(a, choices)
+    array([[ 10, -10,  10],
+           [-10,  10, -10],
+           [ 10, -10,  10]])
+
+    >>> # With thanks to Anne Archibald
+    >>> a = np.array([0, 1]).reshape((2,1,1))
+    >>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
+    >>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
+    >>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
+    array([[[ 1,  1,  1,  1,  1],
+            [ 2,  2,  2,  2,  2],
+            [ 3,  3,  3,  3,  3]],
+           [[-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5],
+            [-1, -2, -3, -4, -5]]])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def repeat(a, repeats, axis=None):
+    """
+    Repeat elements of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    repeats : {int, array of ints}
+        The number of repetitions for each element.  `repeats` is broadcasted
+        to fit the shape of the given axis.
+    axis : int, optional
+        The axis along which to repeat values.  By default, use the
+        flattened input array, and return a flat output array.
+
+    Returns
+    -------
+    repeated_array : ndarray
+        Output array which has the same shape as `a`, except along
+        the given axis.
+
+    See Also
+    --------
+    tile : Tile an array.
+
+    Examples
+    --------
+    >>> x = np.array([[1,2],[3,4]])
+    >>> np.repeat(x, 2)
+    array([1, 1, 2, 2, 3, 3, 4, 4])
+    >>> np.repeat(x, 3, axis=1)
+    array([[1, 1, 1, 2, 2, 2],
+           [3, 3, 3, 4, 4, 4]])
+    >>> np.repeat(x, [1, 2], axis=0)
+    array([[1, 2],
+           [3, 4],
+           [3, 4]])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def put(a, ind, v, mode='raise'):
+    """
+    Replaces specified elements of an array with given values.
+
+    The indexing works on the flattened target array. `put` is roughly
+    equivalent to:
+
+    ::
+
+        a.flat[ind] = v
+
+    Parameters
+    ----------
+    a : ndarray
+        Target array.
+    ind : array_like
+        Target indices, interpreted as integers.
+    v : array_like
+        Values to place in `a` at target indices. If `v` is shorter than
+        `ind` it will be repeated as necessary.
+    mode : {'raise', 'wrap', 'clip'}, optional
+        Specifies how out-of-bounds indices will behave.
+
+        * 'raise' -- raise an error (default)
+        * 'wrap' -- wrap around
+        * 'clip' -- clip to the range
+
+        'clip' mode means that all indices that are too large are replaced
+        by the index that addresses the last element along that axis. Note
+        that this disables indexing with negative numbers.
+
+    See Also
+    --------
+    putmask, place
+
+    Examples
+    --------
+    >>> a = np.arange(5)
+    >>> np.put(a, [0, 2], [-44, -55])
+    >>> a
+    array([-44,   1, -55,   3,   4])
+
+    >>> a = np.arange(5)
+    >>> np.put(a, 22, -5, mode='clip')
+    >>> a
+    array([ 0,  1,  2,  3, -5])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def swapaxes(a, axis1, axis2):
+    """
+    Interchange two axes of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis1 : int
+        First axis.
+    axis2 : int
+        Second axis.
+
+    Returns
+    -------
+    a_swapped : ndarray
+        If `a` is an ndarray, then a view of `a` is returned; otherwise
+        a new array is created.
+
+    Examples
+    --------
+    >>> x = np.array([[1,2,3]])
+    >>> np.swapaxes(x,0,1)
+    array([[1],
+           [2],
+           [3]])
+
+    >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
+    >>> x
+    array([[[0, 1],
+            [2, 3]],
+           [[4, 5],
+            [6, 7]]])
+
+    >>> np.swapaxes(x,0,2)
+    array([[[0, 4],
+            [2, 6]],
+           [[1, 5],
+            [3, 7]]])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def transpose(a, axes=None):
+    """
+    Permute the dimensions of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axes : list of ints, optional
+        By default, reverse the dimensions, otherwise permute the axes
+        according to the values given.
+
+    Returns
+    -------
+    p : ndarray
+        `a` with its axes permuted.  A view is returned whenever
+        possible.
+
+    See Also
+    --------
+    rollaxis
+
+    Examples
+    --------
+    >>> x = np.arange(4).reshape((2,2))
+    >>> x
+    array([[0, 1],
+           [2, 3]])
+
+    >>> np.transpose(x)
+    array([[0, 2],
+           [1, 3]])
+
+    >>> x = np.ones((1, 2, 3))
+    >>> np.transpose(x, (1, 0, 2)).shape
+    (2, 1, 3)
+
+    """
+    if axes is not None:
+        raise NotImplementedError('No "axes" arg yet.')
+    if not hasattr(a, 'T'):
+        a = numpypy.array(a)
+    return a.T
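+
+# Editorial note (assumption, not upstream behaviour): this wrapper only
+# relies on the .T attribute, so the "axes" argument is rejected for now.
+# If a transpose() method accepting axes becomes available at interp level,
+# the wrapper could forward it in the usual delegating style, e.g.:
+#
+#     def transpose(a, axes=None):
+#         if not hasattr(a, 'transpose'):
+#             a = numpypy.array(a)
+#         return a.transpose() if axes is None else a.transpose(axes)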
+
+def sort(a, axis=-1, kind='quicksort', order=None):
+    """
+    Return a sorted copy of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be sorted.
+    axis : int or None, optional
+        Axis along which to sort. If None, the array is flattened before
+        sorting. The default is -1, which sorts along the last axis.
+    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+        Sorting algorithm. Default is 'quicksort'.
+    order : list, optional
+        When `a` is a structured array, this argument specifies which fields
+        to compare first, second, and so on.  This list does not need to
+        include all of the fields.
+
+    Returns
+    -------
+    sorted_array : ndarray
+        Array of the same type and shape as `a`.
+
+    See Also
+    --------
+    ndarray.sort : Method to sort an array in-place.
+    argsort : Indirect sort.
+    lexsort : Indirect stable sort on multiple keys.
+    searchsorted : Find elements in a sorted array.
+
+    Notes
+    -----
+    The various sorting algorithms are characterized by their average speed,
+    worst case performance, work space size, and whether they are stable. A
+    stable sort keeps items with the same key in the same relative
+    order. The three available algorithms have the following
+    properties:
+
+    =========== ======= ============= ============ =======
+       kind      speed   worst case    work space  stable
+    =========== ======= ============= ============ =======
+    'quicksort'    1     O(n^2)            0          no
+    'mergesort'    2     O(n*log(n))      ~n/2        yes
+    'heapsort'     3     O(n*log(n))       0          no
+    =========== ======= ============= ============ =======
+
+    All the sort algorithms make temporary copies of the data when
+    sorting along any but the last axis.  Consequently, sorting along
+    the last axis is faster and uses less space than sorting along
+    any other axis.
+
+    The sort order for complex numbers is lexicographic. If both the real
+    and imaginary parts are non-nan then the order is determined by the
+    real parts except when they are equal, in which case the order is
+    determined by the imaginary parts.
+
+    Previous to numpy 1.4.0 sorting real and complex arrays containing nan
+    values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+    values are sorted to the end. The extended sort order is:
+
+      * Real: [R, nan]
+      * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+    where R is a non-nan real value. Complex values with the same nan
+    placements are sorted according to the non-nan part if it exists.
+    Non-nan values are sorted as before.
+
+    Examples
+    --------
+    >>> a = np.array([[1,4],[3,1]])
+    >>> np.sort(a)                # sort along the last axis
+    array([[1, 4],
+           [1, 3]])
+    >>> np.sort(a, axis=None)     # sort the flattened array
+    array([1, 1, 3, 4])
+    >>> np.sort(a, axis=0)        # sort along the first axis
+    array([[1, 1],
+           [3, 4]])
+
+    Use the `order` keyword to specify a field to use when sorting a
+    structured array:
+
+    >>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
+    >>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
+    ...           ('Galahad', 1.7, 38)]
+    >>> a = np.array(values, dtype=dtype)       # create a structured array
+    >>> np.sort(a, order='height')                        # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
+           ('Lancelot', 1.8999999999999999, 38)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    Sort by age, then height if ages are equal:
+
+    >>> np.sort(a, order=['age', 'height'])               # doctest: +SKIP
+    array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
+           ('Arthur', 1.8, 41)],
+          dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def argsort(a, axis=-1, kind='quicksort', order=None):
+    """
+    Returns the indices that would sort an array.
+
+    Perform an indirect sort along the given axis using the algorithm specified
+    by the `kind` keyword. It returns an array of indices of the same shape as
+    `a` that index data along the given axis in sorted order.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to sort.
+    axis : int or None, optional
+        Axis along which to sort.  The default is -1 (the last axis). If None,
+        the flattened array is used.
+    kind : {'quicksort', 'mergesort', 'heapsort'}, optional
+        Sorting algorithm.
+    order : list, optional
+        When `a` is an array with fields defined, this argument specifies
+        which fields to compare first, second, etc.  Not all fields need be
+        specified.
+
+    Returns
+    -------
+    index_array : ndarray, int
+        Array of indices that sort `a` along the specified axis.
+        In other words, ``a[index_array]`` yields a sorted `a`.
+
+    See Also
+    --------
+    sort : Describes sorting algorithms used.
+    lexsort : Indirect stable sort with multiple keys.
+    ndarray.sort : Inplace sort.
+
+    Notes
+    -----
+    See `sort` for notes on the different sorting algorithms.
+
+    As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    One dimensional array:
+
+    >>> x = np.array([3, 1, 2])
+    >>> np.argsort(x)
+    array([1, 2, 0])
+
+    Two-dimensional array:
+
+    >>> x = np.array([[0, 3], [2, 2]])
+    >>> x
+    array([[0, 3],
+           [2, 2]])
+
+    >>> np.argsort(x, axis=0)
+    array([[0, 1],
+           [1, 0]])
+
+    >>> np.argsort(x, axis=1)
+    array([[0, 1],
+           [0, 1]])
+
+    Sorting with keys:
+
+    >>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
+    >>> x
+    array([(1, 0), (0, 1)],
+          dtype=[('x', '<i4'), ('y', '<i4')])
+
+    >>> np.argsort(x, order=('x','y'))
+    array([1, 0])
+
+    >>> np.argsort(x, order=('y','x'))
+    array([0, 1])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def argmax(a, axis=None):
+    """
+    Indices of the maximum values along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        By default, the index is into the flattened array, otherwise
+        along the specified axis.
+
+    Returns
+    -------
+    index_array : ndarray of ints
+        Array of indices into the array. It has the same shape as `a.shape`
+        with the dimension along `axis` removed.
+
+    See Also
+    --------
+    ndarray.argmax, argmin
+    amax : The maximum value along a given axis.
+    unravel_index : Convert a flat index into an index tuple.
+
+    Notes
+    -----
+    In case of multiple occurrences of the maximum values, the indices
+    corresponding to the first occurrence are returned.
+
+    Examples
+    --------
+    >>> a = np.arange(6).reshape(2,3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5]])
+    >>> np.argmax(a)
+    5
+    >>> np.argmax(a, axis=0)
+    array([1, 1, 1])
+    >>> np.argmax(a, axis=1)
+    array([2, 2])
+
+    >>> b = np.arange(6)
+    >>> b[1] = 5
+    >>> b
+    array([0, 5, 2, 3, 4, 5])
+    >>> np.argmax(b) # Only the first occurrence is returned.
+    1
+
+    """
+    assert axis is None
+    if not hasattr(a, 'argmax'):
+        a = numpypy.array(a)
+    return a.argmax()
+
+
+def argmin(a, axis=None):
+    """
+    Return the indices of the minimum values along an axis.
+
+    See Also
+    --------
+    argmax : Similar function.  Please refer to `numpy.argmax` for detailed
+        documentation.
+
+    """
+    assert axis is None
+    if not hasattr(a, 'argmin'):
+        a = numpypy.array(a)
+    return a.argmin()
+
+
+def searchsorted(a, v, side='left'):
+    """
+    Find indices where elements should be inserted to maintain order.
+
+    Find the indices into a sorted array `a` such that, if the corresponding
+    elements in `v` were inserted before the indices, the order of `a` would
+    be preserved.
+
+    Parameters
+    ----------
+    a : 1-D array_like
+        Input array, sorted in ascending order.
+    v : array_like
+        Values to insert into `a`.
+    side : {'left', 'right'}, optional
+        If 'left', the index of the first suitable location found is given.  If
+        'right', return the last such index.  If there is no suitable
+        index, return either 0 or N (where N is the length of `a`).
+
+    Returns
+    -------
+    indices : array of ints
+        Array of insertion points with the same shape as `v`.
+
+    See Also
+    --------
+    sort : Return a sorted copy of an array.
+    histogram : Produce histogram from 1-D data.
+
+    Notes
+    -----
+    Binary search is used to find the required insertion points.
+
+    As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing
+    `nan` values. The enhanced sort order is documented in `sort`.
+
+    Examples
+    --------
+    >>> np.searchsorted([1,2,3,4,5], 3)
+    2
+    >>> np.searchsorted([1,2,3,4,5], 3, side='right')
+    3
+    >>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
+    array([0, 5, 1, 2])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def resize(a, new_shape):
+    """
+    Return a new array with the specified shape.
+
+    If the new array is larger than the original array, then the new
+    array is filled with repeated copies of `a`.  Note that this behavior
+    is different from a.resize(new_shape) which fills with zeros instead
+    of repeated copies of `a`.
+
+    Parameters
+    ----------
+    a : array_like
+        Array to be resized.
+
+    new_shape : int or tuple of int
+        Shape of resized array.
+
+    Returns
+    -------
+    reshaped_array : ndarray
+        The new array is formed from the data in the old array, repeated
+        if necessary to fill out the required number of elements.  The
+        data are repeated in the order that they are stored in memory.
+
+    See Also
+    --------
+    ndarray.resize : resize an array in-place.
+
+    Examples
+    --------
+    >>> a=np.array([[0,1],[2,3]])
+    >>> np.resize(a,(1,4))
+    array([[0, 1, 2, 3]])
+    >>> np.resize(a,(2,4))
+    array([[0, 1, 2, 3],
+           [0, 1, 2, 3]])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def squeeze(a):
+    """
+    Remove single-dimensional entries from the shape of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+
+    Returns
+    -------
+    squeezed : ndarray
+        The input array, but with all dimensions of length 1
+        removed.  Whenever possible, a view on `a` is returned.
+
+    Examples
+    --------
+    >>> x = np.array([[[0], [1], [2]]])
+    >>> x.shape
+    (1, 3, 1)
+    >>> np.squeeze(x).shape
+    (3,)
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def diagonal(a, offset=0, axis1=0, axis2=1):
+    """
+    Return specified diagonals.
+
+    If `a` is 2-D, returns the diagonal of `a` with the given offset,
+    i.e., the collection of elements of the form ``a[i, i+offset]``.  If
+    `a` has more than two dimensions, then the axes specified by `axis1`
+    and `axis2` are used to determine the 2-D sub-array whose diagonal is
+    returned.  The shape of the resulting array can be determined by
+    removing `axis1` and `axis2` and appending an index to the right equal
+    to the size of the resulting diagonals.
+
+    Parameters
+    ----------
+    a : array_like
+        Array from which the diagonals are taken.
+    offset : int, optional
+        Offset of the diagonal from the main diagonal.  Can be positive or
+        negative.  Defaults to main diagonal (0).
+    axis1 : int, optional
+        Axis to be used as the first axis of the 2-D sub-arrays from which
+        the diagonals should be taken.  Defaults to first axis (0).
+    axis2 : int, optional
+        Axis to be used as the second axis of the 2-D sub-arrays from
+        which the diagonals should be taken. Defaults to second axis (1).
+
+    Returns
+    -------
+    array_of_diagonals : ndarray
+        If `a` is 2-D, a 1-D array containing the diagonal is returned.
+        If the dimension of `a` is larger, then an array of diagonals is
+        returned, "packed" from left-most dimension to right-most (e.g.,
+        if `a` is 3-D, then the diagonals are "packed" along rows).
+
+    Raises
+    ------
+    ValueError
+        If the dimension of `a` is less than 2.
+
+    See Also
+    --------
+    diag : MATLAB work-a-like for 1-D and 2-D arrays.
+    diagflat : Create diagonal arrays.
+    trace : Sum along diagonals.
+
+    Examples
+    --------
+    >>> a = np.arange(4).reshape(2,2)
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> a.diagonal()
+    array([0, 3])
+    >>> a.diagonal(1)
+    array([1])
+
+    A 3-D example:
+
+    >>> a = np.arange(8).reshape(2,2,2); a
+    array([[[0, 1],
+            [2, 3]],
+           [[4, 5],
+            [6, 7]]])
+    >>> a.diagonal(0, # Main diagonals of two arrays created by skipping
+    ...            0, # across the outer(left)-most axis last and
+    ...            1) # the "middle" (row) axis first.
+    array([[0, 6],
+           [1, 7]])
+
+    The sub-arrays whose main diagonals we just obtained; note that each
+    corresponds to fixing the right-most (column) axis, and that the
+    diagonals are "packed" in rows.
+
+    >>> a[:,:,0] # main diagonal is [0 6]
+    array([[0, 2],
+           [4, 6]])
+    >>> a[:,:,1] # main diagonal is [1 7]
+    array([[1, 3],
+           [5, 7]])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+    """
+    Return the sum along diagonals of the array.
+
+    If `a` is 2-D, the sum along its diagonal with the given offset
+    is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
+
+    If `a` has more than two dimensions, then the axes specified by axis1 and
+    axis2 are used to determine the 2-D sub-arrays whose traces are returned.
+    The shape of the resulting array is the same as that of `a` with `axis1`
+    and `axis2` removed.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array, from which the diagonals are taken.
+    offset : int, optional
+        Offset of the diagonal from the main diagonal. Can be both positive
+        and negative. Defaults to 0.
+    axis1, axis2 : int, optional
+        Axes to be used as the first and second axis of the 2-D sub-arrays
+        from which the diagonals should be taken. Defaults are the first two
+        axes of `a`.
+    dtype : dtype, optional
+        Determines the data-type of the returned array and of the accumulator
+        where the elements are summed. If dtype has the value None and `a` is
+        of integer type of precision less than the default integer
+        precision, then the default integer precision is used. Otherwise,
+        the precision is the same as that of `a`.
+    out : ndarray, optional
+        Array into which the output is placed. Its type is preserved and
+        it must be of the right shape to hold the output.
+
+    Returns
+    -------
+    sum_along_diagonals : ndarray
+        If `a` is 2-D, the sum along the diagonal is returned.  If `a` has
+        larger dimensions, then an array of sums along diagonals is returned.
+
+    See Also
+    --------
+    diag, diagonal, diagflat
+
+    Examples
+    --------
+    >>> np.trace(np.eye(3))
+    3.0
+    >>> a = np.arange(8).reshape((2,2,2))
+    >>> np.trace(a)
+    array([6, 8])
+
+    >>> a = np.arange(24).reshape((2,2,2,3))
+    >>> np.trace(a).shape
+    (2, 3)
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+def ravel(a, order='C'):
+    """
+    Return a flattened array.
+
+    A 1-D array, containing the elements of the input, is returned.  A copy is
+    made only if needed.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.  The elements in ``a`` are read in the order specified by
+        `order`, and packed as a 1-D array.
+    order : {'C','F', 'A', 'K'}, optional
+        The elements of ``a`` are read in this order. 'C' means to view
+        the elements in C (row-major) order. 'F' means to view the elements
+        in Fortran (column-major) order. 'A' means to view the elements
+        in 'F' order if a is Fortran contiguous, 'C' order otherwise.
+        'K' means to view the elements in the order they occur in memory,
+        except for reversing the data when strides are negative.
+        By default, 'C' order is used.
+
+    Returns
+    -------
+    1d_array : ndarray
+        Output of the same dtype as `a`, and of shape ``(a.size(),)``.
+
+    See Also
+    --------
+    ndarray.flat : 1-D iterator over an array.
+    ndarray.flatten : 1-D array copy of the elements of an array
+                      in row-major order.
+
+    Notes
+    -----
+    In row-major order, the row index varies the slowest, and the column
+    index the quickest.  This can be generalized to multiple dimensions,
+    where row-major order implies that the index along the first axis
+    varies slowest, and the index along the last quickest.  The opposite holds
+    for Fortran-, or column-major, mode.
+
+    Examples
+    --------
+    It is equivalent to ``reshape(-1, order=order)``.
+
+    >>> x = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> print np.ravel(x)
+    [1 2 3 4 5 6]
+
+    >>> print x.reshape(-1)
+    [1 2 3 4 5 6]
+
+    >>> print np.ravel(x, order='F')
+    [1 4 2 5 3 6]
+
+    When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
+
+    >>> print np.ravel(x.T)
+    [1 4 2 5 3 6]
+    >>> print np.ravel(x.T, order='A')
+    [1 2 3 4 5 6]
+
+    When ``order`` is 'K', it will preserve orderings that are neither 'C'
+    nor 'F', but won't reverse axes:
+
+    >>> a = np.arange(3)[::-1]; a
+    array([2, 1, 0])
+    >>> a.ravel(order='C')
+    array([2, 1, 0])
+    >>> a.ravel(order='K')
+    array([2, 1, 0])
+
+    >>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
+    array([[[ 0,  2,  4],
+            [ 1,  3,  5]],
+           [[ 6,  8, 10],
+            [ 7,  9, 11]]])
+    >>> a.ravel(order='C')
+    array([ 0,  2,  4,  1,  3,  5,  6,  8, 10,  7,  9, 11])
+    >>> a.ravel(order='K')
+    array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def nonzero(a):
+    """
+    Return the indices of the elements that are non-zero.
+
+    Returns a tuple of arrays, one for each dimension of `a`, containing
+    the indices of the non-zero elements in that dimension. The
+    corresponding non-zero values can be obtained with::
+
+        a[nonzero(a)]
+
+    To group the indices by element, rather than dimension, use::
+
+        transpose(nonzero(a))
+
+    The result of this is always a 2-D array, with a row for
+    each non-zero element.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+
+    Returns
+    -------
+    tuple_of_arrays : tuple
+        Indices of elements that are non-zero.
+
+    See Also
+    --------
+    flatnonzero :
+        Return indices that are non-zero in the flattened version of the input
+        array.
+    ndarray.nonzero :
+        Equivalent ndarray method.
+    count_nonzero :
+        Counts the number of non-zero elements in the input array.
+
+    Examples
+    --------
+    >>> x = np.eye(3)
+    >>> x
+    array([[ 1.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  1.]])
+    >>> np.nonzero(x)
+    (array([0, 1, 2]), array([0, 1, 2]))
+
+    >>> x[np.nonzero(x)]
+    array([ 1.,  1.,  1.])
+    >>> np.transpose(np.nonzero(x))
+    array([[0, 0],
+           [1, 1],
+           [2, 2]])
+
+    A common use for ``nonzero`` is to find the indices of an array, where
+    a condition is True.  Given an array `a`, the condition `a` > 3 is a
+    boolean array and since False is interpreted as 0, np.nonzero(a > 3)
+    yields the indices of the `a` where the condition is true.
+
+    >>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
+    >>> a > 3
+    array([[False, False, False],
+           [ True,  True,  True],
+           [ True,  True,  True]], dtype=bool)
+    >>> np.nonzero(a > 3)
+    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+    The ``nonzero`` method of the boolean array can also be called.
+
+    >>> (a > 3).nonzero()
+    (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def shape(a):
+    """
+    Return the shape of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+
+    Returns
+    -------
+    shape : tuple of ints
+        The elements of the shape tuple give the lengths of the
+        corresponding array dimensions.
+
+    See Also
+    --------
+    alen
+    ndarray.shape : Equivalent array method.
+
+    Examples
+    --------
+    >>> np.shape(np.eye(3))
+    (3, 3)
+    >>> np.shape([[1, 2]])
+    (1, 2)
+    >>> np.shape([0])
+    (1,)
+    >>> np.shape(0)
+    ()
+
+    >>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
+    >>> np.shape(a)
+    (2,)
+    >>> a.shape
+    (2,)
+
+    """
+    if not hasattr(a, 'shape'):
+        a = numpypy.array(a)
+    return a.shape
+
+
+def compress(condition, a, axis=None, out=None):
+    """
+    Return selected slices of an array along given axis.
+
+    When working along a given axis, a slice along that axis is returned in
+    `output` for each index where `condition` evaluates to True. When
+    working on a 1-D array, `compress` is equivalent to `extract`.
+
+    Parameters
+    ----------
+    condition : 1-D array of bools
+        Array that selects which entries to return. If len(condition)
+        is less than the size of `a` along the given axis, then output is
+        truncated to the length of the condition array.
+    a : array_like
+        Array from which to extract a part.
+    axis : int, optional
+        Axis along which to take slices. If None (default), work on the
+        flattened array.
+    out : ndarray, optional
+        Output array.  Its type is preserved and it must be of the right
+        shape to hold the output.
+
+    Returns
+    -------
+    compressed_array : ndarray
+        A copy of `a` without the slices along axis for which `condition`
+        is false.
+
+    See Also
+    --------
+    take, choose, diag, diagonal, select
+    ndarray.compress : Equivalent method.
+    numpy.doc.ufuncs : Section "Output arguments"
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4], [5, 6]])
+    >>> a
+    array([[1, 2],
+           [3, 4],
+           [5, 6]])
+    >>> np.compress([0, 1], a, axis=0)
+    array([[3, 4]])
+    >>> np.compress([False, True, True], a, axis=0)
+    array([[3, 4],
+           [5, 6]])
+    >>> np.compress([False, True], a, axis=1)
+    array([[2],
+           [4],
+           [6]])
+
+    Working on the flattened array does not return slices along an axis but
+    selects elements.
+
+    >>> np.compress([False, True], a)
+    array([2])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def clip(a, a_min, a_max, out=None):
+    """
+    Clip (limit) the values in an array.
+
+    Given an interval, values outside the interval are clipped to
+    the interval edges.  For example, if an interval of ``[0, 1]``
+    is specified, values smaller than 0 become 0, and values larger
+    than 1 become 1.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing elements to clip.
+    a_min : scalar or array_like
+        Minimum value.
+    a_max : scalar or array_like
+        Maximum value.  If `a_min` or `a_max` are array_like, then they will
+        be broadcasted to the shape of `a`.
+    out : ndarray, optional
+        The results will be placed in this array. It may be the input
+        array for in-place clipping.  `out` must be of the right shape
+        to hold the output.  Its type is preserved.
+
+    Returns
+    -------
+    clipped_array : ndarray
+        An array with the elements of `a`, but where values
+        < `a_min` are replaced with `a_min`, and those > `a_max`
+        with `a_max`.
+
+    See Also
+    --------
+    numpy.doc.ufuncs : Section "Output arguments"
+
+    Examples
+    --------
+    >>> a = np.arange(10)
+    >>> np.clip(a, 1, 8)
+    array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
+    >>> a
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> np.clip(a, 3, 6, out=a)
+    array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
+    >>> a = np.arange(10)
+    >>> a
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
+    array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def sum(a, axis=None, dtype=None, out=None):
+    """
+    Sum of array elements over a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Elements to sum.
+    axis : integer, optional
+        Axis over which the sum is taken. By default `axis` is None,
+        and all elements are summed.
+    dtype : dtype, optional
+        The type of the returned array and of the accumulator in which
+        the elements are summed.  By default, the dtype of `a` is used.
+        An exception is when `a` has an integer type with less precision
+        than the default platform integer.  In that case, the default
+        platform integer is used instead.
+    out : ndarray, optional
+        Array into which the output is placed.  By default, a new array is
+        created.  If `out` is given, it must be of the appropriate shape
+        (the shape of `a` with `axis` removed, i.e.,
+        ``numpy.delete(a.shape, axis)``).  Its type is preserved. See
+        `doc.ufuncs` (Section "Output arguments") for more details.
+
+    Returns
+    -------
+    sum_along_axis : ndarray
+        An array with the same shape as `a`, with the specified
+        axis removed.   If `a` is a 0-d array, or if `axis` is None, a scalar
+        is returned.  If an output array is specified, a reference to
+        `out` is returned.
+
+    See Also
+    --------
+    ndarray.sum : Equivalent method.
+
+    cumsum : Cumulative sum of array elements.
+
+    trapz : Integration of array values using the composite trapezoidal rule.
+
+    mean, average
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    Examples
+    --------
+    >>> np.sum([0.5, 1.5])
+    2.0
+    >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
+    1
+    >>> np.sum([[0, 1], [0, 5]])
+    6
+    >>> np.sum([[0, 1], [0, 5]], axis=0)
+    array([0, 6])
+    >>> np.sum([[0, 1], [0, 5]], axis=1)
+    array([1, 5])
+
+    If the accumulator is too small, overflow occurs:
+
+    >>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
+    -128
+
+    """
+    assert dtype is None
+    assert out is None
+    if not hasattr(a, "sum"):
+        a = numpypy.array(a)
+    return a.sum(axis=axis)
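+
+# Editorial usage sketch (assumes a PyPy build with _numpypy): the asserts
+# above make the unsupported dtype/out arguments fail loudly instead of
+# being silently ignored, while axis is forwarded to the method:
+#
+#     from numpypy import array, sum
+#     total = sum(array([[0, 1], [0, 5]]))      # -> 6
+#     by_col = sum(array([[0, 1], [0, 5]]), 0)  # -> array([0, 6]), provided
+#                                               #    the axis argument is supported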
+
+
+def product (a, axis=None, dtype=None, out=None):
+    """
+    Return the product of array elements over a given axis.
+
+    See Also
+    --------
+    prod : equivalent function; see for details.
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def sometrue(a, axis=None, out=None):
+    """
+    Check whether some values are true.
+
+    Refer to `any` for full documentation.
+
+    See Also
+    --------
+    any : equivalent function
+
+    """
+    assert axis is None
+    assert out is None
+    if not hasattr(a, 'any'):
+        a = numpypy.array(a)
+    return a.any()
+
+
+def alltrue (a, axis=None, out=None):
+    """
+    Check if all elements of input array are true.
+
+    See Also
+    --------
+    numpy.all : Equivalent function; see for details.
+
+    """
+    assert axis is None
+    assert out is None
+    if not hasattr(a, 'all'):
+        a = numpypy.array(a)
+    return a.all()
+
+def any(a,axis=None, out=None):
+    """
+    Test whether any array element along a given axis evaluates to True.
+
+    Returns single boolean unless `axis` is not ``None``
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : int, optional
+        Axis along which a logical OR is performed.  The default
+        (`axis` = `None`) is to perform a logical OR over a flattened
+        input array. `axis` may be negative, in which case it counts
+        from the last to the first axis.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  It must have
+        the same shape as the expected output and its type is preserved
+        (e.g., if it is of type float, then it will remain so, returning
+        1.0 for True and 0.0 for False, regardless of the type of `a`).
+        See `doc.ufuncs` (Section "Output arguments") for details.
+
+    Returns
+    -------
+    any : bool or ndarray
+        A new boolean or `ndarray` is returned unless `out` is specified,
+        in which case a reference to `out` is returned.
+
+    See Also
+    --------
+    ndarray.any : equivalent method
+
+    all : Test whether all elements along a given axis evaluate to True.
+
+    Notes
+    -----
+    Not a Number (NaN), positive infinity and negative infinity evaluate
+    to `True` because these are not equal to zero.
+
+    Examples
+    --------
+    >>> np.any([[True, False], [True, True]])
+    True
+
+    >>> np.any([[True, False], [False, False]], axis=0)
+    array([ True, False], dtype=bool)
+
+    >>> np.any([-1, 0, 5])
+    True
+
+    >>> np.any(np.nan)
+    True
+
+    >>> o=np.array([False])
+    >>> z=np.any([-1, 4, 5], out=o)
+    >>> z, o
+    (array([ True], dtype=bool), array([ True], dtype=bool))
+    >>> # Check now that z is a reference to o
+    >>> z is o
+    True
+    >>> id(z), id(o) # identity of z and o              # doctest: +SKIP
+    (191614240, 191614240)
+
+    """
+    assert axis is None
+    assert out is None
+    if not hasattr(a, 'any'):
+        a = numpypy.array(a)
+    return a.any()
+
+
+def all(a,axis=None, out=None):
+    """
+    Test whether all array elements along a given axis evaluate to True.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array or object that can be converted to an array.
+    axis : int, optional
+        Axis along which a logical AND is performed.
+        The default (`axis` = `None`) is to perform a logical AND
+        over a flattened input array.  `axis` may be negative, in which
+        case it counts from the last to the first axis.
+    out : ndarray, optional
+        Alternate output array in which to place the result.
+        It must have the same shape as the expected output and its
+        type is preserved (e.g., if ``dtype(out)`` is float, the result
+        will consist of 0.0's and 1.0's).  See `doc.ufuncs` (Section
+        "Output arguments") for more details.
+
+    Returns
+    -------
+    all : ndarray, bool
+        A new boolean or array is returned unless `out` is specified,
+        in which case a reference to `out` is returned.
+
+    See Also
+    --------
+    ndarray.all : equivalent method
+
+    any : Test whether any element along a given axis evaluates to True.
+
+    Notes
+    -----
+    Not a Number (NaN), positive infinity and negative infinity
+    evaluate to `True` because these are not equal to zero.
+
+    Examples
+    --------
+    >>> np.all([[True,False],[True,True]])
+    False
+
+    >>> np.all([[True,False],[True,True]], axis=0)
+    array([ True, False], dtype=bool)
+
+    >>> np.all([-1, 4, 5])
+    True
+
+    >>> np.all([1.0, np.nan])
+    True
+
+    >>> o=np.array([False])
+    >>> z=np.all([-1, 4, 5], out=o)
+    >>> id(z), id(o), z                             # doctest: +SKIP
+    (28293632, 28293632, array([ True], dtype=bool))
+
+    """
+    assert axis is None
+    assert out is None
+    if not hasattr(a, 'all'):
+        a = numpypy.array(a)
+    return a.all()
+
+
+def cumsum (a, axis=None, dtype=None, out=None):
+    """
+    Return the cumulative sum of the elements along a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        Axis along which the cumulative sum is computed. The default
+        (None) is to compute the cumsum over the flattened array.
+    dtype : dtype, optional
+        Type of the returned array and of the accumulator in which the
+        elements are summed.  If `dtype` is not specified, it defaults
+        to the dtype of `a`, unless `a` has an integer dtype with a
+        precision less than that of the default platform integer.  In
+        that case, the default platform integer is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output
+        but the type will be cast if necessary. See `doc.ufuncs`
+        (Section "Output arguments") for more details.
+
+    Returns
+    -------
+    cumsum_along_axis : ndarray.
+        A new array holding the result is returned unless `out` is
+        specified, in which case a reference to `out` is returned. The
+        result has the same size as `a`, and the same shape as `a` if
+        `axis` is not None or `a` is a 1-d array.
+
+
+    See Also
+    --------
+    sum : Sum array elements.
+
+    trapz : Integration of array values using the composite trapezoidal rule.
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    Examples
+    --------
+    >>> a = np.array([[1,2,3], [4,5,6]])
+    >>> a
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> np.cumsum(a)
+    array([ 1,  3,  6, 10, 15, 21])
+    >>> np.cumsum(a, dtype=float)     # specifies type of output value(s)
+    array([  1.,   3.,   6.,  10.,  15.,  21.])
+
+    >>> np.cumsum(a,axis=0)      # sum over rows for each of the 3 columns
+    array([[1, 2, 3],
+           [5, 7, 9]])
+    >>> np.cumsum(a,axis=1)      # sum over columns for each of the 2 rows
+    array([[ 1,  3,  6],
+           [ 4,  9, 15]])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def cumproduct(a, axis=None, dtype=None, out=None):
+    """
+    Return the cumulative product over the given axis.
+
+
+    See Also
+    --------
+    cumprod : equivalent function; see for details.
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def ptp(a, axis=None, out=None):
+    """
+    Range of values (maximum - minimum) along an axis.
+
+    The name of the function comes from the acronym for 'peak to peak'.
+
+    Parameters
+    ----------
+    a : array_like
+        Input values.
+    axis : int, optional
+        Axis along which to find the peaks.  By default, flatten the
+        array.
+    out : array_like
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output,
+        but the type of the output values will be cast if necessary.
+
+    Returns
+    -------
+    ptp : ndarray
+        A new array holding the result, unless `out` was
+        specified, in which case a reference to `out` is returned.
+
+    Examples
+    --------
+    >>> x = np.arange(4).reshape((2,2))
+    >>> x
+    array([[0, 1],
+           [2, 3]])
+
+    >>> np.ptp(x, axis=0)
+    array([2, 2])
+
+    >>> np.ptp(x, axis=1)
+    array([1, 1])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def amax(a, axis=None, out=None):
+    """
+    Return the maximum of an array or maximum along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which to operate.  By default flattened input is used.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  Must be of
+        the same shape and buffer length as the expected output.  See
+        `doc.ufuncs` (Section "Output arguments") for more details.
+
+    Returns
+    -------
+    amax : ndarray or scalar
+        Maximum of `a`. If `axis` is None, the result is a scalar value.
+        If `axis` is given, the result is an array of dimension
+        ``a.ndim - 1``.
+
+    See Also
+    --------
+    nanmax : NaN values are ignored instead of being propagated.
+    fmax : same behavior as the C99 fmax function.
+    argmax : indices of the maximum values.
+
+    Notes
+    -----
+    NaN values are propagated, that is if at least one item is NaN, the
+    corresponding max value will be NaN as well.  To ignore NaN values
+    (MATLAB behavior), please use nanmax.
+
+    Examples
+    --------
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.amax(a)
+    3
+    >>> np.amax(a, axis=0)
+    array([2, 3])
+    >>> np.amax(a, axis=1)
+    array([1, 3])
+
+    >>> b = np.arange(5, dtype=np.float)
+    >>> b[2] = np.NaN
+    >>> np.amax(b)
+    nan
+    >>> np.nanmax(b)
+    4.0
+
+    """
+    assert axis is None
+    assert out is None
+    if not hasattr(a, "max"):
+        a = numpypy.array(a)
+    return a.max()
+
+
+def amin(a, axis=None, out=None):
+    """
+    Return the minimum of an array or minimum along an axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which to operate.  By default a flattened input is used.
+    out : ndarray, optional
+        Alternative output array in which to place the result.  Must
+        be of the same shape and buffer length as the expected output.
+        See `doc.ufuncs` (Section "Output arguments") for more details.
+
+    Returns
+    -------
+    amin : ndarray
+        A new array or a scalar array with the result.
+
+    See Also
+    --------
+    nanmin: nan values are ignored instead of being propagated
+    fmin: same behavior as the C99 fmin function
+    argmin: Return the indices of the minimum values.
+
+    amax, nanmax, fmax
+
+    Notes
+    -----
+    NaN values are propagated, that is if at least one item is nan, the
+    corresponding min value will be nan as well. To ignore NaN values (matlab
+    behavior), please use nanmin.
+
+    Examples
+    --------
+    >>> a = np.arange(4).reshape((2,2))
+    >>> a
+    array([[0, 1],
+           [2, 3]])
+    >>> np.amin(a)           # Minimum of the flattened array
+    0
+    >>> np.amin(a, axis=0)         # Minima along the first axis
+    array([0, 1])
+    >>> np.amin(a, axis=1)         # Minima along the second axis
+    array([0, 2])
+
+    >>> b = np.arange(5, dtype=np.float)
+    >>> b[2] = np.NaN
+    >>> np.amin(b)
+    nan
+    >>> np.nanmin(b)
+    0.0
+
+    """
+    # amin() is equivalent to min()
+    assert axis is None
+    assert out is None
+    if not hasattr(a, 'min'):
+        a = numpypy.array(a)
+    return a.min()
+
+def alen(a):
+    """
+    Return the length of the first dimension of the input array.
+
+    Parameters
+    ----------
+    a : array_like
+       Input array.
+
+    Returns
+    -------
+    l : int
+       Length of the first dimension of `a`.
+
+    See Also
+    --------
+    shape, size
+
+    Examples
+    --------
+    >>> a = np.zeros((7,4,5))
+    >>> a.shape[0]
+    7
+    >>> np.alen(a)
+    7
+
+    """
+    if not hasattr(a, 'shape'):
+        a = numpypy.array(a)
+    return a.shape[0]
+
+
+def prod(a, axis=None, dtype=None, out=None):
+    """
+    Return the product of array elements over a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis over which the product is taken.  By default, the product
+        of all elements is calculated.
+    dtype : data-type, optional
+        The data-type of the returned array, as well as of the accumulator
+        in which the elements are multiplied.  By default, if `a` is of
+        integer type, `dtype` is the default platform integer. (Note: if
+        the type of `a` is unsigned, then so is `dtype`.)  Otherwise,
+        the dtype is the same as that of `a`.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output, but the type of the
+        output values will be cast if necessary.
+
+    Returns
+    -------
+    product_along_axis : ndarray, see `dtype` parameter above.
+        An array shaped as `a` but with the specified axis removed.
+        Returns a reference to `out` if specified.
+
+    See Also
+    --------
+    ndarray.prod : equivalent method
+    numpy.doc.ufuncs : Section "Output arguments"
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.  That means that, on a 32-bit platform:
+
+    >>> x = np.array([536870910, 536870910, 536870910, 536870910])
+    >>> np.prod(x) #random
+    16
+
+    Examples
+    --------
+    By default, calculate the product of all elements:
+
+    >>> np.prod([1.,2.])
+    2.0
+
+    Even when the input array is two-dimensional:
+
+    >>> np.prod([[1.,2.],[3.,4.]])
+    24.0
+
+    But we can also specify the axis over which to multiply:
+
+    >>> np.prod([[1.,2.],[3.,4.]], axis=1)
+    array([  2.,  12.])
+
+    If the type of `x` is unsigned, then the output type is
+    the unsigned platform integer:
+
+    >>> x = np.array([1, 2, 3], dtype=np.uint8)
+    >>> np.prod(x).dtype == np.uint
+    True
+
+    If `x` is of a signed integer type, then the output type
+    is the default platform integer:
+
+    >>> x = np.array([1, 2, 3], dtype=np.int8)
+    >>> np.prod(x).dtype == np.int
+    True
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def cumprod(a, axis=None, dtype=None, out=None):
+    """
+    Return the cumulative product of elements along a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int, optional
+        Axis along which the cumulative product is computed.  By default
+        the input is flattened.
+    dtype : dtype, optional
+        Type of the returned array, as well as of the accumulator in which
+        the elements are multiplied.  If *dtype* is not specified, it
+        defaults to the dtype of `a`, unless `a` has an integer dtype with
+        a precision less than that of the default platform integer.  In
+        that case, the default platform integer is used instead.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must
+        have the same shape and buffer length as the expected output
+        but the type of the resulting values will be cast if necessary.
+
+    Returns
+    -------
+    cumprod : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case a reference to out is returned.
+
+    See Also
+    --------
+    numpy.doc.ufuncs : Section "Output arguments"
+
+    Notes
+    -----
+    Arithmetic is modular when using integer types, and no error is
+    raised on overflow.
+
+    Examples
+    --------
+    >>> a = np.array([1,2,3])
+    >>> np.cumprod(a) # intermediate results 1, 1*2
+    ...               # total product 1*2*3 = 6
+    array([1, 2, 6])
+    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+    >>> np.cumprod(a, dtype=float) # specify type of output
+    array([   1.,    2.,    6.,   24.,  120.,  720.])
+
+    The cumulative product for each column (i.e., over the rows) of `a`:
+
+    >>> np.cumprod(a, axis=0)
+    array([[ 1,  2,  3],
+           [ 4, 10, 18]])
+
+    The cumulative product for each row (i.e. over the columns) of `a`:
+
+    >>> np.cumprod(a,axis=1)
+    array([[  1,   2,   6],
+           [  4,  20, 120]])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def ndim(a):
+    """
+    Return the number of dimensions of an array.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.  If it is not already an ndarray, a conversion is
+        attempted.
+
+    Returns
+    -------
+    number_of_dimensions : int
+        The number of dimensions in `a`.  Scalars are zero-dimensional.
+
+    See Also
+    --------
+    ndarray.ndim : equivalent method
+    shape : dimensions of array
+    ndarray.shape : dimensions of array
+
+    Examples
+    --------
+    >>> np.ndim([[1,2,3],[4,5,6]])
+    2
+    >>> np.ndim(np.array([[1,2,3],[4,5,6]]))
+    2
+    >>> np.ndim(1)
+    0
+
+    """
+    if not hasattr(a, 'ndim'):
+        a = numpypy.array(a)
+    return a.ndim
+
+
+def rank(a):
+    """
+    Return the number of dimensions of an array.
+
+    If `a` is not already an array, a conversion is attempted.
+    Scalars are zero dimensional.
+
+    Parameters
+    ----------
+    a : array_like
+        Array whose number of dimensions is desired. If `a` is not an array,
+        a conversion is attempted.
+
+    Returns
+    -------
+    number_of_dimensions : int
+        The number of dimensions in the array.
+
+    See Also
+    --------
+    ndim : equivalent function
+    ndarray.ndim : equivalent property
+    shape : dimensions of array
+    ndarray.shape : dimensions of array
+
+    Notes
+    -----
+    In the old Numeric package, `rank` was the term used for the number of
+    dimensions, but in Numpy `ndim` is used instead.
+
+    Examples
+    --------
+    >>> np.rank([1,2,3])
+    1
+    >>> np.rank(np.array([[1,2,3],[4,5,6]]))
+    2
+    >>> np.rank(1)
+    0
+
+    """
+    if not hasattr(a, 'ndim'):
+        a = numpypy.array(a)
+    return a.ndim
+
+
+def size(a, axis=None):
+    """
+    Return the number of elements along a given axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which the elements are counted.  By default, give
+        the total number of elements.
+
+    Returns
+    -------
+    element_count : int
+        Number of elements along the specified axis.
+
+    See Also
+    --------
+    shape : dimensions of array
+    ndarray.shape : dimensions of array
+    ndarray.size : number of elements in array
+
+    Examples
+    --------
+    >>> a = np.array([[1,2,3],[4,5,6]])
+    >>> np.size(a)
+    6
+    >>> np.size(a,1)
+    3
+    >>> np.size(a,0)
+    2
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def around(a, decimals=0, out=None):
+    """
+    Evenly round to the given number of decimals.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    decimals : int, optional
+        Number of decimal places to round to (default: 0).  If
+        decimals is negative, it specifies the number of positions to
+        the left of the decimal point.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output, but the type of the output
+        values will be cast if necessary. See `doc.ufuncs` (Section
+        "Output arguments") for details.
+
+    Returns
+    -------
+    rounded_array : ndarray
+        An array of the same type as `a`, containing the rounded values.
+        Unless `out` was specified, a new array is created.  A reference to
+        the result is returned.
+
+        The real and imaginary parts of complex numbers are rounded
+        separately.  The result of rounding a float is a float.
+
+    See Also
+    --------
+    ndarray.round : equivalent method
+
+    ceil, fix, floor, rint, trunc
+
+
+    Notes
+    -----
+    For values exactly halfway between rounded decimal values, Numpy
+    rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
+    -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
+    to the inexact representation of decimal fractions in the IEEE
+    floating point standard [1]_ and errors introduced when scaling
+    by powers of ten.
+
+    References
+    ----------
+    .. [1] "Lecture Notes on the Status of  IEEE 754", William Kahan,
+           http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
+    .. [2] "How Futile are Mindless Assessments of
+           Roundoff in Floating-Point Computation?", William Kahan,
+           http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
+
+    Examples
+    --------
+    >>> np.around([0.37, 1.64])
+    array([ 0.,  2.])
+    >>> np.around([0.37, 1.64], decimals=1)
+    array([ 0.4,  1.6])
+    >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
+    array([ 0.,  2.,  2.,  4.,  4.])
+    >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
+    array([ 1,  2,  3, 11])
+    >>> np.around([1,2,3,11], decimals=-1)
+    array([ 0,  0,  0, 10])
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def round_(a, decimals=0, out=None):
+    """
+    Round an array to the given number of decimals.
+
+    Refer to `around` for full documentation.
+
+    See Also
+    --------
+    around : equivalent function
+
+    """
+    raise NotImplementedError('Waiting on interp level method')
+
+
+def mean(a, axis=None, dtype=None, out=None):
+    """
+    Compute the arithmetic mean along the specified axis.
+
+    Returns the average of the array elements.  The average is taken over
+    the flattened array by default, otherwise over the specified axis.
+    `float64` intermediate and return values are used for integer inputs.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose mean is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : int, optional
+        Axis along which the means are computed. The default is to compute
+        the mean of the flattened array.
+    dtype : data-type, optional
+        Type to use in computing the mean.  For integer inputs, the default
+        is `float64`; for floating point inputs, it is the same as the
+        input dtype.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+        See `doc.ufuncs` for details.
+
+    Returns
+    -------
+    m : ndarray, see dtype parameter above
+        If `out=None`, returns a new array containing the mean values,
+        otherwise a reference to the output array is returned.
+
+    See Also
+    --------
+    average : Weighted average
+
+    Notes
+    -----
+    The arithmetic mean is the sum of the elements along the axis divided
+    by the number of elements.
+
+    Note that for floating-point input, the mean is computed using the
+    same precision the input has.  Depending on the input data, this can
+    cause the results to be inaccurate, especially for `float32` (see
+    example below).  Specifying a higher-precision accumulator using the
+    `dtype` keyword can alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.mean(a)
+    2.5
+    >>> np.mean(a, axis=0)
+    array([ 2.,  3.])
+    >>> np.mean(a, axis=1)
+    array([ 1.5,  3.5])
+
+    In single precision, `mean` can be inaccurate:
+
+    >>> a = np.zeros((2, 512*512), dtype=np.float32)
+    >>> a[0, :] = 1.0
+    >>> a[1, :] = 0.1
+    >>> np.mean(a)
+    0.546875
+
+    Computing the mean in float64 is more accurate:
+
+    >>> np.mean(a, dtype=np.float64)
+    0.55000000074505806
+
+    """
+    assert dtype is None
+    assert out is None
+    if not hasattr(a, "mean"):
+        a = numpypy.array(a)
+    return a.mean(axis=axis)
+
+
+def std(a, axis=None, dtype=None, out=None, ddof=0):
+    """
+    Compute the standard deviation along the specified axis.
+
+    Returns the standard deviation, a measure of the spread of a distribution,
+    of the array elements. The standard deviation is computed for the
+    flattened array by default, otherwise over the specified axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Calculate the standard deviation of these values.
+    axis : int, optional
+        Axis along which the standard deviation is computed. The default is
+        to compute the standard deviation of the flattened array.
+    dtype : dtype, optional
+        Type to use in computing the standard deviation. For arrays of
+        integer type the default is float64, for arrays of float types it is
+        the same as the array type.
+    out : ndarray, optional
+        Alternative output array in which to place the result. It must have
+        the same shape as the expected output but the type (of the calculated
+        values) will be cast if necessary.
+    ddof : int, optional
+        Means Delta Degrees of Freedom.  The divisor used in calculations
+        is ``N - ddof``, where ``N`` represents the number of elements.
+        By default `ddof` is zero.
+
+    Returns
+    -------
+    standard_deviation : ndarray, see dtype parameter above.
+        If `out` is None, return a new array containing the standard deviation,
+        otherwise return a reference to the output array.
+
+    See Also
+    --------
+    var, mean
+    numpy.doc.ufuncs : Section "Output arguments"
+
+    Notes
+    -----
+    The standard deviation is the square root of the average of the squared
+    deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
+
+    The average squared deviation is normally calculated as ``x.sum() / N``, where
+    ``N = len(x)``.  If, however, `ddof` is specified, the divisor ``N - ddof``
+    is used instead. In standard statistical practice, ``ddof=1`` provides an
+    unbiased estimator of the variance of the infinite population. ``ddof=0``
+    provides a maximum likelihood estimate of the variance for normally
+    distributed variables. The standard deviation computed in this function
+    is the square root of the estimated variance, so even with ``ddof=1``, it
+    will not be an unbiased estimate of the standard deviation per se.
+
+    Note that, for complex numbers, `std` takes the absolute
+    value before squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the *std* is computed using the same
+    precision the input has. Depending on the input data, this can cause
+    the results to be inaccurate, especially for float32 (see example below).
+    Specifying a higher-accuracy accumulator using the `dtype` keyword can
+    alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> np.std(a)
+    1.1180339887498949
+    >>> np.std(a, axis=0)
+    array([ 1.,  1.])
+    >>> np.std(a, axis=1)
+    array([ 0.5,  0.5])
+
+    In single precision, std() can be inaccurate:
+
+    >>> a = np.zeros((2,512*512), dtype=np.float32)
+    >>> a[0,:] = 1.0
+    >>> a[1,:] = 0.1
+    >>> np.std(a)
+    0.45172946707416706
+
+    Computing the standard deviation in float64 is more accurate:
+
+    >>> np.std(a, dtype=np.float64)
+    0.44999999925552653
+
+    """
+    assert axis is None
+    assert dtype is None
+    assert out is None
+    assert ddof == 0
+    if not hasattr(a, "std"):
+        a = numpypy.array(a)
+    return a.std()
+
+
+def var(a, axis=None, dtype=None, out=None, ddof=0):
+    """
+    Compute the variance along the specified axis.
+
+    Returns the variance of the array elements, a measure of the spread of a
+    distribution.  The variance is computed for the flattened array by
+    default, otherwise over the specified axis.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose variance is desired.  If `a` is not an
+        array, a conversion is attempted.
+    axis : int, optional
+        Axis along which the variance is computed.  The default is to compute
+        the variance of the flattened array.
+    dtype : data-type, optional
+        Type to use in computing the variance.  For arrays of integer type
+        the default is `float32`; for arrays of float types it is the same as
+        the array type.
+    out : ndarray, optional
+        Alternate output array in which to place the result.  It must have
+        the same shape as the expected output, but the type is cast if
+        necessary.
+    ddof : int, optional
+        "Delta Degrees of Freedom": the divisor used in the calculation is
+        ``N - ddof``, where ``N`` represents the number of elements. By
+        default `ddof` is zero.
+
+    Returns
+    -------
+    variance : ndarray, see dtype parameter above
+        If ``out=None``, returns a new array containing the variance;
+        otherwise, a reference to the output array is returned.
+
+    See Also
+    --------
+    std : Standard deviation
+    mean : Average
+    numpy.doc.ufuncs : Section "Output arguments"
+
+    Notes
+    -----
+    The variance is the average of the squared deviations from the mean,
+    i.e.,  ``var = mean(abs(x - x.mean())**2)``.
+
+    The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
+    If, however, `ddof` is specified, the divisor ``N - ddof`` is used
+    instead.  In standard statistical practice, ``ddof=1`` provides an
+    unbiased estimator of the variance of a hypothetical infinite population.
+    ``ddof=0`` provides a maximum likelihood estimate of the variance for
+    normally distributed variables.
+
+    Note that for complex numbers, the absolute value is taken before
+    squaring, so that the result is always real and nonnegative.
+
+    For floating-point input, the variance is computed using the same
+    precision the input has.  Depending on the input data, this can cause
+    the results to be inaccurate, especially for `float32` (see example
+    below).  Specifying a higher-accuracy accumulator using the ``dtype``
+    keyword can alleviate this issue.
+
+    Examples
+    --------
+    >>> a = np.array([[1,2],[3,4]])
+    >>> np.var(a)
+    1.25
+    >>> np.var(a,0)
+    array([ 1.,  1.])
+    >>> np.var(a,1)
+    array([ 0.25,  0.25])
+
+    In single precision, var() can be inaccurate:
+
+    >>> a = np.zeros((2,512*512), dtype=np.float32)
+    >>> a[0,:] = 1.0
+    >>> a[1,:] = 0.1
+    >>> np.var(a)
+    0.20405951142311096
+
+    Computing the variance in float64 is more accurate:
+
+    >>> np.var(a, dtype=np.float64)
+    0.20249999932997387
+    >>> ((1-0.55)**2 + (0.1-0.55)**2)/2
+    0.20250000000000001
+
+    """
+    assert axis is None
+    assert dtype is None
+    assert out is None
+    assert ddof == 0
+    if not hasattr(a, "var"):
+        a = numpypy.array(a)
+    return a.var()
diff --git a/py/_code/code.py b/py/_code/code.py
--- a/py/_code/code.py
+++ b/py/_code/code.py
@@ -164,6 +164,7 @@
         #   if something:  # assume this causes a NameError
         #      # _this_ lines and the one
                #        below we don't want from entry.getsource()
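+        # clamp 'end' so we never index past the end of 'source'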
+        end = min(end, len(source))
         for i in range(self.lineno, end):
             if source[i].rstrip().endswith(':'):
                 end = i + 1
diff --git a/pypy/annotation/description.py b/pypy/annotation/description.py
--- a/pypy/annotation/description.py
+++ b/pypy/annotation/description.py
@@ -180,7 +180,12 @@
         if name is None:
             name = pyobj.func_name
         if signature is None:
-            signature = cpython_code_signature(pyobj.func_code)
+            if hasattr(pyobj, '_generator_next_method_of_'):
+                from pypy.interpreter.argument import Signature
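+                # generator next() methods take a single 'entry' argument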
+                signature = Signature(['entry'])     # haaaaaack
+                defaults = ()
+            else:
+                signature = cpython_code_signature(pyobj.func_code)
         if defaults is None:
             defaults = pyobj.func_defaults
         self.name = name
@@ -252,7 +257,8 @@
         try:
             inputcells = args.match_signature(signature, defs_s)
         except ArgErr, e:
-            raise TypeError, "signature mismatch: %s" % e.getmsg(self.name)
+            raise TypeError("signature mismatch: %s() %s" % 
+                            (self.name, e.getmsg()))
         return inputcells
 
     def specialize(self, inputcells, op=None):
diff --git a/pypy/bin/checkmodule.py b/pypy/bin/checkmodule.py
--- a/pypy/bin/checkmodule.py
+++ b/pypy/bin/checkmodule.py
@@ -1,43 +1,45 @@
 #! /usr/bin/env python
 """
-Usage:  checkmodule.py [-b backend] <module-name>
+Usage:  checkmodule.py <module-name>
 
-Compiles the PyPy extension module from pypy/module/<module-name>/
-into a fake program which does nothing. Useful for testing whether a
-modules compiles without doing a full translation. Default backend is cli.
-
-WARNING: this is still incomplete: there are chances that the
-compilation fails with strange errors not due to the module. If a
-module is known to compile during a translation but don't pass
-checkmodule.py, please report the bug (or, better, correct it :-).
+Check annotation and rtyping of the PyPy extension module from
+pypy/module/<module-name>/.  Useful for testing whether a
+module compiles without doing a full translation.
 """
 import autopath
-import sys
+import sys, os
 
 from pypy.objspace.fake.checkmodule import checkmodule
 
 def main(argv):
-    try:
-        assert len(argv) in (2, 4)
-        if len(argv) == 2:
-            backend = 'cli'
-            modname = argv[1]
-            if modname in ('-h', '--help'):
-                print >> sys.stderr, __doc__
-                sys.exit(0)
-            if modname.startswith('-'):
-                print >> sys.stderr, "Bad command line"
-                print >> sys.stderr, __doc__
-                sys.exit(1)
-        else:
-            _, b, backend, modname = argv
-            assert b == '-b'
-    except AssertionError:
+    if len(argv) != 2:
         print >> sys.stderr, __doc__
         sys.exit(2)
+    modname = argv[1]
+    if modname in ('-h', '--help'):
+        print >> sys.stderr, __doc__
+        sys.exit(0)
+    if modname.startswith('-'):
+        print >> sys.stderr, "Bad command line"
+        print >> sys.stderr, __doc__
+        sys.exit(1)
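+    # also accept a path like 'pypy/module/xxx', possibly with a trailing slash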
+    if os.path.sep in modname:
+        if os.path.basename(modname) == '':
+            modname = os.path.dirname(modname)
+        if os.path.basename(os.path.dirname(modname)) != 'module':
+            print >> sys.stderr, "Must give '../module/xxx', or just 'xxx'."
+            sys.exit(1)
+        modname = os.path.basename(modname)
+    try:
+        checkmodule(modname)
+    except Exception, e:
+        import traceback, pdb
+        traceback.print_exc()
+        pdb.post_mortem(sys.exc_info()[2])
+        return 1
     else:
-        checkmodule(modname, backend, interactive=True)
-        print 'Module compiled succesfully'
+        print 'Passed.'
+        return 0
 
 if __name__ == '__main__':
-    main(sys.argv)
+    sys.exit(main(sys.argv))
diff --git a/pypy/bin/py.py b/pypy/bin/py.py
--- a/pypy/bin/py.py
+++ b/pypy/bin/py.py
@@ -76,6 +76,8 @@
     config.objspace.suggest(allworkingmodules=False)
     if config.objspace.allworkingmodules:
         pypyoption.enable_allworkingmodules(config)
+    if config.objspace.usemodules._continuation:
+        config.translation.continuation = True
     if config.objspace.usemodules.thread:
         config.translation.thread = True
 
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -252,6 +252,10 @@
                    "use small tuples",
                    default=False),
 
+        BoolOption("withspecialisedtuple",
+                   "use specialised tuples",
+                   default=False),
+
         BoolOption("withrope", "use ropes as the string implementation",
                    default=False,
                    requires=[("objspace.std.withstrslice", False),
@@ -336,7 +340,7 @@
                    requires=[("objspace.std.builtinshortcut", True)]),
         BoolOption("withidentitydict",
                    "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
-                   default=True),
+                   default=False),
      ]),
 ])
 
@@ -365,6 +369,8 @@
         config.objspace.std.suggest(optimized_list_getitem=True)
         config.objspace.std.suggest(getattributeshortcut=True)
         config.objspace.std.suggest(newshortcut=True)
+        config.objspace.std.suggest(withspecialisedtuple=True)
+        config.objspace.std.suggest(withidentitydict=True)
         #if not IS_64_BITS:
         #    config.objspace.std.suggest(withsmalllong=True)
 
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -496,6 +496,17 @@
     def setup(self):
         super(AppClassCollector, self).setup()
         cls = self.obj
+        #
+        # <hack>
+        for name in dir(cls):
+            if name.startswith('test_'):
+                func = getattr(cls, name, None)
+                code = getattr(func, 'func_code', None)
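+                # co_flags & 32 is the CO_GENERATOR flag: the test method is a generator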
+                if code and code.co_flags & 32:
+                    raise AssertionError("unsupported: %r is a generator "
+                                         "app-level test method" % (name,))
+        # </hack>
+        #
         space = cls.space
         clsname = cls.__name__
         if self.config.option.runappdirect:
diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile
--- a/pypy/doc/Makefile
+++ b/pypy/doc/Makefile
@@ -12,7 +12,7 @@
 PAPEROPT_letter = -D latex_paper_size=letter
 ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest
 
 help:
 	@echo "Please use \`make <target>' where <target> is one of"
@@ -23,6 +23,7 @@
 	@echo "  htmlhelp  to make HTML files and a HTML help project"
 	@echo "  qthelp    to make HTML files and a qthelp project"
 	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  man       to make manual pages"
 	@echo "  changes   to make an overview of all changed/added/deprecated items"
 	@echo "  linkcheck to check all external links for integrity"
 	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
@@ -79,6 +80,11 @@
 	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
 	      "run these through (pdf)latex."
 
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man"
+
 changes:
 	python config/generate.py
 	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -175,15 +175,15 @@
 RPython
 =================
 
-RPython Definition, not
------------------------
+RPython Definition
+------------------
 
-The list and exact details of the "RPython" restrictions are a somewhat
-evolving topic.  In particular, we have no formal language definition
-as we find it more practical to discuss and evolve the set of
-restrictions while working on the whole program analysis.  If you
-have any questions about the restrictions below then please feel
-free to mail us at pypy-dev at codespeak net.
+RPython is a restricted subset of Python that is amenable to static analysis.
+Although there are additions to the language, and some things might surprisingly
+work, this is a rough list of the restrictions to keep in mind. Note
+that there are many special-cased restrictions that you'll encounter
+as you go. The exact definition is "RPython is everything that our translation
+toolchain can accept" :)
 
 .. _`wrapped object`: coding-guide.html#wrapping-rules
 
@@ -198,7 +198,7 @@
   contain both a string and a int must be avoided.  It is allowed to
   mix None (basically with the role of a null pointer) with many other
   types: `wrapped objects`, class instances, lists, dicts, strings, etc.
-  but *not* with int and floats.
+  but *not* with int, floats or tuples.
 
 **constants**
 
@@ -209,9 +209,12 @@
   have this restriction, so if you need mutable global state, store it
   in the attributes of some prebuilt singleton instance.
 
+
+
 **control structures**
 
-  all allowed but yield, ``for`` loops restricted to builtin types
+  all allowed; ``for`` loops are restricted to builtin types, and generators
+  are very restricted.
 
 **range**
 
@@ -226,7 +229,8 @@
 
 **generators**
 
-  generators are not supported.
+  generators are supported, but their exact scope is very limited. You can't
+  merge two different generators at one control point.
 
 **exceptions**
 
@@ -245,22 +249,27 @@
 
 **strings**
 
-  a lot of, but not all string methods are supported.  Indexes can be
+  a lot of, but not all, string methods are supported, and those that are
+  supported do not necessarily accept all arguments.  Indexes can be
   negative.  In case they are not, then you get slightly more efficient
   code if the translator can prove that they are non-negative.  When
   slicing a string it is necessary to prove that the slice start and
-  stop indexes are non-negative.
+  stop indexes are non-negative. There is no implicit str-to-unicode cast
+  anywhere.
 
 **tuples**
 
   no variable-length tuples; use them to store or return pairs or n-tuples of
-  values. Each combination of types for elements and length constitute a separate
-  and not mixable type.
+  values. Each combination of element types and length constitutes
+  a separate, non-mixable type.
 
 **lists**
 
   lists are used as an allocated array.  Lists are over-allocated, so list.append()
-  is reasonably fast.  Negative or out-of-bound indexes are only allowed for the
+  is reasonably fast. However, if you use a fixed-size list, the code
+  is more efficient. The annotator can figure out most of the time that your
+  list is fixed-size, even when you use a list comprehension.
+  Negative or out-of-bound indexes are only allowed for the
   most common operations, as follows:
 
   - *indexing*:
@@ -287,16 +296,14 @@
 
 **dicts**
 
-  dicts with a unique key type only, provided it is hashable. 
-  String keys have been the only allowed key types for a while, but this was generalized. 
-  After some re-optimization,
-  the implementation could safely decide that all string dict keys should be interned.
+  dicts with a unique key type only, provided it is hashable. Custom
+  hash functions and custom equality will not be honored.
+  Use ``pypy.rlib.objectmodel.r_dict`` for custom hash functions.
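+
+  A minimal sketch of how ``r_dict`` is used (illustrative only; the
+  key-equality and key-hash functions must agree with each other)::
+
+      from pypy.rlib.objectmodel import r_dict, compute_hash
+
+      def eq_keys(key1, key2):
+          return key1.lower() == key2.lower()
+
+      def hash_key(key):
+          return compute_hash(key.lower())
+
+      d = r_dict(eq_keys, hash_key)    # case-insensitive string keys
+      d['Foo'] = 42
+      assert d['FOO'] == 42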
 
 
 **list comprehensions**
 
-  may be used to create allocated, initialized arrays.
-  After list over-allocation was introduced, there is no longer any restriction.
+  May be used to create allocated, initialized arrays.
 
 **functions**
 
@@ -334,9 +341,8 @@
 
 **objects**
 
-  in PyPy, wrapped objects are borrowed from the object space. Just like
-  in CPython, code that needs e.g. a dictionary can use a wrapped dict
-  and the object space operations on it.
+  Normal rules apply. Special methods are not honoured, except ``__init__`` and
+  ``__del__``.
 
 This layout makes the number of types to take care about quite limited.
 
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -45,9 +45,9 @@
 # built documents.
 #
 # The short X.Y version.
-version = '1.6'
+version = '1.7'
 # The full version, including alpha/beta/rc tags.
-release = '1.6'
+release = '1.7'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -197,3 +197,10 @@
 # Example configuration for intersphinx: refer to the Python standard library.
 intersphinx_mapping = {'http://docs.python.org/': None}
 
+# -- Options for manpage output-------------------------------------------------
+
+man_pages = [
+  ('man/pypy.1', 'pypy',
+   u'fast, compliant alternative implementation of the Python language',
+   u'The PyPy Project', 1)
+]
diff --git a/pypy/doc/config/objspace.std.withspecialisedtuple.txt b/pypy/doc/config/objspace.std.withspecialisedtuple.txt
new file mode 100644
--- /dev/null
+++ b/pypy/doc/config/objspace.std.withspecialisedtuple.txt
@@ -0,0 +1,3 @@
+Use "specialized tuples", a custom implementation for some common kinds
+of tuples.  Currently limited to tuples of length 2, in three variants:
+(int, int), (float, float), and a generic (object, object).
diff --git a/pypy/doc/extradoc.rst b/pypy/doc/extradoc.rst
--- a/pypy/doc/extradoc.rst
+++ b/pypy/doc/extradoc.rst
@@ -8,6 +8,9 @@
 *Articles about PyPy published so far, most recent first:* (bibtex_ file)
 
 
+* `Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`_,
+  C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo
+
 * `Allocation Removal by Partial Evaluation in a Tracing JIT`_,
   C.F. Bolz, A. Cuni, M. Fijalkowski, M. Leuschel, S. Pedroni, A. Rigo
 
@@ -50,6 +53,9 @@
 
 *Other research using PyPy (as far as we know it):*
 
+* `Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`_,
+  N. Riley and C. Zilles
+
 * `PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`_,
   C. Bruni and T. Verwaest
 
@@ -65,6 +71,7 @@
 
 
 .. _bibtex: https://bitbucket.org/pypy/extradoc/raw/tip/talk/bibtex.bib
+.. _`Runtime Feedback in a Meta-Tracing JIT for Efficient Dynamic Languages`: https://bitbucket.org/pypy/extradoc/raw/extradoc/talk/icooolps2011/jit-hints.pdf
 .. _`Allocation Removal by Partial Evaluation in a Tracing JIT`: http://codespeak.net/svn/pypy/extradoc/talk/pepm2011/bolz-allocation-removal.pdf
 .. _`Towards a Jitting VM for Prolog Execution`: http://www.stups.uni-duesseldorf.de/publications/bolz-prolog-jit.pdf
 .. _`High performance implementation of Python for CLI/.NET with JIT compiler generation for dynamic languages`: http://buildbot.pypy.org/misc/antocuni-thesis.pdf
@@ -74,6 +81,7 @@
 .. _`Automatic JIT Compiler Generation with Runtime Partial Evaluation`:  http://www.stups.uni-duesseldorf.de/thesis/final-master.pdf
 .. _`RPython: A Step towards Reconciling Dynamically and Statically Typed OO Languages`: http://www.disi.unige.it/person/AnconaD/papers/Recent_abstracts.html#AACM-DLS07
 .. _`EU Reports`: index-report.html
+.. _`Hardware Transactional Memory Support for Lightweight Dynamic Language Evolution`: http://sabi.net/nriley/pubs/dls6-riley.pdf
 .. _`PyGirl: Generating Whole-System VMs from High-Level Prototypes using PyPy`: http://scg.unibe.ch/archive/papers/Brun09cPyGirl.pdf
 .. _`Representation-Based Just-in-Time Specialization and the Psyco Prototype for Python`: http://psyco.sourceforge.net/psyco-pepm-a.ps.gz
 .. _`Back to the Future in One Week -- Implementing a Smalltalk VM in PyPy`: http://dx.doi.org/10.1007/978-3-540-89275-5_7
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -112,10 +112,32 @@
 You might be interested in our `benchmarking site`_ and our 
 `jit documentation`_.
 
+Note that the JIT has a very high warm-up cost, meaning that programs
+are slow at the beginning.  If you want to compare the timings with
+CPython, even relatively simple programs need to run *at least* one
+second, preferably at least a few seconds.  Large, complicated programs
+need even more time to warm up the JIT.
+
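+For example, a minimal (illustrative) way to observe the warm-up is to time
+a toy function several times in one process and compare the first run with
+the later ones::
+
+    import time
+
+    def work():
+        total = 0
+        for i in xrange(10**7):
+            total += i
+        return total
+
+    for run in range(5):
+        start = time.time()
+        work()
+        print run, time.time() - start   # later runs are much faster
+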
 .. _`benchmarking site`: http://speed.pypy.org
 
 .. _`jit documentation`: jit/index.html
 
+---------------------------------------------------------------
+Couldn't the JIT dump and reload already-compiled machine code?
+---------------------------------------------------------------
+
+No, we found no way of doing that.  The JIT generates machine code
+containing a large number of constant addresses --- constant at the time
+the machine code is written.  The vast majority of these are not
+constants that you can find in the executable under a nice link name.  E.g.
+the addresses of Python classes are used all the time, but Python
+classes don't come statically from the executable; they are created anew
+every time you restart your program.  This makes saving and reloading
+machine code completely impossible without some very advanced way of
+mapping addresses in the old (now-dead) process to addresses in the new
+process, including checking that all the previous assumptions about the
+(now-dead) object are still true about the new object.
+
 
 .. _`prolog and javascript`:
 
diff --git a/pypy/doc/getting-started.rst b/pypy/doc/getting-started.rst
--- a/pypy/doc/getting-started.rst
+++ b/pypy/doc/getting-started.rst
@@ -53,11 +53,11 @@
 PyPy is ready to be executed as soon as you unpack the tarball or the zip
 file, with no need to install it in any specific location::
 
-    $ tar xf pypy-1.6-linux.tar.bz2
+    $ tar xf pypy-1.7-linux.tar.bz2
 
-    $ ./pypy-1.6/bin/pypy
+    $ ./pypy-1.7/bin/pypy
     Python 2.7.1 (?, Apr 27 2011, 12:44:21)
-    [PyPy 1.6.0 with GCC 4.4.3] on linux2
+    [PyPy 1.7.0 with GCC 4.4.3] on linux2
     Type "help", "copyright", "credits" or "license" for more information.
     And now for something completely different: ``implementing LOGO in LOGO:
     "turtles all the way down"''
@@ -75,14 +75,14 @@
 
     $ curl -O https://raw.github.com/pypa/pip/master/contrib/get-pip.py
 
-    $ ./pypy-1.6/bin/pypy distribute_setup.py
+    $ ./pypy-1.7/bin/pypy distribute_setup.py
 
-    $ ./pypy-1.6/bin/pypy get-pip.py
+    $ ./pypy-1.7/bin/pypy get-pip.py
 
-    $ ./pypy-1.6/bin/pip install pygments  # for example
+    $ ./pypy-1.7/bin/pip install pygments  # for example
 
-3rd party libraries will be installed in ``pypy-1.6/site-packages``, and
-the scripts in ``pypy-1.6/bin``.
+3rd party libraries will be installed in ``pypy-1.7/site-packages``, and
+the scripts in ``pypy-1.7/bin``.
 
 Installing using virtualenv
 ---------------------------
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/man/pypy.1.rst
@@ -0,0 +1,90 @@
+======
+ pypy
+======
+
+SYNOPSIS
+========
+
+``pypy`` [*options*]
+[``-c`` *cmd*\ \|\ ``-m`` *mod*\ \|\ *file.py*\ \|\ ``-``\ ]
+[*arg*\ ...]
+
+OPTIONS
+=======
+
+-i
+    Inspect interactively after running script.
+
+-O
+    Dummy optimization flag for compatibility with C Python.
+
+-c *cmd*
+    Program passed in as CMD (terminates option list).
+
+-S
+    Do not ``import site`` on initialization.
+
+-u
+    Unbuffered binary ``stdout`` and ``stderr``.
+
+-h, --help
+    Show a help message and exit.
+
+-m *mod*
+    Library module to be run as a script (terminates option list).
+
+-W *arg*
+    Warning control (*arg* is *action*:*message*:*category*:*module*:*lineno*).
+
+-E
+    Ignore environment variables (such as ``PYTHONPATH``).
+
+--version
+    Print the PyPy version.
+
+--info
+    Print translation information about this PyPy executable.
+
+--jit *arg*
+    Low level JIT parameters. Format is
+    *arg*\ ``=``\ *value*\ [``,``\ *arg*\ ``=``\ *value*\ ...]
+
+    ``off``
+        Disable the JIT.
+
+    ``threshold=``\ *value*
+        Number of times a loop has to run for it to become hot.
+
+    ``function_threshold=``\ *value*
+        Number of times a function must run for it to become traced from
+        start.
+
+    ``inlining=``\ *value*
+        Inline python functions or not (``1``/``0``).
+
+    ``loop_longevity=``\ *value*
+        A parameter controlling how long loops will be kept before being
+        freed, an estimate.
+
+    ``max_retrace_guards=``\ *value*
+        Number of extra guards a retrace can cause.
+
+    ``retrace_limit=``\ *value*
+        How many times we can try retracing before giving up.
+
+    ``trace_eagerness=``\ *value*
+        Number of times a guard has to fail before we start compiling a
+        bridge.
+
+    ``trace_limit=``\ *value*
+        Number of recorded operations before we abort tracing with
+        ``ABORT_TRACE_TOO_LONG``.
+
+    ``enable_opts=``\ *value*
+        Optimizations to enable, or ``all``.
+        Warning: this option is dangerous and should be avoided.
+
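+    For example (with illustrative values)::
+
+        pypy --jit threshold=200,trace_limit=10000 script.py
+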
+SEE ALSO
+========
+
+**python**\ (1)
diff --git a/pypy/doc/tool/makecontributor.py b/pypy/doc/tool/makecontributor.py
deleted file mode 100644
--- a/pypy/doc/tool/makecontributor.py
+++ /dev/null
@@ -1,47 +0,0 @@
-"""
-
-generates a contributor list
-
-"""
-import py
-
-# this file is useless, use the following commandline instead:
-# hg churn -c -t "{author}" | sed -e 's/ <.*//'
-
-try: 
-    path = py.std.sys.argv[1]
-except IndexError: 
-    print "usage: %s ROOTPATH" %(py.std.sys.argv[0])
-    raise SystemExit, 1
-
-d = {}
-
-for logentry in py.path.svnwc(path).log(): 
-    a = logentry.author 
-    if a in d: 
-        d[a] += 1
-    else: 
-        d[a] = 1
-
-items = d.items()
-items.sort(lambda x,y: -cmp(x[1], y[1]))
-
-import uconf # http://codespeak.net/svn/uconf/dist/uconf 
-
-# Authors that don't want to be listed
-excluded = set("anna gintas ignas".split())
-cutoff = 5 # cutoff for authors in the LICENSE file
-mark = False
-for author, count in items: 
-    if author in excluded:
-        continue
-    user = uconf.system.User(author)
-    try:
-        realname = user.realname.strip()
-    except KeyError:
-        realname = author
-    if not mark and count < cutoff:
-        mark = True
-        print '-'*60
-    print "   ", realname
-    #print count, "   ", author 
diff --git a/pypy/doc/translation.rst b/pypy/doc/translation.rst
--- a/pypy/doc/translation.rst
+++ b/pypy/doc/translation.rst
@@ -155,7 +155,7 @@
                    function.  The two input variables are the exception class
                    and the exception value, respectively.  (No other block will
                    actually link to the exceptblock if the function does not
-                   explicitely raise exceptions.)
+                   explicitly raise exceptions.)
 
 
 ``Block``
@@ -325,7 +325,7 @@
 Mutable objects need special treatment during annotation, because
 the annotation of contained values needs to be possibly updated to account
 for mutation operations, and consequently the annotation information
-reflown through the relevant parts of the flow the graphs.
+reflown through the relevant parts of the flow graphs.
 
 * ``SomeList`` stands for a list of homogeneous type (i.e. all the
   elements of the list are represented by a single common ``SomeXxx``
@@ -503,8 +503,8 @@
 
 Since RPython is a garbage collected language there is a lot of heap memory
 allocation going on all the time, which would either not occur at all in a more
-traditional explicitely managed language or results in an object which dies at
-a time known in advance and can thus be explicitely deallocated. For example a
+traditional explicitly managed language or results in an object which dies at
+a time known in advance and can thus be explicitly deallocated. For example a
 loop of the following form::
 
     for i in range(n):
@@ -696,7 +696,7 @@
 
 So far it is the second most mature high level backend after GenCLI:
 it still can't translate the full Standard Interpreter, but after the
-Leysin sprint we were able to compile and run the rpytstone and
+Leysin sprint we were able to compile and run the rpystone and
 richards benchmarks.
 
 GenJVM is almost entirely the work of Niko Matsakis, who worked on it
diff --git a/pypy/interpreter/argument.py b/pypy/interpreter/argument.py
--- a/pypy/interpreter/argument.py
+++ b/pypy/interpreter/argument.py
@@ -428,8 +428,8 @@
             return self._match_signature(w_firstarg,
                                          scope_w, signature, defaults_w, 0)
         except ArgErr, e:
-            raise OperationError(self.space.w_TypeError,
-                                 self.space.wrap(e.getmsg(fnname)))
+            raise operationerrfmt(self.space.w_TypeError,
+                                  "%s() %s", fnname, e.getmsg())
 
     def _parse(self, w_firstarg, signature, defaults_w, blindargs=0):
         """Parse args and kwargs according to the signature of a code object,
@@ -450,8 +450,8 @@
         try:
             return self._parse(w_firstarg, signature, defaults_w, blindargs)
         except ArgErr, e:
-            raise OperationError(self.space.w_TypeError,
-                                 self.space.wrap(e.getmsg(fnname)))
+            raise operationerrfmt(self.space.w_TypeError,
+                                  "%s() %s", fnname, e.getmsg())
 
     @staticmethod
     def frompacked(space, w_args=None, w_kwds=None):
@@ -626,7 +626,7 @@
 
 class ArgErr(Exception):
 
-    def getmsg(self, fnname):
+    def getmsg(self):
         raise NotImplementedError
 
 class ArgErrCount(ArgErr):
@@ -642,11 +642,10 @@
         self.num_args = got_nargs
         self.num_kwds = nkwds
 
-    def getmsg(self, fnname):
+    def getmsg(self):
         n = self.expected_nargs
         if n == 0:
-            msg = "%s() takes no arguments (%d given)" % (
-                fnname,
+            msg = "takes no arguments (%d given)" % (
                 self.num_args + self.num_kwds)
         else:
             defcount = self.num_defaults
@@ -672,8 +671,7 @@
                 msg2 = " non-keyword"
             else:
                 msg2 = ""
-            msg = "%s() takes %s %d%s argument%s (%d given)" % (
-                fnname,
+            msg = "takes %s %d%s argument%s (%d given)" % (
                 msg1,
                 n,
                 msg2,
@@ -686,9 +684,8 @@
     def __init__(self, argname):
         self.argname = argname
 
-    def getmsg(self, fnname):
-        msg = "%s() got multiple values for keyword argument '%s'" % (
-            fnname,
+    def getmsg(self):
+        msg = "got multiple values for keyword argument '%s'" % (
             self.argname)
         return msg
 
@@ -722,13 +719,11 @@
                     break
         self.kwd_name = name
 
-    def getmsg(self, fnname):
+    def getmsg(self):
         if self.num_kwds == 1:
-            msg = "%s() got an unexpected keyword argument '%s'" % (
-                fnname,
+            msg = "got an unexpected keyword argument '%s'" % (
                 self.kwd_name)
         else:
-            msg = "%s() got %d unexpected keyword arguments" % (
-                fnname,
+            msg = "got %d unexpected keyword arguments" % (
                 self.num_kwds)
         return msg
diff --git a/pypy/interpreter/astcompiler/ast.py b/pypy/interpreter/astcompiler/ast.py
--- a/pypy/interpreter/astcompiler/ast.py
+++ b/pypy/interpreter/astcompiler/ast.py
@@ -51,6 +51,24 @@
             space.setattr(self, w_name,
                           space.getitem(w_state, w_name))
 
+    def missing_field(self, space, required, host):
+        "Find which required field is missing."
+        state = self.initialization_state
+        for i in range(len(required)):
+            if (state >> i) & 1:
+                continue  # field is present
+            missing = required[i]
+            if missing is None:
+                continue  # field is optional
+            w_obj = self.getdictvalue(space, missing)
+            if w_obj is None:
+                err = "required field \"%s\" missing from %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+            else:
+                err = "incorrect type for field \"%s\" in %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+        raise AssertionError("should not reach here")
+
 
 class NodeVisitorNotImplemented(Exception):
     pass
@@ -94,17 +112,6 @@
 )
 
 
-def missing_field(space, state, required, host):
-    "Find which required field is missing."
-    for i in range(len(required)):
-        if not (state >> i) & 1:
-            missing = required[i]
-            if missing is not None:
-                 err = "required field \"%s\" missing from %s"
-                 err = err % (missing, host)
-                 w_err = space.wrap(err)
-                 raise OperationError(space.w_TypeError, w_err)
-    raise AssertionError("should not reach here")
 
 
 class mod(AST):
@@ -112,7 +119,6 @@
 
 class Module(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.w_body = None
@@ -128,7 +134,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Module')
+            self.missing_field(space, ['body'], 'Module')
         else:
             pass
         w_list = self.w_body
@@ -145,7 +151,6 @@
 
 class Interactive(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.w_body = None
@@ -161,7 +166,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Interactive')
+            self.missing_field(space, ['body'], 'Interactive')
         else:
             pass
         w_list = self.w_body
@@ -178,7 +183,6 @@
 
 class Expression(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.initialization_state = 1
@@ -192,7 +196,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Expression')
+            self.missing_field(space, ['body'], 'Expression')
         else:
             pass
         self.body.sync_app_attrs(space)
@@ -200,7 +204,6 @@
 
 class Suite(mod):
 
-
     def __init__(self, body):
         self.body = body
         self.w_body = None
@@ -216,7 +219,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['body'], 'Suite')
+            self.missing_field(space, ['body'], 'Suite')
         else:
             pass
         w_list = self.w_body
@@ -232,15 +235,13 @@
 
 
 class stmt(AST):
+
     def __init__(self, lineno, col_offset):
         self.lineno = lineno
         self.col_offset = col_offset
 
 class FunctionDef(stmt):
 
-    _lineno_mask = 16
-    _col_offset_mask = 32
-
     def __init__(self, name, args, body, decorator_list, lineno, col_offset):
         self.name = name
         self.args = args
@@ -264,7 +265,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 63:
-            missing_field(space, self.initialization_state, ['name', 'args', 'body', 'decorator_list', 'lineno', 'col_offset'], 'FunctionDef')
+            self.missing_field(space, ['lineno', 'col_offset', 'name', 'args', 'body', 'decorator_list'], 'FunctionDef')
         else:
             pass
         self.args.sync_app_attrs(space)
@@ -292,9 +293,6 @@
 
 class ClassDef(stmt):
 
-    _lineno_mask = 16
-    _col_offset_mask = 32
-
     def __init__(self, name, bases, body, decorator_list, lineno, col_offset):
         self.name = name
         self.bases = bases
@@ -320,7 +318,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 63:
-            missing_field(space, self.initialization_state, ['name', 'bases', 'body', 'decorator_list', 'lineno', 'col_offset'], 'ClassDef')
+            self.missing_field(space, ['lineno', 'col_offset', 'name', 'bases', 'body', 'decorator_list'], 'ClassDef')
         else:
             pass
         w_list = self.w_bases
@@ -357,9 +355,6 @@
 
 class Return(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         stmt.__init__(self, lineno, col_offset)
@@ -374,10 +369,10 @@
         return visitor.visit_Return(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~1) ^ 6:
-            missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Return')
+        if (self.initialization_state & ~4) ^ 3:
+            self.missing_field(space, ['lineno', 'col_offset', None], 'Return')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.value = None
         if self.value:
             self.value.sync_app_attrs(space)
@@ -385,9 +380,6 @@
 
 class Delete(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, targets, lineno, col_offset):
         self.targets = targets
         self.w_targets = None
@@ -404,7 +396,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['targets', 'lineno', 'col_offset'], 'Delete')
+            self.missing_field(space, ['lineno', 'col_offset', 'targets'], 'Delete')
         else:
             pass
         w_list = self.w_targets
@@ -421,9 +413,6 @@
 
 class Assign(stmt):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, targets, value, lineno, col_offset):
         self.targets = targets
         self.w_targets = None
@@ -442,7 +431,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['targets', 'value', 'lineno', 'col_offset'], 'Assign')
+            self.missing_field(space, ['lineno', 'col_offset', 'targets', 'value'], 'Assign')
         else:
             pass
         w_list = self.w_targets
@@ -460,9 +449,6 @@
 
 class AugAssign(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, target, op, value, lineno, col_offset):
         self.target = target
         self.op = op
@@ -480,7 +466,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['target', 'op', 'value', 'lineno', 'col_offset'], 'AugAssign')
+            self.missing_field(space, ['lineno', 'col_offset', 'target', 'op', 'value'], 'AugAssign')
         else:
             pass
         self.target.sync_app_attrs(space)
@@ -489,9 +475,6 @@
 
 class Print(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, dest, values, nl, lineno, col_offset):
         self.dest = dest
         self.values = values
@@ -511,10 +494,10 @@
         return visitor.visit_Print(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~1) ^ 30:
-            missing_field(space, self.initialization_state, [None, 'values', 'nl', 'lineno', 'col_offset'], 'Print')
+        if (self.initialization_state & ~4) ^ 27:
+            self.missing_field(space, ['lineno', 'col_offset', None, 'values', 'nl'], 'Print')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.dest = None
         if self.dest:
             self.dest.sync_app_attrs(space)
@@ -532,9 +515,6 @@
 
 class For(stmt):
 
-    _lineno_mask = 16
-    _col_offset_mask = 32
-
     def __init__(self, target, iter, body, orelse, lineno, col_offset):
         self.target = target
         self.iter = iter
@@ -559,7 +539,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 63:
-            missing_field(space, self.initialization_state, ['target', 'iter', 'body', 'orelse', 'lineno', 'col_offset'], 'For')
+            self.missing_field(space, ['lineno', 'col_offset', 'target', 'iter', 'body', 'orelse'], 'For')
         else:
             pass
         self.target.sync_app_attrs(space)
@@ -588,9 +568,6 @@
 
 class While(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, test, body, orelse, lineno, col_offset):
         self.test = test
         self.body = body
@@ -613,7 +590,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'While')
+            self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'While')
         else:
             pass
         self.test.sync_app_attrs(space)
@@ -641,9 +618,6 @@
 
 class If(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, test, body, orelse, lineno, col_offset):
         self.test = test
         self.body = body
@@ -666,7 +640,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'If')
+            self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'If')
         else:
             pass
         self.test.sync_app_attrs(space)
@@ -694,9 +668,6 @@
 
 class With(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, context_expr, optional_vars, body, lineno, col_offset):
         self.context_expr = context_expr
         self.optional_vars = optional_vars
@@ -717,10 +688,10 @@
         return visitor.visit_With(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~2) ^ 29:
-            missing_field(space, self.initialization_state, ['context_expr', None, 'body', 'lineno', 'col_offset'], 'With')
+        if (self.initialization_state & ~8) ^ 23:
+            self.missing_field(space, ['lineno', 'col_offset', 'context_expr', None, 'body'], 'With')
         else:
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.optional_vars = None
         self.context_expr.sync_app_attrs(space)
         if self.optional_vars:
@@ -739,9 +710,6 @@
 
 class Raise(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, type, inst, tback, lineno, col_offset):
         self.type = type
         self.inst = inst
@@ -762,14 +730,14 @@
         return visitor.visit_Raise(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~7) ^ 24:
-            missing_field(space, self.initialization_state, [None, None, None, 'lineno', 'col_offset'], 'Raise')
+        if (self.initialization_state & ~28) ^ 3:
+            self.missing_field(space, ['lineno', 'col_offset', None, None, None], 'Raise')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.type = None
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.inst = None
-            if not self.initialization_state & 4:
+            if not self.initialization_state & 16:
                 self.tback = None
         if self.type:
             self.type.sync_app_attrs(space)
@@ -781,9 +749,6 @@
 
 class TryExcept(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, body, handlers, orelse, lineno, col_offset):
         self.body = body
         self.w_body = None
@@ -808,7 +773,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['body', 'handlers', 'orelse', 'lineno', 'col_offset'], 'TryExcept')
+            self.missing_field(space, ['lineno', 'col_offset', 'body', 'handlers', 'orelse'], 'TryExcept')
         else:
             pass
         w_list = self.w_body
@@ -845,9 +810,6 @@
 
 class TryFinally(stmt):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, body, finalbody, lineno, col_offset):
         self.body = body
         self.w_body = None
@@ -868,7 +830,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['body', 'finalbody', 'lineno', 'col_offset'], 'TryFinally')
+            self.missing_field(space, ['lineno', 'col_offset', 'body', 'finalbody'], 'TryFinally')
         else:
             pass
         w_list = self.w_body
@@ -895,9 +857,6 @@
 
 class Assert(stmt):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, test, msg, lineno, col_offset):
         self.test = test
         self.msg = msg
@@ -914,10 +873,10 @@
         return visitor.visit_Assert(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~2) ^ 13:
-            missing_field(space, self.initialization_state, ['test', None, 'lineno', 'col_offset'], 'Assert')
+        if (self.initialization_state & ~8) ^ 7:
+            self.missing_field(space, ['lineno', 'col_offset', 'test', None], 'Assert')
         else:
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.msg = None
         self.test.sync_app_attrs(space)
         if self.msg:
@@ -926,9 +885,6 @@
 
 class Import(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, names, lineno, col_offset):
         self.names = names
         self.w_names = None
@@ -945,7 +901,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Import')
+            self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Import')
         else:
             pass
         w_list = self.w_names
@@ -962,9 +918,6 @@
 
 class ImportFrom(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, module, names, level, lineno, col_offset):
         self.module = module
         self.names = names
@@ -982,12 +935,12 @@
         return visitor.visit_ImportFrom(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~5) ^ 26:
-            missing_field(space, self.initialization_state, [None, 'names', None, 'lineno', 'col_offset'], 'ImportFrom')
+        if (self.initialization_state & ~20) ^ 11:
+            self.missing_field(space, ['lineno', 'col_offset', None, 'names', None], 'ImportFrom')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.module = None
-            if not self.initialization_state & 4:
+            if not self.initialization_state & 16:
                 self.level = 0
         w_list = self.w_names
         if w_list is not None:
@@ -1003,9 +956,6 @@
 
 class Exec(stmt):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, body, globals, locals, lineno, col_offset):
         self.body = body
         self.globals = globals
@@ -1025,12 +975,12 @@
         return visitor.visit_Exec(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~6) ^ 25:
-            missing_field(space, self.initialization_state, ['body', None, None, 'lineno', 'col_offset'], 'Exec')
+        if (self.initialization_state & ~24) ^ 7:
+            self.missing_field(space, ['lineno', 'col_offset', 'body', None, None], 'Exec')
         else:
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.globals = None
-            if not self.initialization_state & 4:
+            if not self.initialization_state & 16:
                 self.locals = None
         self.body.sync_app_attrs(space)
         if self.globals:
@@ -1041,9 +991,6 @@
 
 class Global(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, names, lineno, col_offset):
         self.names = names
         self.w_names = None
@@ -1058,7 +1005,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['names', 'lineno', 'col_offset'], 'Global')
+            self.missing_field(space, ['lineno', 'col_offset', 'names'], 'Global')
         else:
             pass
         w_list = self.w_names
@@ -1072,9 +1019,6 @@
 
 class Expr(stmt):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         stmt.__init__(self, lineno, col_offset)
@@ -1089,7 +1033,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Expr')
+            self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Expr')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1097,9 +1041,6 @@
 
 class Pass(stmt):
 
-    _lineno_mask = 1
-    _col_offset_mask = 2
-
     def __init__(self, lineno, col_offset):
         stmt.__init__(self, lineno, col_offset)
         self.initialization_state = 3
@@ -1112,16 +1053,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Pass')
+            self.missing_field(space, ['lineno', 'col_offset'], 'Pass')
         else:
             pass
 
 
 class Break(stmt):
 
-    _lineno_mask = 1
-    _col_offset_mask = 2
-
     def __init__(self, lineno, col_offset):
         stmt.__init__(self, lineno, col_offset)
         self.initialization_state = 3
@@ -1134,16 +1072,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Break')
+            self.missing_field(space, ['lineno', 'col_offset'], 'Break')
         else:
             pass
 
 
 class Continue(stmt):
 
-    _lineno_mask = 1
-    _col_offset_mask = 2
-
     def __init__(self, lineno, col_offset):
         stmt.__init__(self, lineno, col_offset)
         self.initialization_state = 3
@@ -1156,21 +1091,19 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['lineno', 'col_offset'], 'Continue')
+            self.missing_field(space, ['lineno', 'col_offset'], 'Continue')
         else:
             pass
 
 
 class expr(AST):
+
     def __init__(self, lineno, col_offset):
         self.lineno = lineno
         self.col_offset = col_offset
 
 class BoolOp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, op, values, lineno, col_offset):
         self.op = op
         self.values = values
@@ -1188,7 +1121,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['op', 'values', 'lineno', 'col_offset'], 'BoolOp')
+            self.missing_field(space, ['lineno', 'col_offset', 'op', 'values'], 'BoolOp')
         else:
             pass
         w_list = self.w_values
@@ -1205,9 +1138,6 @@
 
 class BinOp(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, left, op, right, lineno, col_offset):
         self.left = left
         self.op = op
@@ -1225,7 +1155,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['left', 'op', 'right', 'lineno', 'col_offset'], 'BinOp')
+            self.missing_field(space, ['lineno', 'col_offset', 'left', 'op', 'right'], 'BinOp')
         else:
             pass
         self.left.sync_app_attrs(space)
@@ -1234,9 +1164,6 @@
 
 class UnaryOp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, op, operand, lineno, col_offset):
         self.op = op
         self.operand = operand
@@ -1252,7 +1179,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['op', 'operand', 'lineno', 'col_offset'], 'UnaryOp')
+            self.missing_field(space, ['lineno', 'col_offset', 'op', 'operand'], 'UnaryOp')
         else:
             pass
         self.operand.sync_app_attrs(space)
@@ -1260,9 +1187,6 @@
 
 class Lambda(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, args, body, lineno, col_offset):
         self.args = args
         self.body = body
@@ -1279,7 +1203,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['args', 'body', 'lineno', 'col_offset'], 'Lambda')
+            self.missing_field(space, ['lineno', 'col_offset', 'args', 'body'], 'Lambda')
         else:
             pass
         self.args.sync_app_attrs(space)
@@ -1288,9 +1212,6 @@
 
 class IfExp(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, test, body, orelse, lineno, col_offset):
         self.test = test
         self.body = body
@@ -1309,7 +1230,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['test', 'body', 'orelse', 'lineno', 'col_offset'], 'IfExp')
+            self.missing_field(space, ['lineno', 'col_offset', 'test', 'body', 'orelse'], 'IfExp')
         else:
             pass
         self.test.sync_app_attrs(space)
@@ -1319,9 +1240,6 @@
 
 class Dict(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, keys, values, lineno, col_offset):
         self.keys = keys
         self.w_keys = None
@@ -1342,7 +1260,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['keys', 'values', 'lineno', 'col_offset'], 'Dict')
+            self.missing_field(space, ['lineno', 'col_offset', 'keys', 'values'], 'Dict')
         else:
             pass
         w_list = self.w_keys
@@ -1369,9 +1287,6 @@
 
 class Set(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, elts, lineno, col_offset):
         self.elts = elts
         self.w_elts = None
@@ -1388,7 +1303,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['elts', 'lineno', 'col_offset'], 'Set')
+            self.missing_field(space, ['lineno', 'col_offset', 'elts'], 'Set')
         else:
             pass
         w_list = self.w_elts
@@ -1405,9 +1320,6 @@
 
 class ListComp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elt, generators, lineno, col_offset):
         self.elt = elt
         self.generators = generators
@@ -1426,7 +1338,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'ListComp')
+            self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'ListComp')
         else:
             pass
         self.elt.sync_app_attrs(space)
@@ -1444,9 +1356,6 @@
 
 class SetComp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elt, generators, lineno, col_offset):
         self.elt = elt
         self.generators = generators
@@ -1465,7 +1374,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'SetComp')
+            self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'SetComp')
         else:
             pass
         self.elt.sync_app_attrs(space)
@@ -1483,9 +1392,6 @@
 
 class DictComp(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, key, value, generators, lineno, col_offset):
         self.key = key
         self.value = value
@@ -1506,7 +1412,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['key', 'value', 'generators', 'lineno', 'col_offset'], 'DictComp')
+            self.missing_field(space, ['lineno', 'col_offset', 'key', 'value', 'generators'], 'DictComp')
         else:
             pass
         self.key.sync_app_attrs(space)
@@ -1525,9 +1431,6 @@
 
 class GeneratorExp(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elt, generators, lineno, col_offset):
         self.elt = elt
         self.generators = generators
@@ -1546,7 +1449,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elt', 'generators', 'lineno', 'col_offset'], 'GeneratorExp')
+            self.missing_field(space, ['lineno', 'col_offset', 'elt', 'generators'], 'GeneratorExp')
         else:
             pass
         self.elt.sync_app_attrs(space)
@@ -1564,9 +1467,6 @@
 
 class Yield(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         expr.__init__(self, lineno, col_offset)
@@ -1581,10 +1481,10 @@
         return visitor.visit_Yield(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~1) ^ 6:
-            missing_field(space, self.initialization_state, [None, 'lineno', 'col_offset'], 'Yield')
+        if (self.initialization_state & ~4) ^ 3:
+            self.missing_field(space, ['lineno', 'col_offset', None], 'Yield')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.value = None
         if self.value:
             self.value.sync_app_attrs(space)
@@ -1592,9 +1492,6 @@
 
 class Compare(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, left, ops, comparators, lineno, col_offset):
         self.left = left
         self.ops = ops
@@ -1615,7 +1512,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['left', 'ops', 'comparators', 'lineno', 'col_offset'], 'Compare')
+            self.missing_field(space, ['lineno', 'col_offset', 'left', 'ops', 'comparators'], 'Compare')
         else:
             pass
         self.left.sync_app_attrs(space)
@@ -1640,9 +1537,6 @@
 
 class Call(expr):
 
-    _lineno_mask = 32
-    _col_offset_mask = 64
-
     def __init__(self, func, args, keywords, starargs, kwargs, lineno, col_offset):
         self.func = func
         self.args = args
@@ -1670,12 +1564,12 @@
         return visitor.visit_Call(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~24) ^ 103:
-            missing_field(space, self.initialization_state, ['func', 'args', 'keywords', None, None, 'lineno', 'col_offset'], 'Call')
+        if (self.initialization_state & ~96) ^ 31:
+            self.missing_field(space, ['lineno', 'col_offset', 'func', 'args', 'keywords', None, None], 'Call')
         else:
-            if not self.initialization_state & 8:
+            if not self.initialization_state & 32:
                 self.starargs = None
-            if not self.initialization_state & 16:
+            if not self.initialization_state & 64:
                 self.kwargs = None
         self.func.sync_app_attrs(space)
         w_list = self.w_args
@@ -1706,9 +1600,6 @@
 
 class Repr(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         expr.__init__(self, lineno, col_offset)
@@ -1723,7 +1614,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Repr')
+            self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Repr')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1731,9 +1622,6 @@
 
 class Num(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, n, lineno, col_offset):
         self.n = n
         expr.__init__(self, lineno, col_offset)
@@ -1747,16 +1635,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['n', 'lineno', 'col_offset'], 'Num')
+            self.missing_field(space, ['lineno', 'col_offset', 'n'], 'Num')
         else:
             pass
 
 
 class Str(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, s, lineno, col_offset):
         self.s = s
         expr.__init__(self, lineno, col_offset)
@@ -1770,16 +1655,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['s', 'lineno', 'col_offset'], 'Str')
+            self.missing_field(space, ['lineno', 'col_offset', 's'], 'Str')
         else:
             pass
 
 
 class Attribute(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, value, attr, ctx, lineno, col_offset):
         self.value = value
         self.attr = attr
@@ -1796,7 +1678,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['value', 'attr', 'ctx', 'lineno', 'col_offset'], 'Attribute')
+            self.missing_field(space, ['lineno', 'col_offset', 'value', 'attr', 'ctx'], 'Attribute')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1804,9 +1686,6 @@
 
 class Subscript(expr):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, value, slice, ctx, lineno, col_offset):
         self.value = value
         self.slice = slice
@@ -1824,7 +1703,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 31:
-            missing_field(space, self.initialization_state, ['value', 'slice', 'ctx', 'lineno', 'col_offset'], 'Subscript')
+            self.missing_field(space, ['lineno', 'col_offset', 'value', 'slice', 'ctx'], 'Subscript')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -1833,9 +1712,6 @@
 
 class Name(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, id, ctx, lineno, col_offset):
         self.id = id
         self.ctx = ctx
@@ -1850,16 +1726,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['id', 'ctx', 'lineno', 'col_offset'], 'Name')
+            self.missing_field(space, ['lineno', 'col_offset', 'id', 'ctx'], 'Name')
         else:
             pass
 
 
 class List(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elts, ctx, lineno, col_offset):
         self.elts = elts
         self.w_elts = None
@@ -1877,7 +1750,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'List')
+            self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'List')
         else:
             pass
         w_list = self.w_elts
@@ -1894,9 +1767,6 @@
 
 class Tuple(expr):
 
-    _lineno_mask = 4
-    _col_offset_mask = 8
-
     def __init__(self, elts, ctx, lineno, col_offset):
         self.elts = elts
         self.w_elts = None
@@ -1914,7 +1784,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 15:
-            missing_field(space, self.initialization_state, ['elts', 'ctx', 'lineno', 'col_offset'], 'Tuple')
+            self.missing_field(space, ['lineno', 'col_offset', 'elts', 'ctx'], 'Tuple')
         else:
             pass
         w_list = self.w_elts
@@ -1931,9 +1801,6 @@
 
 class Const(expr):
 
-    _lineno_mask = 2
-    _col_offset_mask = 4
-
     def __init__(self, value, lineno, col_offset):
         self.value = value
         expr.__init__(self, lineno, col_offset)
@@ -1947,7 +1814,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['value', 'lineno', 'col_offset'], 'Const')
+            self.missing_field(space, ['lineno', 'col_offset', 'value'], 'Const')
         else:
             pass
 
@@ -2009,7 +1876,6 @@
 
 class Ellipsis(slice):
 
-
     def __init__(self):
         self.initialization_state = 0
 
@@ -2021,14 +1887,13 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 0:
-            missing_field(space, self.initialization_state, [], 'Ellipsis')
+            self.missing_field(space, [], 'Ellipsis')
         else:
             pass
 
 
 class Slice(slice):
 
-
     def __init__(self, lower, upper, step):
         self.lower = lower
         self.upper = upper
@@ -2049,7 +1914,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~7) ^ 0:
-            missing_field(space, self.initialization_state, [None, None, None], 'Slice')
+            self.missing_field(space, [None, None, None], 'Slice')
         else:
             if not self.initialization_state & 1:
                 self.lower = None
@@ -2067,7 +1932,6 @@
 
 class ExtSlice(slice):
 
-
     def __init__(self, dims):
         self.dims = dims
         self.w_dims = None
@@ -2083,7 +1947,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['dims'], 'ExtSlice')
+            self.missing_field(space, ['dims'], 'ExtSlice')
         else:
             pass
         w_list = self.w_dims
@@ -2100,7 +1964,6 @@
 
 class Index(slice):
 
-
     def __init__(self, value):
         self.value = value
         self.initialization_state = 1
@@ -2114,7 +1977,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 1:
-            missing_field(space, self.initialization_state, ['value'], 'Index')
+            self.missing_field(space, ['value'], 'Index')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -2377,7 +2240,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 7:
-            missing_field(space, self.initialization_state, ['target', 'iter', 'ifs'], 'comprehension')
+            self.missing_field(space, ['target', 'iter', 'ifs'], 'comprehension')
         else:
             pass
         self.target.sync_app_attrs(space)
@@ -2394,15 +2257,13 @@
                 node.sync_app_attrs(space)
 
 class excepthandler(AST):
+
     def __init__(self, lineno, col_offset):
         self.lineno = lineno
         self.col_offset = col_offset
 
 class ExceptHandler(excepthandler):
 
-    _lineno_mask = 8
-    _col_offset_mask = 16
-
     def __init__(self, type, name, body, lineno, col_offset):
         self.type = type
         self.name = name
@@ -2424,12 +2285,12 @@
         return visitor.visit_ExceptHandler(self)
 
     def sync_app_attrs(self, space):
-        if (self.initialization_state & ~3) ^ 28:
-            missing_field(space, self.initialization_state, [None, None, 'body', 'lineno', 'col_offset'], 'ExceptHandler')
+        if (self.initialization_state & ~12) ^ 19:
+            self.missing_field(space, ['lineno', 'col_offset', None, None, 'body'], 'ExceptHandler')
         else:
-            if not self.initialization_state & 1:
+            if not self.initialization_state & 4:
                 self.type = None
-            if not self.initialization_state & 2:
+            if not self.initialization_state & 8:
                 self.name = None
         if self.type:
             self.type.sync_app_attrs(space)
@@ -2470,7 +2331,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~6) ^ 9:
-            missing_field(space, self.initialization_state, ['args', None, None, 'defaults'], 'arguments')
+            self.missing_field(space, ['args', None, None, 'defaults'], 'arguments')
         else:
             if not self.initialization_state & 2:
                 self.vararg = None
@@ -2513,7 +2374,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~0) ^ 3:
-            missing_field(space, self.initialization_state, ['arg', 'value'], 'keyword')
+            self.missing_field(space, ['arg', 'value'], 'keyword')
         else:
             pass
         self.value.sync_app_attrs(space)
@@ -2533,7 +2394,7 @@
 
     def sync_app_attrs(self, space):
         if (self.initialization_state & ~2) ^ 1:
-            missing_field(space, self.initialization_state, ['name', None], 'alias')
+            self.missing_field(space, ['name', None], 'alias')
         else:
             if not self.initialization_state & 2:
                 self.asname = None
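
The hunks above and below all apply the same mechanical change: lineno and col_offset move into the two low bits of initialization_state, node-specific fields shift up by two bit positions, the field lists passed to missing_field are reordered to match, and the per-class _lineno_mask/_col_offset_mask attributes become unnecessary. A minimal standalone sketch of that bit-layout convention follows; it is not PyPy's generated ast.py, and the Node, Return and missing_fields names are purely illustrative:

    # Toy illustration of the layout these hunks adopt:
    # bit 0 -> lineno, bit 1 -> col_offset, bits 2.. -> node fields in order.
    class Node(object):
        _fields = ()

        def __init__(self, lineno=None, col_offset=None, **fields):
            self.initialization_state = 0
            if lineno is not None:
                self.lineno = lineno
                self.initialization_state |= 1
            if col_offset is not None:
                self.col_offset = col_offset
                self.initialization_state |= 2
            for i, name in enumerate(self._fields):
                if name in fields:
                    setattr(self, name, fields[name])
                    self.initialization_state |= 4 << i

        def missing_fields(self):
            # Names of required attributes that were never assigned.
            names = ('lineno', 'col_offset') + self._fields
            return [name for i, name in enumerate(names)
                    if not self.initialization_state & (1 << i)]

    class Return(Node):
        _fields = ('value',)      # 'value' lives at bit 2, i.e. mask 4

    node = Return(lineno=3, col_offset=0)
    print(node.missing_fields())  # ['value']: mask 4 was never set

With this layout every AST class shares masks 1 and 2 for the location attributes, which is why the accessors later in the diff (stmt_get_lineno, stmt_get_col_offset and friends) can test the constants 1 and 2 directly instead of consulting per-class mask attributes.
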
@@ -3019,6 +2880,8 @@
 def Expression_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -3098,7 +2961,7 @@
         w_obj = w_self.getdictvalue(space, 'lineno')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._lineno_mask:
+    if not w_self.initialization_state & 1:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno')
     return space.wrap(w_self.lineno)
@@ -3112,14 +2975,14 @@
         w_self.setdictvalue(space, 'lineno', w_new_value)
         return
     w_self.deldictvalue(space, 'lineno')
-    w_self.initialization_state |= w_self._lineno_mask
+    w_self.initialization_state |= 1
 
 def stmt_get_col_offset(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'col_offset')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._col_offset_mask:
+    if not w_self.initialization_state & 2:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset')
     return space.wrap(w_self.col_offset)
@@ -3133,7 +2996,7 @@
         w_self.setdictvalue(space, 'col_offset', w_new_value)
         return
     w_self.deldictvalue(space, 'col_offset')
-    w_self.initialization_state |= w_self._col_offset_mask
+    w_self.initialization_state |= 2
 
 stmt.typedef = typedef.TypeDef("stmt",
     AST.typedef,
@@ -3149,7 +3012,7 @@
         w_obj = w_self.getdictvalue(space, 'name')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name')
     return space.wrap(w_self.name)
@@ -3163,14 +3026,14 @@
         w_self.setdictvalue(space, 'name', w_new_value)
         return
     w_self.deldictvalue(space, 'name')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def FunctionDef_get_args(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'args')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args')
     return space.wrap(w_self.args)
@@ -3184,10 +3047,10 @@
         w_self.setdictvalue(space, 'args', w_new_value)
         return
     w_self.deldictvalue(space, 'args')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def FunctionDef_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3201,10 +3064,10 @@
 
 def FunctionDef_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def FunctionDef_get_decorator_list(space, w_self):
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list')
     if w_self.w_decorator_list is None:
@@ -3218,7 +3081,7 @@
 
 def FunctionDef_set_decorator_list(space, w_self, w_new_value):
     w_self.w_decorator_list = w_new_value
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 _FunctionDef_field_unroller = unrolling_iterable(['name', 'args', 'body', 'decorator_list'])
 def FunctionDef_init(space, w_self, __args__):
@@ -3254,7 +3117,7 @@
         w_obj = w_self.getdictvalue(space, 'name')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name')
     return space.wrap(w_self.name)
@@ -3268,10 +3131,10 @@
         w_self.setdictvalue(space, 'name', w_new_value)
         return
     w_self.deldictvalue(space, 'name')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ClassDef_get_bases(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'bases')
     if w_self.w_bases is None:
@@ -3285,10 +3148,10 @@
 
 def ClassDef_set_bases(space, w_self, w_new_value):
     w_self.w_bases = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def ClassDef_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3302,10 +3165,10 @@
 
 def ClassDef_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def ClassDef_get_decorator_list(space, w_self):
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'decorator_list')
     if w_self.w_decorator_list is None:
@@ -3319,7 +3182,7 @@
 
 def ClassDef_set_decorator_list(space, w_self, w_new_value):
     w_self.w_decorator_list = w_new_value
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 _ClassDef_field_unroller = unrolling_iterable(['name', 'bases', 'body', 'decorator_list'])
 def ClassDef_init(space, w_self, __args__):
@@ -3356,7 +3219,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -3364,13 +3227,15 @@
 def Return_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, True)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Return_field_unroller = unrolling_iterable(['value'])
 def Return_init(space, w_self, __args__):
@@ -3397,7 +3262,7 @@
 )
 
 def Delete_get_targets(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets')
     if w_self.w_targets is None:
@@ -3411,7 +3276,7 @@
 
 def Delete_set_targets(space, w_self, w_new_value):
     w_self.w_targets = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Delete_field_unroller = unrolling_iterable(['targets'])
 def Delete_init(space, w_self, __args__):
@@ -3439,7 +3304,7 @@
 )
 
 def Assign_get_targets(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'targets')
     if w_self.w_targets is None:
@@ -3453,14 +3318,14 @@
 
 def Assign_set_targets(space, w_self, w_new_value):
     w_self.w_targets = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Assign_get_value(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -3468,13 +3333,15 @@
 def Assign_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Assign_field_unroller = unrolling_iterable(['targets', 'value'])
 def Assign_init(space, w_self, __args__):
@@ -3507,7 +3374,7 @@
         w_obj = w_self.getdictvalue(space, 'target')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target')
     return space.wrap(w_self.target)
@@ -3515,20 +3382,22 @@
 def AugAssign_set_target(space, w_self, w_new_value):
     try:
         w_self.target = space.interp_w(expr, w_new_value, False)
+        if type(w_self.target) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'target', w_new_value)
         return
     w_self.deldictvalue(space, 'target')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def AugAssign_get_op(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return operator_to_class[w_self.op - 1]()
@@ -3544,14 +3413,14 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def AugAssign_get_value(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -3559,13 +3428,15 @@
 def AugAssign_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _AugAssign_field_unroller = unrolling_iterable(['target', 'op', 'value'])
 def AugAssign_init(space, w_self, __args__):
@@ -3598,7 +3469,7 @@
         w_obj = w_self.getdictvalue(space, 'dest')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'dest')
     return space.wrap(w_self.dest)
@@ -3606,16 +3477,18 @@
 def Print_set_dest(space, w_self, w_new_value):
     try:
         w_self.dest = space.interp_w(expr, w_new_value, True)
+        if type(w_self.dest) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'dest', w_new_value)
         return
     w_self.deldictvalue(space, 'dest')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Print_get_values(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values')
     if w_self.w_values is None:
@@ -3629,14 +3502,14 @@
 
 def Print_set_values(space, w_self, w_new_value):
     w_self.w_values = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Print_get_nl(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'nl')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'nl')
     return space.wrap(w_self.nl)
@@ -3650,7 +3523,7 @@
         w_self.setdictvalue(space, 'nl', w_new_value)
         return
     w_self.deldictvalue(space, 'nl')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Print_field_unroller = unrolling_iterable(['dest', 'values', 'nl'])
 def Print_init(space, w_self, __args__):
@@ -3684,7 +3557,7 @@
         w_obj = w_self.getdictvalue(space, 'target')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'target')
     return space.wrap(w_self.target)
@@ -3692,20 +3565,22 @@
 def For_set_target(space, w_self, w_new_value):
     try:
         w_self.target = space.interp_w(expr, w_new_value, False)
+        if type(w_self.target) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'target', w_new_value)
         return
     w_self.deldictvalue(space, 'target')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def For_get_iter(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'iter')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'iter')
     return space.wrap(w_self.iter)
@@ -3713,16 +3588,18 @@
 def For_set_iter(space, w_self, w_new_value):
     try:
         w_self.iter = space.interp_w(expr, w_new_value, False)
+        if type(w_self.iter) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'iter', w_new_value)
         return
     w_self.deldictvalue(space, 'iter')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def For_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3736,10 +3613,10 @@
 
 def For_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def For_get_orelse(space, w_self):
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -3753,7 +3630,7 @@
 
 def For_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 _For_field_unroller = unrolling_iterable(['target', 'iter', 'body', 'orelse'])
 def For_init(space, w_self, __args__):
@@ -3789,7 +3666,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -3797,16 +3674,18 @@
 def While_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def While_get_body(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3820,10 +3699,10 @@
 
 def While_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def While_get_orelse(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -3837,7 +3716,7 @@
 
 def While_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _While_field_unroller = unrolling_iterable(['test', 'body', 'orelse'])
 def While_init(space, w_self, __args__):
@@ -3872,7 +3751,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -3880,16 +3759,18 @@
 def If_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def If_get_body(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -3903,10 +3784,10 @@
 
 def If_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def If_get_orelse(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -3920,7 +3801,7 @@
 
 def If_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _If_field_unroller = unrolling_iterable(['test', 'body', 'orelse'])
 def If_init(space, w_self, __args__):
@@ -3955,7 +3836,7 @@
         w_obj = w_self.getdictvalue(space, 'context_expr')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'context_expr')
     return space.wrap(w_self.context_expr)
@@ -3963,20 +3844,22 @@
 def With_set_context_expr(space, w_self, w_new_value):
     try:
         w_self.context_expr = space.interp_w(expr, w_new_value, False)
+        if type(w_self.context_expr) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'context_expr', w_new_value)
         return
     w_self.deldictvalue(space, 'context_expr')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def With_get_optional_vars(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'optional_vars')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'optional_vars')
     return space.wrap(w_self.optional_vars)
@@ -3984,16 +3867,18 @@
 def With_set_optional_vars(space, w_self, w_new_value):
     try:
         w_self.optional_vars = space.interp_w(expr, w_new_value, True)
+        if type(w_self.optional_vars) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'optional_vars', w_new_value)
         return
     w_self.deldictvalue(space, 'optional_vars')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def With_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -4007,7 +3892,7 @@
 
 def With_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _With_field_unroller = unrolling_iterable(['context_expr', 'optional_vars', 'body'])
 def With_init(space, w_self, __args__):
@@ -4041,7 +3926,7 @@
         w_obj = w_self.getdictvalue(space, 'type')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type')
     return space.wrap(w_self.type)
@@ -4049,20 +3934,22 @@
 def Raise_set_type(space, w_self, w_new_value):
     try:
         w_self.type = space.interp_w(expr, w_new_value, True)
+        if type(w_self.type) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'type', w_new_value)
         return
     w_self.deldictvalue(space, 'type')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Raise_get_inst(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'inst')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'inst')
     return space.wrap(w_self.inst)
@@ -4070,20 +3957,22 @@
 def Raise_set_inst(space, w_self, w_new_value):
     try:
         w_self.inst = space.interp_w(expr, w_new_value, True)
+        if type(w_self.inst) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'inst', w_new_value)
         return
     w_self.deldictvalue(space, 'inst')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Raise_get_tback(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'tback')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'tback')
     return space.wrap(w_self.tback)
@@ -4091,13 +3980,15 @@
 def Raise_set_tback(space, w_self, w_new_value):
     try:
         w_self.tback = space.interp_w(expr, w_new_value, True)
+        if type(w_self.tback) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'tback', w_new_value)
         return
     w_self.deldictvalue(space, 'tback')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Raise_field_unroller = unrolling_iterable(['type', 'inst', 'tback'])
 def Raise_init(space, w_self, __args__):
@@ -4126,7 +4017,7 @@
 )
 
 def TryExcept_get_body(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -4140,10 +4031,10 @@
 
 def TryExcept_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def TryExcept_get_handlers(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'handlers')
     if w_self.w_handlers is None:
@@ -4157,10 +4048,10 @@
 
 def TryExcept_set_handlers(space, w_self, w_new_value):
     w_self.w_handlers = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def TryExcept_get_orelse(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     if w_self.w_orelse is None:
@@ -4174,7 +4065,7 @@
 
 def TryExcept_set_orelse(space, w_self, w_new_value):
     w_self.w_orelse = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _TryExcept_field_unroller = unrolling_iterable(['body', 'handlers', 'orelse'])
 def TryExcept_init(space, w_self, __args__):
@@ -4206,7 +4097,7 @@
 )
 
 def TryFinally_get_body(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -4220,10 +4111,10 @@
 
 def TryFinally_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def TryFinally_get_finalbody(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'finalbody')
     if w_self.w_finalbody is None:
@@ -4237,7 +4128,7 @@
 
 def TryFinally_set_finalbody(space, w_self, w_new_value):
     w_self.w_finalbody = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _TryFinally_field_unroller = unrolling_iterable(['body', 'finalbody'])
 def TryFinally_init(space, w_self, __args__):
@@ -4271,7 +4162,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -4279,20 +4170,22 @@
 def Assert_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Assert_get_msg(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'msg')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'msg')
     return space.wrap(w_self.msg)
@@ -4300,13 +4193,15 @@
 def Assert_set_msg(space, w_self, w_new_value):
     try:
         w_self.msg = space.interp_w(expr, w_new_value, True)
+        if type(w_self.msg) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'msg', w_new_value)
         return
     w_self.deldictvalue(space, 'msg')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Assert_field_unroller = unrolling_iterable(['test', 'msg'])
 def Assert_init(space, w_self, __args__):
@@ -4334,7 +4229,7 @@
 )
 
 def Import_get_names(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names')
     if w_self.w_names is None:
@@ -4348,7 +4243,7 @@
 
 def Import_set_names(space, w_self, w_new_value):
     w_self.w_names = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Import_field_unroller = unrolling_iterable(['names'])
 def Import_init(space, w_self, __args__):
@@ -4380,7 +4275,7 @@
         w_obj = w_self.getdictvalue(space, 'module')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'module')
     return space.wrap(w_self.module)
@@ -4397,10 +4292,10 @@
         w_self.setdictvalue(space, 'module', w_new_value)
         return
     w_self.deldictvalue(space, 'module')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ImportFrom_get_names(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names')
     if w_self.w_names is None:
@@ -4414,14 +4309,14 @@
 
 def ImportFrom_set_names(space, w_self, w_new_value):
     w_self.w_names = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def ImportFrom_get_level(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'level')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'level')
     return space.wrap(w_self.level)
@@ -4435,7 +4330,7 @@
         w_self.setdictvalue(space, 'level', w_new_value)
         return
     w_self.deldictvalue(space, 'level')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _ImportFrom_field_unroller = unrolling_iterable(['module', 'names', 'level'])
 def ImportFrom_init(space, w_self, __args__):
@@ -4469,7 +4364,7 @@
         w_obj = w_self.getdictvalue(space, 'body')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     return space.wrap(w_self.body)
@@ -4477,20 +4372,22 @@
 def Exec_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'body', w_new_value)
         return
     w_self.deldictvalue(space, 'body')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Exec_get_globals(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'globals')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'globals')
     return space.wrap(w_self.globals)
@@ -4498,20 +4395,22 @@
 def Exec_set_globals(space, w_self, w_new_value):
     try:
         w_self.globals = space.interp_w(expr, w_new_value, True)
+        if type(w_self.globals) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'globals', w_new_value)
         return
     w_self.deldictvalue(space, 'globals')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Exec_get_locals(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'locals')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'locals')
     return space.wrap(w_self.locals)
@@ -4519,13 +4418,15 @@
 def Exec_set_locals(space, w_self, w_new_value):
     try:
         w_self.locals = space.interp_w(expr, w_new_value, True)
+        if type(w_self.locals) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'locals', w_new_value)
         return
     w_self.deldictvalue(space, 'locals')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Exec_field_unroller = unrolling_iterable(['body', 'globals', 'locals'])
 def Exec_init(space, w_self, __args__):
@@ -4554,7 +4455,7 @@
 )
 
 def Global_get_names(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'names')
     if w_self.w_names is None:
@@ -4568,7 +4469,7 @@
 
 def Global_set_names(space, w_self, w_new_value):
     w_self.w_names = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Global_field_unroller = unrolling_iterable(['names'])
 def Global_init(space, w_self, __args__):
@@ -4600,7 +4501,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -4608,13 +4509,15 @@
 def Expr_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Expr_field_unroller = unrolling_iterable(['value'])
 def Expr_init(space, w_self, __args__):
@@ -4696,7 +4599,7 @@
         w_obj = w_self.getdictvalue(space, 'lineno')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._lineno_mask:
+    if not w_self.initialization_state & 1:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno')
     return space.wrap(w_self.lineno)
@@ -4710,14 +4613,14 @@
         w_self.setdictvalue(space, 'lineno', w_new_value)
         return
     w_self.deldictvalue(space, 'lineno')
-    w_self.initialization_state |= w_self._lineno_mask
+    w_self.initialization_state |= 1
 
 def expr_get_col_offset(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'col_offset')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._col_offset_mask:
+    if not w_self.initialization_state & 2:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset')
     return space.wrap(w_self.col_offset)
@@ -4731,7 +4634,7 @@
         w_self.setdictvalue(space, 'col_offset', w_new_value)
         return
     w_self.deldictvalue(space, 'col_offset')
-    w_self.initialization_state |= w_self._col_offset_mask
+    w_self.initialization_state |= 2
 
 expr.typedef = typedef.TypeDef("expr",
     AST.typedef,
@@ -4747,7 +4650,7 @@
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return boolop_to_class[w_self.op - 1]()
@@ -4763,10 +4666,10 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def BoolOp_get_values(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values')
     if w_self.w_values is None:
@@ -4780,7 +4683,7 @@
 
 def BoolOp_set_values(space, w_self, w_new_value):
     w_self.w_values = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _BoolOp_field_unroller = unrolling_iterable(['op', 'values'])
 def BoolOp_init(space, w_self, __args__):
@@ -4813,7 +4716,7 @@
         w_obj = w_self.getdictvalue(space, 'left')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left')
     return space.wrap(w_self.left)
@@ -4821,20 +4724,22 @@
 def BinOp_set_left(space, w_self, w_new_value):
     try:
         w_self.left = space.interp_w(expr, w_new_value, False)
+        if type(w_self.left) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'left', w_new_value)
         return
     w_self.deldictvalue(space, 'left')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def BinOp_get_op(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return operator_to_class[w_self.op - 1]()
@@ -4850,14 +4755,14 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def BinOp_get_right(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'right')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'right')
     return space.wrap(w_self.right)
@@ -4865,13 +4770,15 @@
 def BinOp_set_right(space, w_self, w_new_value):
     try:
         w_self.right = space.interp_w(expr, w_new_value, False)
+        if type(w_self.right) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'right', w_new_value)
         return
     w_self.deldictvalue(space, 'right')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _BinOp_field_unroller = unrolling_iterable(['left', 'op', 'right'])
 def BinOp_init(space, w_self, __args__):
@@ -4904,7 +4811,7 @@
         w_obj = w_self.getdictvalue(space, 'op')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'op')
     return unaryop_to_class[w_self.op - 1]()
@@ -4920,14 +4827,14 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'op', w_new_value)
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def UnaryOp_get_operand(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'operand')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'operand')
     return space.wrap(w_self.operand)
@@ -4935,13 +4842,15 @@
 def UnaryOp_set_operand(space, w_self, w_new_value):
     try:
         w_self.operand = space.interp_w(expr, w_new_value, False)
+        if type(w_self.operand) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'operand', w_new_value)
         return
     w_self.deldictvalue(space, 'operand')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _UnaryOp_field_unroller = unrolling_iterable(['op', 'operand'])
 def UnaryOp_init(space, w_self, __args__):
@@ -4973,7 +4882,7 @@
         w_obj = w_self.getdictvalue(space, 'args')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args')
     return space.wrap(w_self.args)
@@ -4987,14 +4896,14 @@
         w_self.setdictvalue(space, 'args', w_new_value)
         return
     w_self.deldictvalue(space, 'args')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Lambda_get_body(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'body')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     return space.wrap(w_self.body)
@@ -5002,13 +4911,15 @@
 def Lambda_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'body', w_new_value)
         return
     w_self.deldictvalue(space, 'body')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Lambda_field_unroller = unrolling_iterable(['args', 'body'])
 def Lambda_init(space, w_self, __args__):
@@ -5040,7 +4951,7 @@
         w_obj = w_self.getdictvalue(space, 'test')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'test')
     return space.wrap(w_self.test)
@@ -5048,20 +4959,22 @@
 def IfExp_set_test(space, w_self, w_new_value):
     try:
         w_self.test = space.interp_w(expr, w_new_value, False)
+        if type(w_self.test) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'test', w_new_value)
         return
     w_self.deldictvalue(space, 'test')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def IfExp_get_body(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'body')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     return space.wrap(w_self.body)
@@ -5069,20 +4982,22 @@
 def IfExp_set_body(space, w_self, w_new_value):
     try:
         w_self.body = space.interp_w(expr, w_new_value, False)
+        if type(w_self.body) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'body', w_new_value)
         return
     w_self.deldictvalue(space, 'body')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def IfExp_get_orelse(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'orelse')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'orelse')
     return space.wrap(w_self.orelse)
@@ -5090,13 +5005,15 @@
 def IfExp_set_orelse(space, w_self, w_new_value):
     try:
         w_self.orelse = space.interp_w(expr, w_new_value, False)
+        if type(w_self.orelse) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'orelse', w_new_value)
         return
     w_self.deldictvalue(space, 'orelse')
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _IfExp_field_unroller = unrolling_iterable(['test', 'body', 'orelse'])
 def IfExp_init(space, w_self, __args__):
@@ -5125,7 +5042,7 @@
 )
 
 def Dict_get_keys(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keys')
     if w_self.w_keys is None:
@@ -5139,10 +5056,10 @@
 
 def Dict_set_keys(space, w_self, w_new_value):
     w_self.w_keys = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Dict_get_values(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'values')
     if w_self.w_values is None:
@@ -5156,7 +5073,7 @@
 
 def Dict_set_values(space, w_self, w_new_value):
     w_self.w_values = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Dict_field_unroller = unrolling_iterable(['keys', 'values'])
 def Dict_init(space, w_self, __args__):
@@ -5186,7 +5103,7 @@
 )
 
 def Set_get_elts(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts')
     if w_self.w_elts is None:
@@ -5200,7 +5117,7 @@
 
 def Set_set_elts(space, w_self, w_new_value):
     w_self.w_elts = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Set_field_unroller = unrolling_iterable(['elts'])
 def Set_init(space, w_self, __args__):
@@ -5232,7 +5149,7 @@
         w_obj = w_self.getdictvalue(space, 'elt')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt')
     return space.wrap(w_self.elt)
@@ -5240,16 +5157,18 @@
 def ListComp_set_elt(space, w_self, w_new_value):
     try:
         w_self.elt = space.interp_w(expr, w_new_value, False)
+        if type(w_self.elt) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'elt', w_new_value)
         return
     w_self.deldictvalue(space, 'elt')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ListComp_get_generators(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5263,7 +5182,7 @@
 
 def ListComp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _ListComp_field_unroller = unrolling_iterable(['elt', 'generators'])
 def ListComp_init(space, w_self, __args__):
@@ -5296,7 +5215,7 @@
         w_obj = w_self.getdictvalue(space, 'elt')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt')
     return space.wrap(w_self.elt)
@@ -5304,16 +5223,18 @@
 def SetComp_set_elt(space, w_self, w_new_value):
     try:
         w_self.elt = space.interp_w(expr, w_new_value, False)
+        if type(w_self.elt) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'elt', w_new_value)
         return
     w_self.deldictvalue(space, 'elt')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def SetComp_get_generators(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5327,7 +5248,7 @@
 
 def SetComp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _SetComp_field_unroller = unrolling_iterable(['elt', 'generators'])
 def SetComp_init(space, w_self, __args__):
@@ -5360,7 +5281,7 @@
         w_obj = w_self.getdictvalue(space, 'key')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'key')
     return space.wrap(w_self.key)
@@ -5368,20 +5289,22 @@
 def DictComp_set_key(space, w_self, w_new_value):
     try:
         w_self.key = space.interp_w(expr, w_new_value, False)
+        if type(w_self.key) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'key', w_new_value)
         return
     w_self.deldictvalue(space, 'key')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def DictComp_get_value(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5389,16 +5312,18 @@
 def DictComp_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def DictComp_get_generators(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5412,7 +5337,7 @@
 
 def DictComp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _DictComp_field_unroller = unrolling_iterable(['key', 'value', 'generators'])
 def DictComp_init(space, w_self, __args__):
@@ -5446,7 +5371,7 @@
         w_obj = w_self.getdictvalue(space, 'elt')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elt')
     return space.wrap(w_self.elt)
@@ -5454,16 +5379,18 @@
 def GeneratorExp_set_elt(space, w_self, w_new_value):
     try:
         w_self.elt = space.interp_w(expr, w_new_value, False)
+        if type(w_self.elt) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'elt', w_new_value)
         return
     w_self.deldictvalue(space, 'elt')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def GeneratorExp_get_generators(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'generators')
     if w_self.w_generators is None:
@@ -5477,7 +5404,7 @@
 
 def GeneratorExp_set_generators(space, w_self, w_new_value):
     w_self.w_generators = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _GeneratorExp_field_unroller = unrolling_iterable(['elt', 'generators'])
 def GeneratorExp_init(space, w_self, __args__):
@@ -5510,7 +5437,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5518,13 +5445,15 @@
 def Yield_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, True)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Yield_field_unroller = unrolling_iterable(['value'])
 def Yield_init(space, w_self, __args__):
@@ -5555,7 +5484,7 @@
         w_obj = w_self.getdictvalue(space, 'left')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'left')
     return space.wrap(w_self.left)
@@ -5563,16 +5492,18 @@
 def Compare_set_left(space, w_self, w_new_value):
     try:
         w_self.left = space.interp_w(expr, w_new_value, False)
+        if type(w_self.left) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'left', w_new_value)
         return
     w_self.deldictvalue(space, 'left')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Compare_get_ops(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ops')
     if w_self.w_ops is None:
@@ -5586,10 +5517,10 @@
 
 def Compare_set_ops(space, w_self, w_new_value):
     w_self.w_ops = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Compare_get_comparators(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'comparators')
     if w_self.w_comparators is None:
@@ -5603,7 +5534,7 @@
 
 def Compare_set_comparators(space, w_self, w_new_value):
     w_self.w_comparators = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Compare_field_unroller = unrolling_iterable(['left', 'ops', 'comparators'])
 def Compare_init(space, w_self, __args__):
@@ -5638,7 +5569,7 @@
         w_obj = w_self.getdictvalue(space, 'func')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'func')
     return space.wrap(w_self.func)
@@ -5646,16 +5577,18 @@
 def Call_set_func(space, w_self, w_new_value):
     try:
         w_self.func = space.interp_w(expr, w_new_value, False)
+        if type(w_self.func) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'func', w_new_value)
         return
     w_self.deldictvalue(space, 'func')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Call_get_args(space, w_self):
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'args')
     if w_self.w_args is None:
@@ -5669,10 +5602,10 @@
 
 def Call_set_args(space, w_self, w_new_value):
     w_self.w_args = w_new_value
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Call_get_keywords(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'keywords')
     if w_self.w_keywords is None:
@@ -5686,14 +5619,14 @@
 
 def Call_set_keywords(space, w_self, w_new_value):
     w_self.w_keywords = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 def Call_get_starargs(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'starargs')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 8:
+    if not w_self.initialization_state & 32:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'starargs')
     return space.wrap(w_self.starargs)
@@ -5701,20 +5634,22 @@
 def Call_set_starargs(space, w_self, w_new_value):
     try:
         w_self.starargs = space.interp_w(expr, w_new_value, True)
+        if type(w_self.starargs) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'starargs', w_new_value)
         return
     w_self.deldictvalue(space, 'starargs')
-    w_self.initialization_state |= 8
+    w_self.initialization_state |= 32
 
 def Call_get_kwargs(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'kwargs')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 16:
+    if not w_self.initialization_state & 64:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'kwargs')
     return space.wrap(w_self.kwargs)
@@ -5722,13 +5657,15 @@
 def Call_set_kwargs(space, w_self, w_new_value):
     try:
         w_self.kwargs = space.interp_w(expr, w_new_value, True)
+        if type(w_self.kwargs) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'kwargs', w_new_value)
         return
     w_self.deldictvalue(space, 'kwargs')
-    w_self.initialization_state |= 16
+    w_self.initialization_state |= 64
 
 _Call_field_unroller = unrolling_iterable(['func', 'args', 'keywords', 'starargs', 'kwargs'])
 def Call_init(space, w_self, __args__):
@@ -5765,7 +5702,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5773,13 +5710,15 @@
 def Repr_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Repr_field_unroller = unrolling_iterable(['value'])
 def Repr_init(space, w_self, __args__):
@@ -5810,7 +5749,7 @@
         w_obj = w_self.getdictvalue(space, 'n')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'n')
     return w_self.n
@@ -5824,7 +5763,7 @@
         w_self.setdictvalue(space, 'n', w_new_value)
         return
     w_self.deldictvalue(space, 'n')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Num_field_unroller = unrolling_iterable(['n'])
 def Num_init(space, w_self, __args__):
@@ -5855,7 +5794,7 @@
         w_obj = w_self.getdictvalue(space, 's')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 's')
     return w_self.s
@@ -5869,7 +5808,7 @@
         w_self.setdictvalue(space, 's', w_new_value)
         return
     w_self.deldictvalue(space, 's')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Str_field_unroller = unrolling_iterable(['s'])
 def Str_init(space, w_self, __args__):
@@ -5900,7 +5839,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5908,20 +5847,22 @@
 def Attribute_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Attribute_get_attr(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'attr')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'attr')
     return space.wrap(w_self.attr)
@@ -5935,14 +5876,14 @@
         w_self.setdictvalue(space, 'attr', w_new_value)
         return
     w_self.deldictvalue(space, 'attr')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Attribute_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -5958,7 +5899,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Attribute_field_unroller = unrolling_iterable(['value', 'attr', 'ctx'])
 def Attribute_init(space, w_self, __args__):
@@ -5991,7 +5932,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return space.wrap(w_self.value)
@@ -5999,20 +5940,22 @@
 def Subscript_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Subscript_get_slice(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'slice')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'slice')
     return space.wrap(w_self.slice)
@@ -6020,20 +5963,22 @@
 def Subscript_set_slice(space, w_self, w_new_value):
     try:
         w_self.slice = space.interp_w(slice, w_new_value, False)
+        if type(w_self.slice) is slice:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'slice', w_new_value)
         return
     w_self.deldictvalue(space, 'slice')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def Subscript_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6049,7 +5994,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _Subscript_field_unroller = unrolling_iterable(['value', 'slice', 'ctx'])
 def Subscript_init(space, w_self, __args__):
@@ -6082,7 +6027,7 @@
         w_obj = w_self.getdictvalue(space, 'id')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'id')
     return space.wrap(w_self.id)
@@ -6096,14 +6041,14 @@
         w_self.setdictvalue(space, 'id', w_new_value)
         return
     w_self.deldictvalue(space, 'id')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Name_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6119,7 +6064,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Name_field_unroller = unrolling_iterable(['id', 'ctx'])
 def Name_init(space, w_self, __args__):
@@ -6147,7 +6092,7 @@
 )
 
 def List_get_elts(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts')
     if w_self.w_elts is None:
@@ -6161,14 +6106,14 @@
 
 def List_set_elts(space, w_self, w_new_value):
     w_self.w_elts = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def List_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6184,7 +6129,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _List_field_unroller = unrolling_iterable(['elts', 'ctx'])
 def List_init(space, w_self, __args__):
@@ -6213,7 +6158,7 @@
 )
 
 def Tuple_get_elts(space, w_self):
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'elts')
     if w_self.w_elts is None:
@@ -6227,14 +6172,14 @@
 
 def Tuple_set_elts(space, w_self, w_new_value):
     w_self.w_elts = w_new_value
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def Tuple_get_ctx(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'ctx')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'ctx')
     return expr_context_to_class[w_self.ctx - 1]()
@@ -6250,7 +6195,7 @@
         return
     # need to save the original object too
     w_self.setdictvalue(space, 'ctx', w_new_value)
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 _Tuple_field_unroller = unrolling_iterable(['elts', 'ctx'])
 def Tuple_init(space, w_self, __args__):
@@ -6283,7 +6228,7 @@
         w_obj = w_self.getdictvalue(space, 'value')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'value')
     return w_self.value
@@ -6297,7 +6242,7 @@
         w_self.setdictvalue(space, 'value', w_new_value)
         return
     w_self.deldictvalue(space, 'value')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 _Const_field_unroller = unrolling_iterable(['value'])
 def Const_init(space, w_self, __args__):
@@ -6409,6 +6354,8 @@
 def Slice_set_lower(space, w_self, w_new_value):
     try:
         w_self.lower = space.interp_w(expr, w_new_value, True)
+        if type(w_self.lower) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6430,6 +6377,8 @@
 def Slice_set_upper(space, w_self, w_new_value):
     try:
         w_self.upper = space.interp_w(expr, w_new_value, True)
+        if type(w_self.upper) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6451,6 +6400,8 @@
 def Slice_set_step(space, w_self, w_new_value):
     try:
         w_self.step = space.interp_w(expr, w_new_value, True)
+        if type(w_self.step) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6540,6 +6491,8 @@
 def Index_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6809,6 +6762,8 @@
 def comprehension_set_target(space, w_self, w_new_value):
     try:
         w_self.target = space.interp_w(expr, w_new_value, False)
+        if type(w_self.target) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6830,6 +6785,8 @@
 def comprehension_set_iter(space, w_self, w_new_value):
     try:
         w_self.iter = space.interp_w(expr, w_new_value, False)
+        if type(w_self.iter) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
@@ -6887,7 +6844,7 @@
         w_obj = w_self.getdictvalue(space, 'lineno')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._lineno_mask:
+    if not w_self.initialization_state & 1:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'lineno')
     return space.wrap(w_self.lineno)
@@ -6901,14 +6858,14 @@
         w_self.setdictvalue(space, 'lineno', w_new_value)
         return
     w_self.deldictvalue(space, 'lineno')
-    w_self.initialization_state |= w_self._lineno_mask
+    w_self.initialization_state |= 1
 
 def excepthandler_get_col_offset(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'col_offset')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & w_self._col_offset_mask:
+    if not w_self.initialization_state & 2:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'col_offset')
     return space.wrap(w_self.col_offset)
@@ -6922,7 +6879,7 @@
         w_self.setdictvalue(space, 'col_offset', w_new_value)
         return
     w_self.deldictvalue(space, 'col_offset')
-    w_self.initialization_state |= w_self._col_offset_mask
+    w_self.initialization_state |= 2
 
 excepthandler.typedef = typedef.TypeDef("excepthandler",
     AST.typedef,
@@ -6938,7 +6895,7 @@
         w_obj = w_self.getdictvalue(space, 'type')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 1:
+    if not w_self.initialization_state & 4:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'type')
     return space.wrap(w_self.type)
@@ -6946,20 +6903,22 @@
 def ExceptHandler_set_type(space, w_self, w_new_value):
     try:
         w_self.type = space.interp_w(expr, w_new_value, True)
+        if type(w_self.type) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'type', w_new_value)
         return
     w_self.deldictvalue(space, 'type')
-    w_self.initialization_state |= 1
+    w_self.initialization_state |= 4
 
 def ExceptHandler_get_name(space, w_self):
     if w_self.w_dict is not None:
         w_obj = w_self.getdictvalue(space, 'name')
         if w_obj is not None:
             return w_obj
-    if not w_self.initialization_state & 2:
+    if not w_self.initialization_state & 8:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'name')
     return space.wrap(w_self.name)
@@ -6967,16 +6926,18 @@
 def ExceptHandler_set_name(space, w_self, w_new_value):
     try:
         w_self.name = space.interp_w(expr, w_new_value, True)
+        if type(w_self.name) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
         w_self.setdictvalue(space, 'name', w_new_value)
         return
     w_self.deldictvalue(space, 'name')
-    w_self.initialization_state |= 2
+    w_self.initialization_state |= 8
 
 def ExceptHandler_get_body(space, w_self):
-    if not w_self.initialization_state & 4:
+    if not w_self.initialization_state & 16:
         typename = space.type(w_self).getname(space)
         raise operationerrfmt(space.w_AttributeError, "'%s' object has no attribute '%s'", typename, 'body')
     if w_self.w_body is None:
@@ -6990,7 +6951,7 @@
 
 def ExceptHandler_set_body(space, w_self, w_new_value):
     w_self.w_body = w_new_value
-    w_self.initialization_state |= 4
+    w_self.initialization_state |= 16
 
 _ExceptHandler_field_unroller = unrolling_iterable(['type', 'name', 'body'])
 def ExceptHandler_init(space, w_self, __args__):
@@ -7164,6 +7125,8 @@
 def keyword_set_value(space, w_self, w_new_value):
     try:
         w_self.value = space.interp_w(expr, w_new_value, False)
+        if type(w_self.value) is expr:
+            raise OperationError(space.w_TypeError, space.w_None)
     except OperationError, e:
         if not e.match(space, space.w_TypeError):
             raise
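
The hunks above drop the per-class mask attributes (w_self._lineno_mask,
w_self._col_offset_mask) in favour of literal bit constants: the shared
attributes now take the low bits (1, 2) and the constructor fields follow
(4, 8, 16), and a bare `expr` instance assigned to a typed field is now
rejected with TypeError.  A minimal hand-written sketch of the bit
bookkeeping; SimpleNode is illustrative and not part of the patch:

    class SimpleNode(object):
        # bit 1 -> lineno, bit 2 -> col_offset, bit 4 -> first real field, ...
        def __init__(self):
            self.initialization_state = 0

        def set_lineno(self, value):
            self.lineno = value
            self.initialization_state |= 1      # mark 'lineno' as initialized

        def get_lineno(self):
            if not self.initialization_state & 1:
                raise AttributeError("'SimpleNode' object has no attribute 'lineno'")
            return self.lineno
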
diff --git a/pypy/interpreter/astcompiler/optimize.py b/pypy/interpreter/astcompiler/optimize.py
--- a/pypy/interpreter/astcompiler/optimize.py
+++ b/pypy/interpreter/astcompiler/optimize.py
@@ -1,6 +1,5 @@
 """codegen helpers and AST constant folding."""
 import sys
-import itertools
 
 from pypy.interpreter.astcompiler import ast, consts, misc
 from pypy.tool import stdlib_opcode as ops
@@ -146,8 +145,7 @@
 }
 unrolling_unary_folders = unrolling_iterable(unary_folders.items())
 
-for folder in itertools.chain(binary_folders.itervalues(),
-                              unary_folders.itervalues()):
+for folder in binary_folders.values() + unary_folders.values():
     folder._always_inline_ = True
 del folder
 
diff --git a/pypy/interpreter/astcompiler/tools/asdl_py.py b/pypy/interpreter/astcompiler/tools/asdl_py.py
--- a/pypy/interpreter/astcompiler/tools/asdl_py.py
+++ b/pypy/interpreter/astcompiler/tools/asdl_py.py
@@ -79,6 +79,7 @@
         else:
             self.emit("class %s(AST):" % (base,))
             if sum.attributes:
+                self.emit("")
                 args = ", ".join(attr.name.value for attr in sum.attributes)
                 self.emit("def __init__(self, %s):" % (args,), 1)
                 for attr in sum.attributes:
@@ -114,7 +115,7 @@
             else:
                 names.append(repr(field.name.value))
         sub = (", ".join(names), name.value)
-        self.emit("missing_field(space, self.initialization_state, [%s], %r)"
+        self.emit("self.missing_field(space, [%s], %r)"
                   % sub, 3)
         self.emit("else:", 2)
         # Fill in all the default fields.
@@ -195,17 +196,13 @@
     def visitConstructor(self, cons, base, extra_attributes):
         self.emit("class %s(%s):" % (cons.name, base))
         self.emit("")
-        for field in self.data.cons_attributes[cons]:
-            subst = (field.name, self.data.field_masks[field])
-            self.emit("_%s_mask = %i" % subst, 1)
-        self.emit("")
         self.make_constructor(cons.fields, cons, extra_attributes, base)
         self.emit("")
         self.emit("def walkabout(self, visitor):", 1)
         self.emit("visitor.visit_%s(self)" % (cons.name,), 2)
         self.emit("")
         self.make_mutate_over(cons, cons.name)
-        self.make_var_syncer(cons.fields + self.data.cons_attributes[cons],
+        self.make_var_syncer(self.data.cons_attributes[cons] + cons.fields,
                              cons, cons.name)
 
     def visitField(self, field):
@@ -324,7 +321,7 @@
 
     def visitSum(self, sum, name):
         for field in sum.attributes:
-            self.make_property(field, name, True)
+            self.make_property(field, name)
         self.make_typedef(name, "AST", sum.attributes,
                           fields_name="_attributes")
         if not is_simple_sum(sum):
@@ -400,13 +397,10 @@
     def visitField(self, field, name):
         self.make_property(field, name)
 
-    def make_property(self, field, name, different_masks=False):
+    def make_property(self, field, name):
         func = "def %s_get_%s(space, w_self):" % (name, field.name)
         self.emit(func)
-        if different_masks:
-            flag = "w_self._%s_mask" % (field.name,)
-        else:
-            flag = self.data.field_masks[field]
+        flag = self.data.field_masks[field]
         if not field.seq:
             self.emit("if w_self.w_dict is not None:", 1)
             self.emit("    w_obj = w_self.getdictvalue(space, '%s')" % (field.name,), 1)
@@ -458,6 +452,11 @@
                     config = (field.name, field.type, repr(field.opt))
                     self.emit("w_self.%s = space.interp_w(%s, w_new_value, %s)" %
                               config, 2)
+                    if field.type.value not in self.data.prod_simple:
+                        self.emit("if type(w_self.%s) is %s:" % (
+                                field.name, field.type), 2)
+                        self.emit("raise OperationError(space.w_TypeError, "
+                                  "space.w_None)", 3)
             else:
                 level = 2
                 if field.opt and field.type.value != "int":
@@ -505,7 +504,10 @@
             optional_mask = 0
             for i, field in enumerate(fields):
                 flag = 1 << i
-                field_masks[field] = flag
+                if field not in field_masks:
+                    field_masks[field] = flag
+                else:
+                    assert field_masks[field] == flag
                 if field.opt:
                     optional_mask |= flag
                 else:
@@ -518,9 +520,9 @@
                 if is_simple_sum(sum):
                     simple_types.add(tp.name.value)
                 else:
+                    attrs = [field for field in sum.attributes]
                     for cons in sum.types:
-                        attrs = [copy_field(field) for field in sum.attributes]
-                        add_masks(cons.fields + attrs, cons)
+                        add_masks(attrs + cons.fields, cons)
                         cons_attributes[cons] = attrs
             else:
                 prod = tp.value
@@ -588,6 +590,24 @@
             space.setattr(self, w_name,
                           space.getitem(w_state, w_name))
 
+    def missing_field(self, space, required, host):
+        "Find which required field is missing."
+        state = self.initialization_state
+        for i in range(len(required)):
+            if (state >> i) & 1:
+                continue  # field is present
+            missing = required[i]
+            if missing is None:
+                continue  # field is optional
+            w_obj = self.getdictvalue(space, missing)
+            if w_obj is None:
+                err = "required field \\"%s\\" missing from %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+            else:
+                err = "incorrect type for field \\"%s\\" in %s"
+                raise operationerrfmt(space.w_TypeError, err, missing, host)
+        raise AssertionError("should not reach here")
+
 
 class NodeVisitorNotImplemented(Exception):
     pass
@@ -631,15 +651,6 @@
 )
 
 
-def missing_field(space, state, required, host):
-    "Find which required field is missing."
-    for i in range(len(required)):
-        if not (state >> i) & 1:
-            missing = required[i]
-            if missing is not None:
-                 err = "required field \\"%s\\" missing from %s"
-                 raise operationerrfmt(space.w_TypeError, err, missing, host)
-    raise AssertionError("should not reach here")
 
 
 """
diff --git a/pypy/interpreter/baseobjspace.py b/pypy/interpreter/baseobjspace.py
--- a/pypy/interpreter/baseobjspace.py
+++ b/pypy/interpreter/baseobjspace.py
@@ -1,4 +1,3 @@
-import itertools
 import pypy
 from pypy.interpreter.executioncontext import ExecutionContext, ActionFlag
 from pypy.interpreter.executioncontext import UserDelAction, FrameTraceAction
@@ -191,8 +190,8 @@
     def is_w(self, space, w_other):
         return self is w_other
 
-    def unique_id(self, space):
-        return space.wrap(compute_unique_id(self))
+    def immutable_unique_id(self, space):
+        return None
 
     def str_w(self, space):
         w_msg = typed_unwrap_error_msg(space, "string", self)
@@ -488,6 +487,16 @@
         'parser', 'fcntl', '_codecs', 'binascii'
     ]
 
+    # These modules are treated like CPython treats built-in modules,
+    # i.e. they always shadow any xx.py.  The other modules are treated
+    # like CPython treats extension modules, and are loaded in sys.path
+    # order by the fake entry '.../lib_pypy/__extensions__'.
+    MODULES_THAT_ALWAYS_SHADOW = dict.fromkeys([
+        '__builtin__', '__pypy__', '_ast', '_codecs', '_sre', '_warnings',
+        '_weakref', 'errno', 'exceptions', 'gc', 'imp', 'marshal',
+        'posix', 'nt', 'pwd', 'signal', 'sys', 'thread', 'zipimport',
+    ], None)
+
     def make_builtins(self):
         "NOT_RPYTHON: only for initializing the space."
 
@@ -519,8 +528,8 @@
         exception_types_w = self.export_builtin_exceptions()
 
         # initialize with "bootstrap types" from objspace  (e.g. w_None)
-        types_w = itertools.chain(self.get_builtin_types().iteritems(),
-                                  exception_types_w.iteritems())
+        types_w = (self.get_builtin_types().items() +
+                   exception_types_w.items())
         for name, w_type in types_w:
             self.setitem(self.builtin.w_dict, self.wrap(name), w_type)
 
@@ -697,7 +706,10 @@
         return w_two.is_w(self, w_one)
 
     def id(self, w_obj):
-        return w_obj.unique_id(self)
+        w_result = w_obj.immutable_unique_id(self)
+        if w_result is None:
+            w_result = self.wrap(compute_unique_id(w_obj))
+        return w_result
 
     def hash_w(self, w_obj):
         """shortcut for space.int_w(space.hash(w_obj))"""
@@ -1579,12 +1591,15 @@
     'ArithmeticError',
     'AssertionError',
     'AttributeError',
+    'BaseException',
+    'DeprecationWarning',
     'EOFError',
     'EnvironmentError',
     'Exception',
     'FloatingPointError',
     'IOError',
     'ImportError',
+    'ImportWarning',
     'IndentationError',
     'IndexError',
     'KeyError',
@@ -1605,9 +1620,14 @@
     'TabError',
     'TypeError',
     'UnboundLocalError',
+    'UnicodeDecodeError',
     'UnicodeError',
+    'UnicodeEncodeError',
+    'UnicodeTranslateError',
     'ValueError',
     'ZeroDivisionError',
+    'UnicodeEncodeError',
+    'UnicodeDecodeError',
     ]
 
 ## Irregular part of the interface:
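
The old unique_id hook becomes immutable_unique_id, which returns None unless
a subclass overrides it; space.id() then falls back to compute_unique_id, and
the typedef.py hunk further down makes default_identity_hash consistent with
it.  A sketch of an override, assuming the usual W_Root base class; W_ValueBox
and its value are illustrative only:

    from pypy.interpreter.baseobjspace import W_Root

    class W_ValueBox(W_Root):
        """Illustrative wrapper whose identity follows its immutable value."""
        def __init__(self, value):
            self.value = value

        def immutable_unique_id(self, space):
            # a non-None result makes space.id() (and default_identity_hash)
            # use this value instead of the object's address
            return space.wrap(self.value)
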
diff --git a/pypy/interpreter/eval.py b/pypy/interpreter/eval.py
--- a/pypy/interpreter/eval.py
+++ b/pypy/interpreter/eval.py
@@ -2,7 +2,6 @@
 This module defines the abstract base classes that support execution:
 Code and Frame.
 """
-from pypy.rlib import jit
 from pypy.interpreter.error import OperationError
 from pypy.interpreter.baseobjspace import Wrappable
 
@@ -98,7 +97,6 @@
         "Abstract. Get the expected number of locals."
         raise TypeError, "abstract"
 
-    @jit.dont_look_inside
     def fast2locals(self):
         # Copy values from the fastlocals to self.w_locals
         if self.w_locals is None:
@@ -112,7 +110,6 @@
                 w_name = self.space.wrap(name)
                 self.space.setitem(self.w_locals, w_name, w_value)
 
-    @jit.dont_look_inside
     def locals2fast(self):
         # Copy values from self.w_locals to the fastlocals
         assert self.w_locals is not None
diff --git a/pypy/interpreter/executioncontext.py b/pypy/interpreter/executioncontext.py
--- a/pypy/interpreter/executioncontext.py
+++ b/pypy/interpreter/executioncontext.py
@@ -445,6 +445,7 @@
         AsyncAction.__init__(self, space)
         self.dying_objects = []
         self.finalizers_lock_count = 0
+        self.enabled_at_app_level = True
 
     def register_callback(self, w_obj, callback, descrname):
         self.dying_objects.append((w_obj, callback, descrname))
diff --git a/pypy/interpreter/gateway.py b/pypy/interpreter/gateway.py
--- a/pypy/interpreter/gateway.py
+++ b/pypy/interpreter/gateway.py
@@ -619,7 +619,8 @@
                                                   self.descr_reqcls,
                                                   args)
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
@@ -655,7 +656,8 @@
                                                   self.descr_reqcls,
                                                   args)
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
@@ -674,7 +676,8 @@
                                                   self.descr_reqcls,
                                                   args.prepend(w_obj))
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
@@ -690,7 +693,8 @@
             raise OperationError(space.w_SystemError,
                                  space.wrap("unexpected DescrMismatch error"))
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
@@ -708,7 +712,8 @@
                                            self.descr_reqcls,
                                            Arguments(space, [w1]))
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
@@ -726,7 +731,8 @@
                                            self.descr_reqcls,
                                            Arguments(space, [w1, w2]))
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
@@ -744,7 +750,8 @@
                                            self.descr_reqcls,
                                            Arguments(space, [w1, w2, w3]))
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
@@ -763,7 +770,8 @@
                                            Arguments(space,
                                                      [w1, w2, w3, w4]))
         except Exception, e:
-            raise self.handle_exception(space, e)
+            self.handle_exception(space, e)
+            w_result = None
         if w_result is None:
             w_result = space.w_None
         return w_result
diff --git a/pypy/interpreter/generator.py b/pypy/interpreter/generator.py
--- a/pypy/interpreter/generator.py
+++ b/pypy/interpreter/generator.py
@@ -162,7 +162,8 @@
     # generate 2 versions of the function and 2 jit drivers.
     def _create_unpack_into():
         jitdriver = jit.JitDriver(greens=['pycode'],
-                                  reds=['self', 'frame', 'results'])
+                                  reds=['self', 'frame', 'results'],
+                                  name='unpack_into')
         def unpack_into(self, results):
             """This is a hack for performance: runs the generator and collects
             all produced items in a list."""
@@ -196,4 +197,4 @@
                 self.frame = None
         return unpack_into
     unpack_into = _create_unpack_into()
-    unpack_into_w = _create_unpack_into()
\ No newline at end of file
+    unpack_into_w = _create_unpack_into()
diff --git a/pypy/interpreter/test/test_argument.py b/pypy/interpreter/test/test_argument.py
--- a/pypy/interpreter/test/test_argument.py
+++ b/pypy/interpreter/test/test_argument.py
@@ -393,8 +393,8 @@
 
         class FakeArgErr(ArgErr):
 
-            def getmsg(self, fname):
-                return "msg "+fname
+            def getmsg(self):
+                return "msg"
 
         def _match_signature(*args):
             raise FakeArgErr()
@@ -404,7 +404,7 @@
         excinfo = py.test.raises(OperationError, args.parse_obj, "obj", "foo",
                        Signature(["a", "b"], None, None))
         assert excinfo.value.w_type is TypeError
-        assert excinfo.value._w_value == "msg foo"
+        assert excinfo.value.get_w_value(space) == "foo() msg"
 
 
     def test_args_parsing_into_scope(self):
@@ -448,8 +448,8 @@
 
         class FakeArgErr(ArgErr):
 
-            def getmsg(self, fname):
-                return "msg "+fname
+            def getmsg(self):
+                return "msg"
 
         def _match_signature(*args):
             raise FakeArgErr()
@@ -460,7 +460,7 @@
                                  "obj", [None, None], "foo",
                                  Signature(["a", "b"], None, None))
         assert excinfo.value.w_type is TypeError
-        assert excinfo.value._w_value == "msg foo"
+        assert excinfo.value.get_w_value(space) == "foo() msg"
 
     def test_topacked_frompacked(self):
         space = DummySpace()
@@ -493,35 +493,35 @@
         # got_nargs, nkwds, expected_nargs, has_vararg, has_kwarg,
         # defaults_w, missing_args
         err = ArgErrCount(1, 0, 0, False, False, None, 0)
-        s = err.getmsg('foo')
-        assert s == "foo() takes no arguments (1 given)"
+        s = err.getmsg()
+        assert s == "takes no arguments (1 given)"
         err = ArgErrCount(0, 0, 1, False, False, [], 1)
-        s = err.getmsg('foo')
-        assert s == "foo() takes exactly 1 argument (0 given)"
+        s = err.getmsg()
+        assert s == "takes exactly 1 argument (0 given)"
         err = ArgErrCount(3, 0, 2, False, False, [], 0)
-        s = err.getmsg('foo')
-        assert s == "foo() takes exactly 2 arguments (3 given)"
+        s = err.getmsg()
+        assert s == "takes exactly 2 arguments (3 given)"
         err = ArgErrCount(3, 0, 2, False, False, ['a'], 0)
-        s = err.getmsg('foo')
-        assert s == "foo() takes at most 2 arguments (3 given)"
+        s = err.getmsg()
+        assert s == "takes at most 2 arguments (3 given)"
         err = ArgErrCount(1, 0, 2, True, False, [], 1)
-        s = err.getmsg('foo')
-        assert s == "foo() takes at least 2 arguments (1 given)"
+        s = err.getmsg()
+        assert s == "takes at least 2 arguments (1 given)"
         err = ArgErrCount(0, 1, 2, True, False, ['a'], 1)
-        s = err.getmsg('foo')
-        assert s == "foo() takes at least 1 non-keyword argument (0 given)"
+        s = err.getmsg()
+        assert s == "takes at least 1 non-keyword argument (0 given)"
         err = ArgErrCount(2, 1, 1, False, True, [], 0)
-        s = err.getmsg('foo')
-        assert s == "foo() takes exactly 1 non-keyword argument (2 given)"
+        s = err.getmsg()
+        assert s == "takes exactly 1 non-keyword argument (2 given)"
         err = ArgErrCount(0, 1, 1, False, True, [], 1)
-        s = err.getmsg('foo')
-        assert s == "foo() takes exactly 1 non-keyword argument (0 given)"
+        s = err.getmsg()
+        assert s == "takes exactly 1 non-keyword argument (0 given)"
         err = ArgErrCount(0, 1, 1, True, True, [], 1)
-        s = err.getmsg('foo')
-        assert s == "foo() takes at least 1 non-keyword argument (0 given)"
+        s = err.getmsg()
+        assert s == "takes at least 1 non-keyword argument (0 given)"
         err = ArgErrCount(2, 1, 1, False, True, ['a'], 0)
-        s = err.getmsg('foo')
-        assert s == "foo() takes at most 1 non-keyword argument (2 given)"
+        s = err.getmsg()
+        assert s == "takes at most 1 non-keyword argument (2 given)"
 
     def test_bad_type_for_star(self):
         space = self.space
@@ -543,12 +543,12 @@
     def test_unknown_keywords(self):
         space = DummySpace()
         err = ArgErrUnknownKwds(space, 1, ['a', 'b'], [True, False], None)
-        s = err.getmsg('foo')
-        assert s == "foo() got an unexpected keyword argument 'b'"
+        s = err.getmsg()
+        assert s == "got an unexpected keyword argument 'b'"
         err = ArgErrUnknownKwds(space, 2, ['a', 'b', 'c'],
                                 [True, False, False], None)
-        s = err.getmsg('foo')
-        assert s == "foo() got 2 unexpected keyword arguments"
+        s = err.getmsg()
+        assert s == "got 2 unexpected keyword arguments"
 
     def test_unknown_unicode_keyword(self):
         class DummySpaceUnicode(DummySpace):
@@ -558,13 +558,13 @@
         err = ArgErrUnknownKwds(space, 1, ['a', None, 'b', 'c'],
                                 [True, False, True, True],
                                 [unichr(0x1234), u'b', u'c'])
-        s = err.getmsg('foo')
-        assert s == "foo() got an unexpected keyword argument '\xe1\x88\xb4'"
+        s = err.getmsg()
+        assert s == "got an unexpected keyword argument '\xe1\x88\xb4'"
 
     def test_multiple_values(self):
         err = ArgErrMultipleValues('bla')
-        s = err.getmsg('foo')
-        assert s == "foo() got multiple values for keyword argument 'bla'"
+        s = err.getmsg()
+        assert s == "got multiple values for keyword argument 'bla'"
 
 class AppTestArgument:
     def test_error_message(self):
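
The ArgErr tests drop the fname argument from getmsg(); the "foo() msg"
assertions suggest the caller now prepends the function name when the error
is turned into an OperationError.  A sketch of that assumed split;
raise_arg_error is a made-up helper name:

    from pypy.interpreter.error import OperationError

    def raise_arg_error(space, fname, argerr):
        # assumption: getmsg() returns only the tail, e.g. "takes no arguments (1 given)"
        w_msg = space.wrap("%s() %s" % (fname, argerr.getmsg()))
        raise OperationError(space.w_TypeError, w_msg)
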
diff --git a/pypy/interpreter/typedef.py b/pypy/interpreter/typedef.py
--- a/pypy/interpreter/typedef.py
+++ b/pypy/interpreter/typedef.py
@@ -54,7 +54,11 @@
 #  Hash support
 
 def default_identity_hash(space, w_obj):
-    return space.wrap(compute_identity_hash(w_obj))
+    w_unique_id = w_obj.immutable_unique_id(space)
+    if w_unique_id is None:     # common case
+        return space.wrap(compute_identity_hash(w_obj))
+    else:
+        return space.hash(w_unique_id)
 
 # ____________________________________________________________
 #
diff --git a/pypy/jit/backend/llgraph/llimpl.py b/pypy/jit/backend/llgraph/llimpl.py
--- a/pypy/jit/backend/llgraph/llimpl.py
+++ b/pypy/jit/backend/llgraph/llimpl.py
@@ -8,6 +8,7 @@
 from pypy.objspace.flow.model import Variable, Constant
 from pypy.annotation import model as annmodel
 from pypy.jit.metainterp.history import REF, INT, FLOAT
+from pypy.jit.metainterp import history
 from pypy.jit.codewriter import heaptracker
 from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rffi
 from pypy.rpython.ootypesystem import ootype
@@ -48,6 +49,11 @@
         value._the_opaque_pointer = op
         return op
 
+def _normalize(value):
+    if isinstance(value, lltype._ptr):
+        value = lltype.top_container(value._obj)
+    return value
+
 def from_opaque_string(s):
     if isinstance(s, str):
         return s
@@ -322,6 +328,14 @@
     _variables.append(v)
     return r
 
+def compile_started_vars(clt):
+    if not hasattr(clt, '_debug_argtypes'):    # only when compiling the loop
+        argtypes = [v.concretetype for v in _variables]
+        try:
+            clt._debug_argtypes = argtypes
+        except AttributeError:    # when 'clt' is actually a translated
+            pass                  # GcStruct
+
 def compile_add(loop, opnum):
     loop = _from_opaque(loop)
     loop.operations.append(Operation(opnum))
@@ -347,6 +361,16 @@
     op = loop.operations[-1]
     op.descr = weakref.ref(descr)
 
+TARGET_TOKENS = weakref.WeakKeyDictionary()
+
+def compile_add_target_token(loop, descr, clt):
+    # here, 'clt' is the compiled_loop_token of the original loop that
+    # we are compiling
+    loop = _from_opaque(loop)
+    op = loop.operations[-1]
+    descrobj = _normalize(descr)
+    TARGET_TOKENS[descrobj] = loop, len(loop.operations), op.args, clt
+
 def compile_add_var(loop, intvar):
     loop = _from_opaque(loop)
     op = loop.operations[-1]
@@ -381,13 +405,25 @@
     _variables.append(v)
     return r
 
-def compile_add_jump_target(loop, loop_target):
+def compile_add_jump_target(loop, targettoken, source_clt):
     loop = _from_opaque(loop)
-    loop_target = _from_opaque(loop_target)
+    descrobj = _normalize(targettoken)
+    (loop_target, target_opindex, target_inputargs, target_clt
+        ) = TARGET_TOKENS[descrobj]
+    #
+    try:
+        assert source_clt._debug_argtypes == target_clt._debug_argtypes
+    except AttributeError:   # when translated
+        pass
+    #
     op = loop.operations[-1]
     op.jump_target = loop_target
+    op.jump_target_opindex = target_opindex
+    op.jump_target_inputargs = target_inputargs
     assert op.opnum == rop.JUMP
-    assert len(op.args) == len(loop_target.inputargs)
+    assert [v.concretetype for v in op.args] == (
+           [v.concretetype for v in target_inputargs])
+    #
     if loop_target == loop:
         log.info("compiling new loop")
     else:
@@ -521,10 +557,11 @@
                 self.opindex += 1
                 continue
             if op.opnum == rop.JUMP:
-                assert len(op.jump_target.inputargs) == len(args)
-                self.env = dict(zip(op.jump_target.inputargs, args))
+                inputargs = op.jump_target_inputargs
+                assert len(inputargs) == len(args)
+                self.env = dict(zip(inputargs, args))
                 self.loop = op.jump_target
-                self.opindex = 0
+                self.opindex = op.jump_target_opindex
                 _stats.exec_jumps += 1
             elif op.opnum == rop.FINISH:
                 if self.verbose:
@@ -617,6 +654,15 @@
         #
         return _op_default_implementation
 
+    def op_label(self, _, *args):
+        op = self.loop.operations[self.opindex]
+        assert op.opnum == rop.LABEL
+        assert len(op.args) == len(args)
+        newenv = {}
+        for v, value in zip(op.args, args):
+            newenv[v] = value
+        self.env = newenv
+
     def op_debug_merge_point(self, _, *args):
         from pypy.jit.metainterp.warmspot import get_stats
         try:
@@ -959,6 +1005,7 @@
         self._may_force = self.opindex
         try:
             inpargs = _from_opaque(ctl.compiled_version).inputargs
+            assert len(inpargs) == len(args)
             for i, inparg in enumerate(inpargs):
                 TYPE = inparg.concretetype
                 if TYPE is lltype.Signed:
@@ -1788,9 +1835,11 @@
 setannotation(compile_start_int_var, annmodel.SomeInteger())
 setannotation(compile_start_ref_var, annmodel.SomeInteger())
 setannotation(compile_start_float_var, annmodel.SomeInteger())
+setannotation(compile_started_vars, annmodel.s_None)
 setannotation(compile_add, annmodel.s_None)
 setannotation(compile_add_descr, annmodel.s_None)
 setannotation(compile_add_descr_arg, annmodel.s_None)
+setannotation(compile_add_target_token, annmodel.s_None)
 setannotation(compile_add_var, annmodel.s_None)
 setannotation(compile_add_int_const, annmodel.s_None)
 setannotation(compile_add_ref_const, annmodel.s_None)
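
Jumps in the llgraph backend now resolve through TARGET_TOKENS: a LABEL
records its loop, the operation index right after it and its input arguments
under the (normalized) target token, and a later JUMP looks that record up
instead of always restarting at operation 0.  A toy dictionary-based sketch
of the same indirection, with illustrative names:

    target_tokens = {}

    def register_label(token, loop, opindex, inputargs):
        # roughly what compile_add_target_token records (minus weak references)
        target_tokens[token] = (loop, opindex, inputargs)

    def resolve_jump(token, jump_args):
        loop, opindex, inputargs = target_tokens[token]
        assert len(jump_args) == len(inputargs)
        # execution continues inside 'loop' at 'opindex', with the label's
        # input variables bound to the jump's argument values
        return loop, opindex, dict(zip(inputargs, jump_args))
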
diff --git a/pypy/jit/backend/llgraph/runner.py b/pypy/jit/backend/llgraph/runner.py
--- a/pypy/jit/backend/llgraph/runner.py
+++ b/pypy/jit/backend/llgraph/runner.py
@@ -37,7 +37,7 @@
     def get_arg_types(self):
         return self.arg_types
 
-    def get_return_type(self):
+    def get_result_type(self):
         return self.typeinfo
 
     def get_extra_info(self):
@@ -138,29 +138,30 @@
         clt = original_loop_token.compiled_loop_token
         clt.loop_and_bridges.append(c)
         clt.compiling_a_bridge()
-        self._compile_loop_or_bridge(c, inputargs, operations)
+        self._compile_loop_or_bridge(c, inputargs, operations, clt)
         old, oldindex = faildescr._compiled_fail
         llimpl.compile_redirect_fail(old, oldindex, c)
 
-    def compile_loop(self, inputargs, operations, looptoken, log=True, name=''):
+    def compile_loop(self, inputargs, operations, jitcell_token,
+                     log=True, name=''):
         """In a real assembler backend, this should assemble the given
         list of operations.  Here we just generate a similar CompiledLoop
         instance.  The code here is RPython, whereas the code in llimpl
         is not.
         """
         c = llimpl.compile_start()
-        clt = model.CompiledLoopToken(self, looptoken.number)
+        clt = model.CompiledLoopToken(self, jitcell_token.number)
         clt.loop_and_bridges = [c]
         clt.compiled_version = c
-        looptoken.compiled_loop_token = clt
-        self._compile_loop_or_bridge(c, inputargs, operations)
+        jitcell_token.compiled_loop_token = clt
+        self._compile_loop_or_bridge(c, inputargs, operations, clt)
 
     def free_loop_and_bridges(self, compiled_loop_token):
         for c in compiled_loop_token.loop_and_bridges:
             llimpl.mark_as_free(c)
         model.AbstractCPU.free_loop_and_bridges(self, compiled_loop_token)
 
-    def _compile_loop_or_bridge(self, c, inputargs, operations):
+    def _compile_loop_or_bridge(self, c, inputargs, operations, clt):
         var2index = {}
         for box in inputargs:
             if isinstance(box, history.BoxInt):
@@ -172,10 +173,11 @@
                 var2index[box] = llimpl.compile_start_float_var(c)
             else:
                 raise Exception("box is: %r" % (box,))
-        self._compile_operations(c, operations, var2index)
+        llimpl.compile_started_vars(clt)
+        self._compile_operations(c, operations, var2index, clt)
         return c
 
-    def _compile_operations(self, c, operations, var2index):
+    def _compile_operations(self, c, operations, var2index, clt):
         for op in operations:
             llimpl.compile_add(c, op.getopnum())
             descr = op.getdescr()
@@ -183,9 +185,11 @@
                 llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo,
                                          descr.arg_types, descr.extrainfo,
                                          descr.width)
-            if (isinstance(descr, history.LoopToken) and
-                op.getopnum() != rop.JUMP):
+            if isinstance(descr, history.JitCellToken):
+                assert op.getopnum() != rop.JUMP
                 llimpl.compile_add_loop_token(c, descr)
+            if isinstance(descr, history.TargetToken) and op.getopnum() == rop.LABEL:
+                llimpl.compile_add_target_token(c, descr, clt)
             if self.is_oo and isinstance(descr, (OODescr, MethDescr)):
                 # hack hack, not rpython
                 c._obj.externalobj.operations[-1].setdescr(descr)
@@ -239,9 +243,7 @@
         assert op.is_final()
         if op.getopnum() == rop.JUMP:
             targettoken = op.getdescr()
-            assert isinstance(targettoken, history.LoopToken)
-            compiled_version = targettoken.compiled_loop_token.compiled_version
-            llimpl.compile_add_jump_target(c, compiled_version)
+            llimpl.compile_add_jump_target(c, targettoken, clt)
         elif op.getopnum() == rop.FINISH:
             faildescr = op.getdescr()
             index = self.get_fail_descr_number(faildescr)
@@ -260,21 +262,28 @@
         self.latest_frame = frame
         return fail_index
 
-    def execute_token(self, loop_token):
-        """Calls the assembler generated for the given loop.
-        Returns the ResOperation that failed, of type rop.FAIL.
-        """
-        fail_index = self._execute_token(loop_token)
-        return self.get_fail_descr_from_number(fail_index)
-
-    def set_future_value_int(self, index, intvalue):
-        llimpl.set_future_value_int(index, intvalue)
-
-    def set_future_value_ref(self, index, objvalue):
-        llimpl.set_future_value_ref(index, objvalue)
-
-    def set_future_value_float(self, index, floatvalue):
-        llimpl.set_future_value_float(index, floatvalue)
+    def make_execute_token(self, *argtypes):
+        nb_args = len(argtypes)
+        unroll_argtypes = unrolling_iterable(list(enumerate(argtypes)))
+        #
+        def execute_token(loop_token, *args):
+            assert len(args) == nb_args
+            for index, TYPE in unroll_argtypes:
+                x = args[index]
+                assert TYPE == lltype.typeOf(x)
+                if TYPE == lltype.Signed:
+                    llimpl.set_future_value_int(index, x)
+                elif TYPE == llmemory.GCREF:
+                    llimpl.set_future_value_ref(index, x)
+                elif TYPE == longlong.FLOATSTORAGE:
+                    llimpl.set_future_value_float(index, x)
+                else:
+                    assert 0
+            #
+            fail_index = self._execute_token(loop_token)
+            return self.get_fail_descr_from_number(fail_index)
+        #
+        return execute_token
 
     def get_latest_value_int(self, index):
         return llimpl.frame_int_getvalue(self.latest_frame, index)
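
execute_token plus the set_future_value_* calls are folded into
make_execute_token, which builds one specialized callable per argument-type
signature.  A hedged usage sketch; the cpu instance, looptoken and argument
values are placeholders:

    from pypy.rpython.lltypesystem import lltype
    from pypy.jit.codewriter import longlong

    # built once for a (Signed, Signed, float) signature
    execute_token = cpu.make_execute_token(lltype.Signed, lltype.Signed,
                                           longlong.FLOATSTORAGE)
    # called per execution; returns the fail descr of the guard that failed
    fail_descr = execute_token(looptoken, 10, -3,
                               longlong.getfloatstorage(2.5))
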
diff --git a/pypy/jit/backend/llsupport/descr.py b/pypy/jit/backend/llsupport/descr.py
--- a/pypy/jit/backend/llsupport/descr.py
+++ b/pypy/jit/backend/llsupport/descr.py
@@ -5,11 +5,7 @@
 from pypy.jit.metainterp.history import AbstractDescr, getkind
 from pypy.jit.metainterp import history
 from pypy.jit.codewriter import heaptracker, longlong
-
-# The point of the class organization in this file is to make instances
-# as compact as possible.  This is done by not storing the field size or
-# the 'is_pointer_field' flag in the instance itself but in the class
-# (in methods actually) using a few classes instead of just one.
+from pypy.jit.codewriter.longlong import is_longlong
 
 
 class GcCache(object):
@@ -19,6 +15,7 @@
         self._cache_size = {}
         self._cache_field = {}
         self._cache_array = {}
+        self._cache_arraylen = {}
         self._cache_call = {}
         self._cache_interiorfield = {}
 
@@ -26,24 +23,15 @@
         assert isinstance(STRUCT, lltype.GcStruct)
 
     def init_array_descr(self, ARRAY, arraydescr):
-        assert isinstance(ARRAY, lltype.GcArray)
+        assert (isinstance(ARRAY, lltype.GcArray) or
+                isinstance(ARRAY, lltype.GcStruct) and ARRAY._arrayfld)
 
 
-if lltype.SignedLongLong is lltype.Signed:
-    def is_longlong(TYPE):
-        return False
-else:
-    assert rffi.sizeof(lltype.SignedLongLong) == rffi.sizeof(lltype.Float)
-    def is_longlong(TYPE):
-        return TYPE in (lltype.SignedLongLong, lltype.UnsignedLongLong)
-
 # ____________________________________________________________
 # SizeDescrs
 
 class SizeDescr(AbstractDescr):
     size = 0      # help translation
-    is_immutable = False
-
     tid = llop.combine_ushort(lltype.Signed, 0, 0)
 
     def __init__(self, size, count_fields_if_immut=-1):
@@ -77,265 +65,247 @@
         cache[STRUCT] = sizedescr
         return sizedescr
 
+
 # ____________________________________________________________
 # FieldDescrs
 
-class BaseFieldDescr(AbstractDescr):
+FLAG_POINTER  = 'P'
+FLAG_FLOAT    = 'F'
+FLAG_UNSIGNED = 'U'
+FLAG_SIGNED   = 'S'
+FLAG_STRUCT   = 'X'
+FLAG_VOID     = 'V'
+
+class FieldDescr(AbstractDescr):
+    name = ''
     offset = 0      # help translation
-    name = ''
-    _clsname = ''
+    field_size = 0
+    flag = '\x00'
 
-    def __init__(self, name, offset):
+    def __init__(self, name, offset, field_size, flag):
         self.name = name
         self.offset = offset
+        self.field_size = field_size
+        self.flag = flag
+
+    def is_pointer_field(self):
+        return self.flag == FLAG_POINTER
+
+    def is_float_field(self):
+        return self.flag == FLAG_FLOAT
+
+    def is_field_signed(self):
+        return self.flag == FLAG_SIGNED
 
     def sort_key(self):
         return self.offset
 
-    def get_field_size(self, translate_support_code):
-        raise NotImplementedError
+    def repr_of_descr(self):
+        return '<Field%s %s %s>' % (self.flag, self.name, self.offset)
 
-    _is_pointer_field = False   # unless overridden by GcPtrFieldDescr
-    _is_float_field = False     # unless overridden by FloatFieldDescr
-    _is_field_signed = False    # unless overridden by XxxFieldDescr
-
-    def is_pointer_field(self):
-        return self._is_pointer_field
-
-    def is_float_field(self):
-        return self._is_float_field
-
-    def is_field_signed(self):
-        return self._is_field_signed
-
-    def repr_of_descr(self):
-        return '<%s %s %s>' % (self._clsname, self.name, self.offset)
-
-class DynamicFieldDescr(BaseFieldDescr):
-    def __init__(self, offset, fieldsize, is_pointer, is_float, is_signed):
-        self.offset = offset
-        self._fieldsize = fieldsize
-        self._is_pointer_field = is_pointer
-        self._is_float_field = is_float
-        self._is_field_signed = is_signed
-
-    def get_field_size(self, translate_support_code):
-        return self._fieldsize
-
-class NonGcPtrFieldDescr(BaseFieldDescr):
-    _clsname = 'NonGcPtrFieldDescr'
-    def get_field_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class GcPtrFieldDescr(NonGcPtrFieldDescr):
-    _clsname = 'GcPtrFieldDescr'
-    _is_pointer_field = True
-
-def getFieldDescrClass(TYPE):
-    return getDescrClass(TYPE, BaseFieldDescr, GcPtrFieldDescr,
-                         NonGcPtrFieldDescr, 'Field', 'get_field_size',
-                         '_is_float_field', '_is_field_signed')
 
 def get_field_descr(gccache, STRUCT, fieldname):
     cache = gccache._cache_field
     try:
         return cache[STRUCT][fieldname]
     except KeyError:
-        offset, _ = symbolic.get_field_token(STRUCT, fieldname,
-                                             gccache.translate_support_code)
+        offset, size = symbolic.get_field_token(STRUCT, fieldname,
+                                                gccache.translate_support_code)
         FIELDTYPE = getattr(STRUCT, fieldname)
+        flag = get_type_flag(FIELDTYPE)
         name = '%s.%s' % (STRUCT._name, fieldname)
-        fielddescr = getFieldDescrClass(FIELDTYPE)(name, offset)
+        fielddescr = FieldDescr(name, offset, size, flag)
         cachedict = cache.setdefault(STRUCT, {})
         cachedict[fieldname] = fielddescr
         return fielddescr
 
+def get_type_flag(TYPE):
+    if isinstance(TYPE, lltype.Ptr):
+        if TYPE.TO._gckind == 'gc':
+            return FLAG_POINTER
+        else:
+            return FLAG_UNSIGNED
+    if isinstance(TYPE, lltype.Struct):
+        return FLAG_STRUCT
+    if TYPE is lltype.Float or is_longlong(TYPE):
+        return FLAG_FLOAT
+    if (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and
+           rffi.cast(TYPE, -1) == -1):
+        return FLAG_SIGNED
+    return FLAG_UNSIGNED
+
+def get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT):
+    cache = gccache._cache_arraylen
+    try:
+        return cache[ARRAY_OR_STRUCT]
+    except KeyError:
+        tsc = gccache.translate_support_code
+        (_, _, ofs) = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
+        size = symbolic.get_size(lltype.Signed, tsc)
+        result = FieldDescr("len", ofs, size, get_type_flag(lltype.Signed))
+        cache[ARRAY_OR_STRUCT] = result
+        return result
+
+
 # ____________________________________________________________
 # ArrayDescrs
 
-_A = lltype.GcArray(lltype.Signed)     # a random gcarray
-_AF = lltype.GcArray(lltype.Float)     # an array of C doubles
+class ArrayDescr(AbstractDescr):
+    tid = 0
+    basesize = 0       # workaround for the annotator
+    itemsize = 0
+    lendescr = None
+    flag = '\x00'
 
-
-class BaseArrayDescr(AbstractDescr):
-    _clsname = ''
-    tid = llop.combine_ushort(lltype.Signed, 0, 0)
-
-    def get_base_size(self, translate_support_code):
-        basesize, _, _ = symbolic.get_array_token(_A, translate_support_code)
-        return basesize
-
-    def get_ofs_length(self, translate_support_code):
-        _, _, ofslength = symbolic.get_array_token(_A, translate_support_code)
-        return ofslength
-
-    def get_item_size(self, translate_support_code):
-        raise NotImplementedError
-
-    _is_array_of_pointers = False      # unless overridden by GcPtrArrayDescr
-    _is_array_of_floats   = False      # unless overridden by FloatArrayDescr
-    _is_array_of_structs  = False      # unless overridden by StructArrayDescr
-    _is_item_signed       = False      # unless overridden by XxxArrayDescr
+    def __init__(self, basesize, itemsize, lendescr, flag):
+        self.basesize = basesize
+        self.itemsize = itemsize
+        self.lendescr = lendescr    # or None, if no length
+        self.flag = flag
 
     def is_array_of_pointers(self):
-        return self._is_array_of_pointers
+        return self.flag == FLAG_POINTER
 
     def is_array_of_floats(self):
-        return self._is_array_of_floats
+        return self.flag == FLAG_FLOAT
+
+    def is_item_signed(self):
+        return self.flag == FLAG_SIGNED
 
     def is_array_of_structs(self):
-        return self._is_array_of_structs
-
-    def is_item_signed(self):
-        return self._is_item_signed
+        return self.flag == FLAG_STRUCT
 
     def repr_of_descr(self):
-        return '<%s>' % self._clsname
+        return '<Array%s %s>' % (self.flag, self.itemsize)
 
 
-class NonGcPtrArrayDescr(BaseArrayDescr):
-    _clsname = 'NonGcPtrArrayDescr'
-    def get_item_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class GcPtrArrayDescr(NonGcPtrArrayDescr):
-    _clsname = 'GcPtrArrayDescr'
-    _is_array_of_pointers = True
-
-class FloatArrayDescr(BaseArrayDescr):
-    _clsname = 'FloatArrayDescr'
-    _is_array_of_floats = True
-    def get_base_size(self, translate_support_code):
-        basesize, _, _ = symbolic.get_array_token(_AF, translate_support_code)
-        return basesize
-    def get_item_size(self, translate_support_code):
-        return symbolic.get_size(lltype.Float, translate_support_code)
-
-class StructArrayDescr(BaseArrayDescr):
-    _clsname = 'StructArrayDescr'
-    _is_array_of_structs = True
-
-class BaseArrayNoLengthDescr(BaseArrayDescr):
-    def get_base_size(self, translate_support_code):
-        return 0
-
-    def get_ofs_length(self, translate_support_code):
-        return -1
-
-class DynamicArrayNoLengthDescr(BaseArrayNoLengthDescr):
-    def __init__(self, itemsize):
-        self.itemsize = itemsize
-
-    def get_item_size(self, translate_support_code):
-        return self.itemsize
-
-class NonGcPtrArrayNoLengthDescr(BaseArrayNoLengthDescr):
-    _clsname = 'NonGcPtrArrayNoLengthDescr'
-    def get_item_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class GcPtrArrayNoLengthDescr(NonGcPtrArrayNoLengthDescr):
-    _clsname = 'GcPtrArrayNoLengthDescr'
-    _is_array_of_pointers = True
-
-def getArrayDescrClass(ARRAY):
-    if ARRAY.OF is lltype.Float:
-        return FloatArrayDescr
-    elif isinstance(ARRAY.OF, lltype.Struct):
-        class Descr(StructArrayDescr):
-            _clsname = '%sArrayDescr' % ARRAY.OF._name
-            def get_item_size(self, translate_support_code):
-                return symbolic.get_size(ARRAY.OF, translate_support_code)
-        Descr.__name__ = Descr._clsname
-        return Descr
-    return getDescrClass(ARRAY.OF, BaseArrayDescr, GcPtrArrayDescr,
-                         NonGcPtrArrayDescr, 'Array', 'get_item_size',
-                         '_is_array_of_floats', '_is_item_signed')
-
-def getArrayNoLengthDescrClass(ARRAY):
-    return getDescrClass(ARRAY.OF, BaseArrayNoLengthDescr, GcPtrArrayNoLengthDescr,
-                         NonGcPtrArrayNoLengthDescr, 'ArrayNoLength', 'get_item_size',
-                         '_is_array_of_floats', '_is_item_signed')
-
-def get_array_descr(gccache, ARRAY):
+def get_array_descr(gccache, ARRAY_OR_STRUCT):
     cache = gccache._cache_array
     try:
-        return cache[ARRAY]
+        return cache[ARRAY_OR_STRUCT]
     except KeyError:
-        # we only support Arrays that are either GcArrays, or raw no-length
-        # non-gc Arrays.
-        if ARRAY._hints.get('nolength', False):
-            assert not isinstance(ARRAY, lltype.GcArray)
-            arraydescr = getArrayNoLengthDescrClass(ARRAY)()
+        tsc = gccache.translate_support_code
+        basesize, itemsize, _ = symbolic.get_array_token(ARRAY_OR_STRUCT, tsc)
+        if isinstance(ARRAY_OR_STRUCT, lltype.Array):
+            ARRAY_INSIDE = ARRAY_OR_STRUCT
         else:
-            assert isinstance(ARRAY, lltype.GcArray)
-            arraydescr = getArrayDescrClass(ARRAY)()
-        # verify basic assumption that all arrays' basesize and ofslength
-        # are equal
-        basesize, itemsize, ofslength = symbolic.get_array_token(ARRAY, False)
-        assert basesize == arraydescr.get_base_size(False)
-        assert itemsize == arraydescr.get_item_size(False)
-        if not ARRAY._hints.get('nolength', False):
-            assert ofslength == arraydescr.get_ofs_length(False)
-        if isinstance(ARRAY, lltype.GcArray):
-            gccache.init_array_descr(ARRAY, arraydescr)
-        cache[ARRAY] = arraydescr
+            ARRAY_INSIDE = ARRAY_OR_STRUCT._flds[ARRAY_OR_STRUCT._arrayfld]
+        if ARRAY_INSIDE._hints.get('nolength', False):
+            lendescr = None
+        else:
+            lendescr = get_field_arraylen_descr(gccache, ARRAY_OR_STRUCT)
+        flag = get_type_flag(ARRAY_INSIDE.OF)
+        arraydescr = ArrayDescr(basesize, itemsize, lendescr, flag)
+        if ARRAY_OR_STRUCT._gckind == 'gc':
+            gccache.init_array_descr(ARRAY_OR_STRUCT, arraydescr)
+        cache[ARRAY_OR_STRUCT] = arraydescr
         return arraydescr
 
+
 # ____________________________________________________________
 # InteriorFieldDescr
 
 class InteriorFieldDescr(AbstractDescr):
-    arraydescr = BaseArrayDescr()     # workaround for the annotator
-    fielddescr = BaseFieldDescr('', 0)
+    arraydescr = ArrayDescr(0, 0, None, '\x00')  # workaround for the annotator
+    fielddescr = FieldDescr('', 0, 0, '\x00')
 
     def __init__(self, arraydescr, fielddescr):
+        assert arraydescr.flag == FLAG_STRUCT
         self.arraydescr = arraydescr
         self.fielddescr = fielddescr
 
+    def sort_key(self):
+        return self.fielddescr.sort_key()
+
     def is_pointer_field(self):
         return self.fielddescr.is_pointer_field()
 
     def is_float_field(self):
         return self.fielddescr.is_float_field()
 
-    def sort_key(self):
-        return self.fielddescr.sort_key()
-
     def repr_of_descr(self):
         return '<InteriorFieldDescr %s>' % self.fielddescr.repr_of_descr()
 
-def get_interiorfield_descr(gc_ll_descr, ARRAY, FIELDTP, name):
+def get_interiorfield_descr(gc_ll_descr, ARRAY, name):
     cache = gc_ll_descr._cache_interiorfield
     try:
-        return cache[(ARRAY, FIELDTP, name)]
+        return cache[(ARRAY, name)]
     except KeyError:
         arraydescr = get_array_descr(gc_ll_descr, ARRAY)
-        fielddescr = get_field_descr(gc_ll_descr, FIELDTP, name)
+        fielddescr = get_field_descr(gc_ll_descr, ARRAY.OF, name)
         descr = InteriorFieldDescr(arraydescr, fielddescr)
-        cache[(ARRAY, FIELDTP, name)] = descr
+        cache[(ARRAY, name)] = descr
         return descr
 
+def get_dynamic_interiorfield_descr(gc_ll_descr, offset, width, fieldsize,
+                                    is_pointer, is_float, is_signed):
+    arraydescr = ArrayDescr(0, width, None, FLAG_STRUCT)
+    if is_pointer:
+        assert not is_float
+        flag = FLAG_POINTER
+    elif is_float:
+        flag = FLAG_FLOAT
+    elif is_signed:
+        flag = FLAG_SIGNED
+    else:
+        flag = FLAG_UNSIGNED
+    fielddescr = FieldDescr('dynamic', offset, fieldsize, flag)
+    return InteriorFieldDescr(arraydescr, fielddescr)
+
+
 # ____________________________________________________________
 # CallDescrs
 
-class BaseCallDescr(AbstractDescr):
-    _clsname = ''
-    loop_token = None
+class CallDescr(AbstractDescr):
     arg_classes = ''     # <-- annotation hack
+    result_type = '\x00'
+    result_flag = '\x00'
     ffi_flags = 1
+    call_stub_i = staticmethod(lambda func, args_i, args_r, args_f:
+                               0)
+    call_stub_r = staticmethod(lambda func, args_i, args_r, args_f:
+                               lltype.nullptr(llmemory.GCREF.TO))
+    call_stub_f = staticmethod(lambda func,args_i,args_r,args_f:
+                               longlong.ZEROF)
 
-    def __init__(self, arg_classes, extrainfo=None, ffi_flags=1):
-        self.arg_classes = arg_classes    # string of "r" and "i" (ref/int)
+    def __init__(self, arg_classes, result_type, result_signed, result_size,
+                 extrainfo=None, ffi_flags=1):
+        """
+            'arg_classes' is a string of characters, one per argument:
+                'i', 'r', 'f', 'L', 'S'
+
+            'result_type' is one character from the same list or 'v'
+
+            'result_signed' is a boolean True/False
+        """
+        self.arg_classes = arg_classes
+        self.result_type = result_type
+        self.result_size = result_size
         self.extrainfo = extrainfo
         self.ffi_flags = ffi_flags
         # NB. the default ffi_flags is 1, meaning FUNCFLAG_CDECL, which
         # makes sense on Windows as it's the one for all the C functions
         # we are compiling together with the JIT.  On non-Windows platforms
         # it is just ignored anyway.
+        if result_type == 'v':
+            result_flag = FLAG_VOID
+        elif result_type == 'i':
+            if result_signed:
+                result_flag = FLAG_SIGNED
+            else:
+                result_flag = FLAG_UNSIGNED
+        elif result_type == history.REF:
+            result_flag = FLAG_POINTER
+        elif result_type == history.FLOAT or result_type == 'L':
+            result_flag = FLAG_FLOAT
+        elif result_type == 'S':
+            result_flag = FLAG_UNSIGNED
+        else:
+            raise NotImplementedError("result_type = '%s'" % (result_type,))
+        self.result_flag = result_flag
 
     def __repr__(self):
-        res = '%s(%s)' % (self.__class__.__name__, self.arg_classes)
+        res = 'CallDescr(%s)' % (self.arg_classes,)
         extraeffect = getattr(self.extrainfo, 'extraeffect', None)
         if extraeffect is not None:
             res += ' EF=%r' % extraeffect
@@ -363,14 +333,14 @@
     def get_arg_types(self):
         return self.arg_classes
 
-    def get_return_type(self):
-        return self._return_type
+    def get_result_type(self):
+        return self.result_type
 
-    def get_result_size(self, translate_support_code):
-        raise NotImplementedError
+    def get_result_size(self):
+        return self.result_size
 
     def is_result_signed(self):
-        return False    # unless overridden
+        return self.result_flag == FLAG_SIGNED
 
     def create_call_stub(self, rtyper, RESULT):
         from pypy.rlib.clibffi import FFI_DEFAULT_ABI
@@ -408,18 +378,26 @@
         seen = {'i': 0, 'r': 0, 'f': 0}
         args = ", ".join([process(c) for c in self.arg_classes])
 
-        if self.get_return_type() == history.INT:
+        result_type = self.get_result_type()
+        if result_type == history.INT:
             result = 'rffi.cast(lltype.Signed, res)'
-        elif self.get_return_type() == history.REF:
+            category = 'i'
+        elif result_type == history.REF:
+            assert RESULT == llmemory.GCREF   # should be ensured by the caller
             result = 'lltype.cast_opaque_ptr(llmemory.GCREF, res)'
-        elif self.get_return_type() == history.FLOAT:
+            category = 'r'
+        elif result_type == history.FLOAT:
             result = 'longlong.getfloatstorage(res)'
-        elif self.get_return_type() == 'L':
+            category = 'f'
+        elif result_type == 'L':
             result = 'rffi.cast(lltype.SignedLongLong, res)'
-        elif self.get_return_type() == history.VOID:
-            result = 'None'
-        elif self.get_return_type() == 'S':
+            category = 'f'
+        elif result_type == history.VOID:
+            result = '0'
+            category = 'i'
+        elif result_type == 'S':
             result = 'longlong.singlefloat2int(res)'
+            category = 'i'
         else:
             assert 0
         source = py.code.Source("""
@@ -433,10 +411,13 @@
         d = globals().copy()
         d.update(locals())
         exec source.compile() in d
-        self.call_stub = d['call_stub']
+        call_stub = d['call_stub']
+        # store the function into one of three attributes, to preserve
+        # type-correctness of the return value
+        setattr(self, 'call_stub_%s' % category, call_stub)
 
     def verify_types(self, args_i, args_r, args_f, return_type):
-        assert self._return_type in return_type
+        assert self.result_type in return_type
         assert (self.arg_classes.count('i') +
                 self.arg_classes.count('S')) == len(args_i or ())
         assert self.arg_classes.count('r') == len(args_r or ())
@@ -444,161 +425,56 @@
                 self.arg_classes.count('L')) == len(args_f or ())
 
     def repr_of_descr(self):
-        return '<%s>' % self._clsname
+        res = 'Call%s %d' % (self.result_type, self.result_size)
+        if self.arg_classes:
+            res += ' ' + self.arg_classes
+        if self.extrainfo:
+            res += ' EF=%d' % self.extrainfo.extraeffect
+            oopspecindex = self.extrainfo.oopspecindex
+            if oopspecindex:
+                res += ' OS=%d' % oopspecindex
+        return '<%s>' % res
 
 
-class BaseIntCallDescr(BaseCallDescr):
-    # Base class of the various subclasses of descrs corresponding to
-    # calls having a return kind of 'int' (including non-gc pointers).
-    # The inheritance hierarchy is a bit different than with other Descr
-    # classes because of the 'call_stub' attribute, which is of type
-    #
-    #     lambda func, args_i, args_r, args_f --> int/ref/float/void
-    #
-    # The purpose of BaseIntCallDescr is to be the parent of all classes
-    # in which 'call_stub' has a return kind of 'int'.
-    _return_type = history.INT
-    call_stub = staticmethod(lambda func, args_i, args_r, args_f: 0)
-
-    _is_result_signed = False      # can be overridden in XxxCallDescr
-    def is_result_signed(self):
-        return self._is_result_signed
-
-class DynamicIntCallDescr(BaseIntCallDescr):
-    """
-    calldescr that works for every integer type, by explicitly passing it the
-    size of the result. Used only by get_call_descr_dynamic
-    """
-    _clsname = 'DynamicIntCallDescr'
-
-    def __init__(self, arg_classes, result_size, result_sign, extrainfo, ffi_flags):
-        BaseIntCallDescr.__init__(self, arg_classes, extrainfo, ffi_flags)
-        assert isinstance(result_sign, bool)
-        self._result_size = chr(result_size)
-        self._result_sign = result_sign
-
-    def get_result_size(self, translate_support_code):
-        return ord(self._result_size)
-
-    def is_result_signed(self):
-        return self._result_sign
-
-
-class NonGcPtrCallDescr(BaseIntCallDescr):
-    _clsname = 'NonGcPtrCallDescr'
-    def get_result_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class GcPtrCallDescr(BaseCallDescr):
-    _clsname = 'GcPtrCallDescr'
-    _return_type = history.REF
-    call_stub = staticmethod(lambda func, args_i, args_r, args_f:
-                             lltype.nullptr(llmemory.GCREF.TO))
-    def get_result_size(self, translate_support_code):
-        return symbolic.get_size_of_ptr(translate_support_code)
-
-class FloatCallDescr(BaseCallDescr):
-    _clsname = 'FloatCallDescr'
-    _return_type = history.FLOAT
-    call_stub = staticmethod(lambda func,args_i,args_r,args_f: longlong.ZEROF)
-    def get_result_size(self, translate_support_code):
-        return symbolic.get_size(lltype.Float, translate_support_code)
-
-class LongLongCallDescr(FloatCallDescr):
-    _clsname = 'LongLongCallDescr'
-    _return_type = 'L'
-
-class VoidCallDescr(BaseCallDescr):
-    _clsname = 'VoidCallDescr'
-    _return_type = history.VOID
-    call_stub = staticmethod(lambda func, args_i, args_r, args_f: None)
-    def get_result_size(self, translate_support_code):
-        return 0
-
-_SingleFloatCallDescr = None   # built lazily
-
-def getCallDescrClass(RESULT):
-    if RESULT is lltype.Void:
-        return VoidCallDescr
-    if RESULT is lltype.Float:
-        return FloatCallDescr
-    if RESULT is lltype.SingleFloat:
-        global _SingleFloatCallDescr
-        if _SingleFloatCallDescr is None:
-            assert rffi.sizeof(rffi.UINT) == rffi.sizeof(RESULT)
-            class SingleFloatCallDescr(getCallDescrClass(rffi.UINT)):
-                _clsname = 'SingleFloatCallDescr'
-                _return_type = 'S'
-            _SingleFloatCallDescr = SingleFloatCallDescr
-        return _SingleFloatCallDescr
-    if is_longlong(RESULT):
-        return LongLongCallDescr
-    return getDescrClass(RESULT, BaseIntCallDescr, GcPtrCallDescr,
-                         NonGcPtrCallDescr, 'Call', 'get_result_size',
-                         Ellipsis,  # <= floatattrname should not be used here
-                         '_is_result_signed')
-getCallDescrClass._annspecialcase_ = 'specialize:memo'
+def map_type_to_argclass(ARG, accept_void=False):
+    kind = getkind(ARG)
+    if   kind == 'int':
+        if ARG is lltype.SingleFloat: return 'S'
+        else:                         return 'i'
+    elif kind == 'ref':               return 'r'
+    elif kind == 'float':
+        if is_longlong(ARG):          return 'L'
+        else:                         return 'f'
+    elif kind == 'void':
+        if accept_void:               return 'v'
+    raise NotImplementedError('ARG = %r' % (ARG,))
 
 def get_call_descr(gccache, ARGS, RESULT, extrainfo=None):
-    arg_classes = []
-    for ARG in ARGS:
-        kind = getkind(ARG)
-        if   kind == 'int':
-            if ARG is lltype.SingleFloat:
-                arg_classes.append('S')
+    arg_classes = map(map_type_to_argclass, ARGS)
+    arg_classes = ''.join(arg_classes)
+    result_type = map_type_to_argclass(RESULT, accept_void=True)
+    RESULT_ERASED = RESULT
+    if RESULT is lltype.Void:
+        result_size = 0
+        result_signed = False
+    else:
+        if isinstance(RESULT, lltype.Ptr):
+            # avoid too many CallDescrs
+            if result_type == 'r':
+                RESULT_ERASED = llmemory.GCREF
             else:
-                arg_classes.append('i')
-        elif kind == 'ref': arg_classes.append('r')
-        elif kind == 'float':
-            if is_longlong(ARG):
-                arg_classes.append('L')
-            else:
-                arg_classes.append('f')
-        else:
-            raise NotImplementedError('ARG = %r' % (ARG,))
-    arg_classes = ''.join(arg_classes)
-    cls = getCallDescrClass(RESULT)
-    key = (cls, arg_classes, extrainfo)
+                RESULT_ERASED = llmemory.Address
+        result_size = symbolic.get_size(RESULT_ERASED,
+                                        gccache.translate_support_code)
+        result_signed = get_type_flag(RESULT) == FLAG_SIGNED
+    key = (arg_classes, result_type, result_signed, RESULT_ERASED, extrainfo)
     cache = gccache._cache_call
     try:
-        return cache[key]
+        calldescr = cache[key]
     except KeyError:
-        calldescr = cls(arg_classes, extrainfo)
-        calldescr.create_call_stub(gccache.rtyper, RESULT)
+        calldescr = CallDescr(arg_classes, result_type, result_signed,
+                              result_size, extrainfo)
+        calldescr.create_call_stub(gccache.rtyper, RESULT_ERASED)
         cache[key] = calldescr
-        return calldescr
-
-
-# ____________________________________________________________
-
-def getDescrClass(TYPE, BaseDescr, GcPtrDescr, NonGcPtrDescr,
-                  nameprefix, methodname, floatattrname, signedattrname,
-                  _cache={}):
-    if isinstance(TYPE, lltype.Ptr):
-        if TYPE.TO._gckind == 'gc':
-            return GcPtrDescr
-        else:
-            return NonGcPtrDescr
-    if TYPE is lltype.SingleFloat:
-        assert rffi.sizeof(rffi.UINT) == rffi.sizeof(TYPE)
-        TYPE = rffi.UINT
-    try:
-        return _cache[nameprefix, TYPE]
-    except KeyError:
-        #
-        class Descr(BaseDescr):
-            _clsname = '%s%sDescr' % (TYPE._name, nameprefix)
-        Descr.__name__ = Descr._clsname
-        #
-        def method(self, translate_support_code):
-            return symbolic.get_size(TYPE, translate_support_code)
-        setattr(Descr, methodname, method)
-        #
-        if TYPE is lltype.Float or is_longlong(TYPE):
-            setattr(Descr, floatattrname, True)
-        elif (TYPE is not lltype.Bool and isinstance(TYPE, lltype.Number) and
-              rffi.cast(TYPE, -1) == -1):
-            setattr(Descr, signedattrname, True)
-        #
-        _cache[nameprefix, TYPE] = Descr
-        return Descr
+    assert repr(calldescr.result_size) == repr(result_size)
+    return calldescr
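
The hunk above collapses the old family of per-result-type call descr classes into a single CallDescr parameterized by an argument-class string and a one-character result type. A minimal, standalone sketch of that encoding follows (plain Python, not RPython; the real code works on lltype objects via getkind(), whereas here the kind strings are passed in directly and the helper names are hypothetical):

# Simplified model of the 'arg_classes' / 'result_type' encoding used by the
# new get_call_descr() above.  Kinds are given as plain strings here, purely
# for illustration.

def map_kind_to_argclass(kind, is_single_float=False, is_longlong=False,
                         accept_void=False):
    if kind == 'int':
        return 'S' if is_single_float else 'i'
    elif kind == 'ref':
        return 'r'
    elif kind == 'float':
        return 'L' if is_longlong else 'f'
    elif kind == 'void' and accept_void:
        return 'v'
    raise NotImplementedError('kind = %r' % (kind,))

def repr_of_call_descr(result_type, result_size, arg_classes,
                       extraeffect=None, oopspecindex=0):
    # Mirrors CallDescr.repr_of_descr() above: e.g. '<Calli 8 irf EF=4 OS=2>'.
    res = 'Call%s %d' % (result_type, result_size)
    if arg_classes:
        res += ' ' + arg_classes
    if extraeffect is not None:
        res += ' EF=%d' % extraeffect
        if oopspecindex:
            res += ' OS=%d' % oopspecindex
    return '<%s>' % res

# Example: a call taking (Signed, GC pointer, Float) and returning a Signed,
# assuming an 8-byte word for the result size.
arg_classes = ''.join([map_kind_to_argclass('int'),
                       map_kind_to_argclass('ref'),
                       map_kind_to_argclass('float')])
assert arg_classes == 'irf'
assert repr_of_call_descr('i', 8, arg_classes) == '<Calli 8 irf>'
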
diff --git a/pypy/jit/backend/llsupport/ffisupport.py b/pypy/jit/backend/llsupport/ffisupport.py
--- a/pypy/jit/backend/llsupport/ffisupport.py
+++ b/pypy/jit/backend/llsupport/ffisupport.py
@@ -1,9 +1,7 @@
 from pypy.rlib.rarithmetic import intmask
 from pypy.jit.metainterp import history
 from pypy.rpython.lltypesystem import rffi
-from pypy.jit.backend.llsupport.descr import (
-    DynamicIntCallDescr, NonGcPtrCallDescr, FloatCallDescr, VoidCallDescr,
-    LongLongCallDescr, getCallDescrClass)
+from pypy.jit.backend.llsupport.descr import CallDescr
 
 class UnsupportedKind(Exception):
     pass
@@ -16,29 +14,13 @@
         argkinds = [get_ffi_type_kind(cpu, arg) for arg in ffi_args]
     except UnsupportedKind:
         return None
-    arg_classes = ''.join(argkinds)
-    if reskind == history.INT:
-        size = intmask(ffi_result.c_size)
-        signed = is_ffi_type_signed(ffi_result)
-        return DynamicIntCallDescr(arg_classes, size, signed, extrainfo,
-                                   ffi_flags=ffi_flags)
-    elif reskind == history.REF:
-        return  NonGcPtrCallDescr(arg_classes, extrainfo,
-                                  ffi_flags=ffi_flags)
-    elif reskind == history.FLOAT:
-        return FloatCallDescr(arg_classes, extrainfo,
-                              ffi_flags=ffi_flags)
-    elif reskind == history.VOID:
-        return VoidCallDescr(arg_classes, extrainfo,
-                             ffi_flags=ffi_flags)
-    elif reskind == 'L':
-        return LongLongCallDescr(arg_classes, extrainfo,
-                                 ffi_flags=ffi_flags)
-    elif reskind == 'S':
-        SingleFloatCallDescr = getCallDescrClass(rffi.FLOAT)
-        return SingleFloatCallDescr(arg_classes, extrainfo,
-                                    ffi_flags=ffi_flags)
-    assert False
+    if reskind == history.VOID:
+        result_size = 0
+    else:
+        result_size = intmask(ffi_result.c_size)
+    argkinds = ''.join(argkinds)
+    return CallDescr(argkinds, reskind, is_ffi_type_signed(ffi_result),
+                     result_size, extrainfo, ffi_flags=ffi_flags)
 
 def get_ffi_type_kind(cpu, ffi_type):
     from pypy.rlib.libffi import types
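
With the class hierarchy gone, the dynamic case (the removed DynamicIntCallDescr docstring above names get_call_descr_dynamic as its only caller) reduces to filling in the same CallDescr fields from the libffi type description. A rough standalone sketch of the new control flow, with a hypothetical FFIResult stand-in instead of the real clibffi structures (the real code reads ffi_result.c_size through intmask() and derives signedness from the ffi type tag):

class FFIResult(object):
    # Hypothetical stand-in for the libffi result-type description.
    def __init__(self, size, signed):
        self.c_size = size
        self.signed = signed

def dynamic_call_descr_fields(argkinds, reskind, ffi_result):
    # Mirrors the body shown above: a void result has size 0, any other
    # result takes its size straight from libffi, and a single CallDescr
    # constructor call replaces the old per-result-kind classes.
    if reskind == 'v':                      # stands in for history.VOID
        result_size = 0
    else:
        result_size = ffi_result.c_size
    return (''.join(argkinds), reskind, ffi_result.signed, result_size)

# Example: "int f(double, long)" described dynamically, with a 4-byte int.
assert dynamic_call_descr_fields(['f', 'i'], 'i', FFIResult(4, True)) == \
       ('fi', 'i', True, 4)
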
diff --git a/pypy/jit/backend/llsupport/gc.py b/pypy/jit/backend/llsupport/gc.py
--- a/pypy/jit/backend/llsupport/gc.py
+++ b/pypy/jit/backend/llsupport/gc.py
@@ -1,6 +1,6 @@
 import os
 from pypy.rlib import rgc
-from pypy.rlib.objectmodel import we_are_translated
+from pypy.rlib.objectmodel import we_are_translated, specialize
 from pypy.rlib.debug import fatalerror
 from pypy.rlib.rarithmetic import ovfcheck
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rclass, rstr
@@ -8,52 +8,93 @@
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rpython.annlowlevel import llhelper
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
-from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt, ConstPtr
-from pypy.jit.metainterp.history import AbstractDescr
+from pypy.jit.codewriter import heaptracker
+from pypy.jit.metainterp.history import ConstPtr, AbstractDescr
 from pypy.jit.metainterp.resoperation import ResOperation, rop
 from pypy.jit.backend.llsupport import symbolic
 from pypy.jit.backend.llsupport.symbolic import WORD
-from pypy.jit.backend.llsupport.descr import BaseSizeDescr, BaseArrayDescr
+from pypy.jit.backend.llsupport.descr import SizeDescr, ArrayDescr
 from pypy.jit.backend.llsupport.descr import GcCache, get_field_descr
-from pypy.jit.backend.llsupport.descr import GcPtrFieldDescr
+from pypy.jit.backend.llsupport.descr import get_array_descr
 from pypy.jit.backend.llsupport.descr import get_call_descr
+from pypy.jit.backend.llsupport.rewrite import GcRewriterAssembler
 from pypy.rpython.memory.gctransform import asmgcroot
 
 # ____________________________________________________________
 
 class GcLLDescription(GcCache):
-    minimal_size_in_nursery = 0
-    get_malloc_slowpath_addr = None
 
     def __init__(self, gcdescr, translator=None, rtyper=None):
         GcCache.__init__(self, translator is not None, rtyper)
         self.gcdescr = gcdescr
+        if translator and translator.config.translation.gcremovetypeptr:
+            self.fielddescr_vtable = None
+        else:
+            self.fielddescr_vtable = get_field_descr(self, rclass.OBJECT,
+                                                     'typeptr')
+        self._generated_functions = []
+
+    def _setup_str(self):
+        self.str_descr     = get_array_descr(self, rstr.STR)
+        self.unicode_descr = get_array_descr(self, rstr.UNICODE)
+
+    def generate_function(self, funcname, func, ARGS, RESULT=llmemory.GCREF):
+        """Generates a variant of malloc with the given name and the given
+        arguments.  It should return NULL if out of memory.  If it raises
+        anything at all, that exception must be a MemoryError.
+        """
+        FUNCPTR = lltype.Ptr(lltype.FuncType(ARGS, RESULT))
+        descr = get_call_descr(self, ARGS, RESULT)
+        setattr(self, funcname, func)
+        setattr(self, funcname + '_FUNCPTR', FUNCPTR)
+        setattr(self, funcname + '_descr', descr)
+        self._generated_functions.append(funcname)
+
+    @specialize.arg(1)
+    def get_malloc_fn(self, funcname):
+        func = getattr(self, funcname)
+        FUNC = getattr(self, funcname + '_FUNCPTR')
+        return llhelper(FUNC, func)
+
+    @specialize.arg(1)
+    def get_malloc_fn_addr(self, funcname):
+        ll_func = self.get_malloc_fn(funcname)
+        return heaptracker.adr2int(llmemory.cast_ptr_to_adr(ll_func))
+
     def _freeze_(self):
         return True
     def initialize(self):
         pass
     def do_write_barrier(self, gcref_struct, gcref_newptr):
         pass
-    def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
-        return operations
-    def can_inline_malloc(self, descr):
-        return False
-    def can_inline_malloc_varsize(self, descr, num_elem):
+    def can_use_nursery_malloc(self, size):
         return False
     def has_write_barrier_class(self):
         return None
     def freeing_block(self, start, stop):
         pass
+    def get_nursery_free_addr(self):
+        raise NotImplementedError
+    def get_nursery_top_addr(self):
+        raise NotImplementedError
 
-    def get_funcptr_for_newarray(self):
-        return llhelper(self.GC_MALLOC_ARRAY, self.malloc_array)
-    def get_funcptr_for_newstr(self):
-        return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_str)
-    def get_funcptr_for_newunicode(self):
-        return llhelper(self.GC_MALLOC_STR_UNICODE, self.malloc_unicode)
+    def gc_malloc(self, sizedescr):
+        """Blackhole: do a 'bh_new'.  Also used for 'bh_new_with_vtable',
+        with the vtable pointer set manually afterwards."""
+        assert isinstance(sizedescr, SizeDescr)
+        return self._bh_malloc(sizedescr)
 
+    def gc_malloc_array(self, arraydescr, num_elem):
+        assert isinstance(arraydescr, ArrayDescr)
+        return self._bh_malloc_array(arraydescr, num_elem)
 
-    def record_constptrs(self, op, gcrefs_output_list):
+    def gc_malloc_str(self, num_elem):
+        return self._bh_malloc_array(self.str_descr, num_elem)
+
+    def gc_malloc_unicode(self, num_elem):
+        return self._bh_malloc_array(self.unicode_descr, num_elem)
+
+    def _record_constptrs(self, op, gcrefs_output_list):
         for i in range(op.numargs()):
             v = op.getarg(i)
             if isinstance(v, ConstPtr) and bool(v.value):
@@ -61,11 +102,27 @@
                 rgc._make_sure_does_not_move(p)
                 gcrefs_output_list.append(p)
 
+    def rewrite_assembler(self, cpu, operations, gcrefs_output_list):
+        rewriter = GcRewriterAssembler(self, cpu)
+        newops = rewriter.rewrite(operations)
+        # record all GCREFs, because the GC (or Boehm) cannot see them and
+        # keep them alive if they end up as constants in the assembler
+        for op in newops:
+            self._record_constptrs(op, gcrefs_output_list)
+        return newops
+
 # ____________________________________________________________
 
 class GcLLDescr_boehm(GcLLDescription):
-    moving_gc = False
-    gcrootmap = None
+    kind                  = 'boehm'
+    moving_gc             = False
+    round_up              = False
+    gcrootmap             = None
+    write_barrier_descr   = None
+    fielddescr_tid        = None
+    str_type_id           = 0
+    unicode_type_id       = 0
+    get_malloc_slowpath_addr = None
 
     @classmethod
     def configure_boehm_once(cls):
@@ -76,6 +133,16 @@
         from pypy.rpython.tool import rffi_platform
         compilation_info = rffi_platform.configure_boehm()
 
+        # on some platform GC_init is required before any other
+        # GC_* functions, call it here for the benefit of tests
+        # XXX move this to tests
+        init_fn_ptr = rffi.llexternal("GC_init",
+                                      [], lltype.Void,
+                                      compilation_info=compilation_info,
+                                      sandboxsafe=True,
+                                      _nowrapper=True)
+        init_fn_ptr()
+
         # Versions 6.x of libgc needs to use GC_local_malloc().
         # Versions 7.x of libgc removed this function; GC_malloc() has
         # the same behavior if libgc was compiled with
@@ -95,96 +162,42 @@
                                         sandboxsafe=True,
                                         _nowrapper=True)
         cls.malloc_fn_ptr = malloc_fn_ptr
-        cls.compilation_info = compilation_info
         return malloc_fn_ptr
 
     def __init__(self, gcdescr, translator, rtyper):
         GcLLDescription.__init__(self, gcdescr, translator, rtyper)
         # grab a pointer to the Boehm 'malloc' function
-        malloc_fn_ptr = self.configure_boehm_once()
-        self.funcptr_for_new = malloc_fn_ptr
+        self.malloc_fn_ptr = self.configure_boehm_once()
+        self._setup_str()
+        self._make_functions()
 
-        def malloc_array(basesize, itemsize, ofs_length, num_elem):
+    def _make_functions(self):
+
+        def malloc_fixedsize(size):
+            return self.malloc_fn_ptr(size)
+        self.generate_function('malloc_fixedsize', malloc_fixedsize,
+                               [lltype.Signed])
+
+        def malloc_array(basesize, num_elem, itemsize, ofs_length):
             try:
-                size = ovfcheck(basesize + ovfcheck(itemsize * num_elem))
+                totalsize = ovfcheck(basesize + ovfcheck(itemsize * num_elem))
             except OverflowError:
                 return lltype.nullptr(llmemory.GCREF.TO)
-            res = self.funcptr_for_new(size)
-            if not res:
-                return res
-            rffi.cast(rffi.CArrayPtr(lltype.Signed), res)[ofs_length/WORD] = num_elem
+            res = self.malloc_fn_ptr(totalsize)
+            if res:
+                arrayptr = rffi.cast(rffi.CArrayPtr(lltype.Signed), res)
+                arrayptr[ofs_length/WORD] = num_elem
             return res
-        self.malloc_array = malloc_array
-        self.GC_MALLOC_ARRAY = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed] * 4, llmemory.GCREF))
+        self.generate_function('malloc_array', malloc_array,
+                               [lltype.Signed] * 4)
 
+    def _bh_malloc(self, sizedescr):
+        return self.malloc_fixedsize(sizedescr.size)
 
-        (str_basesize, str_itemsize, str_ofs_length
-         ) = symbolic.get_array_token(rstr.STR, self.translate_support_code)
-        (unicode_basesize, unicode_itemsize, unicode_ofs_length
-         ) = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code)
-        def malloc_str(length):
-            return self.malloc_array(
-                str_basesize, str_itemsize, str_ofs_length, length
-            )
-        def malloc_unicode(length):
-            return self.malloc_array(
-                unicode_basesize, unicode_itemsize, unicode_ofs_length, length
-            )
-        self.malloc_str = malloc_str
-        self.malloc_unicode = malloc_unicode
-        self.GC_MALLOC_STR_UNICODE = lltype.Ptr(lltype.FuncType(
-            [lltype.Signed], llmemory.GCREF))
-
-
-        # on some platform GC_init is required before any other
-        # GC_* functions, call it here for the benefit of tests
-        # XXX move this to tests
-        init_fn_ptr = rffi.llexternal("GC_init",
-                                      [], lltype.Void,
-                                      compilation_info=self.compilation_info,
-                                      sandboxsafe=True,
-                                      _nowrapper=True)
-
-        init_fn_ptr()
-
-    def gc_malloc(self, sizedescr):
-        assert isinstance(sizedescr, BaseSizeDescr)
-        return self.funcptr_for_new(sizedescr.size)
-
-    def gc_malloc_array(self, arraydescr, num_elem):
-        assert isinstance(arraydescr, BaseArrayDescr)
-        ofs_length = arraydescr.get_ofs_length(self.translate_support_code)
-        basesize = arraydescr.get_base_size(self.translate_support_code)
-        itemsize = arraydescr.get_item_size(self.translate_support_code)
-        return self.malloc_array(basesize, itemsize, ofs_length, num_elem)
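
The generate_function() helper added above drives the GcLLDescr_boehm rewrite shown here: it stores each generated malloc variant, its FUNCPTR type and a call descr under the attribute names funcname, funcname + '_FUNCPTR' and funcname + '_descr', and get_malloc_fn()/get_malloc_fn_addr() later recover the llhelper or its integer address. A toy, plain-Python sketch of that registry pattern, with hypothetical names and no lltype or llhelper at all:

class ToyGcDescription(object):
    # Toy model of the generate_function()/get_malloc_fn() naming convention
    # used by GcLLDescription above.  The "descr" here is just a tuple of the
    # argument and result descriptions; the real code additionally stores a
    # funcname + '_FUNCPTR' function-pointer type.
    def __init__(self):
        self._generated_functions = []

    def generate_function(self, funcname, func, args, result='GCREF'):
        # The generated function should return NULL (None here) if out of
        # memory.
        setattr(self, funcname, func)
        setattr(self, funcname + '_descr', (args, result))
        self._generated_functions.append(funcname)

    def get_malloc_fn(self, funcname):
        # The real method wraps the stored function in an llhelper().
        return getattr(self, funcname)

heap = {}

def malloc_fixedsize(size):
    # Stand-in for the Boehm malloc_fixedsize above: hands out a fresh
    # integer "reference" backed by a bytearray.
    ref = len(heap) + 1
    heap[ref] = bytearray(size)
    return ref

gc_ll_descr = ToyGcDescription()
gc_ll_descr.generate_function('malloc_fixedsize', malloc_fixedsize,
                              ['Signed'])
assert gc_ll_descr.malloc_fixedsize_descr == (['Signed'], 'GCREF')
assert gc_ll_descr.get_malloc_fn('malloc_fixedsize')(16) == 1
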

