[pypy-commit] pypy release-5.x: merge default into branch

mattip pypy.commits at gmail.com
Sun Jun 5 15:57:19 EDT 2016


Author: Matti Picus <matti.picus at gmail.com>
Branch: release-5.x
Changeset: r84943:b849a6c35c3f
Date: 2016-06-05 22:29 +0300
http://bitbucket.org/pypy/pypy/changeset/b849a6c35c3f/

Log:	merge default into branch

diff too long, truncating to 2000 out of 47348 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -20,4 +20,8 @@
 5f8302b8bf9f53056e40426f10c72151564e5b19 release-4.0.1
 246c9cf22037b11dc0e8c29ce3f291d3b8c5935a release-5.0
 bbd45126bc691f669c4ebdfbd74456cd274c6b92 release-5.0.1
+3260adbeba4a8b6659d1cc0d0b41f266769b74da release-5.1
+b0a649e90b6642251fb4a765fe5b27a97b1319a9 release-5.1.1
 80ef432a32d9baa4b3c5a54c215e8ebe499f6374 release-5.1.2
+40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
+40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
diff --git a/dotviewer/graphserver.py b/dotviewer/graphserver.py
--- a/dotviewer/graphserver.py
+++ b/dotviewer/graphserver.py
@@ -143,6 +143,11 @@
 
 if __name__ == '__main__':
     if len(sys.argv) != 2:
+        if len(sys.argv) == 1:
+            # start locally
+            import sshgraphserver
+            sshgraphserver.ssh_graph_server(['LOCAL'])
+            sys.exit(0)
         print >> sys.stderr, __doc__
         sys.exit(2)
     if sys.argv[1] == '--stdio':
diff --git a/dotviewer/sshgraphserver.py b/dotviewer/sshgraphserver.py
--- a/dotviewer/sshgraphserver.py
+++ b/dotviewer/sshgraphserver.py
@@ -4,11 +4,14 @@
 
 Usage:
     sshgraphserver.py  hostname  [more args for ssh...]
+    sshgraphserver.py  LOCAL
 
 This logs in to 'hostname' by passing the arguments on the command-line
 to ssh.  No further configuration is required: it works for all programs
 using the dotviewer library as long as they run on 'hostname' under the
 same username as the one sshgraphserver logs as.
+
+If 'hostname' is the string 'LOCAL', then it starts locally without ssh.
 """
 
 import graphserver, socket, subprocess, random
@@ -18,12 +21,19 @@
     s1 = socket.socket()
     s1.bind(('127.0.0.1', socket.INADDR_ANY))
     localhost, localport = s1.getsockname()
-    remoteport = random.randrange(10000, 20000)
-    #  ^^^ and just hope there is no conflict
 
-    args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (remoteport, localport)]
-    args = args + sshargs + ['python -u -c "exec input()"']
-    print ' '.join(args[:-1])
+    if sshargs[0] != 'LOCAL':
+        remoteport = random.randrange(10000, 20000)
+        #  ^^^ and just hope there is no conflict
+
+        args = ['ssh', '-S', 'none', '-C', '-R%d:127.0.0.1:%d' % (
+            remoteport, localport)]
+        args = args + sshargs + ['python -u -c "exec input()"']
+    else:
+        remoteport = localport
+        args = ['python', '-u', '-c', 'exec input()']
+
+    print ' '.join(args)
     p = subprocess.Popen(args, bufsize=0,
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE)
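
(For context, a hedged sketch of the dispatch the hunk above introduces: with
the argument 'LOCAL' no ssh tunnel is set up at all and a plain local Python
process speaks the graph protocol on stdin/stdout. The helper name and the
fixed port below are illustrative, not code from the repository.)

    import subprocess

    def spawn_display(sshargs, localport):
        if sshargs and sshargs[0] != 'LOCAL':
            remoteport = 12345   # the real code picks a random port in 10000-20000
            args = ['ssh', '-S', 'none', '-C',
                    '-R%d:127.0.0.1:%d' % (remoteport, localport)]
            args = args + sshargs + ['python -u -c "exec input()"']
        else:
            remoteport = localport
            args = ['python', '-u', '-c', 'exec input()']
        return subprocess.Popen(args, bufsize=0,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE), remoteport
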
diff --git a/lib-python/2.7/distutils/cmd.py b/lib-python/2.7/distutils/cmd.py
--- a/lib-python/2.7/distutils/cmd.py
+++ b/lib-python/2.7/distutils/cmd.py
@@ -298,8 +298,16 @@
         src_cmd_obj.ensure_finalized()
         for (src_option, dst_option) in option_pairs:
             if getattr(self, dst_option) is None:
-                setattr(self, dst_option,
-                        getattr(src_cmd_obj, src_option))
+                try:
+                    setattr(self, dst_option,
+                            getattr(src_cmd_obj, src_option))
+                except AttributeError:
+                    # This was added after problems with setuptools 18.4.
+                    # It seems that setuptools 20.9 fixes the problem.
+                    # But e.g. on Ubuntu 14.04 with /usr/bin/virtualenv
+                    # if I say "virtualenv -p pypy venv-pypy" then it
+                    # just installs setuptools 18.4 from some cache...
+                    pass
 
 
     def get_finalized_command(self, command, create=1):
diff --git a/lib-python/2.7/subprocess.py b/lib-python/2.7/subprocess.py
--- a/lib-python/2.7/subprocess.py
+++ b/lib-python/2.7/subprocess.py
@@ -834,54 +834,63 @@
             c2pread, c2pwrite = None, None
             errread, errwrite = None, None
 
+            ispread = False
             if stdin is None:
                 p2cread = _subprocess.GetStdHandle(_subprocess.STD_INPUT_HANDLE)
                 if p2cread is None:
                     p2cread, _ = _subprocess.CreatePipe(None, 0)
+                    ispread = True
             elif stdin == PIPE:
                 p2cread, p2cwrite = _subprocess.CreatePipe(None, 0)
+                ispread = True
             elif isinstance(stdin, int):
                 p2cread = msvcrt.get_osfhandle(stdin)
             else:
                 # Assuming file-like object
                 p2cread = msvcrt.get_osfhandle(stdin.fileno())
-            p2cread = self._make_inheritable(p2cread)
+            p2cread = self._make_inheritable(p2cread, ispread)
             # We just duplicated the handle, it has to be closed at the end
             to_close.add(p2cread)
             if stdin == PIPE:
                 to_close.add(p2cwrite)
 
+            ispwrite = False
             if stdout is None:
                 c2pwrite = _subprocess.GetStdHandle(_subprocess.STD_OUTPUT_HANDLE)
                 if c2pwrite is None:
                     _, c2pwrite = _subprocess.CreatePipe(None, 0)
+                    ispwrite = True
             elif stdout == PIPE:
                 c2pread, c2pwrite = _subprocess.CreatePipe(None, 0)
+                ispwrite = True
             elif isinstance(stdout, int):
                 c2pwrite = msvcrt.get_osfhandle(stdout)
             else:
                 # Assuming file-like object
                 c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
-            c2pwrite = self._make_inheritable(c2pwrite)
+            c2pwrite = self._make_inheritable(c2pwrite, ispwrite)
             # We just duplicated the handle, it has to be closed at the end
             to_close.add(c2pwrite)
             if stdout == PIPE:
                 to_close.add(c2pread)
 
+            ispwrite = False
             if stderr is None:
                 errwrite = _subprocess.GetStdHandle(_subprocess.STD_ERROR_HANDLE)
                 if errwrite is None:
                     _, errwrite = _subprocess.CreatePipe(None, 0)
+                    ispwrite = True
             elif stderr == PIPE:
                 errread, errwrite = _subprocess.CreatePipe(None, 0)
+                ispwrite = True
             elif stderr == STDOUT:
-                errwrite = c2pwrite.handle # pass id to not close it
+                errwrite = c2pwrite
             elif isinstance(stderr, int):
                 errwrite = msvcrt.get_osfhandle(stderr)
             else:
                 # Assuming file-like object
                 errwrite = msvcrt.get_osfhandle(stderr.fileno())
-            errwrite = self._make_inheritable(errwrite)
+            errwrite = self._make_inheritable(errwrite, ispwrite)
             # We just duplicated the handle, it has to be closed at the end
             to_close.add(errwrite)
             if stderr == PIPE:
@@ -892,13 +901,14 @@
                     errread, errwrite), to_close
 
 
-        def _make_inheritable(self, handle):
+        def _make_inheritable(self, handle, close=False):
             """Return a duplicate of handle, which is inheritable"""
             dupl = _subprocess.DuplicateHandle(_subprocess.GetCurrentProcess(),
                                 handle, _subprocess.GetCurrentProcess(), 0, 1,
                                 _subprocess.DUPLICATE_SAME_ACCESS)
-            # If the initial handle was obtained with CreatePipe, close it.
-            if not isinstance(handle, int):
+            # PyPy: If the initial handle was obtained with CreatePipe,
+            # close it.
+            if close:
                 handle.Close()
             return dupl
 
diff --git a/lib-python/2.7/test/test_descr.py b/lib-python/2.7/test/test_descr.py
--- a/lib-python/2.7/test/test_descr.py
+++ b/lib-python/2.7/test/test_descr.py
@@ -1735,7 +1735,6 @@
             ("__reversed__", reversed, empty_seq, set(), {}),
             ("__length_hint__", list, zero, set(),
              {"__iter__" : iden, "next" : stop}),
-            ("__sizeof__", sys.getsizeof, zero, set(), {}),
             ("__instancecheck__", do_isinstance, return_true, set(), {}),
             ("__missing__", do_dict_missing, some_number,
              set(("__class__",)), {}),
@@ -1747,6 +1746,8 @@
             ("__format__", format, format_impl, set(), {}),
             ("__dir__", dir, empty_seq, set(), {}),
             ]
+        if test_support.check_impl_detail():
+            specials.append(("__sizeof__", sys.getsizeof, zero, set(), {}))
 
         class Checker(object):
             def __getattr__(self, attr, test=self):
@@ -1768,10 +1769,6 @@
                 raise MyException
 
         for name, runner, meth_impl, ok, env in specials:
-            if name == '__length_hint__' or name == '__sizeof__':
-                if not test_support.check_impl_detail():
-                    continue
-
             class X(Checker):
                 pass
             for attr, obj in env.iteritems():
diff --git a/lib-python/2.7/test/test_sys_settrace.py b/lib-python/2.7/test/test_sys_settrace.py
--- a/lib-python/2.7/test/test_sys_settrace.py
+++ b/lib-python/2.7/test/test_sys_settrace.py
@@ -328,8 +328,8 @@
 
     def test_13_genexp(self):
         if self.using_gc:
+            gc.enable()
             test_support.gc_collect()
-            gc.enable()
         try:
             self.run_test(generator_example)
             # issue1265: if the trace function contains a generator,
diff --git a/lib-python/stdlib-upgrade.txt b/lib-python/stdlib-upgrade.txt
--- a/lib-python/stdlib-upgrade.txt
+++ b/lib-python/stdlib-upgrade.txt
@@ -5,15 +5,23 @@
 
     overly detailed
 
-1. check out the branch vendor/stdlib
+0. make sure your working dir is clean
+1. check out the branch vendor/stdlib (for 2.7) or vendor/stdlib-3-* (for py3k)
+   or create branch vendor/stdlib-3-*
 2. upgrade the files there
+   2a. remove lib-python/2.7/ or lib-python/3/
+   2b. copy the files from the cpython repo
+   2c. hg add lib-python/2.7/ or lib-python/3/
+   2d. hg remove --after
+   2e. show copied files in cpython repo by running `hg diff --git -r v<old> -r v<new> Lib | grep '^copy \(from\|to\)'`
+   2f. fix copies / renames manually by running `hg copy --after <from> <to>` for each copied file
 3. update stdlib-version.txt with the output of hg -id from the cpython repo
 4. commit
-5. update to default/py3k
+5. update to default / py3k
 6. create a integration branch for the new stdlib
    (just hg branch stdlib-$version)
-7. merge vendor/stdlib
+7. merge vendor/stdlib or vendor/stdlib-3-*
 8. commit
 10. fix issues
 11. commit --close-branch
-12. merge to default
+12. merge to default / py3k
diff --git a/lib_pypy/_pypy_interact.py b/lib_pypy/_pypy_interact.py
--- a/lib_pypy/_pypy_interact.py
+++ b/lib_pypy/_pypy_interact.py
@@ -6,7 +6,7 @@
 irc_header = "And now for something completely different"
 
 
-def interactive_console(mainmodule=None, quiet=False):
+def interactive_console(mainmodule=None, quiet=False, future_flags=0):
     # set sys.{ps1,ps2} just before invoking the interactive interpreter. This
     # mimics what CPython does in pythonrun.c
     if not hasattr(sys, 'ps1'):
@@ -37,15 +37,17 @@
             raise ImportError
         from pyrepl.simple_interact import run_multiline_interactive_console
     except ImportError:
-        run_simple_interactive_console(mainmodule)
+        run_simple_interactive_console(mainmodule, future_flags=future_flags)
     else:
-        run_multiline_interactive_console(mainmodule)
+        run_multiline_interactive_console(mainmodule, future_flags=future_flags)
 
-def run_simple_interactive_console(mainmodule):
+def run_simple_interactive_console(mainmodule, future_flags=0):
     import code
     if mainmodule is None:
         import __main__ as mainmodule
     console = code.InteractiveConsole(mainmodule.__dict__, filename='<stdin>')
+    if future_flags:
+        console.compile.compiler.flags |= future_flags
     # some parts of code.py are copied here because it seems to be impossible
     # to start an interactive console without printing at least one line
     # of banner
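
(A hedged illustration of what the new future_flags parameter enables: the
codeop compiler kept by code.InteractiveConsole exposes a 'flags' attribute
that can be OR-ed with __future__ compiler flags, so statements typed at the
prompt see the same future behaviour as the main program. The session below
is an assumption for illustration, not repository code.)

    import code
    import __future__

    console = code.InteractiveConsole({}, filename='<stdin>')
    future_flags = __future__.division.compiler_flag
    console.compile.compiler.flags |= future_flags  # same attribute as in the diff
    console.push('print 1 / 2')    # prints 0.5 on Python 2 instead of 0
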
diff --git a/lib_pypy/_pypy_irc_topic.py b/lib_pypy/_pypy_irc_topic.py
--- a/lib_pypy/_pypy_irc_topic.py
+++ b/lib_pypy/_pypy_irc_topic.py
@@ -224,23 +224,9 @@
 va ClCl orvat bayl zbqrengryl zntvp vf n tbbq guvat <psobym>
 """
 
-from string import ascii_uppercase, ascii_lowercase
-
 def rot13(data):
-    """ A simple rot-13 encoder since `str.encode('rot13')` was removed from
-        Python as of version 3.0.  It rotates both uppercase and lowercase letters individually.
-    """
-    total = []
-    for char in data:
-        if char in ascii_uppercase:
-            index = (ascii_uppercase.find(char) + 13) % 26
-            total.append(ascii_uppercase[index])
-        elif char in ascii_lowercase:
-            index = (ascii_lowercase.find(char) + 13) % 26
-            total.append(ascii_lowercase[index])
-        else:
-            total.append(char)
-    return "".join(total)
+    return ''.join(chr(ord(c)+(13 if 'A'<=c.upper()<='M' else
+                              -13 if 'N'<=c.upper()<='Z' else 0)) for c in data)
 
 def some_topic():
     import time
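
(A quick illustrative sanity check for the compact rot13() defined above;
these asserts are not part of the module.)

    assert rot13("Uryyb, Jbeyq!") == "Hello, World!"
    assert rot13(rot13("PyPy is fun")) == "PyPy is fun"   # rot13 is its own inverse
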
diff --git a/lib_pypy/_subprocess.py b/lib_pypy/_subprocess.py
--- a/lib_pypy/_subprocess.py
+++ b/lib_pypy/_subprocess.py
@@ -4,6 +4,9 @@
 subprocess module on Windows.
 """
 
+import sys
+if sys.platform != 'win32':
+    raise ImportError("The '_subprocess' module is only available on Windows")
 
 # Declare external Win32 functions
 
diff --git a/lib_pypy/cffi/api.py b/lib_pypy/cffi/api.py
--- a/lib_pypy/cffi/api.py
+++ b/lib_pypy/cffi/api.py
@@ -332,8 +332,8 @@
     def from_buffer(self, python_buffer):
         """Return a <cdata 'char[]'> that points to the data of the
         given Python object, which must support the buffer interface.
-        Note that this is not meant to be used on the built-in types str,
-        unicode, or bytearray (you can build 'char[]' arrays explicitly)
+        Note that this is not meant to be used on the built-in types
+        str or unicode (you can build 'char[]' arrays explicitly)
         but only on objects containing large quantities of raw data
         in some other format, like 'array.array' or numpy arrays.
         """
@@ -397,20 +397,7 @@
         data.  Later, when this new cdata object is garbage-collected,
         'destructor(old_cdata_object)' will be called.
         """
-        try:
-            gcp = self._backend.gcp
-        except AttributeError:
-            pass
-        else:
-            return gcp(cdata, destructor)
-        #
-        with self._lock:
-            try:
-                gc_weakrefs = self.gc_weakrefs
-            except AttributeError:
-                from .gc_weakref import GcWeakrefs
-                gc_weakrefs = self.gc_weakrefs = GcWeakrefs(self)
-            return gc_weakrefs.build(cdata, destructor)
+        return self._backend.gcp(cdata, destructor)
 
     def _get_cached_btype(self, type):
         assert self._lock.acquire(False) is False
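
(For reference, a minimal usage sketch of the ffi.gc() API whose
implementation is simplified above. The malloc/free cdef and dlopen(None)
are illustrative assumptions and require a POSIX libc.)

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("void *malloc(size_t size); void free(void *ptr);")
    lib = ffi.dlopen(None)                # the C library

    raw = lib.malloc(64)
    buf = ffi.gc(raw, lib.free)           # lib.free(raw) runs when 'buf' is collected
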
diff --git a/lib_pypy/cffi/backend_ctypes.py b/lib_pypy/cffi/backend_ctypes.py
--- a/lib_pypy/cffi/backend_ctypes.py
+++ b/lib_pypy/cffi/backend_ctypes.py
@@ -460,6 +460,11 @@
                         return x._value
                     raise TypeError("character expected, got %s" %
                                     type(x).__name__)
+                def __nonzero__(self):
+                    return ord(self._value) != 0
+            else:
+                def __nonzero__(self):
+                    return self._value != 0
 
             if kind == 'float':
                 @staticmethod
@@ -993,6 +998,31 @@
         assert onerror is None   # XXX not implemented
         return BType(source, error)
 
+    def gcp(self, cdata, destructor):
+        BType = self.typeof(cdata)
+
+        if destructor is None:
+            if not (hasattr(BType, '_gcp_type') and
+                    BType._gcp_type is BType):
+                raise TypeError("Can remove destructor only on a object "
+                                "previously returned by ffi.gc()")
+            cdata._destructor = None
+            return None
+
+        try:
+            gcp_type = BType._gcp_type
+        except AttributeError:
+            class CTypesDataGcp(BType):
+                __slots__ = ['_orig', '_destructor']
+                def __del__(self):
+                    if self._destructor is not None:
+                        self._destructor(self._orig)
+            gcp_type = BType._gcp_type = CTypesDataGcp
+        new_cdata = self.cast(gcp_type, cdata)
+        new_cdata._orig = cdata
+        new_cdata._destructor = destructor
+        return new_cdata
+
     typeof = type
 
     def getcname(self, BType, replace_with):
diff --git a/lib_pypy/cffi/commontypes.py b/lib_pypy/cffi/commontypes.py
--- a/lib_pypy/cffi/commontypes.py
+++ b/lib_pypy/cffi/commontypes.py
@@ -35,8 +35,11 @@
                                "you call ffi.set_unicode()" % (commontype,))
         else:
             if commontype == cdecl:
-                raise api.FFIError("Unsupported type: %r.  Please file a bug "
-                                   "if you think it should be." % (commontype,))
+                raise api.FFIError(
+                    "Unsupported type: %r.  Please look at "
+        "http://cffi.readthedocs.io/en/latest/cdef.html#ffi-cdef-limitations "
+                    "and file an issue if you think this type should really "
+                    "be supported." % (commontype,))
             result, quals = parser.parse_type_and_quals(cdecl)   # recursive
 
         assert isinstance(result, model.BaseTypeByIdentity)
diff --git a/lib_pypy/cffi/recompiler.py b/lib_pypy/cffi/recompiler.py
--- a/lib_pypy/cffi/recompiler.py
+++ b/lib_pypy/cffi/recompiler.py
@@ -814,7 +814,7 @@
             try:
                 if ftype.is_integer_type() or fbitsize >= 0:
                     # accept all integers, but complain on float or double
-                    prnt("  (void)((p->%s) << 1);  /* check that '%s.%s' is "
+                    prnt("  (void)((p->%s) | 0);  /* check that '%s.%s' is "
                          "an integer */" % (fname, cname, fname))
                     continue
                 # only accept exactly the type declared, except that '[]'
@@ -991,7 +991,7 @@
             prnt('static int %s(unsigned long long *o)' % funcname)
             prnt('{')
             prnt('  int n = (%s) <= 0;' % (name,))
-            prnt('  *o = (unsigned long long)((%s) << 0);'
+            prnt('  *o = (unsigned long long)((%s) | 0);'
                  '  /* check that %s is an integer */' % (name, name))
             if check_value is not None:
                 if check_value > 0:
@@ -1250,7 +1250,7 @@
 
     def _emit_bytecode_UnknownIntegerType(self, tp, index):
         s = ('_cffi_prim_int(sizeof(%s), (\n'
-             '           ((%s)-1) << 0 /* check that %s is an integer type */\n'
+             '           ((%s)-1) | 0 /* check that %s is an integer type */\n'
              '         ) <= 0)' % (tp.name, tp.name, tp.name))
         self.cffi_types[index] = CffiOp(OP_PRIMITIVE, s)
 
diff --git a/lib_pypy/pyrepl/simple_interact.py b/lib_pypy/pyrepl/simple_interact.py
--- a/lib_pypy/pyrepl/simple_interact.py
+++ b/lib_pypy/pyrepl/simple_interact.py
@@ -43,11 +43,13 @@
         return short
     return text
 
-def run_multiline_interactive_console(mainmodule=None):
+def run_multiline_interactive_console(mainmodule=None, future_flags=0):
     import code
     if mainmodule is None:
         import __main__ as mainmodule
     console = code.InteractiveConsole(mainmodule.__dict__, filename='<stdin>')
+    if future_flags:
+        console.compile.compiler.flags |= future_flags
 
     def more_lines(unicodetext):
         # ooh, look at the hack:
diff --git a/lib_pypy/syslog.py b/lib_pypy/syslog.py
--- a/lib_pypy/syslog.py
+++ b/lib_pypy/syslog.py
@@ -51,6 +51,8 @@
     # if log is not opened, open it now
     if not _S_log_open:
         openlog()
+    if isinstance(message, unicode):
+        message = str(message)
     lib.syslog(priority, "%s", message)
 
 @builtinify
diff --git a/pypy/__init__.py b/pypy/__init__.py
--- a/pypy/__init__.py
+++ b/pypy/__init__.py
@@ -1,4 +1,5 @@
-# Empty
+import os
+pypydir = os.path.realpath(os.path.dirname(__file__))
 
 # XXX Should be empty again, soon.
 # XXX hack for win64:
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -204,15 +204,6 @@
         BoolOption("withstrbuf", "use strings optimized for addition (ver 2)",
                    default=False),
 
-        BoolOption("withprebuiltchar",
-                   "use prebuilt single-character string objects",
-                   default=False),
-
-        BoolOption("sharesmallstr",
-                   "always reuse the prebuilt string objects "
-                   "(the empty string and potentially single-char strings)",
-                   default=False),
-
         BoolOption("withspecialisedtuple",
                    "use specialised tuples",
                    default=False),
@@ -222,39 +213,14 @@
                    default=False,
                    requires=[("objspace.honor__builtins__", False)]),
 
-        BoolOption("withmapdict",
-                   "make instances really small but slow without the JIT",
-                   default=False,
-                   requires=[("objspace.std.getattributeshortcut", True),
-                             ("objspace.std.withtypeversion", True),
-                       ]),
-
-        BoolOption("withrangelist",
-                   "enable special range list implementation that does not "
-                   "actually create the full list until the resulting "
-                   "list is mutated",
-                   default=False),
         BoolOption("withliststrategies",
                    "enable optimized ways to store lists of primitives ",
                    default=True),
 
-        BoolOption("withtypeversion",
-                   "version type objects when changing them",
-                   cmdline=None,
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
-
-        BoolOption("withmethodcache",
-                   "try to cache method lookups",
-                   default=False,
-                   requires=[("objspace.std.withtypeversion", True),
-                             ("translation.rweakref", True)]),
         BoolOption("withmethodcachecounter",
                    "try to cache methods and provide a counter in __pypy__. "
                    "for testing purposes only.",
-                   default=False,
-                   requires=[("objspace.std.withmethodcache", True)]),
+                   default=False),
         IntOption("methodcachesizeexp",
                   " 2 ** methodcachesizeexp is the size of the of the method cache ",
                   default=11),
@@ -265,22 +231,10 @@
         BoolOption("optimized_list_getitem",
                    "special case the 'list[integer]' expressions",
                    default=False),
-        BoolOption("getattributeshortcut",
-                   "track types that override __getattribute__",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
         BoolOption("newshortcut",
                    "cache and shortcut calling __new__ from builtin types",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
+                   default=False),
 
-        BoolOption("withidentitydict",
-                   "track types that override __hash__, __eq__ or __cmp__ and use a special dict strategy for those which do not",
-                   default=False,
-                   # weakrefs needed, because of get_subclasses()
-                   requires=[("translation.rweakref", True)]),
      ]),
 ])
 
@@ -296,15 +250,10 @@
     """
     # all the good optimizations for PyPy should be listed here
     if level in ['2', '3', 'jit']:
-        config.objspace.std.suggest(withrangelist=True)
-        config.objspace.std.suggest(withmethodcache=True)
-        config.objspace.std.suggest(withprebuiltchar=True)
         config.objspace.std.suggest(intshortcut=True)
         config.objspace.std.suggest(optimized_list_getitem=True)
-        config.objspace.std.suggest(getattributeshortcut=True)
         #config.objspace.std.suggest(newshortcut=True)
         config.objspace.std.suggest(withspecialisedtuple=True)
-        config.objspace.std.suggest(withidentitydict=True)
         #if not IS_64_BITS:
         #    config.objspace.std.suggest(withsmalllong=True)
 
@@ -317,16 +266,13 @@
     # memory-saving optimizations
     if level == 'mem':
         config.objspace.std.suggest(withprebuiltint=True)
-        config.objspace.std.suggest(withrangelist=True)
-        config.objspace.std.suggest(withprebuiltchar=True)
-        config.objspace.std.suggest(withmapdict=True)
+        config.objspace.std.suggest(withliststrategies=True)
         if not IS_64_BITS:
             config.objspace.std.suggest(withsmalllong=True)
 
     # extra optimizations with the JIT
     if level == 'jit':
         config.objspace.std.suggest(withcelldict=True)
-        config.objspace.std.suggest(withmapdict=True)
 
 
 def enable_allworkingmodules(config):
diff --git a/pypy/config/test/test_pypyoption.py b/pypy/config/test/test_pypyoption.py
--- a/pypy/config/test/test_pypyoption.py
+++ b/pypy/config/test/test_pypyoption.py
@@ -11,12 +11,6 @@
 
     assert conf.objspace.usemodules.gc
 
-    conf.objspace.std.withmapdict = True
-    assert conf.objspace.std.withtypeversion
-    conf = get_pypy_config()
-    conf.objspace.std.withtypeversion = False
-    py.test.raises(ConfigError, "conf.objspace.std.withmapdict = True")
-
 def test_conflicting_gcrootfinder():
     conf = get_pypy_config()
     conf.translation.gc = "boehm"
@@ -47,18 +41,10 @@
 def test_set_pypy_opt_level():
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '2')
-    assert conf.objspace.std.getattributeshortcut
+    assert conf.objspace.std.intshortcut
     conf = get_pypy_config()
     set_pypy_opt_level(conf, '0')
-    assert not conf.objspace.std.getattributeshortcut
-
-def test_rweakref_required():
-    conf = get_pypy_config()
-    conf.translation.rweakref = False
-    set_pypy_opt_level(conf, '3')
-
-    assert not conf.objspace.std.withtypeversion
-    assert not conf.objspace.std.withmethodcache
+    assert not conf.objspace.std.intshortcut
 
 def test_check_documentation():
     def check_file_exists(fn):
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -1,4 +1,4 @@
-import py, pytest, sys, os, textwrap
+import py, pytest, sys, textwrap
 from inspect import isclass
 
 # pytest settings
@@ -10,8 +10,6 @@
 #
 option = None
 
-pypydir = os.path.realpath(os.path.dirname(__file__))
-
 def braindead_deindent(self):
     """monkeypatch that wont end up doing stupid in the python tokenizer"""
     text = '\n'.join(self.lines)
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -70,9 +70,6 @@
 bz2
     libbz2
 
-lzma (PyPy3 only)
-    liblzma
-
 pyexpat
     libexpat1
 
@@ -98,19 +95,24 @@
 tk
     tk-dev
 
+lzma (PyPy3 only)
+    liblzma
+
+To run untranslated tests, you need the Boehm garbage collector libgc.
+
 On Debian, this is the command to install all build-time dependencies::
 
     apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
     libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
-    tk-dev
+    tk-dev libgc-dev liblzma-dev
 
 For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
 
 On Fedora::
 
-    yum install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
-    lib-sqlite3-devel ncurses-devel expat-devel openssl-devel
-    (XXX plus the Febora version of libgdbm-dev and tk-dev)
+    dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
+    lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
+    gdbm-devel
 
 For the optional lzma module on PyPy3 you will also need ``xz-devel``.
 
diff --git a/pypy/doc/coding-guide.rst b/pypy/doc/coding-guide.rst
--- a/pypy/doc/coding-guide.rst
+++ b/pypy/doc/coding-guide.rst
@@ -266,7 +266,13 @@
 
 To raise an application-level exception::
 
-    raise OperationError(space.w_XxxError, space.wrap("message"))
+    from pypy.interpreter.error import oefmt
+
+    raise oefmt(space.w_XxxError, "message")
+
+    raise oefmt(space.w_XxxError, "file '%s' not found in '%s'", filename, dir)
+
+    raise oefmt(space.w_XxxError, "file descriptor '%d' not open", fd)
 
 To catch a specific application-level exception::
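
(The hunk is cut here; purely as a hedged illustration, the conventional
catching pattern in PyPy's interpreter code looks roughly like the sketch
below. The guarded call and the handler body are assumptions.)

    from pypy.interpreter.error import OperationError

    try:
        w_result = space.call_function(w_callable)
    except OperationError as e:
        if not e.match(space, space.w_XxxError):
            raise                      # not the exception we wanted
        w_result = space.w_None        # handle the application-level error
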
 
diff --git a/pypy/doc/config/objspace.std.getattributeshortcut.txt b/pypy/doc/config/objspace.std.getattributeshortcut.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.getattributeshortcut.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-Performance only: track types that override __getattribute__.
diff --git a/pypy/doc/config/objspace.std.methodcachesizeexp.txt b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
--- a/pypy/doc/config/objspace.std.methodcachesizeexp.txt
+++ b/pypy/doc/config/objspace.std.methodcachesizeexp.txt
@@ -1,1 +1,1 @@
-Set the cache size (number of entries) for :config:`objspace.std.withmethodcache`.
+Set the cache size (number of entries) for the method cache.
diff --git a/pypy/doc/config/objspace.std.withidentitydict.txt b/pypy/doc/config/objspace.std.withidentitydict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withidentitydict.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-=============================
-objspace.std.withidentitydict
-=============================
-
-* **name:** withidentitydict
-
-* **description:** enable a dictionary strategy for "by identity" comparisons
-
-* **command-line:** --objspace-std-withidentitydict
-
-* **command-line for negation:** --no-objspace-std-withidentitydict
-
-* **option type:** boolean option
-
-* **default:** True
-
-
-Enable a dictionary strategy specialized for instances of classes which
-compares "by identity", which is the default unless you override ``__hash__``,
-``__eq__`` or ``__cmp__``.  This strategy will be used only with new-style
-classes.
diff --git a/pypy/doc/config/objspace.std.withmapdict.txt b/pypy/doc/config/objspace.std.withmapdict.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmapdict.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Enable the new version of "sharing dictionaries".
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#sharing-dicts
diff --git a/pypy/doc/config/objspace.std.withmethodcache.txt b/pypy/doc/config/objspace.std.withmethodcache.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withmethodcache.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Enable method caching. See the section "Method Caching" in `Standard
-Interpreter Optimizations <../interpreter-optimizations.html#method-caching>`__.
diff --git a/pypy/doc/config/objspace.std.withmethodcachecounter.txt b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
--- a/pypy/doc/config/objspace.std.withmethodcachecounter.txt
+++ b/pypy/doc/config/objspace.std.withmethodcachecounter.txt
@@ -1,1 +1,1 @@
-Testing/debug option for :config:`objspace.std.withmethodcache`.
+Testing/debug option for the method cache.
diff --git a/pypy/doc/config/objspace.std.withprebuiltchar.txt b/pypy/doc/config/objspace.std.withprebuiltchar.txt
deleted file mode 100644
diff --git a/pypy/doc/config/objspace.std.withrangelist.txt b/pypy/doc/config/objspace.std.withrangelist.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withrangelist.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-Enable "range list" objects. They are an additional implementation of the Python
-``list`` type, indistinguishable for the normal user. Whenever the ``range``
-builtin is called, an range list is returned. As long as this list is not
-mutated (and for example only iterated over), it uses only enough memory to
-store the start, stop and step of the range. This makes using ``range`` as
-efficient as ``xrange``, as long as the result is only used in a ``for``-loop.
-
-See the section in `Standard Interpreter Optimizations`_ for more details.
-
-.. _`Standard Interpreter Optimizations`: ../interpreter-optimizations.html#range-lists
-
diff --git a/pypy/doc/config/objspace.std.withtypeversion.txt b/pypy/doc/config/objspace.std.withtypeversion.txt
deleted file mode 100644
--- a/pypy/doc/config/objspace.std.withtypeversion.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-This (mostly internal) option enables "type versions": Every type object gets an
-(only internally visible) version that is updated when the type's dict is
-changed. This is e.g. used for invalidating caches. It does not make sense to
-enable this option alone.
-
-.. internal
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -12,9 +12,9 @@
 The work on the cling backend has so far been done only for CPython, but
 bringing it to PyPy is a lot less work than developing it in the first place.
 
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
-.. _CINT: http://root.cern.ch/drupal/content/cint
-.. _cling: http://root.cern.ch/drupal/content/cling
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
+.. _CINT: https://root.cern.ch/introduction-cint
+.. _cling: https://root.cern.ch/cling
 .. _llvm: http://llvm.org/
 .. _clang: http://clang.llvm.org/
 
@@ -283,7 +283,8 @@
 core reflection set, but for the moment assume we want to have it in the
 reflection library that we are building for this example.
 
-The ``genreflex`` script can be steered using a so-called `selection file`_,
+The ``genreflex`` script can be steered using a so-called `selection file`_
+(see "Generating Reflex Dictionaries")
 which is a simple XML file specifying, either explicitly or by using a
 pattern, which classes, variables, namespaces, etc. to select from the given
 header file.
@@ -305,7 +306,7 @@
         <function name="BaseFactory" />
     </lcgdict>
 
-.. _selection file: http://root.cern.ch/drupal/content/generating-reflex-dictionaries
+.. _selection file: https://root.cern.ch/how/how-use-reflex
 
 Now the reflection info can be generated and compiled::
 
@@ -811,7 +812,7 @@
 immediately if you add ``$ROOTSYS/lib`` to the ``PYTHONPATH`` environment
 variable.
 
-.. _PyROOT: http://root.cern.ch/drupal/content/pyroot
+.. _PyROOT: https://root.cern.ch/pyroot
 
 There are a couple of minor differences between PyCintex and cppyy, most to do
 with naming.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -387,6 +387,14 @@
   wrappers.  On PyPy we can't tell the difference, so
   ``ismethod([].__add__) == ismethod(list.__add__) == True``.
 
+* in CPython, the built-in types have attributes that can be
+  implemented in various ways.  Depending on the way, if you try to
+  write to (or delete) a read-only (or undeletable) attribute, you get
+  either a ``TypeError`` or an ``AttributeError``.  PyPy tries to
+  strike some middle ground between full consistency and full
+  compatibility here.  This means that a few corner cases don't raise
+  the same exception, like ``del (lambda:None).__closure__``.
+
 * in pure Python, if you write ``class A(object): def f(self): pass``
   and have a subclass ``B`` which doesn't override ``f()``, then
   ``B.f(x)`` still checks that ``x`` is an instance of ``B``.  In
diff --git a/pypy/doc/dir-reference.rst b/pypy/doc/dir-reference.rst
--- a/pypy/doc/dir-reference.rst
+++ b/pypy/doc/dir-reference.rst
@@ -21,7 +21,7 @@
 
 :source:`pypy/doc/discussion/`            drafts of ideas and documentation
 
-:source:`pypy/goal/`                      our :ref:`main PyPy-translation scripts <translate-pypy>`
+:source:`pypy/goal/`                      our main PyPy-translation scripts
                                           live here
 
 :source:`pypy/interpreter/`               :doc:`bytecode interpreter <interpreter>` and related objects
diff --git a/pypy/doc/discussion/finalizer-order.rst b/pypy/doc/discussion/finalizer-order.rst
--- a/pypy/doc/discussion/finalizer-order.rst
+++ b/pypy/doc/discussion/finalizer-order.rst
@@ -1,19 +1,127 @@
-.. XXX armin, what do we do with this?
+Ordering finalizers in the MiniMark GC
+======================================
 
 
-Ordering finalizers in the SemiSpace GC
-=======================================
+RPython interface
+-----------------
 
-Goal
-----
+In RPython programs like PyPy, we need a fine-grained method of
+controlling the RPython- as well as the app-level ``__del__()``.  To
+make it possible, the RPython interface is now the following one (from
+May 2016):
 
-After a collection, the SemiSpace GC should call the finalizers on
+* RPython objects can have ``__del__()``.  These are called
+  immediately by the GC when the last reference to the object goes
+  away, like in CPython.  However, the long-term goal is that all
+  ``__del__()`` methods should only contain simple enough code.  If
+  they do, we call them "destructors".  They can't use operations that
+  would resurrect the object, for example.  Use the decorator
+  ``@rgc.must_be_light_finalizer`` to ensure they are destructors.
+
+* RPython-level ``__del__()`` that are not passing the destructor test
+  are supported for backward compatibility, but deprecated.  The rest
+  of this document assumes that ``__del__()`` are all destructors.
+
+* For any more advanced usage --- in particular for any app-level
+  object with a __del__ --- we don't use the RPython-level
+  ``__del__()`` method.  Instead we use
+  ``rgc.FinalizerController.register_finalizer()``.  This allows us to
+  attach a finalizer method to the object, giving more control over
+  the ordering than just an RPython ``__del__()``.
+
+We try to consistently call ``__del__()`` a destructor, to distinguish
+it from a finalizer.  A finalizer runs earlier, and in topological
+order; care must be taken that the object might still be reachable at
+this point if we're clever enough.  A destructor on the other hand runs
+last; nothing can be done with the object any more, and the GC frees it
+immediately.
+
+
+Destructors
+-----------
+
+A destructor is an RPython ``__del__()`` method that is called directly
+by the GC when it is about to free the memory.  Intended for objects
+that just need to free an extra block of raw memory.
+
+There are restrictions on the kind of code you can put in ``__del__()``,
+including all other functions called by it.  These restrictions are
+checked.  In particular you cannot access fields containing GC objects.
+Right now you can't call any external C function either.
+
+Destructors are called precisely when the GC frees the memory of the
+object.  As long as the object exists (even in some finalizer queue or
+anywhere), its destructor is not called.
+
+
+Register_finalizer
+------------------
+
+The interface for full finalizers is made with PyPy in mind, but should
+be generally useful.
+
+The idea is that you subclass the ``rgc.FinalizerQueue`` class:
+
+* You must give a class-level attribute ``base_class``, which is the
+  base class of all instances with a finalizer.  (If you need
+  finalizers on several unrelated classes, you need several unrelated
+  ``FinalizerQueue`` subclasses.)
+
+* You override the ``finalizer_trigger()`` method; see below.
+
+Then you create one global (or space-specific) instance of this
+subclass; call it ``fin``.  At runtime, you call
+``fin.register_finalizer(obj)`` for every instance ``obj`` that needs
+a finalizer.  Each ``obj`` must be an instance of ``fin.base_class``,
+but not every such instance needs to have a finalizer registered;
+typically we try to register a finalizer on as few objects as possible
+(e.g. only if it is an object which has an app-level ``__del__()``
+method).
+
+After a major collection, the GC finds all objects ``obj`` on which a
+finalizer was registered and which are unreachable, and mark them as
+reachable again, as well as all objects they depend on.  It then picks
+a topological ordering (breaking cycles randomly, if any) and enqueues
+the objects and their registered finalizer functions in that order, in
+a queue specific to the prebuilt ``fin`` instance.  Finally, when the
+major collection is done, it calls ``fin.finalizer_trigger()``.
+
+This method ``finalizer_trigger()`` can either do some work directly,
+or delay it to be done later (e.g. between two bytecodes).  If it does
+work directly, note that it cannot (directly or indirectly) cause the
+GIL to be released.
+
+To find the queued items, call ``fin.next_dead()`` repeatedly.  It
+returns the next queued item, or ``None`` when the queue is empty.
+
+In theory, it would kind of work if you use several different
+``FinalizerQueue`` instances for objects of the same class, and
+(always in theory) the same ``obj`` could be registered several times
+in the same queue, or in several queues.  This is not tested though.
+For now the untranslated emulation does not support registering the
+same object several times.
+
+Note that the Boehm garbage collector, used in ``rpython -O0``,
+completely ignores ``register_finalizer()``.
+
+
+Ordering of finalizers
+----------------------
+
+After a collection, the MiniMark GC should call the finalizers on
 *some* of the objects that have one and that have become unreachable.
 Basically, if there is a reference chain from an object a to an object b
 then it should not call the finalizer for b immediately, but just keep b
 alive and try again to call its finalizer after the next collection.
 
-This basic idea fails when there are cycles.  It's not a good idea to
+(Note that this creates rare but annoying issues as soon as the program
+creates chains of objects with finalizers more quickly than the rate at
+which major collections go (which is very slow).  In August 2013 we tried
+instead to call all finalizers of all objects found unreachable at a major
+collection.  That branch, ``gc-del``, was never merged.  It is still
+unclear what the real consequences would be on programs in the wild.)
+
+The basic idea fails in the presence of cycles.  It's not a good idea to
 keep the objects alive forever or to never call any of the finalizers.
 The model we came up with is that in this case, we could just call the
 finalizer of one of the objects in the cycle -- but only, of course, if
@@ -33,6 +141,7 @@
         detach the finalizer (so that it's not called more than once)
         call the finalizer
 
+
 Algorithm
 ---------
 
@@ -136,28 +245,8 @@
 that doesn't change the state of an object, we don't follow its children
 recursively.
 
-In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
-the 4 states with a single extra bit in the header:
-
-      =====  =============  ========  ====================
-      state  is_forwarded?  bit set?  bit set in the copy?
-      =====  =============  ========  ====================
-        0      no             no        n/a
-        1      no             yes       n/a
-        2      yes            yes       yes
-        3      yes          whatever    no
-      =====  =============  ========  ====================
-
-So the loop above that does the transition from state 1 to state 2 is
-really just a copy(x) followed by scan_copied().  We must also clear the
-bit in the copy at the end, to clean up before the next collection
-(which means recursively bumping the state from 2 to 3 in the final
-loop).
-
-In the MiniMark GC, the objects don't move (apart from when they are
-copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
-objects that survive, so we can also have a single extra bit for
-finalizers:
+In practice, in the MiniMark GCs, we can encode
+the 4 states with a combination of two bits in the header:
 
       =====  ==============  ============================
       state  GCFLAG_VISITED  GCFLAG_FINALIZATION_ORDERING
@@ -167,3 +256,8 @@
         2        yes             yes
         3        yes             no
       =====  ==============  ============================
+
+So the loop above that does the transition from state 1 to state 2 is
+really just a recursive visit.  We must also clear the
+FINALIZATION_ORDERING bit at the end (state 2 to state 3) to clean up
+before the next collection.
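
(To make the interface described above concrete, a hedged sketch follows.
Only rgc.FinalizerQueue, base_class, finalizer_trigger(),
register_finalizer() and next_dead() come from the text; the class names,
the import path and doing the work directly in the trigger are illustrative
assumptions.)

    from rpython.rlib import rgc

    class W_MyObject(object):
        def __init__(self, name):
            self.name = name

    class MyFinalizerQueue(rgc.FinalizerQueue):
        base_class = W_MyObject          # every registered object is one of these

        def finalizer_trigger(self):
            # called by the GC after a major collection; drain the queue here,
            # or remember to do it later (e.g. between two bytecodes)
            while True:
                obj = self.next_dead()
                if obj is None:
                    break
                print 'finalizing', obj.name

    fin = MyFinalizerQueue()

    def make_object(name):
        obj = W_MyObject(name)
        fin.register_finalizer(obj)      # only objects that really need it
        return obj
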
diff --git a/pypy/doc/discussions.rst b/pypy/doc/discussions.rst
--- a/pypy/doc/discussions.rst
+++ b/pypy/doc/discussions.rst
@@ -13,3 +13,4 @@
    discussion/improve-rpython
    discussion/ctypes-implementation
    discussion/jit-profiler
+   discussion/rawrefcount
diff --git a/pypy/doc/extending.rst b/pypy/doc/extending.rst
--- a/pypy/doc/extending.rst
+++ b/pypy/doc/extending.rst
@@ -79,7 +79,7 @@
 :doc:`Full details <cppyy>` are `available here <cppyy>`.
 
 .. _installed separately: http://cern.ch/wlav/reflex-2013-08-14.tar.bz2
-.. _Reflex: http://root.cern.ch/drupal/content/reflex
+.. _Reflex: https://root.cern.ch/how/how-use-reflex
 
 
 RPython Mixed Modules
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -106,20 +106,33 @@
 For information on which third party extensions work (or do not work)
 with PyPy see the `compatibility wiki`_.
 
+For more information about how we manage refcounting semantics see
+rawrefcount_.
+
 .. _compatibility wiki: https://bitbucket.org/pypy/compatibility/wiki/Home
 .. _cffi: http://cffi.readthedocs.org/
+.. _rawrefcount: discussion/rawrefcount.html   
 
 
 On which platforms does PyPy run?
 ---------------------------------
 
-PyPy is regularly and extensively tested on Linux machines. It mostly
+PyPy currently supports:
+
+  * **x86** machines on most common operating systems
+    (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD),
+  
+  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+  
+  * big- and little-endian variants of **PPC64** running Linux,
+
+  * **s390x** running Linux
+
+PyPy is regularly and extensively tested on Linux machines. It
 works on Mac and Windows: it is tested there, but most of us are running
-Linux so fixes may depend on 3rd-party contributions.  PyPy's JIT
-works on x86 (32-bit or 64-bit) and on ARM (ARMv6 or ARMv7).
-Support for POWER (64-bit) is stalled at the moment.
+Linux so fixes may depend on 3rd-party contributions.
 
-To bootstrap from sources, PyPy can use either CPython (2.6 or 2.7) or
+To bootstrap from sources, PyPy can use either CPython 2.7 or
 another (e.g. older) PyPy.  Cross-translation is not really supported:
 e.g. to build a 32-bit PyPy, you need to have a 32-bit environment.
 Cross-translation is only explicitly supported between a 32-bit Intel
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
 
+   release-5.1.1.rst
    release-5.1.0.rst
    release-5.0.1.rst
    release-5.0.0.rst
@@ -48,6 +49,13 @@
    release-0.6
 
 
+CPython 3.3 compatible versions
+-------------------------------
+
+.. toctree::
+
+   release-pypy3.3-v5.2-alpha1.rst
+
 CPython 3.2 compatible versions
 -------------------------------
 
diff --git a/pypy/doc/interpreter-optimizations.rst b/pypy/doc/interpreter-optimizations.rst
--- a/pypy/doc/interpreter-optimizations.rst
+++ b/pypy/doc/interpreter-optimizations.rst
@@ -62,29 +62,37 @@
 Dictionary Optimizations
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
-Multi-Dicts
-+++++++++++
+Dict Strategies
+++++++++++++++++
 
-Multi-dicts are a special implementation of dictionaries.  It became clear that
-it is very useful to *change* the internal representation of an object during
-its lifetime.  Multi-dicts are a general way to do that for dictionaries: they
-provide generic support for the switching of internal representations for
-dicts.
+Dict strategies are an implementation approach for dictionaries (and lists)
+that makes it possible to use a specialized representation of the dictionary's
+data, while still being able to switch back to a general representation should
+that become necessary later.
 
-If you just enable multi-dicts, special representations for empty dictionaries,
-for string-keyed dictionaries. In addition there are more specialized dictionary
-implementations for various purposes (see below).
+Dict strategies are always enabled; by default there are special strategies for
+dicts with just string keys, just unicode keys and just integer keys. If one of
+those specialized strategies is used, then dict lookup can use much faster
+hashing and comparison for the dict keys. There is of course also a strategy
+for general keys.
 
-This is now the default implementation of dictionaries in the Python interpreter.
 
+Identity Dicts
++++++++++++++++
 
-Sharing Dicts
+We also have a strategy specialized for keys that are instances of classes
+which compares "by identity", which is the default unless you override
+``__hash__``, ``__eq__`` or ``__cmp__``.  This strategy will be used only with
+new-style classes.
+
+
+Map Dicts
 +++++++++++++
 
-Sharing dictionaries are a special representation used together with multidicts.
-This dict representation is used only for instance dictionaries and tries to
-make instance dictionaries use less memory (in fact, in the ideal case the
-memory behaviour should be mostly like that of using __slots__).
+Map dictionaries are a special representation used together with dict strategies.
+This dict strategy is used only for instance dictionaries and tries to
+make instance dictionaries use less memory (in fact, usually memory behaviour
+should be mostly like that of using ``__slots__``).
 
 The idea is the following: Most instances of the same class have very similar
 attributes, and are even adding these keys to the dictionary in the same order
@@ -95,8 +103,6 @@
 dicts:
 the representation of the instance dict contains only a list of values.
 
-A more advanced version of sharing dicts, called *map dicts,* is available
-with the :config:`objspace.std.withmapdict` option.
 
 
 List Optimizations
@@ -114,8 +120,8 @@
 created. This gives the memory and speed behaviour of ``xrange`` and the generality
 of use of ``range``, and makes ``xrange`` essentially useless.
 
-You can enable this feature with the :config:`objspace.std.withrangelist`
-option.
+This feature is enabled by default as part of the
+:config:`objspace.std.withliststrategies` option.
 
 
 User Class Optimizations
@@ -133,8 +139,7 @@
 base classes is changed). On subsequent lookups the cached version can be used,
 as long as the instance did not shadow any of its classes attributes.
 
-You can enable this feature with the :config:`objspace.std.withmethodcache`
-option.
+This feature is enabled by default.
 
 
 Interpreter Optimizations
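
(A small application-level illustration, written for this note, of the
"by identity" default that the Identity Dicts strategy exploits: instances
that do not override __hash__/__eq__ stay distinct as dict keys, while
instances that do override them can collapse.)

    class Plain(object):
        pass

    class Collapsing(object):
        def __hash__(self):
            return 1
        def __eq__(self, other):
            return isinstance(other, Collapsing)

    d = {Plain(): 1, Plain(): 2}
    e = {Collapsing(): 1, Collapsing(): 2}
    assert len(d) == 2    # compared by identity: two separate keys
    assert len(e) == 1    # user-defined equality: the keys collapse
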
diff --git a/pypy/doc/introduction.rst b/pypy/doc/introduction.rst
--- a/pypy/doc/introduction.rst
+++ b/pypy/doc/introduction.rst
@@ -1,16 +1,22 @@
 What is PyPy?
 =============
 
-In common parlance, PyPy has been used to mean two things.  The first is the
-:ref:`RPython translation toolchain <rpython:index>`, which is a framework for generating
-dynamic programming language implementations.  And the second is one
-particular implementation that is so generated --
-an implementation of the Python_ programming language written in
-Python itself.  It is designed to be flexible and easy to experiment with.
+Historically, PyPy has been used to mean two things.  The first is the
+:ref:`RPython translation toolchain <rpython:index>` for generating
+interpreters for dynamic programming languages.  And the second is one
+particular implementation of Python_ produced with it. Because RPython
+uses the same syntax as Python, this generated version became known as
+Python interpreter written in Python. It is designed to be flexible and
+easy to experiment with.
 
-This double usage has proven to be confusing, and we are trying to move
-away from using the word PyPy to mean both things.  From now on we will
-try to use PyPy to only mean the Python implementation, and say the
+To make it more clear, we start with source code written in RPython,
+apply the RPython translation toolchain, and end up with PyPy as a
+binary executable. This executable is the Python interpreter.
+
+This double usage has proven to be confusing, so we've moved away from using
+the word PyPy to mean both the toolchain and the generated interpreter.  Now we
+use the word PyPy to refer to the Python implementation, and explicitly
+mention
 :ref:`RPython translation toolchain <rpython:index>` when we mean the framework.
 
 Some older documents, presentations, papers and videos will still have the old
diff --git a/pypy/doc/release-5.1.0.rst b/pypy/doc/release-5.1.0.rst
--- a/pypy/doc/release-5.1.0.rst
+++ b/pypy/doc/release-5.1.0.rst
@@ -3,10 +3,17 @@
 ========
 
 We have released PyPy 5.1, about a month after PyPy 5.0.
-We encourage all users of PyPy to update to this version. Apart from the usual
-bug fixes, there is an ongoing effort to improve the warmup time and memory
-usage of JIT-related metadata, and we now fully support the IBM s390x 
-architecture.
+
+This release includes further improvements to warmup time and memory
+requirements. We have seen about a 20% reduction in memory requirements and
+up to a 30% improvement in warmup time; more details are in the `blog post`_.
+
+We also now have `full support for the IBM s390x`_. Since this support is in
+`RPython`_, any dynamic language written using RPython, like PyPy, will
+automagically be supported on that architecture.
+
+We updated cffi_ to 1.6, and continue to improve support for the wider
+Python ecosystem using the PyPy interpreter.
 
 You can download the PyPy 5.1 release here:
 
@@ -26,6 +33,9 @@
 .. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
 .. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
 .. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`full support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
 
 What is PyPy?
 =============
@@ -46,7 +56,7 @@
   
   * big- and little-endian variants of **PPC64** running Linux,
 
-  * **s960x** running Linux
+  * **s390x** running Linux
 
 .. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
 .. _`dynamic languages`: http://pypyjs.org
@@ -74,6 +84,8 @@
   * Fix a corner case in the JIT
 
   * Fix edge cases in the cpyext refcounting-compatible semantics
+    (more work on cpyext compatibility is coming in the ``cpyext-ext``
+    branch, but isn't ready yet)
 
   * Try harder to not emit NEON instructions on ARM processors without NEON
     support
@@ -92,11 +104,17 @@
 
   * Fix sandbox startup (a regression in 5.0)
 
+  * Fix possible segfault for classes with mangled mro or __metaclass__
+
+  * Fix isinstance(deque(), Hashable) on the pure python deque
+
+  * Fix an issue with forkpty()
+
   * Issues reported with our previous release were resolved_ after reports from users on
     our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
     #pypy
 
-* Numpy:
+* Numpy_:
 
   * Implemented numpy.where for a single argument
 
@@ -108,6 +126,8 @@
     functions exported from libpypy.so are declared in pypy_numpy.h, which is
     included only when building our fork of numpy
 
+  * Add broadcast
+
 * Performance improvements:
 
   * Improve str.endswith([tuple]) and str.startswith([tuple]) to allow JITting
@@ -119,14 +139,18 @@
   * Remove the forced minor collection that occurs when rewriting the
     assembler at the start of the JIT backend
 
+  * Port the resource module to cffi
+
 * Internal refactorings:
 
   * Use a simpler logger to speed up translation
 
   * Drop vestiges of Python 2.5 support in testing
 
+  * Update RPython functions with the ones needed for py3k
+
 .. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.0.0.html
-.. _`blog post`: http://morepypy.blogspot.com/2016/02/c-api-support-update.html
+.. _Numpy: https://bitbucket.org/pypy/numpy
 
 Please update, and continue to help us make PyPy better.
 
diff --git a/pypy/doc/release-5.1.1.rst b/pypy/doc/release-5.1.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-5.1.1.rst
@@ -0,0 +1,45 @@
+==========
+PyPy 5.1.1
+==========
+
+We have released PyPy 5.1.1, a bugfix release for PyPy 5.1, due to a
+regression_ in installing third-party packages dependent on numpy (using our
+numpy fork available at https://bitbucket.org/pypy/numpy ).
+
+Thanks to those who reported the issue. We also fixed a regression in
+translating PyPy which increased the memory required to translate. The
+improvement will be noticed by downstream packagers and those who translate
+rather than download pre-built binaries.
+
+.. _regression: https://bitbucket.org/pypy/pypy/issues/2282
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+  * **x86** machines on most common operating systems
+    (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+  * big- and little-endian variants of **PPC64** running Linux,
+
+  * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/release-pypy2.7-v5.3.0.rst b/pypy/doc/release-pypy2.7-v5.3.0.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-pypy2.7-v5.3.0.rst
@@ -0,0 +1,194 @@
+============
+PyPy2.7 v5.3
+============
+
+We have released PyPy2.7 v5.3, about six weeks after PyPy 5.1.
+This release includes further improvements to the C-API compatibility layer,
+which we call cpyext. In addition to complete support for lxml, we now pass
+most (more than 90%) of the upstream numpy test suite, and much of SciPy is
+supported as well.
+
+We also improved the speed of ... and ...
+
+We updated cffi_ to ...
+
+You can download the PyPy2.7 v5.3 release here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project.
+
+We would also like to thank our contributors and
+encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `RPython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_
+with making RPython's JIT even better.
+
+.. _`PyPy`: http://doc.pypy.org
+.. _`RPython`: https://rpython.readthedocs.org
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+.. _`numpy`: https://bitbucket.org/pypy/numpy
+.. _cffi: https://cffi.readthedocs.org
+.. _`fully support for the IBM s390x`: http://morepypy.blogspot.com/2016/04/pypy-enterprise-edition.html
+.. _`blog post`: http://morepypy.blogspot.com/2016/04/warmup-improvements-more-efficient.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports: 
+
+  * **x86** machines on most common operating systems
+    (Linux 32/64 bits, Mac OS X 64 bits, Windows 32 bits, OpenBSD, FreeBSD)
+  
+  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+  
+  * big- and little-endian variants of **PPC64** running Linux,
+
+  * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Other Highlights (since 5.1 released in April 2016)
+=========================================================
+
+* New features:
+
+  * Merge a major expansion of the C-API support in cpyext; here are some of
+    the highlights:
+
+      - allow c-snippet tests to be run with -A so we can verify we are compatible
+      - fix many edge cases exposed by fixing tests to run with -A
+      - issequence() logic matches cpython
+      - make PyStringObject and PyUnicodeObject field names compatible with cpython
+      - add preliminary support for PyDateTime_*
+      - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+        PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+        PyAnySet_CheckExact, PyUnicode_Concat, PyDateTime_TZInfo
+      - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+        primitives; also find a case where CPython allows thread creation
+        before PyEval_InitThreads is run, and disallow it on PyPy
+      - create a PyObject-specific list strategy
+      - rewrite slot assignment for typeobjects
+      - improve tracking of PyObject to rpython object mapping
+      - support tp_as_{number, sequence, mapping, buffer} slots
+      - support ByteArrayObject via the new resizable_list_supporting_raw_ptr
+      - implement PyList_SET_ITEM with CPython's behavior, instead of SetItem's
+      - fix the signature of PyUFunc_FromFuncAndDataAndSignature
+      - implement many PyWhatever_FOO() as a macro taking a `void *`
+
+  * CPyExt tweak: instead of "GIL not held when a CPython C extension module
+    calls PyXxx", we now silently acquire/release the GIL.  Helps with
+    CPython C extension modules that call some PyXxx() functions without
+    holding the GIL (arguably, they are theoretically buggy).
+
+  * Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+    It is a more flexible way to make RPython finalizers. Use this mechanism to
+    clean up handling of ``__del__`` methods, fixing issue #2287
+
+  * Generalize cpyext old-style buffers to more than just str/buffer, add
+    support for mmap
+
+  * Support command line -v to trace import statements
+
+  * Add rposix functions for PyPy3.3 support
+
+  * Give super an __init__ and a simple __new__ for CPython compatibility
+
+  * Revive traceviewer, a tool to use pygame to view traces
+
+  * Update to cffi/847bbc0297f8 which improves help() on cffi objects
+
+* Bug Fixes
+
+  * Fix issue #2277: only special-case two exact lists in zip(), not list
+    subclasses, because an overridden __iter__() should (probably) be called;
+    see the short example at the end of this list
+
+  * Fix issue #2226: another tweak in the incremental GC; this should ensure
+    that progress in the major GC occurs quickly enough in all cases.
+
+  * Clarify and refactor documentation on http://doc.pypy.org
+
+  * Use "must be unicode, not %T" in unicodedata TypeErrors.
+
+  * Manually reset sys.settrace() and sys.setprofile() when we're done running.
+    This is not exactly what CPython does, but if we get an exception, unlike
+    CPython, we call functions from the 'traceback' module, and these would
+    call the trace/profile function again.  That's unexpected and can lead
+    to more crashes at this point.
+
+  * Use the appropriate tp_dealloc on a subclass of a builtin type, and call
+    tp_new for a Python subclass of a C-API type
+
+  * Fix issue #2285 - rare vmprof segfaults on OS X
+
+  * Fix issue #2172, where a test specified an invalid parameter to mmap on PowerPC
+
+  * Fix issue #2311 - grab the `__future__` flags imported in the main script, in
+    `-c`, or in `PYTHON_STARTUP`, and expose them to the `-i` console
+
+  * Issues reported with our previous release were resolved_ after reports from users on
+    our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
+    #pypy
+
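+  As a minimal illustration of why the zip() special case must stay limited to
+  exact lists (Python 2 semantics, where zip() returns a list; the ``L``
+  subclass here is hypothetical, for demonstration only)::
+
+      class L(list):
+          def __iter__(self):
+              # a subclass may legitimately customize iteration
+              return iter(['overridden'])
+
+      # zip() must honour the overridden __iter__(), as CPython does
+      assert zip(L([1, 2]), L([3, 4])) == [('overridden', 'overridden')]
+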
+* Numpy_:
+
+  * Implement ufunc.outer on numpypy
+
+  * Move PyPy-specific numpy headers to a subdirectory (also changed pypy/numpy
+    accordingly)
+
+* Performance improvements:
+
+  * Use bitstrings to compress lists of descriptors that are attached to an
+    EffectInfo
+
+  * Remove most of the _ovf, _zer and _val operations from RPython.  Kills
+    quite some code internally, and allows the JIT to do better
+    optimizations: for example, app-level code like ``x / 2`` or ``x % 2``
+    can now be turned into ``x >> 1`` or ``x & 1``, even if x is possibly
+    negative.
+
+  * Copy CPython's 'optimization': ignore __iter__ etc. for `f(**dict_subclass())`
+
+  * Use the __builtin_add_overflow built-ins if they are available
+
+  * Rework the way registers are moved/spilled in before_call()
+
+* Internal refactorings:
+
+  * Refactor code to better support Python3-compatible syntax
+
+  * Document and refactor OperationError -> oefmt
+
+  * Reduce the size of generated C sources during translation by
+    eliminating many, many unused struct declarations (Issue #2281)
+
+  * Remove a number of translation-time options that were not tested and
+    never used. Also fix a performance bug in the method cache
+
+  * Reduce the size of generated code by using the same function objects in
+    all generated subclasses
+
+  * Share cpyext Py* function wrappers according to the signature, shrinking the
+    translated libpypy.so by about 
+
+  * Compile c snippets with -Werror, and fix warnings it exposed
+
+.. _resolved: http://doc.pypy.org/en/latest/whatsnew-5.3.0.html
+.. _Numpy: https://bitbucket.org/pypy/numpy
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/release-pypy3.3-v5.2-alpha1.rst b/pypy/doc/release-pypy3.3-v5.2-alpha1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-pypy3.3-v5.2-alpha1.rst
@@ -0,0 +1,69 @@
+===================
+PyPy3 v5.2 alpha 1
+===================
+
+We're pleased to announce the first alpha release of PyPy3.3 v5.2. This is the
+first release of PyPy which targets Python 3.3 (3.3.5) compatibility.
+
+We would like to thank all of the people who donated_ to the `py3k proposal`_
+for supporting the work that went into this and future releases.
+
+You can download the PyPy3.3 v5.2 alpha 1 release here:
+
+    http://pypy.org/download.html#python-3-3-5-compatible-pypy3-3-v5-2
+
+Highlights
+==========
+
+* Python 3.3.5 support!
+
+  - Being an early alpha release, there are some `missing features`_ such as a
+    `PEP 393-like space efficient string representation`_ and `known issues`_
+    including performance regressions (e.g. issue `#2305`_). The focus for this
+    release has been updating to 3.3 compatibility. Windows is also not yet
+    supported.
+
+* `ensurepip`_ is also included (it's only included in CPython 3 >= 3.4).
+
+What is PyPy?
+==============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7.10 and one day 3.3.5. It's fast due to its integrated tracing JIT
+compiler.
+
+We also welcome developers of other `dynamic languages`_ to see what RPython
+can do for them.
+
+This release supports:
+
+  * **x86** machines on most common operating systems except Windows
+    (Linux 32/64, Mac OS X 64, OpenBSD, FreeBSD),
+
+  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+  * big- and little-endian variants of **PPC64** running Linux,
+
+  * **s390x** running Linux
+
+Please try it out and let us know what you think. We welcome feedback, we know
+you are using PyPy, please tell us about it!
+
+We'd especially like to thank these people for their contributions to this
+release:
+
+Manuel Jacob, Ronan Lamy, Mark Young, Amaury Forgeot d'Arc, Philip Jenvey,
+Martin Matusiak, Vasily Kuznetsov, Matti Picus, Armin Rigo and many others.
+
+Cheers
+
+The PyPy Team
+
+.. _donated: http://morepypy.blogspot.com/2012/01/py3k-and-numpy-first-stage-thanks-to.html
+.. _`py3k proposal`: http://pypy.org/py3donate.html
+.. _`PEP 393-like space efficient string representation`: https://bitbucket.org/pypy/pypy/issues/2309/optimized-unicode-representation
+.. _`missing features`: https://bitbucket.org/pypy/pypy/issues?status=new&status=open&component=PyPy3+%28running+Python+3.x%29&kind=enhancement
+.. _`known issues`: https://bitbucket.org/pypy/pypy/issues?status=new&status=open&component=PyPy3%20%28running%20Python%203.x%29
+.. _`#2305`: https://bitbucket.org/pypy/pypy/issues/2305
+.. _`ensurepip`: https://docs.python.org/3/library/ensurepip.html#module-ensurepip
+.. _`dynamic languages`: http://pypyjs.org
diff --git a/pypy/doc/tool/mydot.py b/pypy/doc/tool/mydot.py
--- a/pypy/doc/tool/mydot.py
+++ b/pypy/doc/tool/mydot.py
@@ -68,7 +68,7 @@
                       help="output format")
     options, args = parser.parse_args()
     if len(args) != 1:
-        raise ValueError, "need exactly one argument"
+        raise ValueError("need exactly one argument")
     epsfile = process_dot(py.path.local(args[0]))
     if options.format == "ps" or options.format == "eps":
         print epsfile.read()
diff --git a/pypy/doc/whatsnew-5.1.0.rst b/pypy/doc/whatsnew-5.1.0.rst
--- a/pypy/doc/whatsnew-5.1.0.rst
+++ b/pypy/doc/whatsnew-5.1.0.rst
@@ -60,3 +60,13 @@
 Remove old uneeded numpy headers, what is left is only for testing. Also 
 generate pypy_numpy.h which exposes functions to directly use micronumpy
 ndarray and ufuncs
+
+.. branch: rposix-for-3
+
+Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
+This updates the underlying rpython functions with the ones needed for the 
+py3k branch
+ 
+.. branch: numpy_broadcast
+
+Add broadcast to micronumpy
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,20 +3,143 @@
 =========================
 
 .. this is a revision shortly after release-5.1
-.. startrev: 2180e1eaf6f6
+.. startrev: aa60332382a1
 
-.. branch: rposix-for-3
+.. branch: techtonik/introductionrst-simplify-explanation-abo-1460879168046
 
-Reuse rposix definition of TIMESPEC in rposix_stat, add wrapper for fstatat().
-This updates the underlying rpython functions with the ones needed for the 
-py3k branch
- 
-.. branch: numpy_broadcast
+.. branch: gcheader-decl
 
-Add broadcast to micronumpy
+Reduce the size of generated C sources.
 
-.. branch: z196-support
 
-Fixes a critical issue in the register allocator and extends support on s390x. PyPy runs and translates on
-the s390x revisions z10 (released February 2008, experimental) and z196 (released August 2010)
-) in addition to zEC12 and z13.
+.. branch: remove-objspace-options
+
+Remove a number of options from the build process that were never tested and
+never set. Fix a performance bug in the method cache.
+
+.. branch: bitstring
+
+JIT: use bitstrings to compress the lists of read or written descrs
+that we attach to EffectInfo.  Fixes a problem we had in
+remove-objspace-options.
+
+.. branch: cpyext-for-merge
+
+Update cpyext C-API support.  After this branch, we are almost able to support
+upstream numpy via cpyext, so we created (yet another) fork of numpy at
+github.com/pypy/numpy with the needed changes.  Among the significant changes
+to cpyext:
+
+  - allow c-snippet tests to be run with -A so we can verify we are compatible
+  - fix many edge cases exposed by fixing tests to run with -A
+  - issequence() logic matches cpython
+  - make PyStringObject and PyUnicodeObject field names compatible with cpython
+  - add preliminary support for PyDateTime_*
+  - support PyComplexObject, PyFloatObject, PyDict_Merge, PyDictProxy,
+    PyMemoryView_*, _Py_HashDouble, PyFile_AsFile, PyFile_FromFile,
+    PyAnySet_CheckExact, PyUnicode_Concat
+  - improve support for PyGILState_Ensure, PyGILState_Release, and thread
+    primitives; also find a case where CPython allows thread creation
+    before PyEval_InitThreads is run, and disallow it on PyPy
+  - create a PyObject-specific list strategy
+  - rewrite slot assignment for typeobjects
+  - improve tracking of PyObject to rpython object mapping
+  - support tp_as_{number, sequence, mapping, buffer} slots
+
+(makes the pypy-c bigger; this was fixed subsequently by the
+share-cpyext-cpython-api branch)
+
+.. branch: share-mapdict-methods-2
+
+Reduce generated code for subclasses by using the same function objects in all
+generated subclasses.
+
+.. branch: share-cpyext-cpython-api
+
+.. branch: cpyext-auto-gil
+
+CPyExt tweak: instead of "GIL not held when a CPython C extension module
+calls PyXxx", we now silently acquire/release the GIL.  Helps with
+CPython C extension modules that call some PyXxx() functions without
+holding the GIL (arguably, they are theoretically buggy).
+
+.. branch: cpyext-test-A
+
+Get the cpyext tests to pass with "-A" (i.e. when tested directly with
+CPython).
+
+.. branch: oefmt
+
+.. branch: cpyext-werror
+
+Compile c snippets with -Werror in cpyext
+
+.. branch: gc-del-3
+
+Add rgc.FinalizerQueue, documented in pypy/doc/discussion/finalizer-order.rst.
+It is a more flexible way to make RPython finalizers.
+
+.. branch: unpacking-cpython-shortcut
+
+.. branch: cleanups
+
+.. branch: cpyext-more-slots
+
+.. branch: use-gc-del-3
+
+Use the new rgc.FinalizerQueue mechanism to clean up the handling of
+``__del__`` methods.  Fixes notably issue #2287.  (All RPython
+subclasses of W_Root need to use FinalizerQueue now.)
+
+.. branch: ufunc-outer
+
+Implement ufunc.outer on numpypy
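+
+For reference, ``ufunc.outer`` follows standard numpy semantics; a quick
+illustration (shown here with upstream numpy, which shares the API)::
+
+    import numpy as np
+    np.multiply.outer([1, 2, 3], [10, 20])
+    # array([[10, 20],
+    #        [20, 40],
+    #        [30, 60]])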
+
+.. branch: verbose-imports
+
+Support ``pypy -v``: verbose imports.  It does not log as much as
+cpython, but it should be enough to help when debugging package layout
+problems.
+
+.. branch: cpyext-macros-cast
+
+Fix some warnings when compiling CPython C extension modules
+
+.. branch: syntax_fix
+
+.. branch: remove-raisingops
+
+Remove most of the _ovf, _zer and _val operations from RPython.  Kills
+quite some code internally, and allows the JIT to do better
+optimizations: for example, app-level code like ``x / 2`` or ``x % 2``
+can now be turned into ``x >> 1`` or ``x & 1``, even if x is possibly
+negative.
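+
+A minimal sanity check of that equivalence (assuming Python 2 integer
+semantics, where ``/`` on ints floors toward negative infinity)::
+
+    for x in range(-8, 8):
+        assert x / 2 == x >> 1   # floor division matches arithmetic shift
+        assert x % 2 == x & 1    # modulo matches masking the low bit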
+
+.. branch: cpyext-old-buffers
+
+Generalize cpyext old-style buffers to more than just str/buffer, add support for mmap
+
+.. branch: numpy-includes
+
+Move _numpypy headers into a directory so they are not picked up by upstream
+numpy or scipy.  This allows building upstream numpy and scipy in pypy via cpyext.
+
+.. branch: traceviewer-common-merge-point-formats
+
+Teach RPython JIT's off-line traceviewer the most common ``debug_merge_point`` formats.
+
+.. branch: cpyext-pickle
+
+Enable pickling of W_PyCFunctionObject by monkeypatching pickle.Pickler.dispatch
+at cpyext import time
+
+.. branch: nonmovable-list
+
+Add a way to ask "give me a raw pointer to this list's
+items".  Only for resizable lists of primitives.  Turns the GcArray
+nonmovable, possibly making a copy of it first.
+
+.. branch: cpyext-ext
+
+Finish the work already partially merged in cpyext-for-merge.  Adds support
+for ByteArrayObject using the nonmovable-list, which also enables
+buffer(bytearray(<some-list>)).
diff --git a/pypy/doc/whatsnew-pypy3-5.1.1-alpha1.rst b/pypy/doc/whatsnew-pypy3-5.1.1-alpha1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-pypy3-5.1.1-alpha1.rst
@@ -0,0 +1,10 @@
+=================================
+What's new in PyPy3 5.1.1 alpha 1
+=================================
+
+.. A recent revision, ignoring all other branches for this release
+.. startrev: 29d14733e007
+
+.. branch: py3.3
+
+Python 3.3 compatibility
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -238,6 +238,15 @@
 for use. The release packaging script will pick up the tcltk runtime in the lib
 directory and put it in the archive.
 
+The lzma compression library
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Python 3.3 ships with CFFI wrappers for the lzma library, which can be
+downloaded from http://tukaani.org/xz.  Python 3.3-3.5 use version 5.0.5;
+a prebuilt version can be downloaded from
+http://tukaani.org/xz/xz-5.0.5-windows.zip (check the signature at
+http://tukaani.org/xz/xz-5.0.5-windows.zip.sig).
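+
+Once the library is installed, the CFFI-based module can be smoke-tested with
+a snippet like this (a minimal sketch, assuming a PyPy3 build with the lzma
+module enabled)::
+
+    import lzma
+    data = lzma.compress(b"hello pypy")
+    assert lzma.decompress(data) == b"hello pypy"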
+
 
 Using the mingw compiler
 ------------------------
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -9,7 +9,7 @@
 from rpython.config.config import to_optparse, make_dict, SUPPRESS_USAGE
 from rpython.config.config import ConflictConfigError
 from pypy.tool.option import make_objspace
-from pypy.conftest import pypydir
+from pypy import pypydir
 from rpython.rlib import rthread
 from pypy.module.thread import os_thread
 
@@ -63,7 +63,7 @@
             ##    from pypy.interpreter import main, interactive, error
             ##    con = interactive.PyPyConsole(space)
             ##    con.interact()
-            except OperationError, e:
+            except OperationError as e:
                 debug("OperationError:")
                 debug(" operror-type: " + e.w_type.getname(space))
                 debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -71,7 +71,7 @@
         finally:
             try:
                 space.finish()
-            except OperationError, e:
+            except OperationError as e:
                 debug("OperationError:")
                 debug(" operror-type: " + e.w_type.getname(space))
                 debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -115,7 +115,7 @@
                                     space.wrap('__import__'))
             space.call_function(import_, space.wrap('site'))
             return rffi.cast(rffi.INT, 0)
-        except OperationError, e:
+        except OperationError as e:
             if verbose:
                 debug("OperationError:")
                 debug(" operror-type: " + e.w_type.getname(space))
@@ -167,7 +167,7 @@
                 sys._pypy_execute_source.append(glob)
                 exec stmt in glob
             """)
-        except OperationError, e:
+        except OperationError as e:
             debug("OperationError:")
             debug(" operror-type: " + e.w_type.getname(space))
             debug(" operror-value: " + space.str_w(space.str(e.get_w_value(space))))
@@ -293,7 +293,7 @@
             self.hack_for_cffi_modules(driver)
 
         return self.get_entry_point(config)
-    
+
     def hack_for_cffi_modules(self, driver):
         # HACKHACKHACK
         # ugly hack to modify target goal from compile_* to build_cffi_imports
@@ -320,7 +320,7 @@
             while not basedir.join('include').exists():
                 _basedir = basedir.dirpath()
                 if _basedir == basedir:


More information about the pypy-commit mailing list