[pypy-commit] pypy py3.5: Merge with py3.5-async

raffael_t pypy.commits at gmail.com
Thu Aug 11 14:34:07 EDT 2016


Author: Raffael Tfirst <raffael.tfirst at gmail.com>
Branch: py3.5
Changeset: r86159:83d383a3859c
Date: 2016-08-11 20:31 +0200
http://bitbucket.org/pypy/pypy/changeset/83d383a3859c/

Log:	Merge with py3.5-async

diff too long, truncating to 2000 out of 40779 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -26,3 +26,4 @@
 40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
 40497617ae91caa1a394d8be6f9cd2de31cb0628 release-pypy3.3-v5.2
 c09c19272c990a0611b17569a0085ad1ab00c8ff release-pypy2.7-v5.3
+7e8df3df96417c16c2d55b41352ec82c9c69c978 release-pypy2.7-v5.3.1
diff --git a/dotviewer/graphparse.py b/dotviewer/graphparse.py
--- a/dotviewer/graphparse.py
+++ b/dotviewer/graphparse.py
@@ -85,10 +85,11 @@
     pass
 
 def splitline(line, re_word = re.compile(r'[^\s"]\S*|["]["]|["].*?[^\\]["]')):
+    import ast
     result = []
     for word in re_word.findall(line):
         if word.startswith('"'):
-            word = eval(word)
+            word = ast.literal_eval(word)
         result.append(word)
     return result
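
The switch from eval() to ast.literal_eval() closes an arbitrary-code-execution hole when splitting quoted words out of a dot line; a minimal sketch of the difference (the sample token is made up):

    import ast

    word = '"hello\\nworld"'          # a quoted token as it might appear in dot output
    print(ast.literal_eval(word))     # decodes the string literal safely
    # eval(word) returns the same thing here, but it would also evaluate any
    # expression it is given; ast.literal_eval raises ValueError for anything
    # that is not a plain Python literal.
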
 
diff --git a/lib-python/2.7/test/test_hash.py b/lib-python/2.7/test/test_hash.py
--- a/lib-python/2.7/test/test_hash.py
+++ b/lib-python/2.7/test/test_hash.py
@@ -174,7 +174,7 @@
 
 class StringlikeHashRandomizationTests(HashRandomizationTests):
     if check_impl_detail(pypy=True):
-        EMPTY_STRING_HASH = -1
+        EMPTY_STRING_HASH = -2
     else:
         EMPTY_STRING_HASH = 0
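
The new expected value follows CPython's convention that a hash of -1 is reserved as an error marker at the C level, so no object ever hashes to -1 (the "ep2016sprint" branch mentioned in the whatsnew below); a quick check that holds on CPython and, with this change, on PyPy:

    # hash(-1) is remapped to -2 so that a C-level return value of -1 can
    # always mean "an exception occurred".
    assert hash(-1) == -2
    assert hash(-2) == -2      # other small ints still hash to themselves
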
 
diff --git a/lib-python/3/opcode.py b/lib-python/3/opcode.py
--- a/lib-python/3/opcode.py
+++ b/lib-python/3/opcode.py
@@ -85,7 +85,10 @@
 def_op('INPLACE_FLOOR_DIVIDE', 28)
 def_op('INPLACE_TRUE_DIVIDE', 29)
 
-def_op('STORE_MAP', 54)
+def_op('GET_AITER', 50)
+def_op('GET_ANEXT', 51)
+def_op('BEFORE_ASYNC_WITH', 52)
+
 def_op('INPLACE_ADD', 55)
 def_op('INPLACE_SUBTRACT', 56)
 def_op('INPLACE_MULTIPLY', 57)
@@ -100,11 +103,12 @@
 def_op('BINARY_OR', 66)
 def_op('INPLACE_POWER', 67)
 def_op('GET_ITER', 68)
-def_op('STORE_LOCALS', 69)
+def_op('GET_YIELD_FROM_ITER', 69)
 
 def_op('PRINT_EXPR', 70)
 def_op('LOAD_BUILD_CLASS', 71)
 def_op('YIELD_FROM', 72)
+def_op('GET_AWAITABLE', 73)
 
 def_op('INPLACE_LSHIFT', 75)
 def_op('INPLACE_RSHIFT', 76)
@@ -196,6 +200,11 @@
 def_op('SET_ADD', 146)
 def_op('MAP_ADD', 147)
 
+def_op('LOAD_CLASSDEREF', 148)
+hasfree.append(148)
+
+jrel_op('SETUP_ASYNC_WITH', 154)
+
 def_op('EXTENDED_ARG', 144)
 EXTENDED_ARG = 144
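
The added opcodes mirror CPython 3.5's async/await support. A quick way to see GET_AWAITABLE in compiled bytecode on any 3.5-compatible interpreter (illustrative only):

    import dis

    async def fetch(awaitable):
        return await awaitable

    # The listing shows GET_AWAITABLE followed by LOAD_CONST None and
    # YIELD_FROM for the 'await' expression.
    dis.dis(fetch)
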
 
diff --git a/lib-python/3/test/test_hash.py b/lib-python/3/test/test_hash.py
--- a/lib-python/3/test/test_hash.py
+++ b/lib-python/3/test/test_hash.py
@@ -198,7 +198,7 @@
 
 class StringlikeHashRandomizationTests(HashRandomizationTests):
     if check_impl_detail(pypy=True):
-        EMPTY_STRING_HASH = -1
+        EMPTY_STRING_HASH = -2
     else:
         EMPTY_STRING_HASH = 0
     repr_ = None
diff --git a/lib-python/3/test/test_unicode.py b/lib-python/3/test/test_unicode.py
--- a/lib-python/3/test/test_unicode.py
+++ b/lib-python/3/test/test_unicode.py
@@ -2604,7 +2604,8 @@
     def test_getnewargs(self):
         text = 'abc'
         args = text.__getnewargs__()
-        self.assertIsNot(args[0], text)
+        if support.check_impl_detail():
+            self.assertIsNot(args[0], text)
         self.assertEqual(args[0], text)
         self.assertEqual(len(args), 1)
 
diff --git a/lib-python/conftest.py b/lib-python/conftest.py
--- a/lib-python/conftest.py
+++ b/lib-python/conftest.py
@@ -418,7 +418,7 @@
     RegrTest('test_threading.py', usemodules="thread", core=True),
     RegrTest('test_threading_local.py', usemodules="thread", core=True),
     RegrTest('test_threadsignals.py', usemodules="thread"),
-    RegrTest('test_time.py', core=True, usemodules="struct"),
+    RegrTest('test_time.py', core=True, usemodules="struct thread _rawffi"),
     RegrTest('test_timeit.py'),
     RegrTest('test_timeout.py'),
     RegrTest('test_tk.py'),
@@ -452,7 +452,7 @@
     RegrTest('test_userstring.py', core=True),
     RegrTest('test_uu.py'),
     RegrTest('test_uuid.py'),
-    RegrTest('test_venv.py'),
+    RegrTest('test_venv.py', usemodules="struct"),
     RegrTest('test_wait3.py', usemodules="thread"),
     RegrTest('test_wait4.py', usemodules="thread"),
     RegrTest('test_warnings.py', core=True),
diff --git a/lib_pypy/_ctypes/basics.py b/lib_pypy/_ctypes/basics.py
--- a/lib_pypy/_ctypes/basics.py
+++ b/lib_pypy/_ctypes/basics.py
@@ -198,10 +198,13 @@
     return tp._alignmentofinstances()
 
 @builtinify
-def byref(cdata):
+def byref(cdata, offset=0):
     # "pointer" is imported at the end of this module to avoid circular
     # imports
-    return pointer(cdata)
+    ptr = pointer(cdata)
+    if offset != 0:
+        ptr._buffer[0] += offset
+    return ptr
 
 def cdata_from_address(self, address):
     # fix the address: turn it into as unsigned, in case it's a negative number
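
The new optional argument matches ctypes' documented byref(obj, offset) signature. A small sketch of what the offset means, using only standard ctypes (not specific to this patch):

    import ctypes

    buf = ctypes.create_string_buffer(b"abcdef")
    ref = ctypes.byref(buf, 2)             # lightweight reference 2 bytes in
    # It refers to the same address as addressof(buf) + 2:
    print(ctypes.string_at(ctypes.addressof(buf) + 2, 4))   # b'cdef'
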
diff --git a/lib_pypy/_pypy_winbase_build.py b/lib_pypy/_pypy_winbase_build.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_build.py
@@ -0,0 +1,91 @@
+# Note: uses the CFFI out-of-line ABI mode.  We can't use the API
+# mode because ffi.compile() needs to run the compiler, which
+# needs 'subprocess', which needs 'msvcrt' and '_subprocess',
+# which depend on '_pypy_winbase_cffi' already.
+#
+# Note that if you need to regenerate _pypy_winbase_cffi and
+# can't use a preexisting PyPy to do that, then running this
+# file should work as long as 'subprocess' is not imported
+# by cffi.  I had to hack in 'cffi._pycparser' to move an
+# 'import subprocess' to the inside of a function.  (Also,
+# CPython+CFFI should work as well.)
+#
+# This module supports both msvcrt.py and _subprocess.py.
+
+from cffi import FFI
+
+ffi = FFI()
+
+ffi.set_source("_pypy_winbase_cffi", None)
+
+# ---------- MSVCRT ----------
+
+ffi.cdef("""
+typedef unsigned short wint_t;
+
+int _open_osfhandle(intptr_t osfhandle, int flags);
+intptr_t _get_osfhandle(int fd);
+int _setmode(int fd, int mode);
+int _locking(int fd, int mode, long nbytes);
+
+int _kbhit(void);
+int _getch(void);
+wint_t _getwch(void);
+int _getche(void);
+wint_t _getwche(void);
+int _putch(int);
+wint_t _putwch(wchar_t);
+int _ungetch(int);
+wint_t _ungetwch(wint_t);
+""")
+
+# ---------- SUBPROCESS ----------
+
+ffi.cdef("""
+typedef struct {
+    DWORD  cb;
+    char * lpReserved;
+    char * lpDesktop;
+    char * lpTitle;
+    DWORD  dwX;
+    DWORD  dwY;
+    DWORD  dwXSize;
+    DWORD  dwYSize;
+    DWORD  dwXCountChars;
+    DWORD  dwYCountChars;
+    DWORD  dwFillAttribute;
+    DWORD  dwFlags;
+    WORD   wShowWindow;
+    WORD   cbReserved2;
+    LPBYTE lpReserved2;
+    HANDLE hStdInput;
+    HANDLE hStdOutput;
+    HANDLE hStdError;
+} STARTUPINFO, *LPSTARTUPINFO;
+
+typedef struct {
+    HANDLE hProcess;
+    HANDLE hThread;
+    DWORD  dwProcessId;
+    DWORD  dwThreadId;
+} PROCESS_INFORMATION, *LPPROCESS_INFORMATION;
+
+DWORD WINAPI GetVersion(void);
+BOOL WINAPI CreatePipe(PHANDLE, PHANDLE, void *, DWORD);
+BOOL WINAPI CloseHandle(HANDLE);
+HANDLE WINAPI GetCurrentProcess(void);
+BOOL WINAPI DuplicateHandle(HANDLE, HANDLE, HANDLE, LPHANDLE,
+                            DWORD, BOOL, DWORD);
+BOOL WINAPI CreateProcessA(char *, char *, void *,
+                           void *, BOOL, DWORD, char *,
+                           char *, LPSTARTUPINFO, LPPROCESS_INFORMATION);
+DWORD WINAPI WaitForSingleObject(HANDLE, DWORD);
+BOOL WINAPI GetExitCodeProcess(HANDLE, LPDWORD);
+BOOL WINAPI TerminateProcess(HANDLE, UINT);
+HANDLE WINAPI GetStdHandle(DWORD);
+""")
+
+# --------------------
+
+if __name__ == "__main__":
+    ffi.compile()
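
Because this is CFFI's out-of-line ABI mode, no C compiler is needed at run time: the generated _pypy_winbase_cffi module is pure metadata and the library is bound with dlopen(). A hedged, Windows-only sketch of the pattern that _winapi.py below follows:

    from _pypy_winbase_cffi import ffi

    kernel32 = ffi.dlopen('kernel32')
    print(kernel32.GetVersion())                # plain DWORD result

    handles = ffi.new("HANDLE[2]")              # out-parameters as a C array
    if not kernel32.CreatePipe(handles, handles + 1, ffi.NULL, 0):
        code, message = ffi.getwinerror()
        raise WindowsError(code, message)
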
diff --git a/lib_pypy/_pypy_winbase_cffi.py b/lib_pypy/_pypy_winbase_cffi.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_pypy_winbase_cffi.py
@@ -0,0 +1,10 @@
+# auto-generated file
+import _cffi_backend
+
+ffi = _cffi_backend.FFI('_pypy_winbase_cffi',
+    _version = 0x2601,
+    _types = b'\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x07\x01\x00\x00\x07\x01\x00\x00\x09\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x19\x01\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x00\x0F\x00\x00\x01\x0D\x00\x00\x50\x03\x00\x00\x13\x11\x00\x00\x53\x03\x00\x00\x15\x11\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x13\x11\x00\x00\x13\x11\x00\x00\x4F\x03\x00\x00\x4E\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x03\x00\x00\x1F\x11\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x08\x01\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x18\x03\x00\x00\x02\x0F\x00\x00\x01\x0D\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x15\x11\x00\x00\x1F\x11\x00\x00\x0A\x01\x00\x00\x07\x01\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x0D\x0D\x00\x00\x07\x01\x00\x00\x00\x0F\x00\x00\x18\x0D\x00\x00\x15\x11\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x18\x0D\x00\x00\x02\x0F\x00\x00\x42\x0D\x00\x00\x06\x01\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x00\x0F\x00\x00\x42\x0D\x00\x00\x10\x01\x00\x00\x00\x0F\x00\x00\x15\x0D\x00\x00\x0A\x01\x00\x00\x02\x0F\x00\x00\x15\x0D\x00\x00\x02\x0F\x00\x00\x00\x09\x00\x00\x01\x09\x00\x00\x02\x01\x00\x00\x52\x03\x00\x00\x04\x01\x00\x00\x00\x01',
+    _globals = (b'\x00\x00\x24\x23CloseHandle',0,b'\x00\x00\x1E\x23CreatePipe',0,b'\x00\x00\x12\x23CreateProcessA',0,b'\x00\x00\x2F\x23DuplicateHandle',0,b'\x00\x00\x4C\x23GetCurrentProcess',0,b'\x00\x00\x2B\x23GetExitCodeProcess',0,b'\x00\x00\x49\x23GetStdHandle',0,b'\x00\x00\x3F\x23GetVersion',0,b'\x00\x00\x27\x23TerminateProcess',0,b'\x00\x00\x3B\x23WaitForSingleObject',0,b'\x00\x00\x38\x23_get_osfhandle',0,b'\x00\x00\x10\x23_getch',0,b'\x00\x00\x10\x23_getche',0,b'\x00\x00\x44\x23_getwch',0,b'\x00\x00\x44\x23_getwche',0,b'\x00\x00\x10\x23_kbhit',0,b'\x00\x00\x07\x23_locking',0,b'\x00\x00\x0C\x23_open_osfhandle',0,b'\x00\x00\x00\x23_putch',0,b'\x00\x00\x46\x23_putwch',0,b'\x00\x00\x03\x23_setmode',0,b'\x00\x00\x00\x23_ungetch',0,b'\x00\x00\x41\x23_ungetwch',0),
+    _struct_unions = ((b'\x00\x00\x00\x4E\x00\x00\x00\x02$PROCESS_INFORMATION',b'\x00\x00\x15\x11hProcess',b'\x00\x00\x15\x11hThread',b'\x00\x00\x18\x11dwProcessId',b'\x00\x00\x18\x11dwThreadId'),(b'\x00\x00\x00\x4F\x00\x00\x00\x02$STARTUPINFO',b'\x00\x00\x18\x11cb',b'\x00\x00\x13\x11lpReserved',b'\x00\x00\x13\x11lpDesktop',b'\x00\x00\x13\x11lpTitle',b'\x00\x00\x18\x11dwX',b'\x00\x00\x18\x11dwY',b'\x00\x00\x18\x11dwXSize',b'\x00\x00\x18\x11dwYSize',b'\x00\x00\x18\x11dwXCountChars',b'\x00\x00\x18\x11dwYCountChars',b'\x00\x00\x18\x11dwFillAttribute',b'\x00\x00\x18\x11dwFlags',b'\x00\x00\x42\x11wShowWindow',b'\x00\x00\x42\x11cbReserved2',b'\x00\x00\x51\x11lpReserved2',b'\x00\x00\x15\x11hStdInput',b'\x00\x00\x15\x11hStdOutput',b'\x00\x00\x15\x11hStdError')),
+    _typenames = (b'\x00\x00\x00\x1CLPPROCESS_INFORMATION',b'\x00\x00\x00\x1BLPSTARTUPINFO',b'\x00\x00\x00\x4EPROCESS_INFORMATION',b'\x00\x00\x00\x4FSTARTUPINFO',b'\x00\x00\x00\x42wint_t'),
+)
diff --git a/lib_pypy/_winapi.py b/lib_pypy/_winapi.py
--- a/lib_pypy/_winapi.py
+++ b/lib_pypy/_winapi.py
@@ -10,152 +10,99 @@
 
 # Declare external Win32 functions
 
-import ctypes
-
-_kernel32 = ctypes.WinDLL('kernel32')
-
-_CloseHandle = _kernel32.CloseHandle
-_CloseHandle.argtypes = [ctypes.c_int]
-_CloseHandle.restype = ctypes.c_int
-
-_CreatePipe = _kernel32.CreatePipe
-_CreatePipe.argtypes = [ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
-                        ctypes.c_void_p, ctypes.c_int]
-_CreatePipe.restype = ctypes.c_int
-
-_GetCurrentProcess = _kernel32.GetCurrentProcess
-_GetCurrentProcess.argtypes = []
-_GetCurrentProcess.restype = ctypes.c_int
+from _pypy_winbase_cffi import ffi as _ffi
+_kernel32 = _ffi.dlopen('kernel32')
 
 GetVersion = _kernel32.GetVersion
-GetVersion.argtypes = []
-GetVersion.restype = ctypes.c_int
 
-_DuplicateHandle = _kernel32.DuplicateHandle
-_DuplicateHandle.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int,
-                             ctypes.POINTER(ctypes.c_int),
-                             ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_DuplicateHandle.restype = ctypes.c_int
 
-_WaitForSingleObject = _kernel32.WaitForSingleObject
-_WaitForSingleObject.argtypes = [ctypes.c_int, ctypes.c_uint]
-_WaitForSingleObject.restype = ctypes.c_int
+# Now the _subprocess module implementation
 
-_GetExitCodeProcess = _kernel32.GetExitCodeProcess
-_GetExitCodeProcess.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
-_GetExitCodeProcess.restype = ctypes.c_int
+def _WinError():
+    code, message = _ffi.getwinerror()
+    raise WindowsError(code, message)
 
-_TerminateProcess = _kernel32.TerminateProcess
-_TerminateProcess.argtypes = [ctypes.c_int, ctypes.c_int]
-_TerminateProcess.restype = ctypes.c_int
+_INVALID_HANDLE_VALUE = _ffi.cast("HANDLE", -1)
 
-_GetStdHandle = _kernel32.GetStdHandle
-_GetStdHandle.argtypes = [ctypes.c_int]
-_GetStdHandle.restype = ctypes.c_int
-
-_GetModuleFileNameW = _kernel32.GetModuleFileNameW
-_GetModuleFileNameW.argtypes = [ctypes.c_int, ctypes.c_wchar_p, ctypes.c_uint]
-_GetModuleFileNameW.restype = ctypes.c_int
-
-class _STARTUPINFO(ctypes.Structure):
-    _fields_ = [('cb',         ctypes.c_int),
-                ('lpReserved', ctypes.c_void_p),
-                ('lpDesktop',  ctypes.c_char_p),
-                ('lpTitle',    ctypes.c_char_p),
-                ('dwX',        ctypes.c_int),
-                ('dwY',        ctypes.c_int),
-                ('dwXSize',    ctypes.c_int),
-                ('dwYSize',    ctypes.c_int),
-                ('dwXCountChars', ctypes.c_int),
-                ('dwYCountChars', ctypes.c_int),
-                ("dwFillAttribute", ctypes.c_int),
-                ("dwFlags", ctypes.c_int),
-                ("wShowWindow", ctypes.c_short),
-                ("cbReserved2", ctypes.c_short),
-                ("lpReserved2", ctypes.c_void_p),
-                ("hStdInput", ctypes.c_int),
-                ("hStdOutput", ctypes.c_int),
-                ("hStdError", ctypes.c_int)
-                ]
-
-class _PROCESS_INFORMATION(ctypes.Structure):
-    _fields_ = [("hProcess", ctypes.c_int),
-                ("hThread", ctypes.c_int),
-                ("dwProcessID", ctypes.c_int),
-                ("dwThreadID", ctypes.c_int)]
-
-_CreateProcess = _kernel32.CreateProcessW
-_CreateProcess.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_void_p, ctypes.c_void_p,
-                           ctypes.c_int, ctypes.c_int, ctypes.c_wchar_p, ctypes.c_wchar_p,
-                           ctypes.POINTER(_STARTUPINFO), ctypes.POINTER(_PROCESS_INFORMATION)]
-_CreateProcess.restype = ctypes.c_int
-
-del ctypes
-
-# Now the _winapi module implementation
-
-from ctypes import c_int as _c_int, byref as _byref, WinError as _WinError
-
-class _handle:
-    def __init__(self, handle):
-        self.handle = handle
+class _handle(object):
+    def __init__(self, c_handle):
+        # 'c_handle' is a cffi cdata of type HANDLE, which is basically 'void *'
+        self.c_handle = c_handle
+        if int(self) != -1:
+            self.c_handle = _ffi.gc(self.c_handle, _kernel32.CloseHandle)
 
     def __int__(self):
-        return self.handle
+        return int(_ffi.cast("intptr_t", self.c_handle))
 
-    def __del__(self):
-        if self.handle is not None:
-            _CloseHandle(self.handle)
+    def __repr__(self):
+        return '<_subprocess.handle %d at 0x%x>' % (int(self), id(self))
 
     def Detach(self):
-        handle, self.handle = self.handle, None
-        return handle
+        h = int(self)
+        if h != -1:
+            c_handle = self.c_handle
+            self.c_handle = _INVALID_HANDLE_VALUE
+            _ffi.gc(c_handle, None)
+        return h
 
     def Close(self):
-        if self.handle not in (-1, None):
-            _CloseHandle(self.handle)
-            self.handle = None
+        if int(self) != -1:
+            c_handle = self.c_handle
+            self.c_handle = _INVALID_HANDLE_VALUE
+            _ffi.gc(c_handle, None)
+            _kernel32.CloseHandle(c_handle)
 
 def CreatePipe(attributes, size):
-    read = _c_int()
-    write = _c_int()
+    handles = _ffi.new("HANDLE[2]")
 
-    res = _CreatePipe(_byref(read), _byref(write), None, size)
+    res = _kernel32.CreatePipe(handles, handles + 1, _ffi.NULL, size)
 
     if not res:
         raise _WinError()
 
-    return _handle(read.value), _handle(write.value)
+    return _handle(handles[0]), _handle(handles[1])
 
 def GetCurrentProcess():
-    return _handle(_GetCurrentProcess())
+    return _handle(_kernel32.GetCurrentProcess())
 
 def DuplicateHandle(source_process, source, target_process, access, inherit, options=0):
-    target = _c_int()
+    # CPython: the first three arguments are expected to be integers
+    target = _ffi.new("HANDLE[1]")
 
-    res = _DuplicateHandle(int(source_process), int(source), int(target_process),
-                           _byref(target),
-                           access, inherit, options)
+    res = _kernel32.DuplicateHandle(
+        _ffi.cast("HANDLE", source_process),
+        _ffi.cast("HANDLE", source),
+        _ffi.cast("HANDLE", target_process),
+        target, access, inherit, options)
 
     if not res:
         raise _WinError()
 
-    return _handle(target.value)
+    return _handle(target[0])
+
+def _z(input):
+    if input is None:
+        return _ffi.NULL
+    if isinstance(input, basestring):
+        return str(input)
+    raise TypeError("string/unicode/None expected, got %r" % (
+        type(input).__name__,))
 
 def CreateProcess(name, command_line, process_attr, thread_attr,
                   inherit, flags, env, start_dir, startup_info):
-    si = _STARTUPINFO()
+    si = _ffi.new("STARTUPINFO *")
     if startup_info is not None:
         si.dwFlags = startup_info.dwFlags
         si.wShowWindow = startup_info.wShowWindow
+        # CPython: these three handles are expected to be _handle objects
         if startup_info.hStdInput:
-            si.hStdInput = int(startup_info.hStdInput)
+            si.hStdInput = startup_info.hStdInput.c_handle
         if startup_info.hStdOutput:
-            si.hStdOutput = int(startup_info.hStdOutput)
+            si.hStdOutput = startup_info.hStdOutput.c_handle
         if startup_info.hStdError:
-            si.hStdError = int(startup_info.hStdError)
+            si.hStdError = startup_info.hStdError.c_handle
 
-    pi = _PROCESS_INFORMATION()
+    pi = _ffi.new("PROCESS_INFORMATION *")
     flags |= CREATE_UNICODE_ENVIRONMENT
 
     if env is not None:
@@ -164,47 +111,55 @@
             envbuf += "%s=%s\0" % (k, v)
         envbuf += '\0'
     else:
-        envbuf = None
+        envbuf = _ffi.NULL
 
-    res = _CreateProcess(name, command_line, None, None, inherit, flags, envbuf,
-                        start_dir, _byref(si), _byref(pi))
+    res = _kernel32.CreateProcessA(_z(name), _z(command_line), _ffi.NULL,
+                                   _ffi.NULL, inherit, flags, envbuf,
+                                   _z(start_dir), si, pi)
 
     if not res:
         raise _WinError()
 
-    return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessID, pi.dwThreadID
+    return _handle(pi.hProcess), _handle(pi.hThread), pi.dwProcessId, pi.dwThreadId
 
 def WaitForSingleObject(handle, milliseconds):
-    res = _WaitForSingleObject(int(handle), milliseconds)
-
+    # CPython: the first argument is expected to be an integer.
+    res = _kernel32.WaitForSingleObject(_ffi.cast("HANDLE", handle),
+                                        milliseconds)
     if res < 0:
         raise _WinError()
 
     return res
 
 def GetExitCodeProcess(handle):
-    code = _c_int()
+    # CPython: the first argument is expected to be an integer.
+    code = _ffi.new("DWORD[1]")
 
-    res = _GetExitCodeProcess(int(handle), _byref(code))
+    res = _kernel32.GetExitCodeProcess(_ffi.cast("HANDLE", handle), code)
 
     if not res:
         raise _WinError()
 
-    return code.value
+    return code[0]
 
 def TerminateProcess(handle, exitcode):
-    res = _TerminateProcess(int(handle), exitcode)
+    # CPython: the first argument is expected to be an integer.
+    # The second argument is silently wrapped in a UINT.
+    res = _kernel32.TerminateProcess(_ffi.cast("HANDLE", handle),
+                                     _ffi.cast("UINT", exitcode))
 
     if not res:
         raise _WinError()
 
 def GetStdHandle(stdhandle):
-    res = _GetStdHandle(stdhandle)
+    stdhandle = _ffi.cast("DWORD", stdhandle)
+    res = _kernel32.GetStdHandle(stdhandle)
 
     if not res:
         return None
     else:
-        return res
+        # note: returns integer, not handle object
+        return int(_ffi.cast("intptr_t", res))
 
 def CloseHandle(handle):
     res = _CloseHandle(handle)
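
The new _handle class ties handle lifetime to cffi's ffi.gc(): the destructor (CloseHandle) runs when the wrapper is garbage-collected, and ffi.gc(ptr, None) detaches it again when ownership is given away in Detach()/Close(). A minimal, POSIX-flavoured sketch of the same pattern with malloc/free standing in for Win32 handles (assumes cffi >= 1.7 for ffi.gc(p, None)):

    import cffi

    ffi = cffi.FFI()
    ffi.cdef("void *malloc(size_t); void free(void *);")
    lib = ffi.dlopen(None)                 # the C library; POSIX only

    p = ffi.gc(lib.malloc(16), lib.free)   # freed automatically when collected
    ffi.gc(p, None)                        # detach: the destructor won't run
    lib.free(p)                            # ...so release it manually instead
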
diff --git a/lib_pypy/cffi/_pycparser/__init__.py b/lib_pypy/cffi/_pycparser/__init__.py
--- a/lib_pypy/cffi/_pycparser/__init__.py
+++ b/lib_pypy/cffi/_pycparser/__init__.py
@@ -10,7 +10,6 @@
 __all__ = ['c_lexer', 'c_parser', 'c_ast']
 __version__ = '2.14'
 
-from subprocess import Popen, PIPE
 from .c_parser import CParser
 
 
@@ -28,6 +27,7 @@
         When successful, returns the preprocessed file's contents.
         Errors from cpp will be printed out.
     """
+    from subprocess import Popen, PIPE
     path_list = [cpp_path]
     if isinstance(cpp_args, list):
         path_list += cpp_args
diff --git a/lib_pypy/greenlet.egg-info b/lib_pypy/greenlet.egg-info
--- a/lib_pypy/greenlet.egg-info
+++ b/lib_pypy/greenlet.egg-info
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: greenlet
-Version: 0.4.9
+Version: 0.4.10
 Summary: Lightweight in-process concurrent programming
 Home-page: https://github.com/python-greenlet/greenlet
 Author: Ralf Schmitt (for CPython), PyPy team
diff --git a/lib_pypy/greenlet.py b/lib_pypy/greenlet.py
--- a/lib_pypy/greenlet.py
+++ b/lib_pypy/greenlet.py
@@ -2,7 +2,7 @@
 import __pypy__
 import _continuation
 
-__version__ = "0.4.9"
+__version__ = "0.4.10"
 
 # ____________________________________________________________
 # Exceptions
diff --git a/lib_pypy/msvcrt.py b/lib_pypy/msvcrt.py
--- a/lib_pypy/msvcrt.py
+++ b/lib_pypy/msvcrt.py
@@ -7,26 +7,39 @@
 # XXX incomplete: implemented only functions needed by subprocess.py
 # PAC: 2010/08 added MS locking for Whoosh
 
-import ctypes
+# 07/2016: rewrote in CFFI
+
+import sys
+if sys.platform != 'win32':
+    raise ImportError("The 'msvcrt' module is only available on Windows")
+
+import _rawffi
+from _pypy_winbase_cffi import ffi as _ffi
+_lib = _ffi.dlopen(_rawffi.get_libc().name)
+
 import errno
-from ctypes_support import standard_c_lib as _c
-from ctypes_support import get_errno
-
-try:
-    open_osfhandle = _c._open_osfhandle
-except AttributeError: # we are not on windows
-    raise ImportError
 
 try: from __pypy__ import builtinify, validate_fd
 except ImportError: builtinify = validate_fd = lambda f: f
 
 
-open_osfhandle.argtypes = [ctypes.c_int, ctypes.c_int]
-open_osfhandle.restype = ctypes.c_int
+def _ioerr():
+    e = _ffi.errno
+    raise IOError(e, errno.errorcode[e])
 
-_get_osfhandle = _c._get_osfhandle
-_get_osfhandle.argtypes = [ctypes.c_int]
-_get_osfhandle.restype = ctypes.c_int
+
+@builtinify
+def open_osfhandle(fd, flags):
+    """"open_osfhandle(handle, flags) -> file descriptor
+
+    Create a C runtime file descriptor from the file handle handle. The
+    flags parameter should be a bitwise OR of os.O_APPEND, os.O_RDONLY,
+    and os.O_TEXT. The returned file descriptor may be used as a parameter
+    to os.fdopen() to create a file object."""
+    fd = _lib._open_osfhandle(fd, flags)
+    if fd == -1:
+        _ioerr()
+    return fd
 
 @builtinify
 def get_osfhandle(fd):
@@ -38,62 +51,74 @@
         validate_fd(fd)
     except OSError as e:
         raise IOError(*e.args)
-    return _get_osfhandle(fd)
+    result = _lib._get_osfhandle(fd)
+    if result == -1:
+        _ioerr()
+    return result
 
-setmode = _c._setmode
-setmode.argtypes = [ctypes.c_int, ctypes.c_int]
-setmode.restype = ctypes.c_int
+@builtinify
+def setmode(fd, flags):
+    """setmode(fd, mode) -> Previous mode
+
+    Set the line-end translation mode for the file descriptor fd. To set
+    it to text mode, flags should be os.O_TEXT; for binary, it should be
+    os.O_BINARY."""
+    flags = _lib._setmode(fd, flags)
+    if flags == -1:
+        _ioerr()
+    return flags
 
 LK_UNLCK, LK_LOCK, LK_NBLCK, LK_RLCK, LK_NBRLCK = range(5)
 
-_locking = _c._locking
-_locking.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int]
-_locking.restype = ctypes.c_int
-
 @builtinify
 def locking(fd, mode, nbytes):
-    '''lock or unlock a number of bytes in a file.'''
-    rv = _locking(fd, mode, nbytes)
+    """"locking(fd, mode, nbytes) -> None
+
+    Lock part of a file based on file descriptor fd from the C runtime.
+    Raises IOError on failure. The locked region of the file extends from
+    the current file position for nbytes bytes, and may continue beyond
+    the end of the file. mode must be one of the LK_* constants listed
+    below. Multiple regions in a file may be locked at the same time, but
+    may not overlap. Adjacent regions are not merged; they must be unlocked
+    individually."""
+    rv = _lib._locking(fd, mode, nbytes)
     if rv != 0:
-        e = get_errno()
-        raise IOError(e, errno.errorcode[e])
+        _ioerr()
 
 # Console I/O routines
 
-kbhit = _c._kbhit
-kbhit.argtypes = []
-kbhit.restype = ctypes.c_int
+kbhit = _lib._kbhit
 
-getch = _c._getch
-getch.argtypes = []
-getch.restype = ctypes.c_char
+@builtinify
+def getch():
+    return chr(_lib._getch())
 
-getwch = _c._getwch
-getwch.argtypes = []
-getwch.restype = ctypes.c_wchar
+@builtinify
+def getwch():
+    return unichr(_lib._getwch())
 
-getche = _c._getche
-getche.argtypes = []
-getche.restype = ctypes.c_char
+@builtinify
+def getche():
+    return chr(_lib._getche())
 
-getwche = _c._getwche
-getwche.argtypes = []
-getwche.restype = ctypes.c_wchar
+@builtinify
+def getwche():
+    return unichr(_lib._getwche())
 
-putch = _c._putch
-putch.argtypes = [ctypes.c_char]
-putch.restype = None
+@builtinify
+def putch(ch):
+    _lib._putch(ord(ch))
 
-putwch = _c._putwch
-putwch.argtypes = [ctypes.c_wchar]
-putwch.restype = None
+@builtinify
+def putwch(ch):
+    _lib._putwch(ord(ch))
 
-ungetch = _c._ungetch
-ungetch.argtypes = [ctypes.c_char]
-ungetch.restype = None
+@builtinify
+def ungetch(ch):
+    if _lib._ungetch(ord(ch)) == -1:   # EOF
+        _ioerr()
 
-ungetwch = _c._ungetwch
-ungetwch.argtypes = [ctypes.c_wchar]
-ungetwch.restype = None
-
-del ctypes
+@builtinify
+def ungetwch(ch):
+    if _lib._ungetwch(ord(ch)) == -1:   # EOF
+        _ioerr()
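
The public msvcrt API is unchanged by the CFFI rewrite, so existing callers such as subprocess keep working; a Windows-only usage sketch:

    import os
    import msvcrt

    fd = os.open('example.txt', os.O_RDWR | os.O_CREAT)
    msvcrt.setmode(fd, os.O_BINARY)        # disable line-end translation
    handle = msvcrt.get_osfhandle(fd)      # the underlying Win32 HANDLE (int)
    os.close(fd)
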
diff --git a/pypy/config/pypyoption.py b/pypy/config/pypyoption.py
--- a/pypy/config/pypyoption.py
+++ b/pypy/config/pypyoption.py
@@ -40,7 +40,7 @@
     "binascii", "_multiprocessing", '_warnings', "_collections",
     "_multibytecodec", "_continuation", "_cffi_backend",
     "_csv", "_pypyjson", "_posixsubprocess", # "cppyy", "micronumpy"
-    "faulthandler",
+    "faulthandler", "_jitlog",
 ])
 
 from rpython.jit.backend import detect_cpu
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -94,6 +94,20 @@
 def pytest_pycollect_makemodule(path, parent):
     return PyPyModule(path, parent)
 
+def is_applevel(item):
+    from pypy.tool.pytest.apptest import AppTestFunction
+    return isinstance(item, AppTestFunction)
+
+def pytest_collection_modifyitems(config, items):
+    if config.option.runappdirect:
+        return
+    for item in items:
+        if isinstance(item, py.test.Function):
+            if is_applevel(item):
+                item.add_marker('applevel')
+            else:
+                item.add_marker('interplevel')
+
 class PyPyModule(py.test.collect.Module):
     """ we take care of collecting classes both at app level
         and at interp-level (because we need to stick a space
@@ -128,9 +142,6 @@
             if name.startswith('AppTest'):
                 from pypy.tool.pytest.apptest import AppClassCollector
                 return AppClassCollector(name, parent=self)
-            else:
-                from pypy.tool.pytest.inttest import IntClassCollector
-                return IntClassCollector(name, parent=self)
 
         elif hasattr(obj, 'func_code') and self.funcnamefilter(name):
             if name.startswith('app_test_'):
@@ -138,11 +149,7 @@
                     "generator app level functions? you must be joking"
                 from pypy.tool.pytest.apptest import AppTestFunction
                 return AppTestFunction(name, parent=self)
-            elif obj.func_code.co_flags & 32: # generator function
-                return pytest.Generator(name, parent=self)
-            else:
-                from pypy.tool.pytest.inttest import IntTestFunction
-                return IntTestFunction(name, parent=self)
+        return super(PyPyModule, self).makeitem(name, obj)
 
 def skip_on_missing_buildoption(**ropts):
     __tracebackhide__ = True
@@ -171,28 +178,19 @@
 
 def pytest_runtest_setup(__multicall__, item):
     if isinstance(item, py.test.collect.Function):
-        appclass = item.getparent(PyPyClassCollector)
+        appclass = item.getparent(py.test.Class)
         if appclass is not None:
             # Make cls.space and cls.runappdirect available in tests.
             spaceconfig = getattr(appclass.obj, 'spaceconfig', None)
             if spaceconfig is not None:
                 from pypy.tool.pytest.objspace import gettestobjspace
                 appclass.obj.space = gettestobjspace(**spaceconfig)
+            else:
+                appclass.obj.space = LazyObjSpaceGetter()
             appclass.obj.runappdirect = option.runappdirect
 
     __multicall__.execute()
 
 
-class PyPyClassCollector(py.test.collect.Class):
-    # All pypy Test classes have a "space" member.
-    def setup(self):
-        cls = self.obj
-        if not hasattr(cls, 'spaceconfig'):
-            cls.space = LazyObjSpaceGetter()
-        else:
-            assert hasattr(cls, 'space') # set by pytest_runtest_setup
-        super(PyPyClassCollector, self).setup()
-
-
 def pytest_ignore_collect(path):
     return path.check(link=1)
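
With every collected test now carrying an "applevel" or "interplevel" marker, pytest's standard marker selection applies; a sketch (the test path is hypothetical):

    import pytest

    # Run only the app-level tests of some module; pass "-m interplevel" to
    # select the interp-level ones instead.
    pytest.main(["pypy/module/_random/test/", "-m", "applevel"])
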
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -104,27 +104,24 @@
 
     apt-get install gcc make libffi-dev pkg-config libz-dev libbz2-dev \
     libsqlite3-dev libncurses-dev libexpat1-dev libssl-dev libgdbm-dev \
-    tk-dev libgc-dev liblzma-dev
-
-For the optional lzma module on PyPy3 you will also need ``liblzma-dev``.
+    tk-dev libgc-dev \
+    liblzma-dev  # For lzma on PyPy3.
 
 On Fedora::
 
     dnf install gcc make libffi-devel pkgconfig zlib-devel bzip2-devel \
     lib-sqlite3-devel ncurses-devel expat-devel openssl-devel tk-devel \
-    gdbm-devel
-
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
+    gdbm-devel \
+    xz-devel  # For lzma on PyPy3.
 
 On SLES11::
 
     zypper install gcc make python-devel pkg-config \
     zlib-devel libopenssl-devel libbz2-devel sqlite3-devel \
-    libexpat-devel libffi-devel python-curses
+    libexpat-devel libffi-devel python-curses \
+    xz-devel  # For lzma on PyPy3.
     (XXX plus the SLES11 version of libgdbm-dev and tk-dev)
 
-For the optional lzma module on PyPy3 you will also need ``xz-devel``.
-
 On Mac OS X, most of these build-time dependencies are installed alongside
 the Developer Tools. However, note that in order for the installation to
 find them you may need to run::
diff --git a/pypy/doc/config/commandline.txt b/pypy/doc/config/commandline.txt
--- a/pypy/doc/config/commandline.txt
+++ b/pypy/doc/config/commandline.txt
@@ -9,7 +9,7 @@
 PyPy Python interpreter options
 -------------------------------
 
-The following options can be used after ``translate.py
+The following options can be used after ``rpython
 targetpypystandalone`` or as options to ``py.py``.
 
 .. GENERATE: objspace
@@ -22,7 +22,7 @@
 General translation options
 ---------------------------
 
-The following are options of ``translate.py``.  They must be
+The following are options of ``bin/rpython``.  They must be
 given before the ``targetxxx`` on the command line.
 
 * `--opt -O:`__ set the optimization level `[0, 1, size, mem, 2, 3]`
diff --git a/pypy/doc/config/index.rst b/pypy/doc/config/index.rst
--- a/pypy/doc/config/index.rst
+++ b/pypy/doc/config/index.rst
@@ -15,12 +15,12 @@
 
     ./py.py <`objspace options`_>
 
-and the ``translate.py`` translation entry
+and the ``rpython/bin/rpython`` translation entry
 point which takes arguments of this form:
 
 .. parsed-literal::
 
-    ./translate.py <`translation options`_> <target>
+    ./rpython/bin/rpython <`translation options`_> <target>
 
 For the common case of ``<target>`` being ``targetpypystandalone.py``,
 you can then pass the `object space options`_ after
@@ -28,7 +28,7 @@
 
 .. parsed-literal::
 
-    ./translate.py <`translation options`_> targetpypystandalone.py <`objspace options`_>
+    ./rpython/bin/rpython <`translation options`_> targetpypystandalone.py <`objspace options`_>
 
 There is an `overview`_ of all command line arguments that can be
 passed in either position.
diff --git a/pypy/doc/config/opt.rst b/pypy/doc/config/opt.rst
--- a/pypy/doc/config/opt.rst
+++ b/pypy/doc/config/opt.rst
@@ -4,8 +4,8 @@
 This meta-option selects a default set of optimization
 settings to use during a translation.  Usage::
 
-    translate.py --opt=#
-    translate.py -O#
+    bin/rpython --opt=#
+    bin/rpython -O#
 
 where ``#`` is the desired optimization level.  The valid choices are:
 
diff --git a/pypy/doc/config/translation.dont_write_c_files.txt b/pypy/doc/config/translation.dont_write_c_files.txt
--- a/pypy/doc/config/translation.dont_write_c_files.txt
+++ b/pypy/doc/config/translation.dont_write_c_files.txt
@@ -1,4 +1,4 @@
 write the generated C files to ``/dev/null`` instead of to the disk. Useful if
-you want to use translate.py as a benchmark and don't want to access the disk.
+you want to use translation as a benchmark and don't want to access the disk.
 
 .. _`translation documentation`: ../translation.html
diff --git a/pypy/doc/config/translation.fork_before.txt b/pypy/doc/config/translation.fork_before.txt
--- a/pypy/doc/config/translation.fork_before.txt
+++ b/pypy/doc/config/translation.fork_before.txt
@@ -1,4 +1,4 @@
 This is an option mostly useful when working on the PyPy toolchain. If you use
-it, translate.py will fork before the specified phase. If the translation
+it, translation will fork before the specified phase. If the translation
 crashes after that fork, you can fix the bug in the toolchain, and continue
 translation at the fork-point.
diff --git a/pypy/doc/cppyy.rst b/pypy/doc/cppyy.rst
--- a/pypy/doc/cppyy.rst
+++ b/pypy/doc/cppyy.rst
@@ -122,7 +122,7 @@
     $ hg up reflex-support         # optional
 
     # This example shows python, but using pypy-c is faster and uses less memory
-    $ python rpython/translator/goal/translate.py --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy
+    $ python rpython/bin/rpython --opt=jit pypy/goal/targetpypystandalone --withmod-cppyy
 
 This will build a ``pypy-c`` that includes the cppyy module, and through that,
 Reflex support.
diff --git a/pypy/doc/cpython_differences.rst b/pypy/doc/cpython_differences.rst
--- a/pypy/doc/cpython_differences.rst
+++ b/pypy/doc/cpython_differences.rst
@@ -315,13 +315,28 @@
 
  - ``complex``
 
+ - ``str`` (empty or single-character strings only)
+
+ - ``unicode`` (empty or single-character strings only)
+
+ - ``tuple`` (empty tuples only)
+
+ - ``frozenset`` (empty frozenset only)
+
 This change requires some changes to ``id`` as well. ``id`` fulfills the
 following condition: ``x is y <=> id(x) == id(y)``. Therefore ``id`` of the
 above types will return a value that is computed from the argument, and can
 thus be larger than ``sys.maxint`` (i.e. it can be an arbitrary long).
 
-Notably missing from the list above are ``str`` and ``unicode``.  If your
-code relies on comparing strings with ``is``, then it might break in PyPy.
+Note that strings of length 2 or greater can be equal without being
+identical.  Similarly, ``x is (2,)`` is not necessarily true even if
+``x`` contains a tuple and ``x == (2,)``.  The uniqueness rules apply
+only to the particular cases described above.  The ``str``, ``unicode``,
+``tuple`` and ``frozenset`` rules were added in PyPy 5.4; before that, a
+test like ``if x is "?"`` or ``if x is ()`` could fail even if ``x`` was
+equal to ``"?"`` or ``()``.  The new behavior added in PyPy 5.4 is
+closer to CPython's, which caches precisely the empty tuple/frozenset,
+and (generally but not always) the strings and unicodes of length <= 1.
 
 Note that for floats there "``is``" only one object per "bit pattern"
 of the float.  So ``float('nan') is float('nan')`` is true on PyPy,
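
A short illustration of the rule spelled out above (results for the non-guaranteed cases can differ between interpreters and versions):

    print("" is "")       # True: the empty string is unique (PyPy >= 5.4)
    print(() is ())       # True: the empty tuple is unique
    x = (2,)
    print(x is (2,))      # not guaranteed: tuples of length >= 1 may or may
                          # not be shared; only == is meaningful here
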
diff --git a/pypy/doc/faq.rst b/pypy/doc/faq.rst
--- a/pypy/doc/faq.rst
+++ b/pypy/doc/faq.rst
@@ -335,3 +335,60 @@
 
 This will disable SELinux's protection and allow PyPy to configure correctly.
 Be sure to enable it again if you need it!
+
+
+How should I report a bug?
+--------------------------
+
+Our bug tracker is here: https://bitbucket.org/pypy/pypy/issues/
+
+Missing features or incompatibilities with CPython are considered
+bugs, and reports of them are welcome.  (See also our list of `known
+incompatibilities`__.)
+
+.. __: http://pypy.org/compat.html
+
+For bugs of the kind "I'm getting a PyPy crash or a strange
+exception", please note that: **We can't do anything without
+reproducing the bug ourselves**.  We cannot do anything with
+tracebacks from gdb, or core dumps.  This is not only because the
+standard PyPy is compiled without debug symbols.  The real reason is
+that a C-level traceback is usually of no help at all in PyPy.
+Debugging PyPy can be annoying.
+
+In more detail:
+
+* First, please give the exact PyPy version, and the OS.
+
+* It might help focus our search if we know if the bug can be
+  reproduced on a "``pypy --jit off``" or not.  If "``pypy --jit
+  off``" always works, then the problem might be in the JIT.
+  Otherwise, we know we can ignore that part.
+
+* If you got the bug using only Open Source components, please give a
+  step-by-step guide that we can follow to reproduce the problem
+  ourselves.  Don't assume we know anything about any program other
+  than PyPy.  We would like a guide that we can follow point by point
+  (without guessing or having to figure things out)
+  on a machine similar to yours, starting from a bare PyPy, until we
+  see the same problem.  (If you can, you can try to reduce the number
+  of steps and the time it needs to run, but that is not mandatory.)
+
+* If the bug involves Closed Source components, or just too many Open
+  Source components to install them all ourselves, then maybe you can
+  give us some temporary ssh access to a machine where the bug can be
+  reproduced.  Or, maybe we can download a VirtualBox or VMWare
+  virtual machine where the problem occurs.
+
+* If giving us access would require us to use tools other than ssh,
+  make appointments, or sign an NDA, then we can consider a commercial
+  support contract for a small sum of money.
+
+* If even that is not possible for you, then sorry, we can't help.
+
+Of course, you can try to debug the problem yourself, and we can help
+you get started if you ask on the #pypy IRC channel, but be prepared:
+debugging an annoying PyPy problem usually involves quite a lot of gdb
+in auto-generated C code, and at least some knowledge about the
+various components involved, from PyPy's own RPython source code to
+the GC and possibly the JIT.
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
 
+   release-pypy2.7-v5.3.1.rst
    release-pypy2.7-v5.3.0.rst
    release-5.1.1.rst
    release-5.1.0.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
 .. toctree::
 
    whatsnew-head.rst
+   whatsnew-pypy2-5.3.1.rst
    whatsnew-pypy2-5.3.0.rst
    whatsnew-5.1.0.rst
    whatsnew-5.0.0.rst
diff --git a/pypy/doc/install.rst b/pypy/doc/install.rst
--- a/pypy/doc/install.rst
+++ b/pypy/doc/install.rst
@@ -39,17 +39,16 @@
 library.
 
 If you want to install 3rd party libraries, the most convenient way is
-to install pip_ (unless you want to install virtualenv as explained
-below; then you can directly use pip inside virtualenvs):
+to install pip_ using ensurepip_ (unless you want to install virtualenv as 
+explained below; then you can directly use pip inside virtualenvs):
 
 .. code-block:: console
 
-    $ curl -O https://bootstrap.pypa.io/get-pip.py
-    $ ./pypy-2.1/bin/pypy get-pip.py
-    $ ./pypy-2.1/bin/pip install pygments  # for example
+    $ ./pypy-xxx/bin/pypy -m ensurepip
+    $ ./pypy-xxx/bin/pip install pygments  # for example
 
-Third party libraries will be installed in ``pypy-2.1/site-packages``, and
-the scripts in ``pypy-2.1/bin``.
+Third party libraries will be installed in ``pypy-xxx/site-packages``, and
+the scripts in ``pypy-xxx/bin``.
 
 
 Installing using virtualenv
@@ -61,7 +60,7 @@
 checkout::
 
 	# from a tarball
-	$ virtualenv -p /opt/pypy-c-jit-41718-3fb486695f20-linux/bin/pypy my-pypy-env
+	$ virtualenv -p /opt/pypy-xxx/bin/pypy my-pypy-env
 
 	# from the mercurial checkout
 	$ virtualenv -p /path/to/pypy/pypy/translator/goal/pypy-c my-pypy-env
@@ -69,7 +68,7 @@
 Note that bin/python is now a symlink to bin/pypy.
 
 .. _pip: http://pypi.python.org/pypi/pip
-
+.. _ensurepip: https://docs.python.org/2.7/library/ensurepip.html
 
 Building PyPy yourself
 ~~~~~~~~~~~~~~~~~~~~~~
diff --git a/pypy/doc/release-pypy2.7-v5.3.1.rst b/pypy/doc/release-pypy2.7-v5.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-pypy2.7-v5.3.1.rst
@@ -0,0 +1,41 @@
+==========
+PyPy 5.3.1
+==========
+
+We have released a bugfix for PyPy2.7-v5.3.0, released last week,
+due to issues_ reported by users.
+
+Thanks to those who reported the issues.
+
+.. _issues: http://doc.pypy.org/en/latest/whatsnew-pypy2-5.3.1.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`PyPy and CPython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+We also welcome developers of other
+`dynamic languages`_ to see what RPython can do for them.
+
+This release supports:
+
+  * **x86** machines on most common operating systems
+    (Linux 32/64, Mac OS X 64, Windows 32, OpenBSD, FreeBSD),
+
+  * newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux,
+
+  * big- and little-endian variants of **PPC64** running Linux,
+
+  * **s390x** running Linux
+
+.. _`PyPy and CPython 2.7.x`: http://speed.pypy.org
+.. _`dynamic languages`: http://pypyjs.org
+
+Please update, and continue to help us make PyPy better.
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -5,6 +5,13 @@
 .. this is a revision shortly after release-pypy2.7-v5.3
 .. startrev: 873218a739f1
 
+.. 418b05f95db5
+Improve CPython compatibility for ``is``. Now code like ``if x is ():``
+works the same way as it does on CPython.  See http://pypy.readthedocs.io/en/latest/cpython_differences.html#object-identity-of-primitive-values-is-and-id .
+
+.. pull request #455
+Add sys.{get,set}dlopenflags, for cpyext extensions.
+
 .. branch: fix-gen-dfa
 
 Resolves an issue with the generator script to build the dfa for Python syntax.
@@ -19,3 +26,82 @@
 .. branch: s390x-5.3-catchup
 
 Implement the backend related changes for s390x.
+
+.. branch: incminimark-ll_assert
+.. branch: vmprof-openbsd
+
+.. branch: testing-cleanup
+
+Simplify handling of interp-level tests and make it more forward-
+compatible.
+
+.. branch: pyfile-tell
+Sync w_file with the c-level FILE* before returning FILE* in PyFile_AsFile
+
+.. branch: rw-PyString_AS_STRING
+Allow rw access to the char* returned from PyString_AS_STRING, also refactor
+PyStringObject to look like cpython's and allow subclassing PyString_Type and
+PyUnicode_Type
+
+.. branch: save_socket_errno
+
+Bug fix: if ``socket.socket()`` failed, the ``socket.error`` did not show
+the errno of the failing system call, but instead some random previous
+errno.
+
+.. branch: PyTuple_Type-subclass
+
+Refactor PyTupleObject to look like cpython's and allow subclassing 
+PyTuple_Type
+
+.. branch: call-via-pyobj
+
+Use offsets from PyTypeObject to find actual c function to call rather than
+fixed functions, allows function override after PyType_Ready is called
+
+.. branch: issue2335
+
+Avoid exhausting the stack in the JIT due to successive guard
+failures in the same Python function ending up as successive levels of
+RPython functions, while at app-level the traceback is very short
+
+.. branch: use-madv-free
+
+Try harder to return memory to the OS.  See e.g. issue #2336.  Note that it does
+not show up as a reduction of the VIRT column in ``top``, and the RES
+column might also not show the reduction, particularly on Linux >= 4.5 or
+on OS/X: it uses MADV_FREE, which only marks the pages as returnable to
+the OS if the memory is low.
+
+.. branch: cpyext-slotdefs2
+
+Fill in more slots when creating a PyTypeObject from a W_TypeObject
+More slots are still TBD, like tp_print and richcmp
+
+.. branch: json-surrogates
+
+Align the json module decode with CPython's impl, fixes issue 2345
+
+.. branch: issue2343
+
+Copy CPython's logic more closely for handling of ``__instancecheck__()``
+and ``__subclasscheck__()``.  Fixes issue 2343.
+
+.. branch: msvcrt-cffi
+
+Rewrite the Win32 dependencies of 'subprocess' to use cffi instead
+of ctypes. This avoids importing ctypes in many small programs and
+scripts, which in turn avoids enabling threads (because ctypes
+creates callbacks at import time, and callbacks need threads).
+
+.. branch: new-jit-log
+
+The new logging facility that integrates with and adds features to vmprof.com.
+
+.. branch: jitlog-32bit
+
+Resolve issues to use the new logging facility on a 32bit system
+
+.. branch: ep2016sprint
+
+Trying harder to make hash(-1) return -2, like it does on CPython
diff --git a/pypy/doc/whatsnew-pypy2-5.3.1.rst b/pypy/doc/whatsnew-pypy2-5.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-pypy2-5.3.1.rst
@@ -0,0 +1,15 @@
+===========================
+What's new in PyPy2.7 5.3.1
+===========================
+
+.. this is a revision shortly after release-pypy2.7-v5.3.0
+.. startrev: f4d726d1a010
+
+
+A bug-fix release, merging these changes:
+
+  * Add include guards to pymem.h, fixes issue #2321
+
+  * Make vmprof build on OpenBSD, from pull request #456
+
+  * Fix ``bytearray('').replace('a', 'ab')``, issue #2324
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -419,13 +419,16 @@
                     target_depth -= 2
                 elif (jump_op == ops.SETUP_FINALLY or
                       jump_op == ops.SETUP_EXCEPT or
-                      jump_op == ops.SETUP_WITH):
+                      jump_op == ops.SETUP_WITH or
+                      jump_op == ops.SETUP_ASYNC_WITH):
                     if jump_op == ops.SETUP_FINALLY:
                         target_depth += 4
                     elif jump_op == ops.SETUP_EXCEPT:
                         target_depth += 4
                     elif jump_op == ops.SETUP_WITH:
                         target_depth += 3
+                    elif jump_op == ops.SETUP_ASYNC_WITH:
+                        target_depth += 3
                     if target_depth > self._max_depth:
                         self._max_depth = target_depth
                 elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
@@ -640,6 +643,13 @@
     ops.LOAD_DEREF: 1,
     ops.STORE_DEREF: -1,
     ops.DELETE_DEREF: 0,
+    
+    ops.GET_AWAITABLE: 0,
+    ops.SETUP_ASYNC_WITH: 2,
+    ops.BEFORE_ASYNC_WITH: -1,
+    ops.GET_AITER: 0,
+    ops.GET_ANEXT: 1,
+    ops.GET_YIELD_FROM_ITER: 0,
 
     ops.LOAD_CONST: 1,
 
@@ -658,6 +668,8 @@
 
     # TODO 
     ops.BUILD_LIST_FROM_ARG: 1,
+    # TODO
+    ops.LOAD_CLASSDEREF: 1,
 }
 
 
diff --git a/pypy/interpreter/astcompiler/assemble.py.orig b/pypy/interpreter/astcompiler/assemble.py.orig
deleted file mode 100644
--- a/pypy/interpreter/astcompiler/assemble.py.orig
+++ /dev/null
@@ -1,765 +0,0 @@
-"""Python control flow graph generation and bytecode assembly."""
-
-import os
-from rpython.rlib import rfloat
-from rpython.rlib.objectmodel import specialize, we_are_translated
-
-from pypy.interpreter.astcompiler import ast, consts, misc, symtable
-from pypy.interpreter.error import OperationError
-from pypy.interpreter.pycode import PyCode
-from pypy.tool import stdlib_opcode as ops
-
-
-class StackDepthComputationError(Exception):
-    pass
-
-
-class Instruction(object):
-    """Represents a single opcode."""
-
-    def __init__(self, opcode, arg=0):
-        self.opcode = opcode
-        self.arg = arg
-        self.lineno = 0
-        self.has_jump = False
-
-    def size(self):
-        """Return the size of bytes of this instruction when it is
-        encoded.
-        """
-        if self.opcode >= ops.HAVE_ARGUMENT:
-            return (6 if self.arg > 0xFFFF else 3)
-        return 1
-
-    def jump_to(self, target, absolute=False):
-        """Indicate the target this jump instruction.
-
-        The opcode must be a JUMP opcode.
-        """
-        self.jump = (target, absolute)
-        self.has_jump = True
-
-    def __repr__(self):
-        data = [ops.opname[self.opcode]]
-        template = "<%s"
-        if self.opcode >= ops.HAVE_ARGUMENT:
-            data.append(self.arg)
-            template += " %i"
-            if self.has_jump:
-                data.append(self.jump[0])
-                template += " %s"
-        template += ">"
-        return template % tuple(data)
-
-
-class Block(object):
-    """A basic control flow block.
-
-    It has one entry point and several possible exit points.  Its
-    instructions may be jumps to other blocks, or if control flow
-    reaches the end of the block, it continues to next_block.
-    """
-
-    marked = False
-    have_return = False
-    auto_inserted_return = False
-
-    def __init__(self):
-        self.instructions = []
-        self.next_block = None
-
-    def _post_order_see(self, stack, nextblock):
-        if nextblock.marked == 0:
-            nextblock.marked = 1
-            stack.append(nextblock)
-
-    def post_order(self):
-        """Return this block and its children in post order.  This means
-        that the graph of blocks is first cleaned up to ignore
-        back-edges, thus turning it into a DAG.  Then the DAG is
-        linearized.  For example:
-
-                   A --> B -\           =>     [A, D, B, C]
-                     \-> D ---> C
-        """
-        resultblocks = []
-        stack = [self]
-        self.marked = 1
-        while stack:
-            current = stack[-1]
-            if current.marked == 1:
-                current.marked = 2
-                if current.next_block is not None:
-                    self._post_order_see(stack, current.next_block)
-            else:
-                i = current.marked - 2
-                assert i >= 0
-                while i < len(current.instructions):
-                    instr = current.instructions[i]
-                    i += 1
-                    if instr.has_jump:
-                        current.marked = i + 2
-                        self._post_order_see(stack, instr.jump[0])
-                        break
-                else:
-                    resultblocks.append(current)
-                    stack.pop()
-        resultblocks.reverse()
-        return resultblocks
-
-    def code_size(self):
-        """Return the encoded size of all the instructions in this
-        block.
-        """
-        i = 0
-        for instr in self.instructions:
-            i += instr.size()
-        return i
-
-    def get_code(self):
-        """Encode the instructions in this block into bytecode."""
-        code = []
-        for instr in self.instructions:
-            opcode = instr.opcode
-            if opcode >= ops.HAVE_ARGUMENT:
-                arg = instr.arg
-                if instr.arg > 0xFFFF:
-                    ext = arg >> 16
-                    code.append(chr(ops.EXTENDED_ARG))
-                    code.append(chr(ext & 0xFF))
-                    code.append(chr(ext >> 8))
-                    arg &= 0xFFFF
-                code.append(chr(opcode))
-                code.append(chr(arg & 0xFF))
-                code.append(chr(arg >> 8))
-            else:
-                code.append(chr(opcode))
-        return ''.join(code)
-
-
-def _make_index_dict_filter(syms, flag):
-    i = 0
-    result = {}
-    for name, scope in syms.iteritems():
-        if scope == flag:
-            result[name] = i
-            i += 1
-    return result
-
-
-@specialize.argtype(0)
-def _iter_to_dict(iterable, offset=0):
-    result = {}
-    index = offset
-    for item in iterable:
-        result[item] = index
-        index += 1
-    return result
-
-
-class PythonCodeMaker(ast.ASTVisitor):
-    """Knows how to assemble a PyCode object."""
-
-    def __init__(self, space, name, first_lineno, scope, compile_info):
-        self.space = space
-        self.name = name
-        self.first_lineno = first_lineno
-        self.compile_info = compile_info
-        self.first_block = self.new_block()
-        self.use_block(self.first_block)
-        self.names = {}
-        self.var_names = _iter_to_dict(scope.varnames)
-        self.cell_vars = _make_index_dict_filter(scope.symbols,
-                                                 symtable.SCOPE_CELL)
-        self.free_vars = _iter_to_dict(scope.free_vars, len(self.cell_vars))
-        self.w_consts = space.newdict()
-        self.argcount = 0
-        self.kwonlyargcount = 0
-        self.lineno_set = False
-        self.lineno = 0
-        self.add_none_to_final_return = True
-
-    def new_block(self):
-        return Block()
-
-    def use_block(self, block):
-        """Start emitting bytecode into block."""
-        self.current_block = block
-        self.instrs = block.instructions
-
-    def use_next_block(self, block=None):
-        """Set this block as the next_block for the last and use it."""
-        if block is None:
-            block = self.new_block()
-        self.current_block.next_block = block
-        self.use_block(block)
-        return block
-
-    def is_dead_code(self):
-        """Return False if any code can be meaningfully added to the
-        current block, or True if it would be dead code."""
-        # currently only True after a RETURN_VALUE.
-        return self.current_block.have_return
-
-    def emit_op(self, op):
-        """Emit an opcode without an argument."""
-        instr = Instruction(op)
-        if not self.lineno_set:
-            instr.lineno = self.lineno
-            self.lineno_set = True
-        if not self.is_dead_code():
-            self.instrs.append(instr)
-            if op == ops.RETURN_VALUE:
-                self.current_block.have_return = True
-        return instr
-
-    def emit_op_arg(self, op, arg):
-        """Emit an opcode with an integer argument."""
-        instr = Instruction(op, arg)
-        if not self.lineno_set:
-            instr.lineno = self.lineno
-            self.lineno_set = True
-        if not self.is_dead_code():
-            self.instrs.append(instr)
-
-    def emit_op_name(self, op, container, name):
-        """Emit an opcode referencing a name."""
-        self.emit_op_arg(op, self.add_name(container, name))
-
-    def emit_jump(self, op, block_to, absolute=False):
-        """Emit a jump opcode to another block."""
-        self.emit_op(op).jump_to(block_to, absolute)
-
-    def add_name(self, container, name):
-        """Get the index of a name in container."""
-        name = self.scope.mangle(name)
-        try:
-            index = container[name]
-        except KeyError:
-            index = len(container)
-            container[name] = index
-        return index
-
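The same get-or-assign-index idiom in isolation (name mangling is left out; add_name_plain is a hypothetical stand-alone version):

def add_name_plain(container, name):
    # look the name up; if it is new, give it the next free index
    try:
        index = container[name]
    except KeyError:
        index = len(container)
        container[name] = index
    return index

names = {}
assert add_name_plain(names, 'spam') == 0   # first use allocates index 0
assert add_name_plain(names, 'eggs') == 1
assert add_name_plain(names, 'spam') == 0   # later uses reuse the same slot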
-    def add_const(self, obj):
-        """Add a W_Root to the constant array and return its location."""
-        space = self.space
-        # To avoid confusing equal but distinct types, we also store the type
-        # of the constant in the key dictionary.  Moreover, we have to keep
-        # the difference between -0.0 and 0.0 floats, and do so recursively
-        # inside tuples.
-        w_key = self._make_key(obj)
-
-        w_len = space.finditem(self.w_consts, w_key)
-        if w_len is None:
-            w_len = space.len(self.w_consts)
-            space.setitem(self.w_consts, w_key, w_len)
-        if space.int_w(w_len) == 0:
-            self.scope.doc_removable = False
-        return space.int_w(w_len)
-
-    def _make_key(self, obj):
-        # see the tests 'test_zeros_not_mixed*' in ../test/test_compiler.py
-        space = self.space
-        w_type = space.type(obj)
-        if space.is_w(w_type, space.w_float):
-            val = space.float_w(obj)
-            if val == 0.0 and rfloat.copysign(1., val) < 0:
-                w_key = space.newtuple([obj, space.w_float, space.w_None])
-            else:
-                w_key = space.newtuple([obj, space.w_float])
-        elif space.is_w(w_type, space.w_complex):
-            w_real = space.getattr(obj, space.wrap("real"))
-            w_imag = space.getattr(obj, space.wrap("imag"))
-            real = space.float_w(w_real)
-            imag = space.float_w(w_imag)
-            real_negzero = (real == 0.0 and
-                            rfloat.copysign(1., real) < 0)
-            imag_negzero = (imag == 0.0 and
-                            rfloat.copysign(1., imag) < 0)
-            if real_negzero and imag_negzero:
-                tup = [obj, space.w_complex, space.w_None, space.w_None,
-                       space.w_None]
-            elif imag_negzero:
-                tup = [obj, space.w_complex, space.w_None, space.w_None]
-            elif real_negzero:
-                tup = [obj, space.w_complex, space.w_None]
-            else:
-                tup = [obj, space.w_complex]
-            w_key = space.newtuple(tup)
-        elif space.is_w(w_type, space.w_tuple):
-            result_w = [obj, w_type]
-            for w_item in space.fixedview(obj):
-                result_w.append(self._make_key(w_item))
-            w_key = space.newtuple(result_w[:])
-        elif isinstance(obj, PyCode):
-            w_key = space.newtuple([obj, w_type, space.id(obj)])
-        else:
-            w_key = space.newtuple([obj, w_type])
-        return w_key
-
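A plain-Python illustration of why the key has to carry the type and the sign of zero: values that merely compare equal would otherwise collapse into one constant slot (make_const_key is a simplified stand-in for _make_key, not the RPython code):

import math

def make_const_key(obj):
    # the type keeps 0, 0.0 and False apart; the extra marker keeps -0.0
    # apart from 0.0 (they compare and hash equal)
    if isinstance(obj, float) and obj == 0.0 and math.copysign(1.0, obj) < 0:
        return (obj, float, None)
    return (obj, type(obj))

consts = {}
for value in (0, 0.0, -0.0, False):
    consts.setdefault(make_const_key(value), len(consts))

# With the bare values as keys there would be a single entry; with the
# decorated keys each constant gets its own index.
assert len(consts) == 4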
-    def load_const(self, obj):
-        index = self.add_const(obj)
-        self.emit_op_arg(ops.LOAD_CONST, index)
-
-    def update_position(self, lineno, force=False):
-        """Possibly change the lineno for the next instructions."""
-        if force or lineno > self.lineno:
-            self.lineno = lineno
-            self.lineno_set = False
-
-    def _resolve_block_targets(self, blocks):
-        """Compute the arguments of jump instructions."""
-        last_extended_arg_count = 0
-        # The reason for this loop is extended jumps.  EXTENDED_ARG
-        # extends the bytecode size, so it might invalidate the offsets
-        # we've already given.  Thus we have to loop until the number of
-        # extended args is stable.  Any extended jump at all is
-        # extremely rare, so performance is not too concerning.
-        while True:
-            extended_arg_count = 0
-            offset = 0
-            force_redo = False
-            # Calculate the code offset of each block.
-            for block in blocks:
-                block.offset = offset
-                offset += block.code_size()
-            for block in blocks:
-                offset = block.offset
-                for instr in block.instructions:
-                    offset += instr.size()
-                    if instr.has_jump:
-                        target, absolute = instr.jump
-                        op = instr.opcode
-                        # Optimize an unconditional jump going to another
-                        # unconditional jump.
-                        if op == ops.JUMP_ABSOLUTE or op == ops.JUMP_FORWARD:
-                            if target.instructions:
-                                target_op = target.instructions[0].opcode
-                                if target_op == ops.JUMP_ABSOLUTE:
-                                    target = target.instructions[0].jump[0]
-                                    instr.opcode = ops.JUMP_ABSOLUTE
-                                    absolute = True
-                                elif target_op == ops.RETURN_VALUE:
-                                    # Replace JUMP_* to a RETURN into
-                                    # just a RETURN
-                                    instr.opcode = ops.RETURN_VALUE
-                                    instr.arg = 0
-                                    instr.has_jump = False
-                                    # The size of the code changed,
-                                    # we have to trigger another pass
-                                    force_redo = True
-                                    continue
-                        if absolute:
-                            jump_arg = target.offset
-                        else:
-                            jump_arg = target.offset - offset
-                        instr.arg = jump_arg
-                        if jump_arg > 0xFFFF:
-                            extended_arg_count += 1
-            if (extended_arg_count == last_extended_arg_count and
-                not force_redo):
-                break
-            else:
-                last_extended_arg_count = extended_arg_count
-
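The loop above reduced to its core: jump arguments depend on offsets, offsets depend on sizes, and a size grows when its argument needs an EXTENDED_ARG prefix, so the layout is recomputed until it stops changing. resolve_jumps below is an invented toy that models absolute jumps only and assumes a 3-byte EXTENDED_ARG prefix:

def resolve_jumps(instrs):
    # instrs: list of (base_size, jump_target_index_or_None)
    extra = [0] * len(instrs)             # EXTENDED_ARG bytes per instruction
    while True:
        offsets, pos = [], 0
        for (size, _), pad in zip(instrs, extra):
            offsets.append(pos)
            pos += size + pad
        new_extra = [
            3 if target is not None and offsets[target] > 0xFFFF else 0
            for _, target in instrs
        ]
        if new_extra == extra:            # stable layout: fixpoint reached
            return offsets
        extra = new_extra

# A jump whose target sits past 0xFFFF forces one extra pass:
print(resolve_jumps([(3, 30000)] + [(3, None)] * 30000)[30000])   # -> 90003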
-    def _build_consts_array(self):
-        """Turn the applevel constants dictionary into a list."""
-        w_consts = self.w_consts
-        space = self.space
-        consts_w = [space.w_None] * space.len_w(w_consts)
-        w_iter = space.iter(w_consts)
-        first = space.wrap(0)
-        while True:
-            try:
-                w_key = space.next(w_iter)
-            except OperationError as e:
-                if not e.match(space, space.w_StopIteration):
-                    raise
-                break
-            w_index = space.getitem(w_consts, w_key)
-            w_constant = space.getitem(w_key, first)
-            w_constant = misc.intern_if_common_string(space, w_constant)
-            consts_w[space.int_w(w_index)] = w_constant
-        return consts_w
-
-    def _get_code_flags(self):
-        """Get the extra flags that should be attached to the code object."""
-        raise NotImplementedError
-
-    def _stacksize(self, blocks):
-        """Compute co_stacksize."""
-        for block in blocks:
-            block.initial_depth = 0
-        # Assumes that it is sufficient to walk the blocks in 'post-order'.
-        # This means we ignore all back-edges, but apart from that, we only
-        # look into a block when all the previous blocks have been done.
-        self._max_depth = 0
-        for block in blocks:
-            depth = self._do_stack_depth_walk(block)
-            if block.auto_inserted_return and depth != 0:
-                os.write(2, "StackDepthComputationError in %s at %s:%s\n" % (
-                    self.compile_info.filename, self.name, self.first_lineno))
-                raise StackDepthComputationError   # fatal error
-        return self._max_depth
-
-    def _next_stack_depth_walk(self, nextblock, depth):
-        if depth > nextblock.initial_depth:
-            nextblock.initial_depth = depth
-
-    def _do_stack_depth_walk(self, block):
-        depth = block.initial_depth
-        for instr in block.instructions:
-            depth += _opcode_stack_effect(instr.opcode, instr.arg)
-            if depth >= self._max_depth:
-                self._max_depth = depth
-            jump_op = instr.opcode
-            if instr.has_jump:
-                target_depth = depth
-                if jump_op == ops.FOR_ITER:
-                    target_depth -= 2
-                elif (jump_op == ops.SETUP_FINALLY or
-                      jump_op == ops.SETUP_EXCEPT or
-                      jump_op == ops.SETUP_WITH):
-                    if jump_op == ops.SETUP_FINALLY:
-                        target_depth += 4
-                    elif jump_op == ops.SETUP_EXCEPT:
-                        target_depth += 4
-                    elif jump_op == ops.SETUP_WITH:
-                        target_depth += 3
-                    if target_depth > self._max_depth:
-                        self._max_depth = target_depth
-                elif (jump_op == ops.JUMP_IF_TRUE_OR_POP or
-                      jump_op == ops.JUMP_IF_FALSE_OR_POP):
-                    depth -= 1
-                self._next_stack_depth_walk(instr.jump[0], target_depth)
-                if jump_op == ops.JUMP_ABSOLUTE or jump_op == ops.JUMP_FORWARD:
-                    # Nothing more can occur.
-                    break
-            elif jump_op == ops.RETURN_VALUE or jump_op == ops.RAISE_VARARGS:
-                # Nothing more can occur.
-                break
-        else:
-            if block.next_block:
-                self._next_stack_depth_walk(block.next_block, depth)
-        return depth
-
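The same forward walk on a toy control-flow graph: the depth at block entry is propagated along fall-through and jump edges, and the running maximum becomes co_stacksize. ToyBlock and the per-instruction effects are made up; the real effects come from _static_opcode_stack_effects further down:

class ToyBlock(object):
    def __init__(self, effects, jump=None):
        self.effects = effects            # stack effect of each instruction
        self.jump = jump                  # optional jump successor
        self.next_block = None            # fall-through successor
        self.initial_depth = 0

def max_stack_depth(blocks):
    # blocks are listed so that every block comes before its successors
    max_depth = 0
    for block in blocks:
        depth = block.initial_depth
        for effect in block.effects:
            depth += effect
            max_depth = max(max_depth, depth)
        for succ in (block.jump, block.next_block):
            if succ is not None and depth > succ.initial_depth:
                succ.initial_depth = depth
    return max_depth

exit_block = ToyBlock([-1])                      # e.g. RETURN_VALUE pops one
entry = ToyBlock([1, 1, -1], jump=exit_block)    # push, push, pop, jump
entry.next_block = exit_block
print(max_stack_depth([entry, exit_block]))      # -> 2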
-    def _build_lnotab(self, blocks):
-        """Build the line number table for tracebacks and tracing."""
-        current_line = self.first_lineno
-        current_off = 0
-        table = []
-        push = table.append
-        for block in blocks:
-            offset = block.offset
-            for instr in block.instructions:
-                if instr.lineno:
-                    # compute deltas
-                    line = instr.lineno - current_line
-                    if line < 0:
-                        continue
-                    addr = offset - current_off
-                    # Python assumes that lineno always increases with
-                    # increasing bytecode address (lnotab is unsigned
-                    # char).  Depending on when SET_LINENO instructions
-                    # are emitted this is not always true.  Consider the
-                    # code:
-                    #     a = (1,
-                    #          b)
-                    # In the bytecode stream, the assignment to "a"
-                    # occurs after the loading of "b".  This works with
-                    # the C Python compiler because it only generates a
-                    # SET_LINENO instruction for the assignment.
-                    if line or addr:
-                        while addr > 255:
-                            push(chr(255))
-                            push(chr(0))
-                            addr -= 255
-                        while line > 255:
-                            push(chr(addr))
-                            push(chr(255))
-                            line -= 255
-                            addr = 0
-                        push(chr(addr))
-                        push(chr(line))
-                        current_line = instr.lineno
-                        current_off = offset
-                offset += instr.size()
-        return ''.join(table)
-
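A worked example of the encoding built here: lnotab is a flat sequence of (bytecode delta, line delta) byte pairs, and any delta larger than 255 is split over several pairs. build_lnotab is a hypothetical plain-Python rewrite of the loop above, taking (offset, lineno) pairs for the first instruction of each line:

def build_lnotab(pairs, first_lineno):
    table, current_off, current_line = [], 0, first_lineno
    for offset, lineno in pairs:
        addr, line = offset - current_off, lineno - current_line
        if line < 0:
            continue                      # negative deltas cannot be encoded
        if line or addr:
            while addr > 255:             # split large bytecode deltas
                table += [255, 0]
                addr -= 255
            while line > 255:             # split large line deltas
                table += [addr, 255]
                line, addr = line - 255, 0
            table += [addr, line]
            current_off, current_line = offset, lineno
    return bytes(table)

# Instructions start at offset 0 (line 1), offset 6 (line 2), offset 300 (line 4):
print(build_lnotab([(0, 1), (6, 2), (300, 4)], first_lineno=1).hex())
# -> '0601ff002702': +6 bytes/+1 line, then +255 bytes/+0 lines, +39 bytes/+2 lines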
-    def assemble(self):
-        """Build a PyCode object."""
-        # Unless it's interactive, every code object must end in a return.
-        if not self.current_block.have_return:
-            self.use_next_block()
-            if self.add_none_to_final_return:
-                self.load_const(self.space.w_None)
-            self.emit_op(ops.RETURN_VALUE)
-            self.current_block.auto_inserted_return = True
-        # Set the first lineno if it is not already explicitly set.
-        if self.first_lineno == -1:
-            if self.first_block.instructions:
-                self.first_lineno = self.first_block.instructions[0].lineno
-            else:
-                self.first_lineno = 1
-        blocks = self.first_block.post_order()
-        self._resolve_block_targets(blocks)
-        lnotab = self._build_lnotab(blocks)
-        stack_depth = self._stacksize(blocks)
-        consts_w = self._build_consts_array()
-        names = _list_from_dict(self.names)
-        var_names = _list_from_dict(self.var_names)
-        cell_names = _list_from_dict(self.cell_vars)
-        free_names = _list_from_dict(self.free_vars, len(cell_names))
-        flags = self._get_code_flags()
-        # (Only) inherit compilerflags in PyCF_MASK
-        flags |= (self.compile_info.flags & consts.PyCF_MASK)
-        bytecode = ''.join([block.get_code() for block in blocks])
-        return PyCode(self.space,
-                      self.argcount,
-                      self.kwonlyargcount,
-                      len(self.var_names),
-                      stack_depth,
-                      flags,
-                      bytecode,
-                      list(consts_w),
-                      names,
-                      var_names,
-                      self.compile_info.filename,
-                      self.name,
-                      self.first_lineno,
-                      lnotab,
-                      free_names,
-                      cell_names,
-                      self.compile_info.hidden_applevel)
-
-
-def _list_from_dict(d, offset=0):
-    result = [None] * len(d)
-    for obj, index in d.iteritems():
-        result[index - offset] = obj
-    return result
-
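For instance, it inverts the index maps built by _make_index_dict_filter and _iter_to_dict above, with offset used for the free variables that were numbered after the cell variables (the values here reuse the earlier hypothetical sketch):

def list_from_dict(d, offset=0):
    result = [None] * len(d)
    for obj, index in d.items():
        result[index - offset] = obj
    return result

cell_vars = {'x': 0, 'z': 1}
free_vars = {'a': 2, 'b': 3}                              # numbered after the cells
print(list_from_dict(cell_vars))                          # ['x', 'z']
print(list_from_dict(free_vars, offset=len(cell_vars)))   # ['a', 'b']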
-
-_static_opcode_stack_effects = {
-    ops.NOP: 0,
-
-    ops.POP_TOP: -1,
-    ops.ROT_TWO: 0,
-    ops.ROT_THREE: 0,
-    ops.DUP_TOP: 1,
-    ops.DUP_TOP_TWO: 2,
-
-    ops.UNARY_POSITIVE: 0,
-    ops.UNARY_NEGATIVE: 0,
-    ops.UNARY_NOT: 0,
-    ops.UNARY_INVERT: 0,
-
-    ops.LIST_APPEND: -1,
-    ops.SET_ADD: -1,
-    ops.MAP_ADD: -2,
-<<<<<<< local
-=======
-    # XXX 
-    ops.STORE_MAP: -2,
->>>>>>> other
-
-    ops.BINARY_POWER: -1,
-    ops.BINARY_MULTIPLY: -1,
-    ops.BINARY_MODULO: -1,
-    ops.BINARY_ADD: -1,
-    ops.BINARY_SUBTRACT: -1,
-    ops.BINARY_SUBSCR: -1,
-    ops.BINARY_FLOOR_DIVIDE: -1,
-    ops.BINARY_TRUE_DIVIDE: -1,
-    ops.BINARY_MATRIX_MULTIPLY: -1,
-    ops.BINARY_LSHIFT: -1,
-    ops.BINARY_RSHIFT: -1,
-    ops.BINARY_AND: -1,
-    ops.BINARY_OR: -1,
-    ops.BINARY_XOR: -1,
-
-    ops.INPLACE_FLOOR_DIVIDE: -1,
-    ops.INPLACE_TRUE_DIVIDE: -1,
-    ops.INPLACE_ADD: -1,
-    ops.INPLACE_SUBTRACT: -1,
-    ops.INPLACE_MULTIPLY: -1,
-    ops.INPLACE_MODULO: -1,
-    ops.INPLACE_POWER: -1,
-    ops.INPLACE_MATRIX_MULTIPLY: -1,
-    ops.INPLACE_LSHIFT: -1,
-    ops.INPLACE_RSHIFT: -1,
-    ops.INPLACE_AND: -1,
-    ops.INPLACE_OR: -1,
-    ops.INPLACE_XOR: -1,
-
-    ops.STORE_SUBSCR: -3,
-    ops.DELETE_SUBSCR: -2,
-
-    ops.GET_ITER: 0,
-    ops.FOR_ITER: 1,
-    ops.BREAK_LOOP: 0,
-    ops.CONTINUE_LOOP: 0,
-    ops.SETUP_LOOP: 0,
-
-    ops.PRINT_EXPR: -1,
-
-<<<<<<< local
-    ops.WITH_CLEANUP_START: -1,
-    ops.WITH_CLEANUP_FINISH: -1,  # XXX Sometimes more
-=======
-    # TODO 
-    ops.WITH_CLEANUP: -1,
->>>>>>> other
-    ops.LOAD_BUILD_CLASS: 1,
-<<<<<<< local
-=======
-    # TODO 
-    ops.STORE_LOCALS: -1,
->>>>>>> other
-    ops.POP_BLOCK: 0,
-    ops.POP_EXCEPT: -1,
-    ops.END_FINALLY: -4,     # assume always 4: we pretend that SETUP_FINALLY
-                             # pushes 4.  In truth, it would only push 1 and
-                             # the corresponding END_FINALLY only pops 1.
-    ops.SETUP_WITH: 1,
-    ops.SETUP_FINALLY: 0,
-    ops.SETUP_EXCEPT: 0,
-
-    ops.RETURN_VALUE: -1,
-    ops.YIELD_VALUE: 0,
-    ops.YIELD_FROM: -1,
-    ops.COMPARE_OP: -1,
-
-    # TODO 
-    ops.LOOKUP_METHOD: 1,
-
-    ops.LOAD_NAME: 1,
-    ops.STORE_NAME: -1,
-    ops.DELETE_NAME: 0,
-
-    ops.LOAD_FAST: 1,
-    ops.STORE_FAST: -1,
-    ops.DELETE_FAST: 0,
-
-    ops.LOAD_ATTR: 0,
-    ops.STORE_ATTR: -2,
-    ops.DELETE_ATTR: -1,
-
-    ops.LOAD_GLOBAL: 1,
-    ops.STORE_GLOBAL: -1,
-    ops.DELETE_GLOBAL: 0,
-    ops.DELETE_DEREF: 0,
-
-    ops.LOAD_CLOSURE: 1,
-    ops.LOAD_DEREF: 1,
-    ops.STORE_DEREF: -1,
-    ops.DELETE_DEREF: 0,

