[pypy-commit] pypy dtrace-support: merge default

fijal noreply at buildbot.pypy.org
Thu Apr 9 14:02:27 CEST 2015


Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: dtrace-support
Changeset: r76752:e7893a492224
Date: 2015-04-09 12:11 +0200
http://bitbucket.org/pypy/pypy/changeset/e7893a492224/

Log:	merge default

diff too long, truncating to 2000 out of 14913 lines

diff --git a/.hgtags b/.hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -3,11 +3,10 @@
 d8ac7d23d3ec5f9a0fa1264972f74a010dbfd07f release-1.6
 ff4af8f318821f7f5ca998613a60fca09aa137da release-1.7
 07e08e9c885ca67d89bcc304e45a32346daea2fa release-2.0-beta-1
-9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm
-9b623bc48b5950cf07184462a0e48f2c4df0d720 pypy-2.1-beta1-arm
 ab0dd631c22015ed88e583d9fdd4c43eebf0be21 pypy-2.1-beta1-arm
 20e51c4389ed4469b66bb9d6289ce0ecfc82c4b9 release-2.3.0
 394146e9bb673514c61f0150ab2013ccf78e8de7 release-2.3
 32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.2=3.1
 32f35069a16d819b58c1b6efb17c44e3e53397b2 release-2.3.1
 10f1b29a2bd21f837090286174a9ca030b8680b2 release-2.5.0
+9c4588d731b7fe0b08669bd732c2b676cb0a8233 release-2.5.1
diff --git a/.tddium.requirements.txt b/.tddium.requirements.txt
deleted file mode 100644
--- a/.tddium.requirements.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-pytest
diff --git a/lib-python/2.7/test/test_urllib2net.py b/lib-python/2.7/test/test_urllib2net.py
--- a/lib-python/2.7/test/test_urllib2net.py
+++ b/lib-python/2.7/test/test_urllib2net.py
@@ -102,11 +102,8 @@
 
     def test_ftp(self):
         urls = [
-            'ftp://ftp.kernel.org/pub/linux/kernel/README',
-            'ftp://ftp.kernel.org/pub/linux/kernel/non-existent-file',
-            #'ftp://ftp.kernel.org/pub/leenox/kernel/test',
-            'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
-                '/research-reports/00README-Legal-Rules-Regs',
+            'ftp://ftp.debian.org/debian/README',
+            'ftp://ftp.debian.org/debian/non-existent-file',
             ]
         self._test_urls(urls, self._extra_handlers())
 
@@ -255,6 +252,7 @@
         with test_support.transient_internet(url, timeout=None):
             u = _urlopen_with_retry(url)
             self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
+            u.close()
 
     def test_http_default_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
@@ -266,6 +264,7 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 60)
+            u.close()
 
     def test_http_no_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
@@ -277,20 +276,23 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertIsNone(u.fp._sock.fp._sock.gettimeout())
+            u.close()
 
     def test_http_timeout(self):
         url = "http://www.example.com"
         with test_support.transient_internet(url):
             u = _urlopen_with_retry(url, timeout=120)
             self.assertEqual(u.fp._sock.fp._sock.gettimeout(), 120)
+            u.close()
 
-    FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"
+    FTP_HOST = 'ftp://ftp.debian.org/debian/'
 
     def test_ftp_basic(self):
         self.assertIsNone(socket.getdefaulttimeout())
         with test_support.transient_internet(self.FTP_HOST, timeout=None):
             u = _urlopen_with_retry(self.FTP_HOST)
             self.assertIsNone(u.fp.fp._sock.gettimeout())
+            u.close()
 
     def test_ftp_default_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout())
@@ -301,6 +303,7 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertEqual(u.fp.fp._sock.gettimeout(), 60)
+            u.close()
 
     def test_ftp_no_timeout(self):
         self.assertIsNone(socket.getdefaulttimeout(),)
@@ -311,11 +314,16 @@
             finally:
                 socket.setdefaulttimeout(None)
             self.assertIsNone(u.fp.fp._sock.gettimeout())
+            u.close()
 
     def test_ftp_timeout(self):
         with test_support.transient_internet(self.FTP_HOST):
-            u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
+            try:
+                u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
+            except:
+                raise
             self.assertEqual(u.fp.fp._sock.gettimeout(), 60)
+            u.close()
 
 
 def test_main():
diff --git a/lib_pypy/_sqlite3.py b/lib_pypy/_sqlite3.py
--- a/lib_pypy/_sqlite3.py
+++ b/lib_pypy/_sqlite3.py
@@ -454,6 +454,7 @@
         self.__cursors_counter = 0
         self.__statements = []
         self.__statements_counter = 0
+        self.__rawstatements = set()
         self._statement_cache = _StatementCache(self, cached_statements)
 
         self.__func_cache = {}
@@ -483,6 +484,14 @@
 
         self.__do_all_statements(Statement._finalize, True)
 
+        # depending on when this close() is called, the statements' weakrefs
+        # may be already dead, even though Statement.__del__() was not called
+        # yet.  In this case, self.__rawstatements is not empty.
+        if self.__rawstatements is not None:
+            for stmt in list(self.__rawstatements):
+                self._finalize_raw_statement(stmt)
+            self.__rawstatements = None
+
         if self._db:
             ret = _lib.sqlite3_close(self._db)
             if ret != _lib.SQLITE_OK:
@@ -562,6 +571,7 @@
         self.__cursors = [r for r in self.__cursors if r() is not None]
 
     def _remember_statement(self, statement):
+        self.__rawstatements.add(statement._statement)
         self.__statements.append(weakref.ref(statement))
         self.__statements_counter += 1
         if self.__statements_counter < 200:
@@ -569,6 +579,11 @@
         self.__statements_counter = 0
         self.__statements = [r for r in self.__statements if r() is not None]
 
+    def _finalize_raw_statement(self, _statement):
+        if self.__rawstatements is not None:
+            self.__rawstatements.remove(_statement)
+            _lib.sqlite3_finalize(_statement)
+
     def __do_all_statements(self, action, reset_cursors):
         for weakref in self.__statements:
             statement = weakref()
@@ -1199,7 +1214,6 @@
 
     def __init__(self, connection, sql):
         self.__con = connection
-        self.__con._remember_statement(self)
 
         self._in_use = False
 
@@ -1244,17 +1258,19 @@
         if ret != _lib.SQLITE_OK:
             raise self.__con._get_exception(ret)
 
+        self.__con._remember_statement(self)
+
         tail = _ffi.string(next_char[0]).decode('utf-8')
         if _check_remaining_sql(tail):
             raise Warning("You can only execute one statement at a time.")
 
     def __del__(self):
         if self._statement:
-            _lib.sqlite3_finalize(self._statement)
+            self.__con._finalize_raw_statement(self._statement)
 
     def _finalize(self):
         if self._statement:
-            _lib.sqlite3_finalize(self._statement)
+            self.__con._finalize_raw_statement(self._statement)
             self._statement = None
         self._in_use = False
 
diff --git a/lib_pypy/_tkinter/app.py b/lib_pypy/_tkinter/app.py
--- a/lib_pypy/_tkinter/app.py
+++ b/lib_pypy/_tkinter/app.py
@@ -96,7 +96,7 @@
 
         if not self.threaded:
             # TCL is not thread-safe, calls needs to be serialized.
-            self._tcl_lock = threading.Lock()
+            self._tcl_lock = threading.RLock()
         else:
             self._tcl_lock = _DummyLock()
 
diff --git a/lib_pypy/_tkinter/tklib.py b/lib_pypy/_tkinter/tklib.py
--- a/lib_pypy/_tkinter/tklib.py
+++ b/lib_pypy/_tkinter/tklib.py
@@ -1,7 +1,7 @@
 # C bindings with libtcl and libtk.
 
 from cffi import FFI
-import sys
+import sys, os
 
 tkffi = FFI()
 
@@ -135,9 +135,12 @@
     linklibs = ['tcl', 'tk']
     libdirs = []
 else:
-    incdirs=['/usr/include/tcl']
-    linklibs=['tcl', 'tk']
-    libdirs = []
+    for _ver in ['', '8.6', '8.5', '']:
+        incdirs = ['/usr/include/tcl' + _ver]
+        linklibs = ['tcl' + _ver, 'tk' + _ver]
+        libdirs = []
+        if os.path.isdir(incdirs[0]):
+            break
 
 tklib = tkffi.verify("""
 #include <tcl.h>
diff --git a/lib_pypy/cffi.egg-info b/lib_pypy/cffi.egg-info
--- a/lib_pypy/cffi.egg-info
+++ b/lib_pypy/cffi.egg-info
@@ -1,6 +1,6 @@
 Metadata-Version: 1.0
 Name: cffi
-Version: 0.8.6+
+Version: 0.9.2
 Summary: Foreign Function Interface for Python calling C code.
 Home-page: http://cffi.readthedocs.org
 Author: Armin Rigo, Maciej Fijalkowski
diff --git a/lib_pypy/cffi/__init__.py b/lib_pypy/cffi/__init__.py
--- a/lib_pypy/cffi/__init__.py
+++ b/lib_pypy/cffi/__init__.py
@@ -4,8 +4,8 @@
 from .api import FFI, CDefError, FFIError
 from .ffiplatform import VerificationError, VerificationMissing
 
-__version__ = "0.8.6+"
-__version_info__ = (0, 8, 6, "plus")
+__version__ = "0.9.2"
+__version_info__ = (0, 9, 2)
 
 # The verifier module file names are based on the CRC32 of a string that
 # contains the following version number.  It may be older than __version__
diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py
--- a/lib_pypy/gdbm.py
+++ b/lib_pypy/gdbm.py
@@ -20,9 +20,11 @@
 } datum;
 
 datum gdbm_fetch(void*, datum);
+datum pygdbm_fetch(void*, char*, int);
 int gdbm_delete(void*, datum);
 int gdbm_store(void*, datum, datum, int);
 int gdbm_exists(void*, datum);
+int pygdbm_exists(void*, char*, int);
 
 int gdbm_reorganize(void*);
 
@@ -37,19 +39,29 @@
 ''')
 
 try:
+    verify_code = '''
+    #include "gdbm.h"
+
+    static datum pygdbm_fetch(GDBM_FILE gdbm_file, char *dptr, int dsize) {
+        datum key = {dptr, dsize};
+        return gdbm_fetch(gdbm_file, key);
+    }
+
+    static int pygdbm_exists(GDBM_FILE gdbm_file, char *dptr, int dsize) {
+        datum key = {dptr, dsize};
+        return gdbm_exists(gdbm_file, key);
+    }
+    
+    '''
     if sys.platform.startswith('freebsd'):
         import os.path
         _localbase = os.environ.get('LOCALBASE', '/usr/local')
-        lib = ffi.verify('''
-        #include "gdbm.h"
-        ''', libraries=['gdbm'],
+        lib = ffi.verify(verify_code, libraries=['gdbm'],
              include_dirs=[os.path.join(_localbase, 'include')],
              library_dirs=[os.path.join(_localbase, 'lib')]
         )
     else:
-        lib = ffi.verify('''
-        #include "gdbm.h"
-        ''', libraries=['gdbm'])
+        lib = ffi.verify(verify_code, libraries=['gdbm'])
 except cffi.VerificationError as e:
     # distutils does not preserve the actual message,
     # but the verification is simple enough that the
@@ -59,6 +71,13 @@
 class error(Exception):
     pass
 
+def _checkstr(key):
+    if isinstance(key, unicode):
+        key = key.encode("ascii")
+    if not isinstance(key, str):
+        raise TypeError("gdbm mappings have string indices only")
+    return key
+
 def _fromstr(key):
     if isinstance(key, unicode):
         key = key.encode("ascii")
@@ -107,12 +126,14 @@
 
     def __contains__(self, key):
         self._check_closed()
-        return lib.gdbm_exists(self.ll_dbm, _fromstr(key))
+        key = _checkstr(key)
+        return lib.pygdbm_exists(self.ll_dbm, key, len(key))
     has_key = __contains__
 
     def __getitem__(self, key):
         self._check_closed()
-        drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key))
+        key = _checkstr(key)        
+        drec = lib.pygdbm_fetch(self.ll_dbm, key, len(key))
         if not drec.dptr:
             raise KeyError(key)
         res = str(ffi.buffer(drec.dptr, drec.dsize))
diff --git a/lib_pypy/pyrepl/readline.py b/lib_pypy/pyrepl/readline.py
--- a/lib_pypy/pyrepl/readline.py
+++ b/lib_pypy/pyrepl/readline.py
@@ -73,7 +73,6 @@
     assume_immutable_completions = False
     use_brackets = False
     sort_in_column = True
-    tab_insert_spaces_if_stem_is_empty = False
 
     def error(self, msg="none"):
         pass    # don't show error messages by default
@@ -87,7 +86,7 @@
         return ''.join(b[p+1:self.pos])
 
     def get_completions(self, stem):
-        if len(stem) == 0 and self.tab_insert_spaces_if_stem_is_empty:
+        if len(stem) == 0 and self.more_lines is not None:
             b = self.buffer
             p = self.pos
             while p > 0 and b[p - 1] != '\n':
@@ -141,12 +140,16 @@
 
     def collect_keymap(self):
         return super(ReadlineAlikeReader, self).collect_keymap() + (
-            (r'\n', 'maybe-accept'),)
+            (r'\n', 'maybe-accept'),
+            (r'\<backspace>', 'backspace-dedent'),
+            )
 
     def __init__(self, console):
         super(ReadlineAlikeReader, self).__init__(console)
         self.commands['maybe_accept'] = maybe_accept
         self.commands['maybe-accept'] = maybe_accept
+        self.commands['backspace_dedent'] = backspace_dedent
+        self.commands['backspace-dedent'] = backspace_dedent
 
     def after_command(self, cmd):
         super(ReadlineAlikeReader, self).after_command(cmd)
@@ -164,6 +167,28 @@
                 if self.pos > len(self.buffer):
                     self.pos = len(self.buffer)
 
+def _get_this_line_indent(buffer, pos):
+    indent = 0
+    while pos > 0 and buffer[pos - 1] in " \t":
+        indent += 1
+        pos -= 1
+    if pos > 0 and buffer[pos - 1] == "\n":
+        return indent
+    return 0
+
+def _get_previous_line_indent(buffer, pos):
+    prevlinestart = pos
+    while prevlinestart > 0 and buffer[prevlinestart - 1] != "\n":
+        prevlinestart -= 1
+    prevlinetext = prevlinestart
+    while prevlinetext < pos and buffer[prevlinetext] in " \t":
+        prevlinetext += 1
+    if prevlinetext == pos:
+        indent = None
+    else:
+        indent = prevlinetext - prevlinestart
+    return prevlinestart, indent
+
 class maybe_accept(commands.Command):
     def do(self):
         r = self.reader
@@ -172,13 +197,39 @@
         # if there are already several lines and the cursor
         # is not on the last one, always insert a new \n.
         text = r.get_unicode()
-        if "\n" in r.buffer[r.pos:]:
+        if ("\n" in r.buffer[r.pos:] or
+            (r.more_lines is not None and r.more_lines(text))):
+            #
+            # auto-indent the next line like the previous line
+            prevlinestart, indent = _get_previous_line_indent(r.buffer, r.pos)
             r.insert("\n")
-        elif r.more_lines is not None and r.more_lines(text):
-            r.insert("\n")
+            if indent:
+                for i in range(prevlinestart, prevlinestart + indent):
+                    r.insert(r.buffer[i])
         else:
             self.finish = 1
 
+class backspace_dedent(commands.Command):
+    def do(self):
+        r = self.reader
+        b = r.buffer
+        if r.pos > 0:
+            repeat = 1
+            if b[r.pos - 1] != "\n":
+                indent = _get_this_line_indent(b, r.pos)
+                if indent > 0:
+                    ls = r.pos - indent
+                    while ls > 0:
+                        ls, pi = _get_previous_line_indent(b, ls - 1)
+                        if pi is not None and pi < indent:
+                            repeat = indent - pi
+                            break
+            r.pos -= repeat
+            del b[r.pos:r.pos + repeat]
+            r.dirty = 1
+        else:
+            self.reader.error("can't backspace at start")
+
 # ____________________________________________________________
 
 class _ReadlineWrapper(object):
@@ -212,15 +263,14 @@
         boolean value is true.
         """
         reader = self.get_reader()
-        saved = reader.more_lines, reader.tab_insert_spaces_if_stem_is_empty
+        saved = reader.more_lines
         try:
             reader.more_lines = more_lines
             reader.ps1 = reader.ps2 = ps1
             reader.ps3 = reader.ps4 = ps2
-            reader.tab_insert_spaces_if_stem_is_empty = True
             return reader.readline(returns_unicode=returns_unicode)
         finally:
-            reader.more_lines, reader.tab_insert_spaces_if_stem_is_empty = saved
+            reader.more_lines = saved
 
     def parse_and_bind(self, string):
         pass  # XXX we don't support parsing GNU-readline-style init files
diff --git a/pypy/conftest.py b/pypy/conftest.py
--- a/pypy/conftest.py
+++ b/pypy/conftest.py
@@ -36,7 +36,7 @@
 def pytest_addoption(parser):
     from rpython.conftest import pytest_addoption
     pytest_addoption(parser)
-    
+
     group = parser.getgroup("pypy options")
     group.addoption('-A', '--runappdirect', action="store_true",
            default=False, dest="runappdirect",
@@ -44,6 +44,9 @@
     group.addoption('--direct', action="store_true",
            default=False, dest="rundirect",
            help="run pexpect tests directly")
+    group.addoption('--raise-operr', action="store_true",
+            default=False, dest="raise_operr",
+            help="Show the interp-level OperationError in app-level tests")
 
 def pytest_funcarg__space(request):
     from pypy.tool.pytest.objspace import gettestobjspace
diff --git a/pypy/doc/build.rst b/pypy/doc/build.rst
--- a/pypy/doc/build.rst
+++ b/pypy/doc/build.rst
@@ -146,6 +146,26 @@
 :doc:`objspace proxies <objspace-proxies>` document.
 
 
+Packaging (preparing for installation)
+--------------------------------------
+
+Packaging is required if you want to install PyPy system-wide, even to
+install on the same machine.  The reason is that doing so prepares a
+number of extra features that cannot be done lazily on a root-installed
+PyPy, because the normal users don't have write access.  This concerns
+mostly libraries that would normally be compiled if and when they are
+imported the first time.
+
+::
+    
+    cd pypy/tool/release
+    ./package.py pypy-VER-PLATFORM
+
+This creates a clean and prepared hierarchy, as well as a ``.tar.bz2``
+with the same content; both are found by default in
+``/tmp/usession-YOURNAME/build/``.  You can then either move the file
+hierarchy or unpack the ``.tar.bz2`` at the correct place.
+
 
 Installation
 ------------
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -58,7 +58,7 @@
 
 # General information about the project.
 project = u'PyPy'
-copyright = u'2014, The PyPy Project'
+copyright = u'2015, The PyPy Project'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -67,7 +67,7 @@
 # The short X.Y version.
 version = '2.5'
 # The full version, including alpha/beta/rc tags.
-release = '2.5.0'
+release = '2.5.1'
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
diff --git a/pypy/doc/embedding.rst b/pypy/doc/embedding.rst
--- a/pypy/doc/embedding.rst
+++ b/pypy/doc/embedding.rst
@@ -36,7 +36,8 @@
    "PyPy home directory".  The arguments are:
 
    * ``home``: NULL terminated path to an executable inside the pypy directory
-     (can be a .so name, can be made up)
+     (can be a .so name, can be made up).  Used to look up the standard
+     library, and is also set as ``sys.executable``.
 
    * ``verbose``: if non-zero, it will print error messages to stderr
 
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
 
+   release-2.5.1.rst
    release-2.5.0.rst
    release-2.4.0.rst
    release-2.3.1.rst
diff --git a/pypy/doc/index-of-whatsnew.rst b/pypy/doc/index-of-whatsnew.rst
--- a/pypy/doc/index-of-whatsnew.rst
+++ b/pypy/doc/index-of-whatsnew.rst
@@ -7,6 +7,7 @@
 .. toctree::
 
    whatsnew-head.rst
+   whatsnew-2.5.1.rst
    whatsnew-2.5.0.rst
    whatsnew-2.4.0.rst
    whatsnew-2.3.1.rst
diff --git a/pypy/doc/jit-hooks.rst b/pypy/doc/jit-hooks.rst
--- a/pypy/doc/jit-hooks.rst
+++ b/pypy/doc/jit-hooks.rst
@@ -39,3 +39,30 @@
     Reason is a string, the meaning of other arguments is the same
     as attributes on JitLoopInfo object
 
+.. function:: enable_debug()
+
+    Start recording debugging counters for ``get_stats_snapshot``
+
+.. function:: disable_debug()
+
+    Stop recording debugging counters for ``get_stats_snapshot``
+
+.. function:: get_stats_snapshot()
+
+    Get the jit status in the specific moment in time. Note that this
+    is eager - the attribute access is not lazy, if you need new stats
+    you need to call this function again. You might want to call
+    ``enable_debug`` to get more information. It returns an instance
+    of ``JitInfoSnapshot``
+
+.. class:: JitInfoSnapshot
+
+    A class describing current snapshot. Usable attributes:
+
+    * ``counters`` - internal JIT integer counters
+
+    * ``counter_times`` - internal JIT float counters, notably time spent
+      TRACING and in the JIT BACKEND
+
+    * ``loop_run_times`` - counters for number of times loops are run, only
+      works when ``enable_debug`` is called.
diff --git a/pypy/doc/release-2.5.1.rst b/pypy/doc/release-2.5.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-2.5.1.rst
@@ -0,0 +1,115 @@
+================================
+PyPy 2.5.1 - Pineapple Bromeliad
+================================
+
+We're pleased to announce PyPy 2.5.1, Pineapple `Bromeliad`_ following on the heels of 2.5.0
+
+You can download the PyPy 2.5.1 release here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project, and for those who donate to our three sub-projects, as well as our
+volunteers and contributors.  
+We've shown quite a bit of progress, but we're slowly running out of funds.
+Please consider donating more, or even better convince your employer to donate,
+so we can finish those projects! The three sub-projects are:
+
+* `Py3k`_ (supporting Python 3.x): We have released a Python 3.2.5 compatible version
+   we call PyPy3 2.4.0, and are working toward a Python 3.3 compatible version
+
+* `STM`_ (software transactional memory): We have released a first working version,
+  and continue to try out new promising paths of achieving a fast multithreaded Python
+
+* `NumPy`_ which requires installation of our fork of upstream numpy,
+  available `on bitbucket`_
+
+.. _`Bromeliad`: http://xkcd.com/1498
+.. _`Py3k`: http://pypy.org/py3donate.html
+.. _`STM`: http://pypy.org/tmdonate2.html
+.. _`NumPy`: http://pypy.org/numpydonate.html
+.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy
+
+We would also like to encourage new people to join the project. PyPy has many
+layers and we need help with all of them: `PyPy`_ and `Rpython`_ documentation
+improvements, tweaking popular `modules`_ to run on pypy, or general `help`_ with making
+Rpython's JIT even better.
+
+.. _`PyPy`: http://doc.pypy.org 
+.. _`Rpython`: http://rpython.readthedocs.org
+.. _`modules`: http://doc.pypy.org/en/latest/project-ideas.html#make-more-python-modules-pypy-friendly
+.. _`help`: http://doc.pypy.org/en/latest/project-ideas.html
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy and cpython 2.7.x`_ performance comparison)
+due to its integrated tracing JIT compiler.
+
+This release supports **x86** machines on most common operating systems
+(Linux 32/64, Mac OS X 64, Windows, and OpenBSD),
+as well as newer **ARM** hardware (ARMv6 or ARMv7, with VFPv3) running Linux.
+
+While we support 32 bit python on Windows, work on the native Windows 64
+bit python is still stalling, we would welcome a volunteer
+to `handle that`_.
+
+.. _`pypy and cpython 2.7.x`: http://speed.pypy.org
+.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation
+
+Highlights 
+==========
+
+* The past months have seen pypy mature and grow, as rpython becomes the goto
+  solution for writing fast dynamic language interpreters. Our separation of
+  Rpython and the python interpreter PyPy is now much clearer in the
+  `PyPy documentation`_  and we now have separate `RPython documentation`_.
+  Tell us what still isn't clear, or even better help us improve the documentation.
+
+* We merged version 2.7.9 of python's stdlib. From the python release notice:
+
+  * The entirety of Python 3.4's `ssl module`_ has been backported. 
+    See `PEP 466`_ for justification.
+
+  * HTTPS certificate validation using the system's certificate store is now
+    enabled by default. See `PEP 476`_ for details.
+
+  * SSLv3 has been disabled by default in httplib and its reverse dependencies
+    due to the `POODLE attack`_.
+
+  * The `ensurepip module`_ has been backported, which provides the pip
+    package manager in every Python 2.7 installation. See `PEP 477`_.
+
+* The garbage collector now ignores parts of the stack which did not change
+  since the last collection, another performance boost
+
+* errno and LastError are saved around cffi calls so things like pdb will not
+  overwrite it
+
+* We continue to asymptotically approach a score of 7 times faster than cpython
+  on our benchmark suite, we now rank 6.98 on latest runs
+
+* Issues reported with our previous release were resolved_ after reports from users on
+  our issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
+  #pypy.
+
+.. _`PyPy documentation`: http://doc.pypy.org
+.. _`RPython documentation`: http://rpython.readthedocs.org
+.. _`ssl module`: https://docs.python.org/3/library/ssl.html
+.. _`PEP 466`: https://www.python.org/dev/peps/pep-0466
+.. _`PEP 476`: https://www.python.org/dev/peps/pep-0476
+.. _`PEP 477`: https://www.python.org/dev/peps/pep-0477
+.. _`POODLE attack`: https://www.imperialviolet.org/2014/10/14/poodle.html
+.. _`ensurepip module`: https://docs.python.org/2/library/ensurepip.html
+.. _resolved: http://doc.pypy.org/en/latest/whatsnew-2.5.1.html
+
+Please try it out and let us know what you think. We welcome
+success stories, `experiments`_,  or `benchmarks`_, we know you are using PyPy, please tell us about it!
+
+Cheers
+
+The PyPy Team
+
+.. _`experiments`: http://morepypy.blogspot.com/2015/02/experiments-in-pyrlang-with-rpython.html
+.. _`benchmarks`: https://mithrandi.net/blog/2015/03/axiom-benchmark-results-on-pypy-2-5-0
diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst
--- a/pypy/doc/stm.rst
+++ b/pypy/doc/stm.rst
@@ -25,11 +25,12 @@
 .. _`2nd call for donation`: http://pypy.org/tmdonate2.html
 
 
-Introduction
-============
+What pypy-stm is for
+====================
 
 ``pypy-stm`` is a variant of the regular PyPy interpreter.  (This
-version supports Python 2.7; see below for `Python 3`_.)  With caveats_
+version supports Python 2.7; see below for `Python 3, CPython,
+and others`_.)  With caveats_
 listed below, it should be in theory within 20%-50% slower than a
 regular PyPy, comparing the JIT version in both cases (but see below!).
 It is called
@@ -45,15 +46,36 @@
   it as a drop-in replacement and multithreaded programs will run on
   multiple cores.
 
-* ``pypy-stm`` does not impose any special API to the user, but it
-  provides a new pure Python module called `transactional_memory`_ with
-  features to inspect the state or debug conflicts_ that prevent
-  parallelization.  This module can also be imported on top of a non-STM
-  PyPy or CPython.
+* ``pypy-stm`` provides (but does not impose) a special API to the
+  user in the pure Python module ``transaction``.  This module is based
+  on the lower-level module ``pypystm``, but also provides some
+  compatibility with non-STM PyPy's or CPython's.
 
 * Building on top of the way the GIL is removed, we will talk
-  about `Atomic sections, Transactions, etc.: a better way to write
-  parallel programs`_.
+  about `How to write multithreaded programs: the 10'000-feet view`_
+  and `transaction.TransactionQueue`_.
+
+
+...and what pypy-stm is not for
+-------------------------------
+
+``pypy-stm`` gives a Python without the GIL.  This means that it is
+useful in situations where the GIL is the problem in the first place.
+(This includes cases where the program can easily be modified to run
+in multiple threads; often, we don't consider doing that precisely
+because of the GIL.)
+
+However, there are plenty of cases where the GIL is not the problem.
+Do not hope ``pypy-stm`` to be helpful in these cases!  This includes
+all programs that use multiple threads but don't actually spend a lot
+of time running Python code.  For example, it may be spending all its
+time waiting for I/O to occur, or performing some long computation on
+a huge matrix.  These are cases where the CPU is either idle, or in
+some C/Fortran library anyway; in both cases, the interpreter (either
+CPython or the regular PyPy) should release the GIL around the
+external calls.  The threads will thus not end up fighting for the
+GIL.
+
 
 
 Getting Started
@@ -63,9 +85,10 @@
 
 Development is done in the branch `stmgc-c7`_.  If you are only
 interested in trying it out, you can download a Ubuntu binary here__
-(``pypy-stm-2.3*.tar.bz2``, Ubuntu 12.04-14.04).  The current version
+(``pypy-stm-2.*.tar.bz2``, for Ubuntu 12.04-14.04).  The current version
 supports four "segments", which means that it will run up to four
-threads in parallel.
+threads in parallel.  (Development recently switched to `stmgc-c8`_,
+but that is not ready for trying out yet.)
 
 To build a version from sources, you first need to compile a custom
 version of clang(!); we recommend downloading `llvm and clang like
@@ -78,6 +101,7 @@
    rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py
 
 .. _`stmgc-c7`: https://bitbucket.org/pypy/pypy/src/stmgc-c7/
+.. _`stmgc-c8`: https://bitbucket.org/pypy/pypy/src/stmgc-c8/
 .. __: https://bitbucket.org/pypy/pypy/downloads/
 .. __: http://clang.llvm.org/get_started.html
 .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/
@@ -85,60 +109,78 @@
 
 .. _caveats:
 
-Current status
---------------
+Current status (stmgc-c7)
+-------------------------
 
-* So far, small examples work fine, but there are still a few bugs.
-  We're busy fixing them as we find them; feel free to `report bugs`_.
+* **NEW:** It seems to work fine, without crashing any more.  Please `report
+  any crash`_ you find (or other bugs).
 
 * It runs with an overhead as low as 20% on examples like "richards".
   There are also other examples with higher overheads --currently up to
   2x for "translate.py"-- which we are still trying to understand.
   One suspect is our partial GC implementation, see below.
 
+* **NEW:** the ``PYPYSTM`` environment variable and the
+  ``pypy/stm/print_stm_log.py`` script let you know exactly which
+  "conflicts" occurred.  This is described in the section
+  `transaction.TransactionQueue`_ below.
+
+* **NEW:** special transaction-friendly APIs (like ``stmdict``),
+  described in the section `transaction.TransactionQueue`_ below.  The
+  old API changed again, mostly moving to different modules.  Sorry
+  about that.  I feel it's a better idea to change the API early
+  instead of being stuck with a bad one later...
+
 * Currently limited to 1.5 GB of RAM (this is just a parameter in
-  `core.h`__).  Memory overflows are not correctly handled; they cause
-  segfaults.
+  `core.h`__ -- theoretically.  In practice, increase it too much and
+  clang crashes again).  Memory overflows are not correctly handled;
+  they cause segfaults.
 
-* The JIT warm-up time improved recently but is still bad.  In order to
-  produce machine code, the JIT needs to enter a special single-threaded
-  mode for now.  This means that you will get bad performance results if
-  your program doesn't run for several seconds, where *several* can mean
-  *many.*  When trying benchmarks, be sure to check that you have
-  reached the warmed state, i.e. the performance is not improving any
-  more.  This should be clear from the fact that as long as it's
-  producing more machine code, ``pypy-stm`` will run on a single core.
+* **NEW:** The JIT warm-up time improved again, but is still
+  relatively large.  In order to produce machine code, the JIT needs
+  to enter "inevitable" mode.  This means that you will get bad
+  performance results if your program doesn't run for several seconds,
+  where *several* can mean *many.* When trying benchmarks, be sure to
+  check that you have reached the warmed state, i.e. the performance
+  is not improving any more.
 
 * The GC is new; although clearly inspired by PyPy's regular GC, it
   misses a number of optimizations for now.  Programs allocating large
   numbers of small objects that don't immediately die (surely a common
-  situation) suffer from these missing optimizations.
+  situation) suffer from these missing optimizations.  (The bleeding
+  edge ``stmgc-c8`` is better at that.)
 
-* The GC has no support for destructors: the ``__del__`` method is never
-  called (including on file objects, which won't be closed for you).
-  This is of course temporary.  Also, weakrefs might appear to work a
-  bit strangely for now (staying alive even though ``gc.collect()``, or
-  even dying but then un-dying for a short time before dying again).
+* Weakrefs might appear to work a bit strangely for now, sometimes
+  staying alive through ``gc.collect()``, or even dying but then
+  un-dying for a short time before dying again.  A similar problem can
+  show up occasionally elsewhere with accesses to some external
+  resources, where the (apparent) serialized order doesn't match the
+  underlying (multithreading) order.  These are bugs (partially fixed
+  already in ``stmgc-c8``).  Also, debugging helpers like
+  ``weakref.getweakrefcount()`` might give wrong answers.
 
 * The STM system is based on very efficient read/write barriers, which
   are mostly done (their placement could be improved a bit in
-  JIT-generated machine code).  But the overall bookkeeping logic could
-  see more improvements (see `Low-level statistics`_ below).
+  JIT-generated machine code).
 
 * Forking the process is slow because the complete memory needs to be
   copied manually.  A warning is printed to this effect.
 
 * Very long-running processes (on the order of days) will eventually
   crash on an assertion error because of a non-implemented overflow of
-  an internal 29-bit number.
+  an internal 28-bit counter.
 
-.. _`report bugs`: https://bugs.pypy.org/
+* The recursion detection code was not reimplemented.  Infinite
+  recursion just segfaults for now.
+
+
+.. _`report any crash`: https://bitbucket.org/pypy/pypy/issues?status=new&status=open
 .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h
 
 
 
-Python 3
-========
+Python 3, CPython, and others
+=============================
 
 In this document I describe "pypy-stm", which is based on PyPy's Python
 2.7 interpreter.  Supporting Python 3 should take about half an
@@ -153,12 +195,66 @@
 framework, although the amount of work to put there might vary, because
 the STM framework within RPython is currently targeting the PyPy
 interpreter and other ones might have slightly different needs.
+But in general, all the tedious transformations are done by RPython
+and you're only left with the (hopefully few) hard and interesting bits.
+
+The core of STM works as a library written in C (see `reference to
+implementation details`_ below).  It means that it can be used on
+other interpreters than the ones produced by RPython.  Duhton_ is an
+early example of that.  At this point, you might think about adapting
+this library for CPython.  You're warned, though: as far as I can
+tell, it is a doomed idea.  I had a hard time debugging Duhton, and
+that's infinitely simpler than CPython.  Even ignoring that, you can
+see in the C sources of Duhton that many core design decisions are
+different than in CPython: no refcounting; limited support for
+prebuilt "static" objects; ``stm_read()`` and ``stm_write()`` macro
+calls everywhere (and getting very rare and very obscure bugs if you
+forget one); and so on.  You could imagine some custom special-purpose
+extension of the C language, which you would preprocess to regular C.
+In my opinion that's starting to look a lot like RPython itself, but
+maybe you'd prefer this approach.  Of course you still have to worry
+about each and every C extension module you need, but maybe you'd have
+a way forward.
+
+.. _Duhton: https://bitbucket.org/pypy/duhton
 
 
 
 User Guide
 ==========
 
+How to write multithreaded programs: the 10'000-feet view
+---------------------------------------------------------
+
+PyPy-STM offers two ways to write multithreaded programs:
+
+* the traditional way, using the ``thread`` or ``threading`` modules,
+  described first__.
+
+* using ``TransactionQueue``, described next__, as a way to hide the
+  low-level notion of threads.
+
+.. __: `Drop-in replacement`_
+.. __: `transaction.TransactionQueue`_
+
+The issues with low-level threads are well known (particularly in other
+languages that don't have GIL-based interpreters): memory corruption,
+deadlocks, livelocks, and so on.  There are alternative approaches to
+dealing directly with threads, like OpenMP_.  These approaches
+typically enforce some structure on your code.  ``TransactionQueue``
+is in part similar: your program needs to have "some chances" of
+parallelization before you can apply it.  But I believe that the scope
+of applicability is much larger with ``TransactionQueue`` than with
+other approaches.  It usually works without forcing a complete
+reorganization of your existing code, and it works on any Python
+program which has got *latent* and *imperfect* parallelism.  Ideally,
+it only requires that the end programmer identifies where this
+parallelism is likely to be found, and communicates it to the system
+using a simple API.
+
+.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP
+
+
 Drop-in replacement
 -------------------
 
@@ -175,29 +271,199 @@
 
 This works by internally considering the points where a standard PyPy or
 CPython would release the GIL, and replacing them with the boundaries of
-"transaction".  Like their database equivalent, multiple transactions
+"transactions".  Like their database equivalent, multiple transactions
 can execute in parallel, but will commit in some serial order.  They
 appear to behave as if they were completely run in this serialization
 order.
 
 
+transaction.TransactionQueue
+----------------------------
+
+In CPU-hungry programs, we can often easily identify outermost loops
+over some data structure, or other repetitive algorithm, where each
+"block" consists of processing a non-trivial amount of data, and where
+the blocks "have a good chance" to be independent from each other.  We
+don't need to prove that they are actually independent: it is enough
+if they are *often independent* --- or, more precisely, if we *think
+they should be* often independent.
+
+One typical example would look like this, where the function ``func()``
+typically invokes a large amount of code::
+
+    for key, value in bigdict.items():
+        func(key, value)
+
+Then you simply replace the loop with::
+
+    from transaction import TransactionQueue
+
+    tr = TransactionQueue()
+    for key, value in bigdict.items():
+        tr.add(func, key, value)
+    tr.run()
+
+This code's behavior is equivalent.  Internally, the
+``TransactionQueue`` object will start N threads and try to run the
+``func(key, value)`` calls on all threads in parallel.  But note the
+difference with a regular thread-pooling library, as found in many
+lower-level languages than Python: the function calls are not randomly
+interleaved with each other just because they run in parallel.  The
+behavior did not change because we are using ``TransactionQueue``.
+All the calls still *appear* to execute in some serial order.
+
+A typical usage of ``TransactionQueue`` goes like that: at first,
+the performance does not increase.
+In fact, it is likely to be worse.  Typically, this is
+indicated by the total CPU usage, which remains low (closer to 1 than
+N cores).  First note that it is expected that the CPU usage should
+not go much higher than 1 in the JIT warm-up phase: you must run a
+program for several seconds, or for larger programs at least one
+minute, to give the JIT a chance to warm up enough.  But if CPU usage
+remains low even afterwards, then the ``PYPYSTM`` environment variable
+can be used to track what is going on.
+
+Run your program with ``PYPYSTM=logfile`` to produce a log file called
+``logfile``.  Afterwards, use the ``pypy/stm/print_stm_log.py``
+utility to inspect the content of this log file.  It produces output
+like this (sorted by amount of time lost, largest first)::
+
+    10.5s lost in aborts, 1.25s paused (12412x STM_CONTENTION_WRITE_WRITE)
+    File "foo.py", line 10, in f
+      someobj.stuff = 5
+    File "bar.py", line 20, in g
+      someobj.other = 10
+
+This means that 10.5 seconds were lost running transactions that were
+aborted (which caused another 1.25 seconds of lost time by pausing),
+because of the reason shown in the two independent single-entry
+tracebacks: one thread ran the line ``someobj.stuff = 5``, whereas
+another thread concurrently ran the line ``someobj.other = 10`` on the
+same object.  These two writes are done to the same object.  This
+causes a conflict, which aborts one of the two transactions.  In the
+example above this occurred 12412 times.
+
+The two other conflict sources are ``STM_CONTENTION_INEVITABLE``,
+which means that two transactions both tried to do an external
+operation, like printing or reading from a socket or accessing an
+external array of raw data; and ``STM_CONTENTION_WRITE_READ``, which
+means that one transaction wrote to an object but the other one merely
+read it, not wrote to it (in that case only the writing transaction is
+reported; the location for the reads is not recorded because doing so
+is not possible without a very large performance impact).
+
+Common causes of conflicts:
+
+* First of all, any I/O or raw manipulation of memory turns the
+  transaction inevitable ("must not abort").  There can be only one
+  inevitable transaction running at any time.  A common case is if
+  each transaction starts with sending data to a log file.  You should
+  refactor this case so that it occurs either near the end of the
+  transaction (which can then mostly run in non-inevitable mode), or
+  delegate it to a separate transaction or even a separate thread.
+
+* Writing to a list or a dictionary conflicts with any read from the
+  same list or dictionary, even one done with a different key.  For
+  dictionaries and sets, you can try the types ``transaction.stmdict``
+  and ``transaction.stmset``, which behave mostly like ``dict`` and
+  ``set`` but allow concurrent access to different keys.  (What is
+  missing from them so far is lazy iteration: for example,
+  ``stmdict.iterkeys()`` is implemented as ``iter(stmdict.keys())``;
+  and, unlike PyPy's dictionaries and sets, the STM versions are not
+  ordered.)  There are also experimental ``stmiddict`` and
+  ``stmidset`` classes using the identity of the key.
+
+* ``time.time()`` and ``time.clock()`` turn the transaction inevitable
+  in order to guarantee that a call that appears to be later will really
+  return a higher number.  If getting slightly unordered results is
+  fine, use ``transaction.time()`` or ``transaction.clock()``.  The
+  latter operations guarantee to return increasing results only if you
+  can "prove" that two calls occurred in a specific order (for example
+  because they are both called by the same thread).  In cases where no
+  such proof is possible, you might get randomly interleaved values.
+  (If you have two independent transactions, they normally behave as if
+  one of them was fully executed before the other; but using
+  ``transaction.time()`` you might see the "hidden truth" that they are
+  actually interleaved.)
+
+* ``transaction.threadlocalproperty`` can be used at class-level::
+
+      class Foo(object):     # must be a new-style class!
+          x = transaction.threadlocalproperty()
+          y = transaction.threadlocalproperty(dict)
+
+  This declares that instances of ``Foo`` have two attributes ``x``
+  and ``y`` that are thread-local: reading or writing them from
+  concurrently-running transactions will return independent results.
+  (Any other attributes of ``Foo`` instances will be globally visible
+  from all threads, as usual.)  This is useful together with
+  ``TransactionQueue`` for these two cases:
+
+  - For attributes of long-lived objects that change during one
+    transaction, but should always be reset to some initial value
+    around transaction (for example, initialized to 0 at the start of
+    a transaction; or, if used for a list of pending things to do
+    within this transaction, it will always be empty at the end of one
+    transaction).
+
+  - For general caches across transactions.  With ``TransactionQueue``
+    you get a pool of a fixed number N of threads, each running the
+    transactions serially.  A thread-local property will have the
+    value last stored in it by the same thread, which may come from a
+    random previous transaction.  Basically, you get N copies of the
+    property's value, and each transaction accesses a random copy.  It
+    works fine for caches.
+
+  In more details, the optional argument to ``threadlocalproperty()``
+  is the default value factory: in case no value was assigned in the
+  current thread yet, the factory is called and its result becomes the
+  value in that thread (like ``collections.defaultdict``).  If no
+  default value factory is specified, uninitialized reads raise
+  ``AttributeError``.
+
+* In addition to all of the above, there are cases where write-write
+  conflicts are caused by writing the same value to an attribute again
+  and again.  See for example ea2e519614ab_: this fixes two such
+  issues where we write an object field without first checking if we
+  already did it.  The ``dont_change_any_more`` field is a flag set to
+  ``True`` in that part of the code, but usually this
+  ``rtyper_makekey()`` method will be called many times for the same
+  object; the code used to repeatedly set the flag to ``True``, but
+  now it first checks and only does the write if it is ``False``.
+  Similarly, in the second half of the checkin, the method
+  ``setup_block_entry()`` used to both assign the ``concretetype``
+  fields and return a list, but its two callers were different: one
+  would really need the ``concretetype`` fields initialized, whereas
+  the other would only need to get its result list --- the
+  ``concretetype`` field in that case might already be set or not, but
+  that would not matter.
+
+.. _ea2e519614ab: https://bitbucket.org/pypy/pypy/commits/ea2e519614ab
+
+Note that Python is a complicated language; there are a number of less
+common cases that may cause conflict (of any kind) where we might not
+expect it a priori.  In many of these cases it could be fixed; please
+report any case that you don't understand.
+
+
 Atomic sections
 ---------------
 
-PyPy supports *atomic sections,* which are blocks of code which you want
-to execute without "releasing the GIL".  *This is experimental and may
-be removed in the future.*  In STM terms, this means blocks of code that
-are executed while guaranteeing that the transaction is not interrupted
-in the middle.
+The ``TransactionQueue`` class described above is based on *atomic
+sections,* which are blocks of code which you want to execute without
+"releasing the GIL".  In STM terms, this means blocks of code that are
+executed while guaranteeing that the transaction is not interrupted in
+the middle.  *This is experimental and may be removed in the future*
+if `Software lock elision`_ is ever implemented.
 
-Here is a usage example::
+Here is a direct usage example::
 
-    with __pypy__.thread.atomic:
+    with transaction.atomic:
         assert len(lst1) == 10
         x = lst1.pop(0)
         lst1.append(x)
 
-In this (bad) example, we are sure that the item popped off one end of
+In this example, we are sure that the item popped off one end of
 the list is appended again at the other end atomically.  It means that
 another thread can run ``len(lst1)`` or ``x in lst1`` without any
 particular synchronization, and always see the same results,
@@ -221,25 +487,27 @@
 it likely that such a piece of code will eventually block all other
 threads anyway.
 
-Note that if you want to experiment with ``atomic``, you may have to add
-manually a transaction break just before the atomic block.  This is
+Note that if you want to experiment with ``atomic``, you may have to
+manually add a transaction break just before the atomic block.  This is
 because the boundaries of the block are not guaranteed to be the
 boundaries of the transaction: the latter is at least as big as the
-block, but maybe bigger.  Therefore, if you run a big atomic block, it
+block, but may be bigger.  Therefore, if you run a big atomic block, it
 is a good idea to break the transaction just before.  This can be done
-e.g. by the hack of calling ``time.sleep(0)``.  (This may be fixed at
+by calling ``transaction.hint_commit_soon()``.  (This may be fixed at
 some point.)
 
-There are also issues with the interaction of locks and atomic blocks.
-This can be seen if you write to files (which have locks), including
-with a ``print`` to standard output.  If one thread tries to acquire a
-lock while running in an atomic block, and another thread has got the
-same lock, then the former may fail with a ``thread.error``.  The reason
-is that "waiting" for some condition to become true --while running in
-an atomic block-- does not really make sense.  For now you can work
-around it by making sure that, say, all your prints are either in an
-``atomic`` block or none of them are.  (This kind of issue is
-theoretically hard to solve.)
+There are also issues with the interaction of regular locks and atomic
+blocks.  This can be seen if you write to files (which have locks),
+including with a ``print`` to standard output.  If one thread tries to
+acquire a lock while running in an atomic block, and another thread
+has got the same lock at that point, then the former may fail with a
+``thread.error``.  (Don't rely on it; it may also deadlock.)
+The reason is that "waiting" for some condition to
+become true --while running in an atomic block-- does not really make
+sense.  For now you can work around it by making sure that, say, all
+your prints are either in an ``atomic`` block or none of them are.
+(This kind of issue is theoretically hard to solve and may be the
+reason for atomic block support to eventually be removed.)
 
 
 Locks
@@ -293,106 +561,47 @@
 .. _`software lock elision`: https://www.repository.cam.ac.uk/handle/1810/239410
 
 
-Atomic sections, Transactions, etc.: a better way to write parallel programs
-----------------------------------------------------------------------------
+Miscellaneous functions
+-----------------------
 
-(This section is based on locks as we plan to implement them, but also
-works with the existing atomic sections.)
+* First, note that the ``transaction`` module is found in the file
+  ``lib_pypy/transaction.py``.  This file can be copied around to
+  execute the same programs on CPython or on non-STM PyPy, with
+  fall-back behavior.  (One case where the behavior differs is
+  ``atomic``, which is in this fall-back case just a regular lock; so
+  ``with atomic`` only prevents other threads from entering other
+  ``with atomic`` sections, but won't prevent other threads from
+  running non-atomic code.)
 
-In the cases where elision works, the block of code can run in parallel
-with other blocks of code *even if they are protected by the same lock.*
-You still get the illusion that the blocks are run sequentially.  This
-works even for multiple threads that run each a series of such blocks
-and nothing else, protected by one single global lock.  This is
-basically the Python application-level equivalent of what was done with
-the interpreter in ``pypy-stm``: while you think you are writing
-thread-unfriendly code because of this global lock, actually the
-underlying system is able to make it run on multiple cores anyway.
-
-This capability can be hidden in a library or in the framework you use;
-the end user's code does not need to be explicitly aware of using
-threads.  For a simple example of this, there is `transaction.py`_ in
-``lib_pypy``.  The idea is that you write, or already have, some program
-where the function ``f(key, value)`` runs on every item of some big
-dictionary, say::
-
-    for key, value in bigdict.items():
-        f(key, value)
-
-Then you simply replace the loop with::
-
-    for key, value in bigdict.items():
-        transaction.add(f, key, value)
-    transaction.run()
-
-This code runs the various calls to ``f(key, value)`` using a thread
-pool, but every single call is executed under the protection of a unique
-lock.  The end result is that the behavior is exactly equivalent --- in
-fact it makes little sense to do it in this way on a non-STM PyPy or on
-CPython.  But on ``pypy-stm``, the various locked calls to ``f(key,
-value)`` can tentatively be executed in parallel, even if the observable
-result is as if they were executed in some serial order.
-
-This approach hides the notion of threads from the end programmer,
-including all the hard multithreading-related issues.  This is not the
-first alternative approach to explicit threads; for example, OpenMP_ is
-one.  However, it is one of the first ones which does not require the
-code to be organized in a particular fashion.  Instead, it works on any
-Python program which has got latent, imperfect parallelism.  Ideally, it
-only requires that the end programmer identifies where this parallelism
-is likely to be found, and communicates it to the system, using for
-example the ``transaction.add()`` scheme.
-
-.. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py
-.. _OpenMP: http://en.wikipedia.org/wiki/OpenMP
-
-
-.. _`transactional_memory`:
-
-API of transactional_memory
----------------------------
-
-The new pure Python module ``transactional_memory`` runs on both CPython
-and PyPy, both with and without STM.  It contains:
-
-* ``getsegmentlimit()``: return the number of "segments" in
+* ``transaction.getsegmentlimit()``: return the number of "segments" in
   this pypy-stm.  This is the limit above which more threads will not be
   able to execute on more cores.  (Right now it is limited to 4 due to
   inter-segment overhead, but should be increased in the future.  It
   should also be settable, and the default value should depend on the
   number of actual CPUs.)  If STM is not available, this returns 1.
 
-* ``print_abort_info(minimum_time=0.0)``: debugging help.  Each thread
-  remembers the longest abort or pause it did because of cross-thread
-  contention_.  This function prints it to ``stderr`` if the time lost
-  is greater than ``minimum_time`` seconds.  The record is then
-  cleared, to make it ready for new events.  This function returns
-  ``True`` if it printed a report, and ``False`` otherwise.
+* ``__pypy__.thread.signals_enabled``: a context manager that runs its
+  block of code with signals enabled.  By default, signals are only
+  enabled in the main thread; a non-main thread will not receive
+  signals (this is like CPython).  Enabling signals in non-main
+  threads is useful for libraries where threads are hidden and the end
+  user is not expecting his code to run elsewhere than in the main
+  thread.
 
+* ``pypystm.exclusive_atomic``: a context manager similar to
+  ``transaction.atomic`` but which complains if it is nested.
 
-API of __pypy__.thread
-----------------------
+* ``transaction.is_atomic()``: return True if called from an atomic
+  context.
 
-The ``__pypy__.thread`` submodule is a built-in module of PyPy that
-contains a few internal built-in functions used by the
-``transactional_memory`` module, plus the following:
+* ``pypystm.count()``: return a different positive integer every time
+  it is called.  This works without generating conflicts.  The
+  returned integers are only roughly in increasing order; this should
+  not be relied upon.
 
-* ``__pypy__.thread.atomic``: a context manager to run a block in
-  fully atomic mode, without "releasing the GIL".  (May be eventually
-  removed?)
 
-* ``__pypy__.thread.signals_enabled``: a context manager that runs its
-  block with signals enabled.  By default, signals are only enabled in
-  the main thread; a non-main thread will not receive signals (this is
-  like CPython).  Enabling signals in non-main threads is useful for
-  libraries where threads are hidden and the end user is not expecting
-  his code to run elsewhere than in the main thread.
-
-
-.. _contention:
-
-Conflicts
----------
+More details about conflicts
+----------------------------
 
 Based on Software Transactional Memory, the ``pypy-stm`` solution is
 prone to "conflicts".  To repeat the basic idea, threads execute their code
@@ -408,25 +617,26 @@
 the transaction).  If this occurs too often, parallelization fails.
 
 How much actual parallelization a multithreaded program can see is a bit
-subtle.  Basically, a program not using ``__pypy__.thread.atomic`` or
+subtle.  Basically, a program not using ``transaction.atomic`` or
 eliding locks, or doing so for very short amounts of time, will
 parallelize almost freely (as long as it's not some artificial example
 where, say, all threads try to increase the same global counter and do
 nothing else).
 
-However, using if the program requires longer transactions, it comes
+However, if the program requires longer transactions, it comes
 with less obvious rules.  The exact details may vary from version to
 version, too, until they are a bit more stabilized.  Here is an
 overview.
 
 Parallelization works as long as two principles are respected.  The
-first one is that the transactions must not *conflict* with each other.
-The most obvious sources of conflicts are threads that all increment a
-global shared counter, or that all store the result of their
-computations into the same list --- or, more subtly, that all ``pop()``
-the work to do from the same list, because that is also a mutation of
-the list.  (It is expected that some STM-aware library will eventually
-be designed to help with conflict problems, like a STM-aware queue.)
+first one is that the transactions must not *conflict* with each
+other.  The most obvious sources of conflicts are threads that all
+increment a global shared counter, or that all store the result of
+their computations into the same list --- or, more subtly, that all
+``pop()`` the work to do from the same list, because that is also a
+mutation of the list.  (You can work around it with
+``transaction.stmdict``, but for that specific example, some STM-aware
+queue should eventually be designed.)
 
 A conflict occurs as follows: when a transaction commits (i.e. finishes
 successfully) it may cause other transactions that are still in progress
@@ -442,22 +652,23 @@
 Another issue is that of avoiding long-running so-called "inevitable"
 transactions ("inevitable" is taken in the sense of "which cannot be
 avoided", i.e. transactions which cannot abort any more).  Transactions
-like that should only occur if you use ``__pypy__.thread.atomic``,
-generally become of I/O in atomic blocks.  They work, but the
+like that should only occur if you use ``atomic``,
+generally because of I/O in atomic blocks.  They work, but the
 transaction is turned inevitable before the I/O is performed.  For all
 the remaining execution time of the atomic block, they will impede
 parallel work.  The best is to organize the code so that such operations
-are done completely outside ``__pypy__.thread.atomic``.
+are done completely outside ``atomic``.
 
-(This is related to the fact that blocking I/O operations are
+(This is not unrelated to the fact that blocking I/O operations are
 discouraged with Twisted, and if you really need them, you should do
 them on their own separate thread.)
 
-In case of lock elision, we don't get long-running inevitable
-transactions, but a different problem can occur: doing I/O cancels lock
-elision, and the lock turns into a real lock, preventing other threads
-from committing if they also need this lock.  (More about it when lock
-elision is implemented and tested.)
+In case lock elision eventually replaces atomic sections, we wouldn't
+get long-running inevitable transactions, but the same problem occurs
+in a different way: doing I/O cancels lock elision, and the lock turns
+into a real lock.  This prevents other threads from committing if they
+also need this lock.  (More about it when lock elision is implemented
+and tested.)
 
 
 
@@ -467,56 +678,30 @@
 XXX this section mostly empty for now
 
 
-Low-level statistics
---------------------
+Technical reports
+-----------------
 
-When a non-main thread finishes, you get low-level statistics printed to
-stderr, looking like that::
+STMGC-C7 is described in detail in a `technical report`__.
 
-      thread 0x7f73377fe600:
-          outside transaction          42182    0.506 s
-          run current                  85466    0.000 s
-          run committed                34262    3.178 s
-          run aborted write write       6982    0.083 s
-          run aborted write read         550    0.005 s
-          run aborted inevitable         388    0.010 s
-          run aborted other                0    0.000 s
-          wait free segment                0    0.000 s
-          wait write read                 78    0.027 s
-          wait inevitable                887    0.490 s
-          wait other                       0    0.000 s
-          sync commit soon                 1    0.000 s
-          bookkeeping                  51418    0.606 s
-          minor gc                    162970    1.135 s
-          major gc                         1    0.019 s
-          sync pause                   59173    1.738 s
-          longest recordered marker          0.000826 s
-          "File "x.py", line 5, in f"
+A separate `position paper`__ gives an overview of our position about
+STM in general.
 
-On each line, the first number is a counter, and the second number gives
-the associated time --- the amount of real time that the thread was in
-this state.  The sum of all the times should be equal to the total time
-between the thread's start and the thread's end.  The most important
-points are "run committed", which gives the amount of useful work, and
-"outside transaction", which should give the time spent e.g. in library
-calls (right now it seems to be larger than that; to investigate).  The
-various "run aborted" and "wait" entries are time lost due to
-conflicts_.  Everything else is overhead of various forms.  (Short-,
-medium- and long-term future work involves reducing this overhead :-)
-
-The last two lines are special; they are an internal marker read by
-``transactional_memory.print_abort_info()``.
+.. __: http://bitbucket.org/pypy/extradoc/src/extradoc/talk/dls2014/paper/paper.pdf
+.. __: http://bitbucket.org/pypy/extradoc/src/extradoc/talk/icooolps2014/
 
 
 Reference to implementation details
 -----------------------------------
 
-The core of the implementation is in a separate C library called stmgc_,
-in the c7_ subdirectory.  Please see the `README.txt`_ for more
-information.  In particular, the notion of segment is discussed there.
+The core of the implementation is in a separate C library called
+stmgc_, in the c7_ subdirectory (current version of pypy-stm) and in
+the c8_ subdirectory (bleeding edge version).  Please see the
+`README.txt`_ for more information.  In particular, the notion of
+segment is discussed there.
 
 .. _stmgc: https://bitbucket.org/pypy/stmgc/src/default/
 .. _c7: https://bitbucket.org/pypy/stmgc/src/default/c7/
+.. _c8: https://bitbucket.org/pypy/stmgc/src/default/c8/
 .. _`README.txt`: https://bitbucket.org/pypy/stmgc/raw/default/c7/README.txt
 
 PyPy itself adds on top of it the automatic placement of read__ and write__
diff --git a/pypy/doc/whatsnew-2.5.0.rst b/pypy/doc/whatsnew-2.5.0.rst
--- a/pypy/doc/whatsnew-2.5.0.rst
+++ b/pypy/doc/whatsnew-2.5.0.rst
@@ -1,6 +1,6 @@
-=======================
-What's new in PyPy 2.5
-=======================
+========================
+What's new in PyPy 2.5.0
+========================
 
 .. this is a revision shortly after release-2.4.x
 .. startrev: 7026746cbb1b
diff --git a/pypy/doc/whatsnew-2.5.1.rst b/pypy/doc/whatsnew-2.5.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/whatsnew-2.5.1.rst
@@ -0,0 +1,47 @@
+========================
+What's new in PyPy 2.5.1
+========================
+
+.. this is a revision shortly after release-2.5.0
+.. startrev: 397b96217b85
+
+
+Non-blocking file reads sometimes raised EAGAIN even though they
+had buffered data waiting, fixed in b1c4fcb04a42
+
+Fix a bug in cpyext in multithreaded programs acquiring/releasing the GIL
+
+.. branch: vmprof
+
+.. branch: stackroot-speedup-2
+
+Avoid tracing all stack roots during repeated minor collections,
+by ignoring the part of the stack that didn't change
+
+.. branch: stdlib-2.7.9
+
+Update stdlib to version 2.7.9
+
+.. branch: fix-kqueue-error2
+
+Fix exception being raised by kqueue.control (CPython compatibility)
+
+.. branch: gitignore
+
+.. branch: framestate2
+
+Refactor rpython.flowspace.framestate.FrameState.
+
+.. branch: alt_errno
+
+Add an alternative location to save LastError, errno around ctypes,
+cffi external calls so things like pdb will not overwrite it
+
+.. branch: nonquadratic-heapcache
+
+Speed up the warmup times of the JIT by removing a quadratic algorithm in the
+heapcache.
+
+.. branch: online-transforms-2
+
+Simplify flow graphs on the fly during annotation phase.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -2,24 +2,22 @@
 What's new in PyPy 2.5+
 =======================
 
-.. this is a revision shortly after release-2.5.x
-.. startrev: 397b96217b85
+.. this is a revision shortly after release-2.5.1
+.. startrev: cb01edcb59414d9d93056e54ed060673d24e67c1
 
+Issue #2017: on non-Linux-x86 platforms, reduced the memory impact of
+creating a lot of greenlets/tasklets.  Particularly useful on Win32 and
+on ARM, where you used to get a MemoryError after only 2500-5000
+greenlets (the 32-bit address space is exhausted).
 
-Non-blocking file reads sometimes raised EAGAIN even though they
-had buffered data waiting, fixed in b1c4fcb04a42
+.. branch: gc-incminimark-pinning-improve
+Object Pinning is now used in `bz2` and `rzlib` (therefore also affects
+Python's `zlib`). In case the data to compress/decompress is inside the nursery
+(incminimark) it no longer needs to create a non-moving copy of it. This saves
+one `malloc` and copying the data.  Additionally a new GC environment variable
+is introduced (`PYPY_GC_MAX_PINNED`) primarily for debugging purposes.
 
-
-.. branch: vmprof
-
-.. branch: stackroot-speedup-2
-Avoid tracing all stack roots during repeated minor collections,
-by ignoring the part of the stack that didn't change
-
-.. branch: stdlib-2.7.9
-Update stdlib to version 2.7.9
-
-.. branch: fix-kqueue-error2
-Fix exception being raised by kqueue.control (CPython compatibility)
-
-.. branch: gitignore
+.. branch: refactor-pycall
+Make `*`-unpacking in RPython function calls completely equivalent to passing
+the tuple's elements as arguments. In other words, `f(*(a, b))` now behaves 
+exactly like `f(a, b)`.
diff --git a/pypy/goal/getnightly.py b/pypy/goal/getnightly.py
--- a/pypy/goal/getnightly.py
+++ b/pypy/goal/getnightly.py
@@ -7,7 +7,7 @@
 if sys.platform.startswith('linux'):
     arch = 'linux'
     cmd = 'wget "%s"'
-    tar = "tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy'"
+    tar = "tar -x -v --wildcards --strip-components=2 -f %s '*/bin/pypy' '*/bin/libpypy-c.so'"
     if os.uname()[-1].startswith('arm'):
         arch += '-armhf-raspbian'
 elif sys.platform.startswith('darwin'):
diff --git a/pypy/goal/targetpypystandalone.py b/pypy/goal/targetpypystandalone.py
--- a/pypy/goal/targetpypystandalone.py
+++ b/pypy/goal/targetpypystandalone.py
@@ -106,6 +106,9 @@
         space.call_function(w_pathsetter, w_path)
         # import site
         try:
+            space.setattr(space.getbuiltinmodule('sys'),
+                          space.wrap('executable'),
+                          space.wrap(home))
             import_ = space.getattr(space.getbuiltinmodule('__builtin__'),
                                     space.wrap('__import__'))
             space.call_function(import_, space.wrap('site'))
@@ -138,7 +141,7 @@
         res = _pypy_execute_source(source)
         before = rffi.aroundstate.before
         if before: before()
-        return rffi.cast(rffi.INT, res)        
+        return rffi.cast(rffi.INT, res)
 
     @entrypoint('main', [], c_name='pypy_init_threads')
     def pypy_init_threads():
@@ -309,7 +312,7 @@
         w_dict = app.getwdict(space)
         entry_point, _ = create_entry_point(space, w_dict)
 
-        return entry_point, None, PyPyAnnotatorPolicy(single_space = space)
+        return entry_point, None, PyPyAnnotatorPolicy()
 
     def interface(self, ns):
         for name in ['take_options', 'handle_config', 'print_help', 'target',
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -1,5 +1,5 @@
 #! /usr/bin/env python
-# App-level version of py.py.
+# This is pure Python code that handles the main entry point into "pypy".
 # See test/test_app_main.
 
 # Missing vs CPython: -d, -t, -v, -x, -3
@@ -157,10 +157,13 @@
             current = group
     raise SystemExit
 
+def get_sys_executable():
+    return getattr(sys, 'executable', 'pypy')
+
 def print_help(*args):
     import os
     print 'usage: %s [option] ... [-c cmd | -m mod | file | -] [arg] ...' % (
-        sys.executable,)
+        get_sys_executable(),)
     print USAGE1,
     if 'pypyjit' in sys.builtin_module_names:
         print "--jit options: advanced JIT options: try 'off' or 'help'"
@@ -171,7 +174,7 @@
     try:
         import pypyjit
     except ImportError:
-        print >> sys.stderr, "No jit support in %s" % (sys.executable,)
+        print >> sys.stderr, "No jit support in %s" % (get_sys_executable(),)
         return
     items = sorted(pypyjit.defaults.items())
     print 'Advanced JIT options: a comma-separated list of OPTION=VALUE:'
@@ -209,7 +212,7 @@
         raise SystemExit
     if 'pypyjit' not in sys.builtin_module_names:
         print >> sys.stderr, ("Warning: No jit support in %s" %
-                              (sys.executable,))
+                              (get_sys_executable(),))
     else:
         import pypyjit
         pypyjit.set_param(jitparam)
@@ -219,8 +222,8 @@
 
 def print_error(msg):
     print >> sys.stderr, msg
-    print >> sys.stderr, 'usage: %s [options]' % (sys.executable,)
-    print >> sys.stderr, 'Try `%s -h` for more information.' % (sys.executable,)
+    print >> sys.stderr, 'usage: %s [options]' % (get_sys_executable(),)
+    print >> sys.stderr, 'Try `%s -h` for more information.' % (get_sys_executable(),)
 
 def fdopen(fd, mode, bufsize=-1):
     try:
@@ -514,6 +517,10 @@
     elif not sys.stdout.isatty():
         set_fully_buffered_io()
 
+    if we_are_translated():
+        import __pypy__
+        __pypy__.save_module_content_for_future_reload(sys)
+
     mainmodule = type(sys)('__main__')
     sys.modules['__main__'] = mainmodule
 
diff --git a/pypy/interpreter/astcompiler/assemble.py b/pypy/interpreter/astcompiler/assemble.py
--- a/pypy/interpreter/astcompiler/assemble.py
+++ b/pypy/interpreter/astcompiler/assemble.py
@@ -648,7 +648,7 @@
 
 
 def _compute_UNPACK_SEQUENCE(arg):
-    return arg + 1
+    return arg - 1
 
 def _compute_DUP_TOPX(arg):
     return arg
diff --git a/pypy/interpreter/astcompiler/test/test_compiler.py b/pypy/interpreter/astcompiler/test/test_compiler.py
--- a/pypy/interpreter/astcompiler/test/test_compiler.py
+++ b/pypy/interpreter/astcompiler/test/test_compiler.py
@@ -759,6 +759,19 @@
         """
         self.simple_test(source, 'l', [1, 2])
 
+    def test_unpack_wrong_stackeffect(self):
+        source = """if 1:
+        l = [1, 2]
+        a, b = l
+        a, b = l
+        a, b = l
+        a, b = l
+        a, b = l
+        a, b = l
+        """
+        code = compile_with_astcompiler(source, 'exec', self.space)
+        assert code.co_stacksize == 2
+
     def test_lambda(self):
         yield self.st, "y = lambda x: x", "y(4)", 4
 
diff --git a/pypy/interpreter/mixedmodule.py b/pypy/interpreter/mixedmodule.py
--- a/pypy/interpreter/mixedmodule.py
+++ b/pypy/interpreter/mixedmodule.py
@@ -55,7 +55,10 @@
         if self.w_initialdict is None:
             Module.init(self, space)
             if not self.lazy and self.w_initialdict is None:
-                self.w_initialdict = space.call_method(self.w_dict, 'items')
+                self.save_module_content_for_future_reload()
+
+    def save_module_content_for_future_reload(self):
+        self.w_initialdict = self.space.call_method(self.w_dict, 'items')
 
 
     def get_applevel_name(cls):
@@ -119,7 +122,7 @@
                 w_value = self.get(name)
                 space.setitem(self.w_dict, space.new_interned_str(name), w_value)
             self.lazy = False
-            self.w_initialdict = space.call_method(self.w_dict, 'items')
+            self.save_module_content_for_future_reload()
         return self.w_dict
 
     def _cleanup_(self):
diff --git a/pypy/interpreter/pycode.py b/pypy/interpreter/pycode.py
--- a/pypy/interpreter/pycode.py
+++ b/pypy/interpreter/pycode.py
@@ -4,7 +4,7 @@
 The bytecode interpreter itself is implemented by the PyFrame class.
 """
 
-import dis, imp, struct, types, new, sys
+import dis, imp, struct, types, new, sys, os
 
 from pypy.interpreter import eval
 from pypy.interpreter.signature import Signature
@@ -128,6 +128,17 @@
         if (self.magic == cpython_magic and
             '__pypy__' not in sys.builtin_module_names):
             raise Exception("CPython host codes should not be rendered")
+        # When translating PyPy, freeze the file name
+        #     <builtin>/lastdirname/basename.py
+        # instead of freezing the complete translation-time path.
+        filename = self.co_filename.lstrip('<').rstrip('>')
+        if filename.lower().endswith('.pyc'):
+            filename = filename[:-1]
+        basename = os.path.basename(filename)
+        lastdirname = os.path.basename(os.path.dirname(filename))
+        if lastdirname:
+            basename = '%s/%s' % (lastdirname, basename)
+        self.co_filename = '<builtin>/%s' % (basename,)
 
     co_names = property(lambda self: [self.space.unwrap(w_name) for w_name in self.co_names_w]) # for trace
 
diff --git a/pypy/interpreter/pyopcode.py b/pypy/interpreter/pyopcode.py
--- a/pypy/interpreter/pyopcode.py
+++ b/pypy/interpreter/pyopcode.py
@@ -1619,6 +1619,13 @@
     def prepare_exec(f, prog, globals, locals, compile_flags, builtin, codetype):
         """Manipulate parameters to exec statement to (codeobject, dict, dict).
         """
+        if (globals is None and locals is None and
+            isinstance(prog, tuple) and
+            (len(prog) == 2 or len(prog) == 3)):
+            globals = prog[1]
+            if len(prog) == 3:
+                locals = prog[2]
+            prog = prog[0]
         if globals is None:
             globals = f.f_globals
             if locals is None:
diff --git a/pypy/interpreter/test/test_exec.py b/pypy/interpreter/test/test_exec.py
--- a/pypy/interpreter/test/test_exec.py
+++ b/pypy/interpreter/test/test_exec.py
@@ -262,3 +262,11 @@
         """]
         for c in code:
             compile(c, "<code>", "exec")
+
+    def test_exec_tuple(self):
+        # note: this is VERY different than testing exec("a = 42", d), because
+        # this specific case is handled specially by the AST compiler
+        d = {}
+        x = ("a = 42", d)
+        exec x
+        assert d['a'] == 42
diff --git a/pypy/interpreter/test/test_objspace.py b/pypy/interpreter/test/test_objspace.py
--- a/pypy/interpreter/test/test_objspace.py
+++ b/pypy/interpreter/test/test_objspace.py
@@ -373,7 +373,7 @@
         config = make_config(None)
         space = make_objspace(config)
         w_executable = space.wrap('executable')
-        assert space.str_w(space.getattr(space.sys, w_executable)) == 'py.py'
+        assert space.findattr(space.sys, w_executable) is None
         space.setattr(space.sys, w_executable, space.wrap('foobar'))
         assert space.str_w(space.getattr(space.sys, w_executable)) == 'foobar'
         space.startup()
diff --git a/pypy/interpreter/test/test_targetpypy.py b/pypy/interpreter/test/test_targetpypy.py
--- a/pypy/interpreter/test/test_targetpypy.py
+++ b/pypy/interpreter/test/test_targetpypy.py
@@ -8,7 +8,7 @@
         entry_point = get_entry_point(config)[0]
         entry_point(['pypy-c' , '-S', '-c', 'print 3'])
 
-def test_exeucte_source(space):
+def test_execute_source(space):
     _, d = create_entry_point(space, None)
     execute_source = d['pypy_execute_source']
     lls = rffi.str2charp("import sys; sys.modules['xyz'] = 3")
diff --git a/pypy/interpreter/unicodehelper.py b/pypy/interpreter/unicodehelper.py
--- a/pypy/interpreter/unicodehelper.py
+++ b/pypy/interpreter/unicodehelper.py
@@ -24,13 +24,9 @@
         self.end = end
         self.reason = reason
 
- at specialize.memo()
-def rpy_encode_error_handler():
-    # A RPython version of the "strict" error handler.
-    def raise_unicode_exception_encode(errors, encoding, msg, u,
-                                       startingpos, endingpos):
-        raise RUnicodeEncodeError(encoding, u, startingpos, endingpos, msg)
-    return raise_unicode_exception_encode
+def raise_unicode_exception_encode(errors, encoding, msg, u,
+                                   startingpos, endingpos):
+    raise RUnicodeEncodeError(encoding, u, startingpos, endingpos, msg)
 
 # ____________________________________________________________
 
@@ -67,5 +63,5 @@
     # This is not the case with Python3.
     return runicode.unicode_encode_utf_8(
         uni, len(uni), "strict",
-        errorhandler=rpy_encode_error_handler(),
+        errorhandler=raise_unicode_exception_encode,
         allow_surrogates=True)
diff --git a/pypy/module/__pypy__/__init__.py b/pypy/module/__pypy__/__init__.py
--- a/pypy/module/__pypy__/__init__.py
+++ b/pypy/module/__pypy__/__init__.py
@@ -82,6 +82,8 @@
         'strategy'                  : 'interp_magic.strategy',  # dict,set,list
         'set_debug'                 : 'interp_magic.set_debug',
         'locals_to_fast'            : 'interp_magic.locals_to_fast',
+        'save_module_content_for_future_reload':
+                          'interp_magic.save_module_content_for_future_reload',
     }
     if sys.platform == 'win32':
         interpleveldefs['get_console_cp'] = 'interp_magic.get_console_cp'
diff --git a/pypy/module/__pypy__/interp_magic.py b/pypy/module/__pypy__/interp_magic.py
--- a/pypy/module/__pypy__/interp_magic.py
+++ b/pypy/module/__pypy__/interp_magic.py
@@ -1,6 +1,7 @@
 from pypy.interpreter.error import OperationError, wrap_oserror
 from pypy.interpreter.gateway import unwrap_spec
 from pypy.interpreter.pyframe import PyFrame
+from pypy.interpreter.mixedmodule import MixedModule
 from rpython.rlib.objectmodel import we_are_translated
 from pypy.objspace.std.dictmultiobject import W_DictMultiObject
 from pypy.objspace.std.listobject import W_ListObject
@@ -130,3 +131,7 @@
 def locals_to_fast(space, w_frame):
     assert isinstance(w_frame, PyFrame)
     w_frame.locals2fast()
+
+ at unwrap_spec(w_module=MixedModule)
+def save_module_content_for_future_reload(space, w_module):
+    w_module.save_module_content_for_future_reload()
diff --git a/pypy/module/__pypy__/test/test_magic.py b/pypy/module/__pypy__/test/test_magic.py
new file mode 100644
--- /dev/null
+++ b/pypy/module/__pypy__/test/test_magic.py
@@ -0,0 +1,15 @@
+
+class AppTestMagic:
+    spaceconfig = dict(usemodules=['__pypy__'])
+
+    def test_save_module_content_for_future_reload(self):
+        import sys, __pypy__
+        d = sys.dont_write_bytecode
+        sys.dont_write_bytecode = "hello world"
+        __pypy__.save_module_content_for_future_reload(sys)
+        sys.dont_write_bytecode = d
+        reload(sys)
+        assert sys.dont_write_bytecode == "hello world"
+        #
+        sys.dont_write_bytecode = d
+        __pypy__.save_module_content_for_future_reload(sys)
diff --git a/pypy/module/_cffi_backend/__init__.py b/pypy/module/_cffi_backend/__init__.py
--- a/pypy/module/_cffi_backend/__init__.py
+++ b/pypy/module/_cffi_backend/__init__.py
@@ -2,13 +2,15 @@
 from pypy.interpreter.mixedmodule import MixedModule
 from rpython.rlib import rdynload
 
+VERSION = "0.9.2"
+
 
 class Module(MixedModule):
 
     appleveldefs = {
         }
     interpleveldefs = {
-        '__version__': 'space.wrap("0.8.6+")',
+        '__version__': 'space.wrap("%s")' % VERSION,
 
         'load_library': 'libraryobj.load_library',
 
diff --git a/pypy/module/_cffi_backend/cbuffer.py b/pypy/module/_cffi_backend/cbuffer.py
--- a/pypy/module/_cffi_backend/cbuffer.py
+++ b/pypy/module/_cffi_backend/cbuffer.py
@@ -81,4 +81,5 @@
     if size < 0:
         raise oefmt(space.w_TypeError,
                     "don't know the size pointed to by '%s'", ctype.name)
-    return space.wrap(MiniBuffer(LLBuffer(w_cdata._cdata, size), w_cdata))
+    ptr = w_cdata.unsafe_escaping_ptr()    # w_cdata kept alive by MiniBuffer()
+    return space.wrap(MiniBuffer(LLBuffer(ptr, size), w_cdata))
diff --git a/pypy/module/_cffi_backend/ccallback.py b/pypy/module/_cffi_backend/ccallback.py
--- a/pypy/module/_cffi_backend/ccallback.py
+++ b/pypy/module/_cffi_backend/ccallback.py
@@ -48,9 +48,12 @@
             raise oefmt(space.w_NotImplementedError,
                         "%s: callback with unsupported argument or "
                         "return type or with '...'", self.getfunctype().name)
-        res = clibffi.c_ffi_prep_closure(self.get_closure(), cif_descr.cif,
-                                         invoke_callback,
-                                         rffi.cast(rffi.VOIDP, self.unique_id))
+        with self as ptr:
+            closure_ptr = rffi.cast(clibffi.FFI_CLOSUREP, ptr)
+            unique_id = rffi.cast(rffi.VOIDP, self.unique_id)
+            res = clibffi.c_ffi_prep_closure(closure_ptr, cif_descr.cif,
+                                             invoke_callback,
+                                             unique_id)
         if rffi.cast(lltype.Signed, res) != clibffi.FFI_OK:
             raise OperationError(space.w_SystemError,
                 space.wrap("libffi failed to build this callback"))
@@ -62,12 +65,9 @@
             from pypy.module.thread.os_thread import setup_threads
             setup_threads(space)
 
-    def get_closure(self):
-        return rffi.cast(clibffi.FFI_CLOSUREP, self._cdata)
-
     #@rgc.must_be_light_finalizer
     def __del__(self):
-        clibffi.closureHeap.free(self.get_closure())
+        clibffi.closureHeap.free(rffi.cast(clibffi.FFI_CLOSUREP, self._ptr))


More information about the pypy-commit mailing list