[pypy-commit] pypy gc-incminimark-pinning: Merge release-2.3.x into gc-incminimark-pinning

groggi noreply at buildbot.pypy.org
Mon Jun 23 11:48:13 CEST 2014


Author: Gregor Wegberg <code at gregorwegberg.com>
Branch: gc-incminimark-pinning
Changeset: r72151:2dd51f2a10d4
Date: 2014-06-10 10:52 +0200
http://bitbucket.org/pypy/pypy/changeset/2dd51f2a10d4/

Log:	Merge release-2.3.x into gc-incminimark-pinning

diff too long, truncating to 2000 out of 24916 lines

diff --git a/LICENSE b/LICENSE
--- a/LICENSE
+++ b/LICENSE
@@ -128,6 +128,7 @@
     Stian Andreassen
     Laurence Tratt
     Wanja Saatkamp
+    Ivan Sichmann Freitas
     Gerald Klix
     Mike Blume
     Oscar Nierstrasz
@@ -212,7 +213,9 @@
     Alejandro J. Cura
     Jacob Oscarson
     Travis Francis Athougies
+    Ryan Gonzalez
     Kristjan Valur Jonsson
+    Sebastian Pawluś
     Neil Blakey-Milner
     anatoly techtonik
     Lutz Paelike
@@ -245,6 +248,7 @@
     Michael Hudson-Doyle
     Anders Sigfridsson
     Yasir Suhail
+    rafalgalczynski at gmail.com
     Floris Bruynooghe
     Laurens Van Houtven
     Akira Li
@@ -274,6 +278,8 @@
     Zooko Wilcox-O Hearn
     Tomer Chachamu
     Christopher Groskopf
+    Asmo Soinio
+    Stefan Marr
     jiaaro
     opassembler.py
     Antony Lee
@@ -289,6 +295,7 @@
     yasirs
     Michael Chermside
     Anna Ravencroft
+    Andrew Chambers
     Julien Phalip
     Dan Loewenherz
 
diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py
--- a/_pytest/resultlog.py
+++ b/_pytest/resultlog.py
@@ -56,6 +56,9 @@
         for line in longrepr.splitlines():
             py.builtin.print_(" %s" % line, file=self.logfile)
         for key, text in sections:
+            # py.io.StdCaptureFD may send in unicode
+            if isinstance(text, unicode):
+                text = text.encode('utf-8')
             py.builtin.print_(" ", file=self.logfile)
             py.builtin.print_(" -------------------- %s --------------------"
                               % key.rstrip(), file=self.logfile)
diff --git a/lib-python/2.7/imputil.py b/lib-python/2.7/imputil.py
--- a/lib-python/2.7/imputil.py
+++ b/lib-python/2.7/imputil.py
@@ -422,7 +422,8 @@
     saved back to the filesystem for future imports. The source file's
     modification timestamp must be provided as a Long value.
     """
-    codestring = open(pathname, 'rU').read()
+    with open(pathname, 'rU') as fp:
+        codestring = fp.read()
     if codestring and codestring[-1] != '\n':
         codestring = codestring + '\n'
     code = __builtin__.compile(codestring, pathname, 'exec')
@@ -603,8 +604,8 @@
         self.desc = desc
 
     def import_file(self, filename, finfo, fqname):
-        fp = open(filename, self.desc[1])
-        module = imp.load_module(fqname, fp, filename, self.desc)
+        with open(filename, self.desc[1]) as fp:
+            module = imp.load_module(fqname, fp, filename, self.desc)
         module.__file__ = filename
         return 0, module, { }
 
diff --git a/lib-python/2.7/modulefinder.py b/lib-python/2.7/modulefinder.py
--- a/lib-python/2.7/modulefinder.py
+++ b/lib-python/2.7/modulefinder.py
@@ -109,16 +109,16 @@
 
     def run_script(self, pathname):
         self.msg(2, "run_script", pathname)
-        fp = open(pathname, READ_MODE)
-        stuff = ("", "r", imp.PY_SOURCE)
-        self.load_module('__main__', fp, pathname, stuff)
+        with open(pathname, READ_MODE) as fp:
+            stuff = ("", "r", imp.PY_SOURCE)
+            self.load_module('__main__', fp, pathname, stuff)
 
     def load_file(self, pathname):
         dir, name = os.path.split(pathname)
         name, ext = os.path.splitext(name)
-        fp = open(pathname, READ_MODE)
-        stuff = (ext, "r", imp.PY_SOURCE)
-        self.load_module(name, fp, pathname, stuff)
+        with open(pathname, READ_MODE) as fp:
+            stuff = (ext, "r", imp.PY_SOURCE)
+            self.load_module(name, fp, pathname, stuff)
 
     def import_hook(self, name, caller=None, fromlist=None, level=-1):
         self.msg(3, "import_hook", name, caller, fromlist, level)
@@ -461,6 +461,8 @@
         fp, buf, stuff = self.find_module("__init__", m.__path__)
         self.load_module(fqname, fp, buf, stuff)
         self.msgout(2, "load_package ->", m)
+        if fp:
+            fp.close()
         return m
 
     def add_module(self, fqname):
diff --git a/lib-python/2.7/test/test_argparse.py b/lib-python/2.7/test/test_argparse.py
--- a/lib-python/2.7/test/test_argparse.py
+++ b/lib-python/2.7/test/test_argparse.py
@@ -48,6 +48,9 @@
 
     def tearDown(self):
         os.chdir(self.old_dir)
+        import gc
+        # Force a collection which should close FileType() options
+        gc.collect()
         for root, dirs, files in os.walk(self.temp_dir, topdown=False):
             for name in files:
                 os.chmod(os.path.join(self.temp_dir, name), stat.S_IWRITE)
diff --git a/lib-python/2.7/test/test_gdbm.py b/lib-python/2.7/test/test_gdbm.py
--- a/lib-python/2.7/test/test_gdbm.py
+++ b/lib-python/2.7/test/test_gdbm.py
@@ -74,6 +74,29 @@
         size2 = os.path.getsize(filename)
         self.assertTrue(size1 > size2 >= size0)
 
+    def test_sync(self):
+        # check if sync works at all, not sure how to check it
+        self.g = gdbm.open(filename, 'cf')
+        self.g['x'] = 'x' * 10000
+        self.g.sync()
+
+    def test_get_key(self):
+        self.g = gdbm.open(filename, 'cf')
+        self.g['x'] = 'x' * 10000
+        self.g.close()
+        self.g = gdbm.open(filename, 'r')
+        self.assertEquals(self.g['x'], 'x' * 10000)
+
+    def test_key_with_null_bytes(self):
+        key = 'a\x00b'
+        value = 'c\x00d'
+        self.g = gdbm.open(filename, 'cf')
+        self.g[key] = value
+        self.g.close()
+        self.g = gdbm.open(filename, 'r')
+        self.assertEquals(self.g[key], value)
+        self.assertTrue(key in self.g)
+        self.assertTrue(self.g.has_key(key))
 
 def test_main():
     run_unittest(TestGdbm)
diff --git a/lib_pypy/_tkinter/license.terms b/lib_pypy/_tkinter/license.terms
new file mode 100644
--- /dev/null
+++ b/lib_pypy/_tkinter/license.terms
@@ -0,0 +1,39 @@
+This software is copyrighted by the Regents of the University of
+California, Sun Microsystems, Inc., and other parties.  The following
+terms apply to all files associated with the software unless explicitly
+disclaimed in individual files.
+
+The authors hereby grant permission to use, copy, modify, distribute,
+and license this software and its documentation for any purpose, provided
+that existing copyright notices are retained in all copies and that this
+notice is included verbatim in any distributions. No written agreement,
+license, or royalty fee is required for any of the authorized uses.
+Modifications to this software may be copyrighted by their authors
+and need not follow the licensing terms described here, provided that
+the new terms are clearly indicated on the first page of each file where
+they apply.
+
+IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
+FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
+ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
+DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.  THIS SOFTWARE
+IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
+NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+MODIFICATIONS.
+
+GOVERNMENT USE: If you are acquiring this software on behalf of the
+U.S. government, the Government shall have only "Restricted Rights"
+in the software and related documentation as defined in the Federal 
+Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2).  If you
+are acquiring the software on behalf of the Department of Defense, the
+software shall be classified as "Commercial Computer Software" and the
+Government shall have only "Restricted Rights" as defined in Clause
+252.227-7013 (c) (1) of DFARs.  Notwithstanding the foregoing, the
+authors grant the U.S. Government and others acting in its behalf
+permission to use and distribute the software in accordance with the
+terms specified in this license.
diff --git a/lib_pypy/gdbm.py b/lib_pypy/gdbm.py
new file mode 100644
--- /dev/null
+++ b/lib_pypy/gdbm.py
@@ -0,0 +1,174 @@
+import cffi, os
+
+ffi = cffi.FFI()
+ffi.cdef('''
+#define GDBM_READER ...
+#define GDBM_WRITER ...
+#define GDBM_WRCREAT ...
+#define GDBM_NEWDB ...
+#define GDBM_FAST ...
+#define GDBM_SYNC ...
+#define GDBM_NOLOCK ...
+#define GDBM_REPLACE ...
+
+void* gdbm_open(char *, int, int, int, void (*)());
+void gdbm_close(void*);
+
+typedef struct {
+    char *dptr;
+    int   dsize;
+} datum;
+
+datum gdbm_fetch(void*, datum);
+int gdbm_delete(void*, datum);
+int gdbm_store(void*, datum, datum, int);
+int gdbm_exists(void*, datum);
+
+int gdbm_reorganize(void*);
+
+datum gdbm_firstkey(void*);
+datum gdbm_nextkey(void*, datum);
+void gdbm_sync(void*);
+
+char* gdbm_strerror(int);
+int gdbm_errno;
+
+void free(void*);
+''')
+
+try:
+    lib = ffi.verify('''
+    #include "gdbm.h"
+    ''', libraries=['gdbm'])
+except cffi.VerificationError as e:
+    # distutils does not preserve the actual message,
+    # but the verification is simple enough that the
+    # failure must be due to missing gdbm dev libs
+    raise ImportError('%s: %s' %(e.__class__.__name__, e))
+
+class error(Exception):
+    pass
+
+def _fromstr(key):
+    if not isinstance(key, str):
+        raise TypeError("gdbm mappings have string indices only")
+    return {'dptr': ffi.new("char[]", key), 'dsize': len(key)}
+
+class gdbm(object):
+    ll_dbm = None
+
+    def __init__(self, filename, iflags, mode):
+        res = lib.gdbm_open(filename, 0, iflags, mode, ffi.NULL)
+        self.size = -1
+        if not res:
+            self._raise_from_errno()
+        self.ll_dbm = res
+
+    def close(self):
+        if self.ll_dbm:
+            lib.gdbm_close(self.ll_dbm)
+            self.ll_dbm = None
+
+    def _raise_from_errno(self):
+        if ffi.errno:
+            raise error(os.strerror(ffi.errno))
+        raise error(lib.gdbm_strerror(lib.gdbm_errno))
+
+    def __len__(self):
+        if self.size < 0:
+            self.size = len(self.keys())
+        return self.size
+
+    def __setitem__(self, key, value):
+        self._check_closed()
+        self.size = -1
+        r = lib.gdbm_store(self.ll_dbm, _fromstr(key), _fromstr(value),
+                           lib.GDBM_REPLACE)
+        if r < 0:
+            self._raise_from_errno()
+
+    def __delitem__(self, key):
+        self._check_closed()
+        res = lib.gdbm_delete(self.ll_dbm, _fromstr(key))
+        if res < 0:
+            raise KeyError(key)
+
+    def __contains__(self, key):
+        self._check_closed()
+        return lib.gdbm_exists(self.ll_dbm, _fromstr(key))
+    has_key = __contains__
+
+    def __getitem__(self, key):
+        self._check_closed()
+        drec = lib.gdbm_fetch(self.ll_dbm, _fromstr(key))
+        if not drec.dptr:
+            raise KeyError(key)
+        res = str(ffi.buffer(drec.dptr, drec.dsize))
+        lib.free(drec.dptr)
+        return res
+
+    def keys(self):
+        self._check_closed()
+        l = []
+        key = lib.gdbm_firstkey(self.ll_dbm)
+        while key.dptr:
+            l.append(str(ffi.buffer(key.dptr, key.dsize)))
+            nextkey = lib.gdbm_nextkey(self.ll_dbm, key)
+            lib.free(key.dptr)
+            key = nextkey
+        return l
+
+    def firstkey(self):
+        self._check_closed()
+        key = lib.gdbm_firstkey(self.ll_dbm)
+        if key.dptr:
+            res = str(ffi.buffer(key.dptr, key.dsize))
+            lib.free(key.dptr)
+            return res
+
+    def nextkey(self, key):
+        self._check_closed()
+        key = lib.gdbm_nextkey(self.ll_dbm, _fromstr(key))
+        if key.dptr:
+            res = str(ffi.buffer(key.dptr, key.dsize))
+            lib.free(key.dptr)
+            return res
+
+    def reorganize(self):
+        self._check_closed()
+        if lib.gdbm_reorganize(self.ll_dbm) < 0:
+            self._raise_from_errno()
+
+    def _check_closed(self):
+        if not self.ll_dbm:
+            raise error("GDBM object has already been closed")
+
+    __del__ = close
+
+    def sync(self):
+        self._check_closed()
+        lib.gdbm_sync(self.ll_dbm)
+
+def open(filename, flags='r', mode=0666):
+    if flags[0] == 'r':
+        iflags = lib.GDBM_READER
+    elif flags[0] == 'w':
+        iflags = lib.GDBM_WRITER
+    elif flags[0] == 'c':
+        iflags = lib.GDBM_WRCREAT
+    elif flags[0] == 'n':
+        iflags = lib.GDBM_NEWDB
+    else:
+        raise error("First flag must be one of 'r', 'w', 'c' or 'n'")
+    for flag in flags[1:]:
+        if flag == 'f':
+            iflags |= lib.GDBM_FAST
+        elif flag == 's':
+            iflags |= lib.GDBM_SYNC
+        elif flag == 'u':
+            iflags |= lib.GDBM_NOLOCK
+        else:
+            raise error("Flag '%s' not supported" % flag)
+    return gdbm(filename, iflags, mode)
+
+open_flags = "rwcnfsu"
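A minimal usage sketch of the module above (the database path is hypothetical; each call maps onto one of the ``gdbm_*`` functions declared in the ``cdef`` block)::

    import gdbm

    db = gdbm.open('/tmp/example.db', 'cf')  # 'c' -> GDBM_WRCREAT, 'f' -> GDBM_FAST
    db['answer'] = '42'                      # keys and values must be str
    assert 'answer' in db                    # gdbm_exists()
    assert db['answer'] == '42'              # gdbm_fetch()
    db.sync()                                # flush to disk
    key = db.firstkey()                      # iterate with firstkey()/nextkey()
    while key is not None:
        print key, db[key]
        key = db.nextkey(key)
    db.close()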
diff --git a/pypy/doc/Makefile b/pypy/doc/Makefile
--- a/pypy/doc/Makefile
+++ b/pypy/doc/Makefile
@@ -7,63 +7,80 @@
 PAPER         =
 BUILDDIR      = _build
 
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
 # Internal variables.
 PAPEROPT_a4     = -D latex_paper_size=a4
 PAPEROPT_letter = -D latex_paper_size=letter
 ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
 
-.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex man changes linkcheck doctest
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
 
 help:
 	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html      to make standalone HTML files"
-	@echo "  dirhtml   to make HTML files named index.html in directories"
-	@echo "  pickle    to make pickle files"
-	@echo "  json      to make JSON files"
-	@echo "  htmlhelp  to make HTML files and a HTML help project"
-	@echo "  qthelp    to make HTML files and a qthelp project"
-	@echo "  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  man       to make manual pages"
-	@echo "  changes   to make an overview of all changed/added/deprecated items"
-	@echo "  linkcheck to check all external links for integrity"
-	@echo "  doctest   to run all doctests embedded in the documentation (if enabled)"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
 
 clean:
-	-rm -rf $(BUILDDIR)/*
+	rm -rf $(BUILDDIR)/*
 
 html:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
 	@echo
 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
 
 dirhtml:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
 	@echo
 	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
 
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
 pickle:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
 	@echo
 	@echo "Build finished; now you can process the pickle files."
 
 json:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
 	@echo
 	@echo "Build finished; now you can process the JSON files."
 
 htmlhelp:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
 	@echo
 	@echo "Build finished; now you can run HTML Help Workshop with the" \
 	      ".hhp project file in $(BUILDDIR)/htmlhelp."
 
 qthelp:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
 	@echo
 	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
@@ -72,35 +89,89 @@
 	@echo "To view the help file:"
 	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PyPy.qhc"
 
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/PyPy"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PyPy"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
 latex:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
 	@echo
 	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
-	      "run these through (pdf)latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
 
 man:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
 	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man"
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
 
 changes:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
 	@echo
 	@echo "The overview file is in $(BUILDDIR)/changes."
 
 linkcheck:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
 	@echo
 	@echo "Link check complete; look for any errors in the above output " \
 	      "or in $(BUILDDIR)/linkcheck/output.txt."
 
 doctest:
-	# python config/generate.py #readthedocs will not run this Makefile
 	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
 	@echo "Testing of doctests in the sources finished, look at the " \
 	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/pypy/doc/conf.py b/pypy/doc/conf.py
--- a/pypy/doc/conf.py
+++ b/pypy/doc/conf.py
@@ -18,11 +18,31 @@
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 sys.path.append(os.path.abspath('.'))
 
+
+# -- Read The Docs theme config ------------------------------------------------
+
+# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
+on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
+
+if not on_rtd:  # only import and set the theme if we're building docs locally
+    try:
+        import sphinx_rtd_theme
+        html_theme = 'sphinx_rtd_theme'
+        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+    except ImportError:
+        print('sphinx_rtd_theme is not installed')
+        html_theme = 'default'
+
+# otherwise, readthedocs.org uses their theme by default, so no need to specify it
+
+
 # -- General configuration -----------------------------------------------------
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz', 'pypyconfig']
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
+              'sphinx.ext.todo', 'sphinx.ext.ifconfig', 'sphinx.ext.graphviz',
+              'pypyconfig']
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
@@ -91,7 +111,7 @@
 
 # The theme to use for HTML and HTML Help pages.  Major themes that come with
 # Sphinx are currently 'default' and 'sphinxdoc'.
-html_theme = 'default'
+#html_theme = 'default'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
diff --git a/pypy/doc/contributor.rst b/pypy/doc/contributor.rst
--- a/pypy/doc/contributor.rst
+++ b/pypy/doc/contributor.rst
@@ -99,6 +99,7 @@
   Stian Andreassen
   Laurence Tratt
   Wanja Saatkamp
+  Ivan Sichmann Freitas
   Gerald Klix
   Mike Blume
   Oscar Nierstrasz
@@ -183,7 +184,9 @@
   Alejandro J. Cura
   Jacob Oscarson
   Travis Francis Athougies
+  Ryan Gonzalez
   Kristjan Valur Jonsson
+  Sebastian Pawluś
   Neil Blakey-Milner
   anatoly techtonik
   Lutz Paelike
@@ -216,6 +219,7 @@
   Michael Hudson-Doyle
   Anders Sigfridsson
   Yasir Suhail
+  rafalgalczynski at gmail.com
   Floris Bruynooghe
   Laurens Van Houtven
   Akira Li
@@ -245,6 +249,8 @@
   Zooko Wilcox-O Hearn
   Tomer Chachamu
   Christopher Groskopf
+  Asmo Soinio
+  Stefan Marr
   jiaaro
   opassembler.py
   Antony Lee
diff --git a/pypy/doc/index-of-release-notes.rst b/pypy/doc/index-of-release-notes.rst
--- a/pypy/doc/index-of-release-notes.rst
+++ b/pypy/doc/index-of-release-notes.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
 
+   release-2.3.1.rst
    release-2.3.0.rst
    release-2.2.1.rst
    release-2.2.0.rst
diff --git a/pypy/doc/index.rst b/pypy/doc/index.rst
--- a/pypy/doc/index.rst
+++ b/pypy/doc/index.rst
@@ -110,7 +110,7 @@
 .. _`Getting Started`: getting-started.html
 .. _`Papers`: extradoc.html
 .. _`Videos`: video-index.html
-.. _`Release 2.3.0`: http://pypy.org/download.html
+.. _`Release 2.3.1`: http://pypy.org/download.html
 .. _`speed.pypy.org`: http://speed.pypy.org
 .. _`RPython toolchain`: translation.html
 .. _`potential project ideas`: project-ideas.html
diff --git a/pypy/doc/make.bat b/pypy/doc/make.bat
--- a/pypy/doc/make.bat
+++ b/pypy/doc/make.bat
@@ -2,11 +2,15 @@
 
 REM Command file for Sphinx documentation
 
-set SPHINXBUILD=sphinx-build
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
 set BUILDDIR=_build
 set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
 if NOT "%PAPER%" == "" (
 	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
 )
 
 if "%1" == "" goto help
@@ -14,16 +18,25 @@
 if "%1" == "help" (
 	:help
 	echo.Please use `make ^<target^>` where ^<target^> is one of
-	echo.  html      to make standalone HTML files
-	echo.  dirhtml   to make HTML files named index.html in directories
-	echo.  pickle    to make pickle files
-	echo.  json      to make JSON files
-	echo.  htmlhelp  to make HTML files and a HTML help project
-	echo.  qthelp    to make HTML files and a qthelp project
-	echo.  latex     to make LaTeX files, you can set PAPER=a4 or PAPER=letter
-	echo.  changes   to make an overview over all changed/added/deprecated items
-	echo.  linkcheck to check all external links for integrity
-	echo.  doctest   to run all doctests embedded in the documentation if enabled
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  xml        to make Docutils-native XML files
+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
 	goto end
 )
 
@@ -33,8 +46,34 @@
 	goto end
 )
 
+
+REM Check if sphinx-build is available and fallback to Python version if any
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 goto sphinx_python
+goto sphinx_ok
+
+:sphinx_python
+
+set SPHINXBUILD=python -m sphinx.__init__
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+:sphinx_ok
+
+
 if "%1" == "html" (
 	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
 	goto end
@@ -42,13 +81,23 @@
 
 if "%1" == "dirhtml" (
 	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
 	goto end
 )
 
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
 if "%1" == "pickle" (
 	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Build finished; now you can process the pickle files.
 	goto end
@@ -56,6 +105,7 @@
 
 if "%1" == "json" (
 	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Build finished; now you can process the JSON files.
 	goto end
@@ -63,6 +113,7 @@
 
 if "%1" == "htmlhelp" (
 	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Build finished; now you can run HTML Help Workshop with the ^
 .hhp project file in %BUILDDIR%/htmlhelp.
@@ -71,6 +122,7 @@
 
 if "%1" == "qthelp" (
 	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Build finished; now you can run "qcollectiongenerator" with the ^
 .qhcp project file in %BUILDDIR%/qthelp, like this:
@@ -80,15 +132,85 @@
 	goto end
 )
 
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
 if "%1" == "latex" (
 	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
 	goto end
 )
 
+if "%1" == "latexpdf" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdfja" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf-ja
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
 if "%1" == "changes" (
 	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.The overview file is in %BUILDDIR%/changes.
 	goto end
@@ -96,6 +218,7 @@
 
 if "%1" == "linkcheck" (
 	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Link check complete; look for any errors in the above output ^
 or in %BUILDDIR%/linkcheck/output.txt.
@@ -104,10 +227,27 @@
 
 if "%1" == "doctest" (
 	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
 	echo.
 	echo.Testing of doctests in the sources finished, look at the ^
 results in %BUILDDIR%/doctest/output.txt.
 	goto end
 )
 
+if "%1" == "xml" (
+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The XML files are in %BUILDDIR%/xml.
+	goto end
+)
+
+if "%1" == "pseudoxml" (
+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+	goto end
+)
+
 :end
diff --git a/pypy/doc/man/pypy.1.rst b/pypy/doc/man/pypy.1.rst
--- a/pypy/doc/man/pypy.1.rst
+++ b/pypy/doc/man/pypy.1.rst
@@ -100,6 +100,8 @@
         ``debug_start``/``debug_stop`` but not any nested
         ``debug_print``.
         *fname* can be ``-`` to log to *stderr*.
+        Note that using a ``:`` in *fname* is a bad idea; Windows
+        users, beware.
 
     ``:``\ *fname*
         Full logging, including ``debug_print``.
diff --git a/pypy/doc/release-2.3.0.rst b/pypy/doc/release-2.3.0.rst
--- a/pypy/doc/release-2.3.0.rst
+++ b/pypy/doc/release-2.3.0.rst
@@ -93,7 +93,7 @@
 * Fix handling of tp_name for type objects
 
 .. _`HippyVM`: http://www.hippyvm.com
-.. _`whats-new`: :http://doc.pypy.org/en/latest/whatsnew-2.3.0.html
+.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.0.html
 
 
 New Platforms and Features
diff --git a/pypy/doc/release-2.3.1.rst b/pypy/doc/release-2.3.1.rst
new file mode 100644
--- /dev/null
+++ b/pypy/doc/release-2.3.1.rst
@@ -0,0 +1,78 @@
+=================================================
+PyPy 2.3.1 - Terrestrial Arthropod Trap Revisited
+=================================================
+
+We're pleased to announce PyPy 2.3.1, a feature-and-bugfix improvement over our
+recent release last month.
+
+This release contains several bugfixes and enhancements.
+
+You can download the PyPy 2.3.1 release here:
+
+    http://pypy.org/download.html
+
+We would like to thank our donors for the continued support of the PyPy
+project, and those who donate to our three sub-projects.
+We've shown quite a bit of progress,
+but we're slowly running out of funds.
+Please consider donating more, or even better, convince your employer to donate,
+so we can finish those projects!  The three sub-projects are:
+
+* `STM`_ (software transactional memory): a preview will be released very soon,
+  once we fix a few bugs
+
+* `NumPy`_ which requires installation of our fork of upstream numpy, available `on bitbucket`_
+
+.. _`STM`: http://pypy.org/tmdonate2.html
+.. _`NumPy`: http://pypy.org/numpydonate.html
+.. _`on bitbucket`: https://www.bitbucket.org/pypy/numpy   
+
+What is PyPy?
+=============
+
+PyPy is a very compliant Python interpreter, almost a drop-in replacement for
+CPython 2.7. It's fast (`pypy 2.3 and cpython 2.7.x`_ performance comparison;
+note that cpython's speed has not changed since 2.7.2)
+due to its integrated tracing JIT compiler.
+
+This release supports x86 machines running Linux 32/64, Mac OS X 64, Windows,
+and OpenBSD,
+as well as newer ARM hardware (ARMv6 or ARMv7, with VFPv3) running Linux. 
+
+While we support 32 bit python on Windows, work on the native Windows 64
+bit python is still stalling; we would welcome a volunteer
+to `handle that`_.
+
+.. _`pypy 2.3 and cpython 2.7.x`: http://speed.pypy.org
+.. _`handle that`: http://doc.pypy.org/en/latest/windows.html#what-is-missing-for-a-full-64-bit-translation
+
+Highlights
+==========
+
+Issues with the 2.3 release were resolved after being reported by users to
+our new issue tracker at https://bitbucket.org/pypy/pypy/issues or on IRC at
+#pypy. Here is a summary of the user-facing changes;
+for more information see `whats-new`_:
+
+* The built-in ``struct`` module was renamed to ``_struct``, solving issues
+  with IDLE and other modules.
+
+* Support for compilation with gcc-4.9
+
+* A rewrite of packaging.py which produces our downloadable packages to
+  modernize command line argument handling and to document third-party
+  contributions in our LICENSE file
+
+* A CFFI-based version of the gdbm module is now included in our downloads
+
+* Many issues were resolved_ since the 2.3 release on May 8
+
+.. _`whats-new`: http://doc.pypy.org/en/latest/whatsnew-2.3.1.html
+.. _resolved: https://bitbucket.org/pypy/pypy/issues?status=resolved
+
+Please try it out and let us know what you think. We especially welcome
+success stories; we know you are using PyPy, please tell us about it!
+
+Cheers
+
+The PyPy Team
+
diff --git a/pypy/doc/stm.rst b/pypy/doc/stm.rst
--- a/pypy/doc/stm.rst
+++ b/pypy/doc/stm.rst
@@ -1,70 +1,78 @@
-======================
-Transactional Memory
-======================
+
+=============================
+Software Transactional Memory
+=============================
 
 .. contents::
 
 
 This page is about ``pypy-stm``, a special in-development version of
 PyPy which can run multiple independent CPU-hungry threads in the same
-process in parallel.  It is side-stepping what is known in the Python
-world as the "global interpreter lock (GIL)" problem.
+process in parallel.  It is a solution to what is known in the Python
+world as the "global interpreter lock (GIL)" problem --- it is an
+implementation of Python without the GIL.
 
-"STM" stands for Software Transactional Memory, the technique used
+"STM" stands for Software `Transactional Memory`_, the technique used
 internally.  This page describes ``pypy-stm`` from the perspective of a
 user, describes work in progress, and finally gives references to more
 implementation details.
 
-This work was done mostly by Remi Meier and Armin Rigo.  Thanks to all
-donors for crowd-funding the work so far!  Please have a look at the
-`2nd call for donation`_.
+This work was done by Remi Meier and Armin Rigo.  Thanks to all donors
+for crowd-funding the work so far!  Please have a look at the `2nd call
+for donation`_.
 
+.. _`Transactional Memory`: http://en.wikipedia.org/wiki/Transactional_memory
 .. _`2nd call for donation`: http://pypy.org/tmdonate2.html
 
 
 Introduction
 ============
 
-``pypy-stm`` is a variant of the regular PyPy interpreter.  With caveats
-listed below, it should be in theory within 25%-50% slower than a
-regular PyPy, comparing the JIT version in both cases.  It is called
+``pypy-stm`` is a variant of the regular PyPy interpreter.  With caveats_
+listed below, it should in theory be 20%-50% slower than a
+regular PyPy, comparing the JIT version in both cases (but see below!).
+It is called
 STM for Software Transactional Memory, which is the internal technique
 used (see `Reference to implementation details`_).
 
-What you get in exchange for this slow-down is that ``pypy-stm`` runs
-any multithreaded Python program on multiple CPUs at once.  Programs
-running two threads or more in parallel should ideally run faster than
-in a regular PyPy, either now or soon as issues are fixed.  In one way,
-that's all there is to it: this is a GIL-less Python, feel free to
-`download and try it`__.  However, the deeper idea behind the
-``pypy-stm`` project is to improve what is so far the state-of-the-art
-for using multiple CPUs, which for cases where separate processes don't
-work is done by writing explicitly multi-threaded programs.  Instead,
-``pypy-stm`` is pushing forward an approach to *hide* the threads, as
-described below in `atomic sections`_.
+The benefit is that the resulting ``pypy-stm`` can execute multiple
+threads of Python code in parallel.  Programs running two threads or
+more in parallel should ideally run faster than in a regular PyPy
+(either now, or soon as bugs are fixed).
 
+* ``pypy-stm`` is fully compatible with a GIL-based PyPy; you can use
+  it as a drop-in replacement and multithreaded programs will run on
+  multiple cores.
 
-.. __:
+* ``pypy-stm`` does not impose any special API to the user, but it
+  provides a new pure Python module called `transactional_memory`_ with
+  features to inspect the state or debug conflicts_ that prevent
+  parallelization.  This module can also be imported on top of a non-STM
+  PyPy or CPython.
 
-Current status
-==============
+* Building on top of the way the GIL is removed, we will talk
+  about `Atomic sections, Transactions, etc.: a better way to write
+  parallel programs`_.
+
+
+Getting Started
+===============
 
 **pypy-stm requires 64-bit Linux for now.**
 
 Development is done in the branch `stmgc-c7`_.  If you are only
-interested in trying it out, you can download a Ubuntu 12.04 binary
-here__ (``pypy-2.2.x-stm*.tar.bz2``; this version is a release mode,
-but not stripped of debug symbols).  The current version supports four
-"segments", which means that it will run up to four threads in parallel,
-in other words it is running a thread pool up to 4 threads emulating normal
-threads.
+interested in trying it out, you can download a Ubuntu binary here__
+(``pypy-2.3.x-stm*.tar.bz2``, Ubuntu 12.04-14.04; these versions are
+release mode, but not stripped of debug symbols).  The current version
+supports four "segments", which means that it will run up to four
+threads in parallel.
 
 To build a version from sources, you first need to compile a custom
-version of clang; we recommend downloading `llvm and clang like
-described here`__, but at revision 201645 (use ``svn co -r 201645 ...``
+version of clang(!); we recommend downloading `llvm and clang like
+described here`__, but at revision 201645 (use ``svn co -r 201645 <path>``
 for all checkouts).  Then apply all the patches in `this directory`__:
-they are fixes for the very extensive usage that pypy-stm does of a
-clang-only feature (without them, you get crashes of clang).  Then get
+they are fixes for a clang-only feature that hasn't been used so heavily
+in the past (without the patches, you get crashes of clang).  Then get
 the branch `stmgc-c7`_ of PyPy and run::
 
    rpython/bin/rpython -Ojit --stm pypy/goal/targetpypystandalone.py
@@ -75,23 +83,31 @@
 .. __: https://bitbucket.org/pypy/stmgc/src/default/c7/llvmfix/
 
 
-Caveats:
+.. _caveats:
 
-* So far, small examples work fine, but there are still a number of
-  bugs.  We're busy fixing them.
+Current status
+--------------
+
+* So far, small examples work fine, but there are still a few bugs.
+  We're busy fixing them as we find them; feel free to `report bugs`_.
+
+* It runs with an overhead as low as 20% on examples like "richards".
+  There are also other examples with higher overheads --up to 10x for
+  "translate.py"-- which we are still trying to understand.  One suspect
+  is our partial GC implementation, see below.
 
 * Currently limited to 1.5 GB of RAM (this is just a parameter in
-  `core.h`__).  Memory overflows are not detected correctly, so may
-  cause segmentation faults.
+  `core.h`__).  Memory overflows are not correctly handled; they cause
+  segfaults.
 
-* The JIT warm-up time is abysmal (as opposed to the regular PyPy's,
-  which is "only" bad).  Moreover, you should run it with a command like
-  ``pypy-stm --jit trace_limit=60000 args...``; the default value of
-  6000 for ``trace_limit`` is currently too low (6000 should become
-  reasonable again as we improve).  Also, in order to produce machine
-  code, the JIT needs to enter a special single-threaded mode for now.
-  This all means that you *will* get very bad performance results if
-  your program doesn't run for *many* seconds for now.
+* The JIT warm-up time improved recently but is still bad.  In order to
+  produce machine code, the JIT needs to enter a special single-threaded
+  mode for now.  This means that you will get bad performance results if
+  your program doesn't run for several seconds, where *several* can mean
+  *many.*  When trying benchmarks, be sure to check that you have
+  reached the warmed state, i.e. the performance is not improving any
+  more.  This should be clear from the fact that as long as it's
+  producing more machine code, ``pypy-stm`` will run on a single core.
 
 * The GC is new; although clearly inspired by PyPy's regular GC, it
   misses a number of optimizations for now.  Programs allocating large
@@ -108,111 +124,197 @@
 * The STM system is based on very efficient read/write barriers, which
   are mostly done (their placement could be improved a bit in
   JIT-generated machine code).  But the overall bookkeeping logic could
-  see more improvements (see Statistics_ below).
-
-* You can use `atomic sections`_, but the most visible missing thing is
-  that you don't get reports about the "conflicts" you get.  This would
-  be the first thing that you need in order to start using atomic
-  sections more extensively.  Also, for now: for better results, try to
-  explicitly force a transaction break just before (and possibly after)
-  each large atomic section, with ``time.sleep(0)``.
+  see more improvements (see `Low-level statistics`_ below).
 
 * Forking the process is slow because the complete memory needs to be
-  copied manually right now.
+  copied manually.  A warning is printed to this effect.
 
-* Very long-running processes should eventually crash on an assertion
-  error because of a non-implemented overflow of an internal 29-bit
-  number, but this requires at the very least ten hours --- more
-  probably, several days or more.
+* Very long-running processes (on the order of days) will eventually
+  crash on an assertion error because of a non-implemented overflow of
+  an internal 29-bit number.
 
 .. _`report bugs`: https://bugs.pypy.org/
 .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stm/core.h
 
 
 
-Statistics
+User Guide
 ==========
+  
 
-When a non-main thread finishes, you get statistics printed to stderr,
-looking like that::
+Drop-in replacement
+-------------------
 
-      thread 0x7f73377fe600:
-          outside transaction          42182  0.506 s
-          run current                  85466  0.000 s
-          run committed                34262  3.178 s
-          run aborted write write       6982  0.083 s
-          run aborted write read         550  0.005 s
-          run aborted inevitable         388  0.010 s
-          run aborted other                0  0.000 s
-          wait free segment                0  0.000 s
-          wait write read                 78  0.027 s
-          wait inevitable                887  0.490 s
-          wait other                       0  0.000 s
-          bookkeeping                  51418  0.606 s
-          minor gc                    162970  1.135 s
-          major gc                         1  0.019 s
-          sync pause                   59173  1.738 s
-          spin loop                   129512  0.094 s
+Multithreaded, CPU-intensive Python programs should work unchanged on
+``pypy-stm``.  They will run using multiple CPU cores in parallel.
 
-The first number is a counter; the second number gives the associated
-time (the amount of real time that the thread was in this state; the sum
-of all the times should be equal to the total time between the thread's
-start and the thread's end).  The most important points are "run
-committed", which gives the amount of useful work, and "outside
-transaction", which should give the time spent e.g. in library calls
-(right now it seems to be a bit larger than that; to investigate).
-Everything else is overhead of various forms.  (Short-, medium- and
-long-term future work involves reducing this overhead :-)
+The existing semantics of the GIL (Global Interpreter Lock) are
+unchanged: although running on multiple cores in parallel, ``pypy-stm``
+gives the illusion that threads are run serially, with switches only
+occurring between bytecodes, not in the middle of them.  Programs can
+rely on this: using ``shared_list.append()/pop()`` or
+``shared_dict.setdefault()`` as synchronization mechanisms continues to
+work as expected.
 
-These statistics are not printed out for the main thread, for now.
+This works by internally considering the points where a standard PyPy or
+CPython would release the GIL, and replacing them with the boundaries of
+"transaction".  Like their database equivalent, multiple transactions
+can execute in parallel, but will commit in some serial order.  They
+appear to behave as if they were completely run in this serialization
+order.
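A minimal sketch of what this guarantee means in practice (the names are illustrative, not part of any API): a plain list used as a shared accumulator needs no extra lock::

    import threading

    shared_list = []

    def worker(n):
        # thread switches happen only between bytecodes, so a single
        # append() call is never interleaved with another thread's
        for i in range(n):
            shared_list.append(i)

    threads = [threading.Thread(target=worker, args=(1000,)) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    assert len(shared_list) == 4000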
 
 
 Atomic sections
-===============
+---------------
 
-While one of the goal of pypy-stm is to give a GIL-free but otherwise
-unmodified Python, the other goal is to push for a better way to use
-multithreading.  For this, you (as the Python programmer) get an API
-in the ``__pypy__.thread`` submodule:
+PyPy supports *atomic sections,* which are blocks of code which you want
+to execute without "releasing the GIL".  *This is experimental and may
+be removed in the future.*  In STM terms, this means blocks of code that
+are executed while guaranteeing that the transaction is not interrupted
+in the middle.
 
-* ``__pypy__.thread.atomic``: a context manager (i.e. you use it in
-  a ``with __pypy__.thread.atomic:`` statement).  It runs the whole
-  block of code without breaking the current transaction --- from
-  the point of view of a regular CPython/PyPy, this is equivalent to
-  saying that the GIL will not be released at all between the start and
-  the end of this block of code.
+Here is a usage example::
 
-The obvious usage is to use atomic blocks in the same way as one would
-use locks: to protect changes to some shared data, you do them in a
-``with atomic`` block, just like you would otherwise do them in a ``with
-mylock`` block after ``mylock = thread.allocate_lock()``.  This allows
-you not to care about acquiring the correct locks in the correct order;
-it is equivalent to having only one global lock.  This is how
-transactional memory is `generally described`__: as a way to efficiently
-execute such atomic blocks, running them in parallel while giving the
-illusion that they run in some serial order.
+    with __pypy__.thread.atomic:
+        assert len(lst1) == 10
+        x = lst1.pop(0)
+        lst1.append(x)
 
-.. __: http://en.wikipedia.org/wiki/Transactional_memory
+In this (bad) example, we are sure that the item popped off one end of
+the list is appended again at the other end atomically.  It means that
+another thread can run ``len(lst1)`` or ``x in lst1`` without any
+particular synchronization, and always see the same results,
+respectively ``10`` and ``True``.  It will never see the intermediate
+state where ``lst1`` only contains 9 elements.  Atomic sections are
+similar to re-entrant locks (they can be nested), but additionally they
+protect against the concurrent execution of *any* code instead of just
+code that happens to be protected by the same lock in other threads.
 
-However, the less obvious intended usage of atomic sections is as a
-wide-ranging replacement of explicit threads.  You can turn a program
-that is not multi-threaded at all into a program that uses threads
-internally, together with large atomic sections to keep the behavior
-unchanged.  This capability can be hidden in a library or in the
-framework you use; the end user's code does not need to be explicitly
-aware of using threads.  For a simple example of this, see
-`transaction.py`_ in ``lib_pypy``.  The idea is that if you have a
-program where the function ``f(key, value)`` runs on every item of some
-big dictionary, you can replace the loop with::
+Note that the notion of atomic sections is very strong. If you write
+code like this::
+
+    with __pypy__.thread.atomic:
+        time.sleep(10)
+
+then, if you think about it as if we had a GIL, you are executing a
+10-seconds-long atomic transaction without releasing the GIL at all.
+This prevents all other threads from progressing at all.  While it is
+not strictly true in ``pypy-stm``, the exact rules for when other
+threads can progress or not are rather complicated; you have to consider
+it likely that such a piece of code will eventually block all other
+threads anyway.
+
+Note that if you want to experiment with ``atomic``, you may have to
+manually add a transaction break just before the atomic block.  This is
+because the boundaries of the block are not guaranteed to be the
+boundaries of the transaction: the latter is at least as big as the
+block, but maybe bigger.  Therefore, if you run a big atomic block, it
+is a good idea to break the transaction just before.  This can be done
+e.g. by the hack of calling ``time.sleep(0)``.  (This may be fixed at
+some point.)
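Concretely, the hack mentioned above looks like this (a sketch for ``pypy-stm`` only; ``do_lots_of_work`` is a placeholder for the block's real body)::

    import time
    import __pypy__                   # available on pypy-stm

    def do_lots_of_work():
        pass                          # placeholder for the big atomic block

    time.sleep(0)                     # hack: force a transaction break first
    with __pypy__.thread.atomic:      # then run the block in one transaction
        do_lots_of_work()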
+
+There are also issues with the interaction of locks and atomic blocks.
+This can be seen if you write to files (which have locks), including
+with a ``print`` to standard output.  If one thread tries to acquire a
+lock while running in an atomic block, and another thread has got the
+same lock, then the former may fail with a ``thread.error``.  The reason
+is that "waiting" for some condition to become true --while running in
+an atomic block-- does not really make sense.  For now you can work
+around it by making sure that, say, all your prints are either in an
+``atomic`` block or none of them are.  (This kind of issue is
+theoretically hard to solve.)
+
+
+Locks
+-----
+
+**Not Implemented Yet**
+
+The thread module's locks have their basic semantic unchanged.  However,
+using them (e.g. in ``with my_lock:`` blocks) starts an alternative
+running mode, called `Software lock elision`_.  This means that PyPy
+will try to make sure that the transaction extends until the point where
+the lock is released, and if it succeeds, then the acquiring and
+releasing of the lock will be "elided".  This means that in this case,
+the whole transaction will technically not cause any write into the lock
+object --- it was unacquired before, and is still unacquired after the
+transaction.
+
+This is especially useful if two threads run ``with my_lock:`` blocks
+with the same lock.  If they each run a transaction that is long enough
+to contain the whole block, then all writes into the lock will be elided
+and the two transactions will not conflict with each other.  As usual,
+they will be serialized in some order: one of the two will appear to run
+before the other.  Simply, each of them executes an "acquire" followed
+by a "release" in the same transaction.  As explained above, the lock
+state goes from "unacquired" to "unacquired" and can thus be left
+unchanged.
+
+This approach can gracefully fail: unlike atomic sections, there is no
+guarantee that the transaction runs until the end of the block.  If you
+perform any input/output while you hold the lock, the transaction will
+end as usual just before the input/output operation.  If this occurs,
+then the lock elision mode is cancelled and the lock's "acquired" state
+is really written.
+
+Even if the lock is really acquired already, a transaction doesn't have
+to wait for it to become free again.  It can enter the elision-mode anyway
+and tentatively execute the content of the block.  It is only at the end,
+when trying to commit, that the thread will pause.  As soon as the real
+value stored in the lock is switched back to "unacquired", it can then
+proceed and attempt to commit its already-executed transaction (which
+can fail and abort and restart from scratch, as usual).
+
+Note that this is all *not implemented yet,* but we expect it to work
+even if you acquire and release several locks.  The elision-mode
+transaction will extend until the first lock you acquired is released,
+or until the code performs an input/output or a wait operation (for
+example, waiting for another lock that is currently not free).  In the
+common case of acquiring several locks in nested order, they will all be
+elided by the same transaction.
+
+.. _`software lock elision`: https://www.repository.cam.ac.uk/handle/1810/239410
+
+
+Atomic sections, Transactions, etc.: a better way to write parallel programs
+----------------------------------------------------------------------------
+
+(This section is based on locks as we plan to implement them, but also
+works with the existing atomic sections.)
+
+In the cases where elision works, the block of code can run in parallel
+with other blocks of code *even if they are protected by the same lock.*
+You still get the illusion that the blocks are run sequentially.  This
+works even for multiple threads that run each a series of such blocks
+and nothing else, protected by one single global lock.  This is
+basically the Python application-level equivalent of what was done with
+the interpreter in ``pypy-stm``: you may think you are writing
+thread-unfriendly code because of this global lock, but the
+underlying system is able to run it on multiple cores anyway.
+
+This capability can be hidden in a library or in the framework you use;
+the end user's code does not need to be explicitly aware of using
+threads.  For a simple example of this, there is `transaction.py`_ in
+``lib_pypy``.  The idea is that you write, or already have, some program
+where the function ``f(key, value)`` runs on every item of some big
+dictionary, say::
+
+    for key, value in bigdict.items():
+        f(key, value)
+
+Then you simply replace the loop with::
 
     for key, value in bigdict.items():
         transaction.add(f, key, value)
     transaction.run()
 
 This code runs the various calls to ``f(key, value)`` using a thread
-pool, but every single call is done in an atomic section.  The end
-result is that the behavior should be exactly equivalent: you don't get
-any extra multithreading issue.
+pool, but every single call is executed under the protection of a
+single, shared lock.  The end result is that the behavior is exactly
+equivalent --- in
+fact it makes little sense to do it in this way on a non-STM PyPy or on
+CPython.  But on ``pypy-stm``, the various locked calls to ``f(key,
+value)`` can tentatively be executed in parallel, even if the observable
+result is as if they were executed in some serial order.
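+
+Putting it together, a minimal self-contained sketch (``bigdict``,
+``f()`` and ``results`` are placeholders for your own data and code)::
+
+    import transaction
+
+    bigdict = dict((i, float(i)) for i in range(10000))   # placeholder data
+    results = {}
+
+    def f(key, value):
+        # note: every call writes into the same results dictionary,
+        # which may cause conflicts (see the Conflicts section below)
+        results[key] = value * value
+
+    for key, value in bigdict.items():
+        transaction.add(f, key, value)
+    transaction.run()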
 
 This approach hides the notion of threads from the end programmer,
 including all the hard multithreading-related issues.  This is not the
@@ -223,41 +325,176 @@
 only requires that the end programmer identifies where this parallelism
 is likely to be found, and communicates it to the system, using for
 example the ``transaction.add()`` scheme.
-
+    
 .. _`transaction.py`: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/lib_pypy/transaction.py
 .. _OpenMP: http://en.wikipedia.org/wiki/OpenMP
 
-==================
 
-Other APIs in pypy-stm:
+.. _`transactional_memory`:
 
-* ``__pypy__.thread.getsegmentlimit()``: return the number of "segments"
-  in this pypy-stm.  This is the limit above which more threads will not
-  be able to execute on more cores.  (Right now it is limited to 4 due
-  to inter-segment overhead, but should be increased in the future.  It
+API of transactional_memory
+---------------------------
+
+The new pure Python module ``transactional_memory`` runs on both CPython
+and PyPy, both with and without STM.  It contains:
+
+* ``getsegmentlimit()``: return the number of "segments" in
+  this pypy-stm.  This is the limit above which more threads will not be
+  able to execute on more cores.  (Right now it is limited to 4 due to
+  inter-segment overhead, but should be increased in the future.  It
   should also be settable, and the default value should depend on the
-  number of actual CPUs.)
+  number of actual CPUs.)  If STM is not available, this returns 1.
 
-* ``__pypy__.thread.exclusive_atomic``: same as ``atomic``, but
-  raises an exception if you attempt to nest it inside another
-  ``atomic``.
+* ``print_abort_info(minimum_time=0.0)``: debugging help.  Each thread
+  remembers the longest abort or pause it suffered because of cross-thread
+  contention_.  This function prints it to ``stderr`` if the time lost
+  is greater than ``minimum_time`` seconds.  The record is then
+  cleared, to make it ready for new events.  This function returns
+  ``True`` if it printed a report, and ``False`` otherwise.  (See the
+  usage sketch after this list.)
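+
+A hedged usage sketch of the two functions above
+(``run_multithreaded_workload()`` is a hypothetical placeholder; the
+module itself is pure Python and also runs without STM)::
+
+    import transactional_memory
+
+    print "number of segments:", transactional_memory.getsegmentlimit()
+
+    run_multithreaded_workload()
+
+    # print a report to stderr if some thread lost more than 0.1
+    # seconds to cross-thread contention
+    transactional_memory.print_abort_info(0.1)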
 
-* ``__pypy__.thread.signals_enabled``: a context manager that runs
-  its block with signals enabled.  By default, signals are only
-  enabled in the main thread; a non-main thread will not receive
-  signals (this is like CPython).  Enabling signals in non-main threads
-  is useful for libraries where threads are hidden and the end user is
-  not expecting his code to run elsewhere than in the main thread.
 
-Note that all of this API is (or will be) implemented in a regular PyPy
-too: for example, ``with atomic`` will simply mean "don't release the
-GIL" and ``getsegmentlimit()`` will return 1.
+API of __pypy__.thread
+----------------------
 
-==================
+The ``__pypy__.thread`` submodule is a built-in module of PyPy that
+contains a few internal built-in functions used by the
+``transactional_memory`` module, plus the following:
+    
+* ``__pypy__.thread.atomic``: a context manager to run a block in
+  fully atomic mode, without "releasing the GIL".  (May be eventually
+  removed?)
+
+* ``__pypy__.thread.signals_enabled``: a context manager that runs its
+  block with signals enabled.  By default, signals are only enabled in
+  the main thread; a non-main thread will not receive signals (this is
+  like CPython).  Enabling signals in non-main threads is useful for
+  libraries where threads are hidden and the end user does not expect
+  their code to run anywhere other than in the main thread.  (A short
+  usage sketch follows this list.)
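+
+A hedged sketch of how these two context managers compose
+(``__pypy__`` only exists on PyPy; ``handle_requests()`` is a
+placeholder)::
+
+    import __pypy__                   # only available on PyPy
+
+    def worker():
+        # let this non-main thread receive signals, like the main thread
+        with __pypy__.thread.signals_enabled:
+            # run the whole block as one atomic transaction
+            with __pypy__.thread.atomic:
+                handle_requests()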
+
+
+.. _contention:
+
+Conflicts
+---------
+
+Based on Software Transactional Memory, the ``pypy-stm`` solution is
+prone to "conflicts".  To repeat the basic idea, threads execute their code
+speculatively, and at known points (e.g. between bytecodes) they
+coordinate with each other to agree on the order in which their
+respective actions should be "committed", i.e. become globally visible.
+The span of time between two commit points is called a transaction.
+
+A conflict occurs when there is no consistent ordering.  The classical
+example is two threads both trying to change the value of the same
+global variable.  In that case, only one of them can be allowed to
+proceed, and the other one must be either paused or aborted (restarting
+the transaction).  If this occurs too often, parallelization fails.
+
+How much actual parallelization a multithreaded program can see is a bit
+subtle.  Basically, a program not using ``__pypy__.thread.atomic`` or
+eliding locks, or doing so for very short amounts of time, will
+parallelize almost freely (as long as it's not some artificial example
+where, say, all threads try to increase the same global counter and do
+nothing else).
+
+However, if the program requires longer transactions, it comes
+with less obvious rules.  The exact details may vary from version to
+version, too, until they are a bit more stabilized.  Here is an
+overview.
+
+Parallelization works as long as two principles are respected.  The
+first one is that the transactions must not *conflict* with each other.
+The most obvious sources of conflicts are threads that all increment a
+global shared counter, or that all store the result of their
+computations into the same list --- or, more subtly, that all ``pop()``
+the work to do from the same list, because that is also a mutation of
+the list.  (It is expected that some STM-aware library will eventually
+be designed to help with conflict problems, like an STM-aware queue.)
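+
+For instance, a hedged sketch of such a conflict-prone pattern, in the
+style of the ``transaction.add()`` example above (``compute()`` is a
+placeholder)::
+
+    results = []
+
+    def f(key, value):
+        # every transaction appends to the same list object, so any
+        # two of them running in parallel conflict with each other
+        results.append(compute(key, value))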
+
+A conflict occurs as follows: when a transaction commits (i.e. finishes
+successfully) it may cause other transactions that are still in progress
+to abort and retry.  This is a waste of CPU time, but even in the worst
+case scenario it is not worse than a GIL, because at least one
+transaction succeeds (so we get at worst N-1 CPUs doing useless jobs and
+1 CPU doing a job that commits successfully).
+
+Conflicts do occur, of course, and it is pointless to try to avoid them
+all.  For example they can be abundant during some warm-up phase.  What
+is important is to keep them rare enough in total.
+
+The second principle is to avoid long-running so-called "inevitable"
+transactions ("inevitable" is taken in the sense of "which cannot be
+avoided", i.e. transactions which cannot abort any more).  Transactions
+like that should only occur if you use ``__pypy__.thread.atomic``,
+generally because of I/O in atomic blocks.  They work, but the
+transaction is turned inevitable before the I/O is performed.  For all
+the remaining execution time of the atomic block, they will impede
+parallel work.  The best is to organize the code so that such operations
+are done completely outside ``__pypy__.thread.atomic``.
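+
+A hedged sketch of this reorganization, reusing the ``atomic`` context
+manager from above (``compute()`` and ``logfile`` are placeholders)::
+
+    # inevitable: the write() happens inside the atomic block, so the
+    # transaction is turned inevitable and impedes parallel work until
+    # the end of the block
+    with __pypy__.thread.atomic:
+        result = compute()
+        logfile.write('%r\n' % (result,))
+
+    # better: keep the I/O outside the atomic block
+    with __pypy__.thread.atomic:
+        result = compute()
+    logfile.write('%r\n' % (result,))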
+
+(This is related to the fact that blocking I/O operations are
+discouraged with Twisted, and if you really need them, you should do
+them on their own separate thread.)
+
+In case of lock elision, we don't get long-running inevitable
+transactions, but a different problem can occur: doing I/O cancels lock
+elision, and the lock turns into a real lock, preventing other threads
+from committing if they also need this lock.  (More about it when lock
+elision is implemented and tested.)
+
+
+
+Implementation
+==============
+
+XXX this section mostly empty for now
+
+
+Low-level statistics
+--------------------
+
+When a non-main thread finishes, you get low-level statistics printed to
+stderr, looking like this::
+
+      thread 0x7f73377fe600:
+          outside transaction          42182    0.506 s
+          run current                  85466    0.000 s
+          run committed                34262    3.178 s
+          run aborted write write       6982    0.083 s
+          run aborted write read         550    0.005 s
+          run aborted inevitable         388    0.010 s
+          run aborted other                0    0.000 s
+          wait free segment                0    0.000 s
+          wait write read                 78    0.027 s
+          wait inevitable                887    0.490 s
+          wait other                       0    0.000 s
+          sync commit soon                 1    0.000 s
+          bookkeeping                  51418    0.606 s
+          minor gc                    162970    1.135 s
+          major gc                         1    0.019 s
+          sync pause                   59173    1.738 s
+          longest recordered marker          0.000826 s
+          "File "x.py", line 5, in f"
+
+On each line, the first number is a counter, and the second number gives
+the associated time --- the amount of real time that the thread was in
+this state.  The sum of all the times should be equal to the total time
+between the thread's start and the thread's end.  The most important
+points are "run committed", which gives the amount of useful work, and
+"outside transaction", which should give the time spent e.g. in library
+calls (right now it seems to be larger than that; this remains to be
+investigated).  The
+various "run aborted" and "wait" entries are time lost due to
+conflicts_.  Everything else is overhead of various forms.  (Short-,
+medium- and long-term future work involves reducing this overhead :-)
+
+The last two lines are special; they are an internal marker read by
+``transactional_memory.print_abort_info()``.
+
+These statistics are not printed out for the main thread, for now.
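+
+For example, with a sketch like the following (``do_lots_of_work()``
+is a placeholder), a statistics block like the one above is printed to
+stderr when the worker thread finishes::
+
+    import threading
+
+    t = threading.Thread(target=do_lots_of_work)
+    t.start()
+    t.join()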
 
 
 Reference to implementation details
-===================================
+-----------------------------------
 
 The core of the implementation is in a separate C library called stmgc_,
 in the c7_ subdirectory.  Please see the `README.txt`_ for more
@@ -282,3 +519,15 @@
 .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/translator/stm/src_stm/stmgcintf.c
 .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/llsupport/stmrewrite.py
 .. __: https://bitbucket.org/pypy/pypy/raw/stmgc-c7/rpython/jit/backend/x86/assembler.py
+
+
+
+See also
+========
+
+See also
+https://bitbucket.org/pypy/pypy/raw/default/pypy/doc/project-ideas.rst
+(section about STM).
+
+
+.. include:: _ref.txt
diff --git a/pypy/doc/whatsnew-2.3.1.rst b/pypy/doc/whatsnew-2.3.1.rst
--- a/pypy/doc/whatsnew-2.3.1.rst
+++ b/pypy/doc/whatsnew-2.3.1.rst
@@ -9,3 +9,16 @@
 
 Support compilation with gcc-4.9
 
+Added support for the stdlib gdbm module via cffi
+
+Annotator cleanups
+
+.. branch: release-2.3.x
+
+.. branch: unify-call-ops
+
+.. branch: packaging
+Use argparse for packaging.py, and add third-party components to the
+LICENSE file.  Also mention that gdbm is GPL.
+Do not crash the packaging process on failure in CFFI or license-building;
+instead, complete the build step and return -1.
diff --git a/pypy/doc/whatsnew-head.rst b/pypy/doc/whatsnew-head.rst
--- a/pypy/doc/whatsnew-head.rst
+++ b/pypy/doc/whatsnew-head.rst
@@ -3,10 +3,6 @@
 =======================
 
 .. this is a revision shortly after release-2.3.x
-.. startrev: b2cc67adbaad
+.. startrev: 87fdc76bccb4
 
-Added support for the stdlib gdbm module via cffi
 
-Fixes for issues #1769, #1764, #1762, #1752
-
-Annotator cleanups
diff --git a/pypy/doc/windows.rst b/pypy/doc/windows.rst
--- a/pypy/doc/windows.rst
+++ b/pypy/doc/windows.rst
@@ -10,8 +10,14 @@
 64bit Windows.  See at the end of this page for what is missing
 for a full 64bit translation.
 
-To build pypy-c you need a C compiler.  Microsoft Visual Studio is
-preferred, but can also use the mingw32 port of gcc.
+To build pypy-c you need a working Python environment and a C compiler.
+It is possible to translate with CPython 2.6 or later, but this is not
+the preferred way, because it will take a lot longer to run --- depending
+on your architecture, between two and three times as long.  So head to
+`our downloads`_ and get the latest stable version.
+
+Microsoft Visual Studio is preferred as a compiler, but there are reports 
+of success with the mingw32 port of gcc.
 
 
 Translating PyPy with Visual Studio
@@ -34,10 +40,20 @@
 **Note:** PyPy is currently not supported for 64 bit Windows, and translation
 will fail in this case.
 
-The compiler is all you need to build pypy-c, but it will miss some
+Python and a C compiler are all you need to build pypy, but it will miss some
 modules that rely on third-party libraries.  See below how to get
 and build them.
 
+Please see the `non-windows instructions`_ for more information; in particular,
+note that translation is RAM-hungry.  A standard translation requires around
+4GB, so special preparations are necessary.  Alternatively, you can use the
+method described in the notes of the `build instructions`_ to reduce memory
+usage at the price of a slower translation::
+
+    set PYPY_GC_MAX_DELTA=200MB
+    pypy --jit loop_longevity=300 ../../rpython/bin/rpython -Ojit targetpypystandalone
+    set PYPY_GC_MAX_DELTA=
+
 Preping Windows for the Large Build
 -----------------------------------
 
@@ -52,9 +68,10 @@
 
 Then you need to execute::
 
-    editbin /largeaddressaware pypy.exe
+    editbin /largeaddressaware translator.exe
 
-on the pypy.exe file you compiled.
+where ``translator.exe`` is the pypy.exe or python.exe (CPython) you
+will use to translate with.
 
 Installing external packages
 ----------------------------
@@ -244,7 +261,9 @@
 .. _`msys for mingw`: http://sourceforge.net/projects/mingw-w64/files/External%20binary%20packages%20%28Win64%20hosted%29/MSYS%20%2832-bit%29   
 .. _`libffi source files`: http://sourceware.org/libffi/
 .. _`RPython translation toolchain`: translation.html
-
+.. _`our downloads`: http://pypy.org/download.html   
+.. _`non-windows instructions`: getting-started-python.html#translating-the-pypy-python-interpreter
+.. _`build instructions`: http://pypy.org/download.html#building-from-source
 
 What is missing for a full 64-bit translation
 ---------------------------------------------
diff --git a/pypy/interpreter/app_main.py b/pypy/interpreter/app_main.py
--- a/pypy/interpreter/app_main.py
+++ b/pypy/interpreter/app_main.py
@@ -85,7 +85,7 @@
             if softspace:
                 stdout.write('\n')
 
-    except SystemExit, e:
+    except SystemExit as e:
         handle_sys_exit(e)
     except:
         display_exception()
@@ -608,10 +608,9 @@
                 python_startup = readenv and os.getenv('PYTHONSTARTUP')
                 if python_startup:
                     try:
-                        f = open(python_startup)
-                        startup = f.read()
-                        f.close()
-                    except IOError, e:
+                        with open(python_startup) as f:
+                            startup = f.read()
+                    except IOError as e:
                         print >> sys.stderr, "Could not open PYTHONSTARTUP"
                         print >> sys.stderr, "IOError:", e
                     else:
@@ -667,7 +666,7 @@
                     args = (execfile, filename, mainmodule.__dict__)
             success = run_toplevel(*args)
 
-    except SystemExit, e:
+    except SystemExit as e:
         status = e.code
         if inspect_requested():
             display_exception()
@@ -678,10 +677,12 @@
     if inspect_requested():
         try:
             from _pypy_interact import interactive_console
-            irc_topic = readenv and os.getenv('PYPY_IRC_TOPIC')
+            pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info)
+            irc_topic = pypy_version_info[3] != 'final' or (
+                            readenv and os.getenv('PYPY_IRC_TOPIC'))
             success = run_toplevel(interactive_console, mainmodule,
                                    quiet=not irc_topic)
-        except SystemExit, e:
+        except SystemExit as e:
             status = e.code
         else:
             status = not success
@@ -731,10 +732,10 @@
     setup_bootstrap_path(executable)
     try:
         cmdline = parse_command_line(argv)
-    except CommandLineError, e:
+    except CommandLineError as e:
         print_error(str(e))
         return 2
-    except SystemExit, e:
+    except SystemExit as e:
         return e.code or 0
     setup_and_fix_paths(**cmdline)
     return run_command_line(**cmdline)
diff --git a/pypy/interpreter/test/test_app_main.py b/pypy/interpreter/test/test_app_main.py
--- a/pypy/interpreter/test/test_app_main.py
+++ b/pypy/interpreter/test/test_app_main.py
@@ -7,12 +7,8 @@
 from rpython.tool.udir import udir
 from contextlib import contextmanager
 from pypy.conftest import pypydir
-from pypy.module.sys.version import PYPY_VERSION
 from lib_pypy._pypy_interact import irc_header
 
-is_release = PYPY_VERSION[3] == "final"
-
-
 banner = sys.version.splitlines()[0]
 
 app_main = os.path.join(os.path.realpath(os.path.dirname(__file__)), os.pardir, 'app_main.py')
@@ -246,10 +242,6 @@
         child = self.spawn([])
         child.expect('Python ')   # banner
         child.expect('>>> ')      # prompt
-        if is_release:
-            assert irc_header not in child.before
-        else:
-            assert irc_header in child.before
         child.sendline('[6*7]')
         child.expect(re.escape('[42]'))
         child.sendline('def f(x):')
@@ -269,6 +261,22 @@
         child.sendline("'' in sys.path")
         child.expect("True")
 
+    def test_yes_irc_topic(self, monkeypatch):
+        monkeypatch.setenv('PYPY_IRC_TOPIC', '1')
+        child = self.spawn([])
+        child.expect(irc_header)   # banner
+
+    def test_maybe_irc_topic(self):
+        import sys
+        pypy_version_info = getattr(sys, 'pypy_version_info', sys.version_info)
+        irc_topic = pypy_version_info[3] != 'final'
+        child = self.spawn([])
+        child.expect('>>>')   # banner
+        if irc_topic:
+            assert irc_header in child.before
+        else:    
+            assert irc_header not in child.before
+
     def test_help(self):
         # test that -h prints the usage, including the name of the executable
         # which should be /full/path/to/app_main.py in this case
@@ -929,6 +937,7 @@
         # ----------------------------------------
         from pypy.module.sys.version import CPYTHON_VERSION, PYPY_VERSION
         cpy_ver = '%d.%d' % CPYTHON_VERSION[:2]
+        from lib_pypy._pypy_interact import irc_header
 
         goal_dir = os.path.dirname(app_main)
         # build a directory hierarchy like which contains both bin/pypy-c and
@@ -948,6 +957,7 @@
         self.w_fake_exe = self.space.wrap(str(fake_exe))
         self.w_expected_path = self.space.wrap(expected_path)
         self.w_trunkdir = self.space.wrap(os.path.dirname(pypydir))
+        self.w_is_release = self.space.wrap(PYPY_VERSION[3] == "final")
 
         self.w_tmp_dir = self.space.wrap(tmp_dir)
 
@@ -1017,3 +1027,4 @@
             # assert it did not crash
         finally:
             sys.path[:] = old_sys_path
+    
diff --git a/pypy/module/__builtin__/__init__.py b/pypy/module/__builtin__/__init__.py
--- a/pypy/module/__builtin__/__init__.py
+++ b/pypy/module/__builtin__/__init__.py
@@ -33,7 +33,7 @@
 
     interpleveldefs = {
         # constants
-        '__debug__'     : '(space.w_True)',      # XXX
+        '__debug__'     : '(space.w_True)',
         'None'          : '(space.w_None)',
         'False'         : '(space.w_False)',
         'True'          : '(space.w_True)',
diff --git a/pypy/module/__builtin__/app_io.py b/pypy/module/__builtin__/app_io.py
--- a/pypy/module/__builtin__/app_io.py
+++ b/pypy/module/__builtin__/app_io.py
@@ -4,6 +4,7 @@
 """
 
 import sys
+from _ast import PyCF_ACCEPT_NULL_BYTES
 
 def execfile(filename, glob=None, loc=None):
     """execfile(filename[, globals[, locals]])
@@ -24,7 +25,8 @@
     finally:
         f.close()
     #Don't exec the source directly, as this loses the filename info
-    co = compile(source.rstrip()+"\n", filename, 'exec')
+    co = compile(source.rstrip()+"\n", filename, 'exec',
+                 PyCF_ACCEPT_NULL_BYTES)
     exec co in glob, loc
 
 def _write_prompt(stdout, prompt):
diff --git a/pypy/module/__builtin__/test/test_builtin.py b/pypy/module/__builtin__/test/test_builtin.py
--- a/pypy/module/__builtin__/test/test_builtin.py
+++ b/pypy/module/__builtin__/test/test_builtin.py
@@ -1,7 +1,10 @@
 import sys
 
+from rpython.tool.udir import udir
+
 class AppTestBuiltinApp:
     def setup_class(cls):
+        space = cls.space
         class X(object):
             def __eq__(self, other):
                 raise OverflowError
@@ -11,18 +14,25 @@
         try:
             d[X()]

