[pypy-svn] pypy jit-short-preamble: hg merge default

hakanardo commits-noreply at bitbucket.org
Fri Jan 14 18:51:34 CET 2011

Author: Hakan Ardo <hakan at debian.org>
Branch: jit-short-preamble
Changeset: r40697:52db1aac6e5f
Date: 2011-01-14 18:51 +0100

Log:	hg merge default

diff --git a/pypy/jit/metainterp/optimizeopt/unroll.py b/pypy/jit/metainterp/optimizeopt/unroll.py
--- a/pypy/jit/metainterp/optimizeopt/unroll.py
+++ b/pypy/jit/metainterp/optimizeopt/unroll.py
@@ -9,6 +9,68 @@
 from pypy.jit.metainterp.history import make_hashable_int
 from pypy.jit.codewriter.effectinfo import EffectInfo
+# Assumptions
+# ===========
+# For this to work some assumptions had to be made about the
+# optimizations performed. At least for the optimizations that are
+# allowed to operate across the loop boundaries. To enforce this, the
+# optimizer chain is recreated at the end of the preamble and only the
+# state of the optimizations that fulfill those assumptions are kept.
+# Since part of this state is stored in virtuals all OptValue objects
+# are also recreated to allow virtuals not supported to be forced.
+# First of all, the optimizations are not allowed to introduce new
+# boxes. It is the unoptimized version of the trace that is inlined to 
+# form the second iteration of the loop. Otherwise the
+# state of the virtuals would not be updated correctly. Whenever some
+# box from the first iteration is reused in the second iteration, it
+# is added to the input arguments of the loop as well as to the
+# arguments of the jump at the end of the preamble. This means that
+# inlining the jump from the unoptimized trace will not work since it
+# contains too few arguments.  Instead the jump at the end of the
+# preamble is inlined. If the arguments of that jump contain boxes
+# that were produced by one of the optimizations, and thus never seen
+# by the inliner, the inliner will not be able to inline them. There
+# is no way of knowing what these boxes are supposed to contain in the
+# third iteration.
+# The second assumption is that the state of the optimizer should be the
+# same after the second iteration as after the first. This has forced
+# us to disable store sinking across loop boundaries. Consider the
+# following trace
+#         [p1, p2]
+#         i1 = getfield_gc(p1, descr=nextdescr)
+#         i2 = int_sub(i1, 1)
+#         i2b = int_is_true(i2)
+#         guard_true(i2b) []
+#         setfield_gc(p2, i2, descr=nextdescr)
+#         p3 = new_with_vtable(ConstClass(node_vtable))
+#         jump(p2, p3)
+# At the start of the preamble, p1 and p2 will be pointers. The
+# setfield_gc will be removed by the store sinking heap optimizer, and
+# p3 will become a virtual. Jumping to the loop will make p1 a pointer
+# and p2 a virtual at the start of the loop. The setfield_gc will now
+# be absorbed into the virtual p2 and never seen by the heap
+# optimizer. At the end of the loop both p2 and p3 are virtuals, but
+# the loop needs p2 to be a pointer to be able to call itself. So it
+# is forced producing the operations 
+#         p2 = new_with_vtable(ConstClass(node_vtable))
+#         setfield_gc(p2, i2, descr=nextdescr)
+# In this case the setfield_gc is not store sinked, which means we are
+# not in the same state at the end of the loop as at the end of the
+# preamble. When we now call the loop again, the first 4 operations of
+# the trace were optimized under the wrong assumption that the
+# setfield_gc was store sinked which could lead to errors. In this
+# case what would happen is that it would be inserted once more in
+# front of the guard. 
 # FIXME: Introduce some VirtualOptimizer super class instead
 def optimize_unroll(metainterp_sd, loop, optimizations):

diff --git a/pypy/jit/metainterp/test/test_basic.py b/pypy/jit/metainterp/test/test_basic.py
--- a/pypy/jit/metainterp/test/test_basic.py
+++ b/pypy/jit/metainterp/test/test_basic.py
@@ -342,7 +342,7 @@
         self.check_loops({'guard_true': 1,
                           'int_add': 2, 'int_sub': 1, 'int_gt': 1,
-                          'int_mul': 1,
+                          'int_lshift': 1,
                           'jump': 1})
     def test_loop_invariant_mul_bridge1(self):

diff --git a/pypy/jit/metainterp/test/test_optimizeopt.py b/pypy/jit/metainterp/test/test_optimizeopt.py
--- a/pypy/jit/metainterp/test/test_optimizeopt.py
+++ b/pypy/jit/metainterp/test/test_optimizeopt.py
@@ -4105,6 +4105,25 @@
         self.optimize_loop(ops, expected)
+    def test_mul_to_lshift(self):
+        ops = """
+        [i1, i2]
+        i3 = int_mul(i1, 2)
+        i4 = int_mul(2, i2)
+        i5 = int_mul(i1, 32)
+        i6 = int_mul(i1, i2)
+        jump(i5, i6)
+        """
+        expected = """
+        [i1, i2]
+        i3 = int_lshift(i1, 1)
+        i4 = int_lshift(i2, 1)
+        i5 = int_lshift(i1, 5)
+        i6 = int_mul(i1, i2)
+        jump(i5, i6)
+        """
+        self.optimize_loop(ops, expected)
     def test_lshift_rshift(self):
         ops = """
         [i1, i2, i2b, i1b]

More information about the Pypy-commit mailing list