[pypy-svn] r76864 - in pypy/branch/jit-bounds/pypy/jit/metainterp: optimizeopt test
hakanardo at codespeak.net
hakanardo at codespeak.net
Sat Sep 4 12:16:15 CEST 2010
Author: hakanardo
Date: Sat Sep 4 12:16:13 2010
New Revision: 76864
Modified:
pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py
pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py
pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py
pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py
pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py
pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py
pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py
Log:
separated out virtualization and heap optimizations
Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py
==============================================================================
--- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py (original)
+++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/__init__.py Sat Sep 4 12:16:13 2010
@@ -1,6 +1,8 @@
from optimizer import Optimizer
from rewrite import OptRewrite
from intbounds import OptIntBounds
+from virtualize import OptVirtualize
+from heap import OptHeap
def optimize_loop_1(metainterp_sd, loop, virtuals=True):
"""Optimize loop.operations to make it match the input of loop.specnodes
@@ -10,10 +12,10 @@
"""
optimizations = [OptIntBounds(),
OptRewrite(),
+ OptVirtualize(),
+ OptHeap(),
]
- optimizer = Optimizer(metainterp_sd, loop, optimizations)
- if virtuals:
- optimizer.setup_virtuals_and_constants()
+ optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals)
optimizer.propagate_all_forward()
def optimize_bridge_1(metainterp_sd, bridge):
Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py
==============================================================================
--- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py (original)
+++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/heap.py Sat Sep 4 12:16:13 2010
@@ -1,6 +1,267 @@
+from pypy.jit.metainterp.optimizeutil import _findall
+from pypy.jit.metainterp.resoperation import rop, ResOperation
+from pypy.rlib.objectmodel import we_are_translated
+
from optimizer import Optimization
-class Heap(Optimization):
+class CachedArrayItems(object):
+ def __init__(self):
+ self.fixed_index_items = {}
+ self.var_index_item = None
+ self.var_index_indexvalue = None
+
+
+class OptHeap(Optimization):
"""Cache repeated heap accesses"""
- # FIXME: Move here
+ def __init__(self):
+ # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}}
+ self.cached_fields = {}
+ # cached array items: {descr: CachedArrayItems}
+ self.cached_arrayitems = {}
+ # lazily written setfields (at most one per descr): {descr: op}
+ self.lazy_setfields = {}
+ self.lazy_setfields_descrs = [] # keys (at least) of previous dict
+
+ def clean_caches(self):
+ self.cached_fields.clear()
+ self.cached_arrayitems.clear()
+
+ def cache_field_value(self, descr, value, fieldvalue, write=False):
+ if write:
+ # when seeing a setfield, we have to clear the cache for the same
+ # field on any other structure, just in case they are aliasing
+ # each other
+ d = self.cached_fields[descr] = {}
+ else:
+ d = self.cached_fields.setdefault(descr, {})
+ d[value] = fieldvalue
+
+ def read_cached_field(self, descr, value):
+ # XXX self.cached_fields and self.lazy_setfields should probably
+ # be merged somehow
+ d = self.cached_fields.get(descr, None)
+ if d is None:
+ op = self.lazy_setfields.get(descr, None)
+ if op is None:
+ return None
+ return self.getvalue(op.args[1])
+ return d.get(value, None)
+
+ def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False):
+ d = self.cached_arrayitems.get(descr, None)
+ if d is None:
+ d = self.cached_arrayitems[descr] = {}
+ cache = d.get(value, None)
+ if cache is None:
+ cache = d[value] = CachedArrayItems()
+ indexbox = self.get_constant_box(indexvalue.box)
+ if indexbox is not None:
+ index = indexbox.getint()
+ if write:
+ for value, othercache in d.iteritems():
+ # fixed index, clean the variable index cache, in case the
+ # index is the same
+ othercache.var_index_indexvalue = None
+ othercache.var_index_item = None
+ try:
+ del othercache.fixed_index_items[index]
+ except KeyError:
+ pass
+ cache.fixed_index_items[index] = fieldvalue
+ else:
+ if write:
+ for value, othercache in d.iteritems():
+ # variable index, clear all caches for this descr
+ othercache.var_index_indexvalue = None
+ othercache.var_index_item = None
+ othercache.fixed_index_items.clear()
+ cache.var_index_indexvalue = indexvalue
+ cache.var_index_item = fieldvalue
+
+ def read_cached_arrayitem(self, descr, value, indexvalue):
+ d = self.cached_arrayitems.get(descr, None)
+ if d is None:
+ return None
+ cache = d.get(value, None)
+ if cache is None:
+ return None
+ indexbox = self.get_constant_box(indexvalue.box)
+ if indexbox is not None:
+ return cache.fixed_index_items.get(indexbox.getint(), None)
+ elif cache.var_index_indexvalue is indexvalue:
+ return cache.var_index_item
+ return None
+
+ def emit_operation(self, op):
+ self.emitting_operation(op)
+ self.next_optimization.propagate_forward(op)
+
+ def emitting_operation(self, op):
+ if op.has_no_side_effect():
+ return
+ if op.is_ovf():
+ return
+ if op.is_guard():
+ self.optimizer.pendingfields = self.force_lazy_setfields_for_guard()
+ return
+ opnum = op.opnum
+ if (opnum == rop.SETFIELD_GC or
+ opnum == rop.SETARRAYITEM_GC or
+ opnum == rop.DEBUG_MERGE_POINT):
+ return
+ assert opnum != rop.CALL_PURE
+ if (opnum == rop.CALL or
+ opnum == rop.CALL_MAY_FORCE or
+ opnum == rop.CALL_ASSEMBLER):
+ if opnum == rop.CALL_ASSEMBLER:
+ effectinfo = None
+ else:
+ effectinfo = op.descr.get_extra_info()
+ if effectinfo is not None:
+ # XXX we can get the wrong complexity here, if the lists
+ # XXX stored on effectinfo are large
+ for fielddescr in effectinfo.readonly_descrs_fields:
+ self.force_lazy_setfield(fielddescr)
+ for fielddescr in effectinfo.write_descrs_fields:
+ self.force_lazy_setfield(fielddescr)
+ try:
+ del self.cached_fields[fielddescr]
+ except KeyError:
+ pass
+ for arraydescr in effectinfo.write_descrs_arrays:
+ try:
+ del self.cached_arrayitems[arraydescr]
+ except KeyError:
+ pass
+ if effectinfo.check_forces_virtual_or_virtualizable():
+ vrefinfo = self.optimizer.metainterp_sd.virtualref_info
+ self.force_lazy_setfield(vrefinfo.descr_forced)
+ # ^^^ we only need to force this field; the other fields
+ # of virtualref_info and virtualizable_info are not gcptrs.
+ return
+ self.force_all_lazy_setfields()
+ elif op.is_final() or (not we_are_translated() and
+ op.opnum < 0): # escape() operations
+ self.force_all_lazy_setfields()
+ self.clean_caches()
+
+
+ def force_lazy_setfield(self, descr, before_guard=False):
+ try:
+ op = self.lazy_setfields[descr]
+ except KeyError:
+ return
+ del self.lazy_setfields[descr]
+ ###self.optimizer._emit_operation(op)
+ self.next_optimization.propagate_forward(op)
+ #
+ # hackish: reverse the order of the last two operations if it makes
+ # sense to avoid a situation like "int_eq/setfield_gc/guard_true",
+ # which the backend (at least the x86 backend) does not handle well.
+ newoperations = self.optimizer.newoperations
+ if before_guard and len(newoperations) >= 2:
+ lastop = newoperations[-1]
+ prevop = newoperations[-2]
+ # - is_comparison() for cases like "int_eq/setfield_gc/guard_true"
+ # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced"
+ # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow"
+ opnum = prevop.opnum
+ if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE
+ or prevop.is_ovf())
+ and prevop.result not in lastop.args):
+ newoperations[-2] = lastop
+ newoperations[-1] = prevop
+
+ def force_all_lazy_setfields(self):
+ if len(self.lazy_setfields_descrs) > 0:
+ for descr in self.lazy_setfields_descrs:
+ self.force_lazy_setfield(descr)
+ del self.lazy_setfields_descrs[:]
+
+ def force_lazy_setfields_for_guard(self):
+ pendingfields = []
+ for descr in self.lazy_setfields_descrs:
+ try:
+ op = self.lazy_setfields[descr]
+ except KeyError:
+ continue
+ # the only really interesting case that we need to handle in the
+ # guards' resume data is that of a virtual object that is stored
+ # into a field of a non-virtual object.
+ value = self.getvalue(op.args[0])
+ assert not value.is_virtual() # it must be a non-virtual
+ fieldvalue = self.getvalue(op.args[1])
+ if fieldvalue.is_virtual():
+ # this is the case that we leave to resume.py
+ pendingfields.append((descr, value.box,
+ fieldvalue.get_key_box()))
+ else:
+ self.force_lazy_setfield(descr, before_guard=True)
+ return pendingfields
+
+ def force_lazy_setfield_if_necessary(self, op, value, write=False):
+ try:
+ op1 = self.lazy_setfields[op.descr]
+ except KeyError:
+ if write:
+ self.lazy_setfields_descrs.append(op.descr)
+ else:
+ if self.getvalue(op1.args[0]) is not value:
+ self.force_lazy_setfield(op.descr)
+
+ def optimize_GETFIELD_GC(self, op):
+ value = self.getvalue(op.args[0])
+ self.force_lazy_setfield_if_necessary(op, value)
+ # check if the field was read from another getfield_gc just before
+ # or has been written to recently
+ fieldvalue = self.read_cached_field(op.descr, value)
+ if fieldvalue is not None:
+ self.make_equal_to(op.result, fieldvalue)
+ return
+ # default case: produce the operation
+ value.ensure_nonnull()
+ ###self.optimizer.optimize_default(op)
+ self.emit_operation(op) # FIXME: These might need constant propagation?
+ # then remember the result of reading the field
+ fieldvalue = self.getvalue(op.result)
+ self.cache_field_value(op.descr, value, fieldvalue)
+
+ def optimize_SETFIELD_GC(self, op):
+ value = self.getvalue(op.args[0])
+ fieldvalue = self.getvalue(op.args[1])
+ self.force_lazy_setfield_if_necessary(op, value, write=True)
+ self.lazy_setfields[op.descr] = op
+ # remember the result of future reads of the field
+ self.cache_field_value(op.descr, value, fieldvalue, write=True)
+
+ def optimize_GETARRAYITEM_GC(self, op):
+ value = self.getvalue(op.args[0])
+ indexvalue = self.getvalue(op.args[1])
+ fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue)
+ if fieldvalue is not None:
+ self.make_equal_to(op.result, fieldvalue)
+ return
+ ###self.optimizer.optimize_default(op)
+ self.emit_operation(op) # FIXME: These might need constant propagation?
+ fieldvalue = self.getvalue(op.result)
+ self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue)
+
+ def optimize_SETARRAYITEM_GC(self, op):
+ self.emit_operation(op)
+ value = self.getvalue(op.args[0])
+ fieldvalue = self.getvalue(op.args[2])
+ indexvalue = self.getvalue(op.args[1])
+ self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue,
+ write=True)
+
+ def propagate_forward(self, op):
+ opnum = op.opnum
+ for value, func in optimize_ops:
+ if opnum == value:
+ func(self, op)
+ break
+ else:
+ self.emit_operation(op)
+
+optimize_ops = _findall(OptHeap, 'optimize_')
Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py
==============================================================================
--- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py (original)
+++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/optimizer.py Sat Sep 4 12:16:13 2010
@@ -4,17 +4,11 @@
from pypy.jit.metainterp.resoperation import rop, ResOperation
from pypy.jit.metainterp import jitprof
from pypy.jit.metainterp.executor import execute_nonspec
-from pypy.jit.metainterp.specnode import SpecNode, NotSpecNode, ConstantSpecNode
-from pypy.jit.metainterp.specnode import AbstractVirtualStructSpecNode
-from pypy.jit.metainterp.specnode import VirtualInstanceSpecNode
-from pypy.jit.metainterp.specnode import VirtualArraySpecNode
-from pypy.jit.metainterp.specnode import VirtualStructSpecNode
from pypy.jit.metainterp.optimizeutil import _findall, sort_descrs
from pypy.jit.metainterp.optimizeutil import descrlist_dict
from pypy.jit.metainterp.optimizeutil import InvalidLoop, args_dict
from pypy.jit.metainterp import resume, compile
from pypy.jit.metainterp.typesystem import llhelper, oohelper
-from pypy.rlib.objectmodel import we_are_translated
from pypy.rpython.lltypesystem import lltype
from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int
from intutils import IntBound, IntUnbounded
@@ -143,250 +137,6 @@
llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL)
oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL)
-
-class AbstractVirtualValue(OptValue):
- _attrs_ = ('optimizer', 'keybox', 'source_op', '_cached_vinfo')
- box = None
- level = LEVEL_NONNULL
- _cached_vinfo = None
-
- def __init__(self, optimizer, keybox, source_op=None):
- self.optimizer = optimizer
- self.keybox = keybox # only used as a key in dictionaries
- self.source_op = source_op # the NEW_WITH_VTABLE/NEW_ARRAY operation
- # that builds this box
-
- def get_key_box(self):
- if self.box is None:
- return self.keybox
- return self.box
-
- def force_box(self):
- if self.box is None:
- self.optimizer.forget_numberings(self.keybox)
- self._really_force()
- return self.box
-
- def make_virtual_info(self, modifier, fieldnums):
- vinfo = self._cached_vinfo
- if vinfo is not None and vinfo.equals(fieldnums):
- return vinfo
- vinfo = self._make_virtual(modifier)
- vinfo.set_content(fieldnums)
- self._cached_vinfo = vinfo
- return vinfo
-
- def _make_virtual(self, modifier):
- raise NotImplementedError("abstract base")
-
- def _really_force(self):
- raise NotImplementedError("abstract base")
-
-def get_fielddescrlist_cache(cpu):
- if not hasattr(cpu, '_optimizeopt_fielddescrlist_cache'):
- result = descrlist_dict()
- cpu._optimizeopt_fielddescrlist_cache = result
- return result
- return cpu._optimizeopt_fielddescrlist_cache
-get_fielddescrlist_cache._annspecialcase_ = "specialize:memo"
-
-class AbstractVirtualStructValue(AbstractVirtualValue):
- _attrs_ = ('_fields', '_cached_sorted_fields')
-
- def __init__(self, optimizer, keybox, source_op=None):
- AbstractVirtualValue.__init__(self, optimizer, keybox, source_op)
- self._fields = {}
- self._cached_sorted_fields = None
-
- def getfield(self, ofs, default):
- return self._fields.get(ofs, default)
-
- def setfield(self, ofs, fieldvalue):
- assert isinstance(fieldvalue, OptValue)
- self._fields[ofs] = fieldvalue
-
- def _really_force(self):
- assert self.source_op is not None
- # ^^^ This case should not occur any more (see test_bug_3).
- #
- newoperations = self.optimizer.newoperations
- newoperations.append(self.source_op)
- self.box = box = self.source_op.result
- #
- iteritems = self._fields.iteritems()
- if not we_are_translated(): #random order is fine, except for tests
- iteritems = list(iteritems)
- iteritems.sort(key = lambda (x,y): x.sort_key())
- for ofs, value in iteritems:
- if value.is_null():
- continue
- subbox = value.force_box()
- op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
- descr=ofs)
- newoperations.append(op)
- self._fields = None
-
- def _get_field_descr_list(self):
- _cached_sorted_fields = self._cached_sorted_fields
- if (_cached_sorted_fields is not None and
- len(self._fields) == len(_cached_sorted_fields)):
- lst = self._cached_sorted_fields
- else:
- lst = self._fields.keys()
- sort_descrs(lst)
- cache = get_fielddescrlist_cache(self.optimizer.cpu)
- result = cache.get(lst, None)
- if result is None:
- cache[lst] = lst
- else:
- lst = result
- # store on self, to not have to repeatedly get it from the global
- # cache, which involves sorting
- self._cached_sorted_fields = lst
- return lst
-
- def get_args_for_fail(self, modifier):
- if self.box is None and not modifier.already_seen_virtual(self.keybox):
- # checks for recursion: it is False unless
- # we have already seen the very same keybox
- lst = self._get_field_descr_list()
- fieldboxes = [self._fields[ofs].get_key_box() for ofs in lst]
- modifier.register_virtual_fields(self.keybox, fieldboxes)
- for ofs in lst:
- fieldvalue = self._fields[ofs]
- fieldvalue.get_args_for_fail(modifier)
-
-
-class VirtualValue(AbstractVirtualStructValue):
- level = LEVEL_KNOWNCLASS
-
- def __init__(self, optimizer, known_class, keybox, source_op=None):
- AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op)
- assert isinstance(known_class, Const)
- self.known_class = known_class
-
- def _make_virtual(self, modifier):
- fielddescrs = self._get_field_descr_list()
- return modifier.make_virtual(self.known_class, fielddescrs)
-
-class VStructValue(AbstractVirtualStructValue):
-
- def __init__(self, optimizer, structdescr, keybox, source_op=None):
- AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op)
- self.structdescr = structdescr
-
- def _make_virtual(self, modifier):
- fielddescrs = self._get_field_descr_list()
- return modifier.make_vstruct(self.structdescr, fielddescrs)
-
-class VArrayValue(AbstractVirtualValue):
-
- def __init__(self, optimizer, arraydescr, size, keybox, source_op=None):
- AbstractVirtualValue.__init__(self, optimizer, keybox, source_op)
- self.arraydescr = arraydescr
- self.constvalue = optimizer.new_const_item(arraydescr)
- self._items = [self.constvalue] * size
-
- def getlength(self):
- return len(self._items)
-
- def getitem(self, index):
- res = self._items[index]
- return res
-
- def setitem(self, index, itemvalue):
- assert isinstance(itemvalue, OptValue)
- self._items[index] = itemvalue
-
- def _really_force(self):
- assert self.source_op is not None
- newoperations = self.optimizer.newoperations
- newoperations.append(self.source_op)
- self.box = box = self.source_op.result
- for index in range(len(self._items)):
- subvalue = self._items[index]
- if subvalue is not self.constvalue:
- if subvalue.is_null():
- continue
- subbox = subvalue.force_box()
- op = ResOperation(rop.SETARRAYITEM_GC,
- [box, ConstInt(index), subbox], None,
- descr=self.arraydescr)
- newoperations.append(op)
-
- def get_args_for_fail(self, modifier):
- if self.box is None and not modifier.already_seen_virtual(self.keybox):
- # checks for recursion: it is False unless
- # we have already seen the very same keybox
- itemboxes = []
- for itemvalue in self._items:
- itemboxes.append(itemvalue.get_key_box())
- modifier.register_virtual_fields(self.keybox, itemboxes)
- for itemvalue in self._items:
- if itemvalue is not self.constvalue:
- itemvalue.get_args_for_fail(modifier)
-
- def _make_virtual(self, modifier):
- return modifier.make_varray(self.arraydescr)
-
-class __extend__(SpecNode):
- def setup_virtual_node(self, optimizer, box, newinputargs):
- raise NotImplementedError
- def teardown_virtual_node(self, optimizer, value, newexitargs):
- raise NotImplementedError
-
-class __extend__(NotSpecNode):
- def setup_virtual_node(self, optimizer, box, newinputargs):
- newinputargs.append(box)
- def teardown_virtual_node(self, optimizer, value, newexitargs):
- newexitargs.append(value.force_box())
-
-class __extend__(ConstantSpecNode):
- def setup_virtual_node(self, optimizer, box, newinputargs):
- optimizer.make_constant(box, self.constbox)
- def teardown_virtual_node(self, optimizer, value, newexitargs):
- pass
-
-class __extend__(AbstractVirtualStructSpecNode):
- def setup_virtual_node(self, optimizer, box, newinputargs):
- vvalue = self._setup_virtual_node_1(optimizer, box)
- for ofs, subspecnode in self.fields:
- subbox = optimizer.new_box(ofs)
- subspecnode.setup_virtual_node(optimizer, subbox, newinputargs)
- vvaluefield = optimizer.getvalue(subbox)
- vvalue.setfield(ofs, vvaluefield)
- def _setup_virtual_node_1(self, optimizer, box):
- raise NotImplementedError
- def teardown_virtual_node(self, optimizer, value, newexitargs):
- assert value.is_virtual()
- for ofs, subspecnode in self.fields:
- subvalue = value.getfield(ofs, optimizer.new_const(ofs))
- subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs)
-
-class __extend__(VirtualInstanceSpecNode):
- def _setup_virtual_node_1(self, optimizer, box):
- return optimizer.make_virtual(self.known_class, box)
-
-class __extend__(VirtualStructSpecNode):
- def _setup_virtual_node_1(self, optimizer, box):
- return optimizer.make_vstruct(self.typedescr, box)
-
-class __extend__(VirtualArraySpecNode):
- def setup_virtual_node(self, optimizer, box, newinputargs):
- vvalue = optimizer.make_varray(self.arraydescr, len(self.items), box)
- for index in range(len(self.items)):
- subbox = optimizer.new_box_item(self.arraydescr)
- subspecnode = self.items[index]
- subspecnode.setup_virtual_node(optimizer, subbox, newinputargs)
- vvalueitem = optimizer.getvalue(subbox)
- vvalue.setitem(index, vvalueitem)
- def teardown_virtual_node(self, optimizer, value, newexitargs):
- assert value.is_virtual()
- for index in range(len(self.items)):
- subvalue = value.getitem(index)
- subspecnode = self.items[index]
- subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs)
-
class Optimization(object):
def propagate_forward(self, op):
raise NotImplementedError
@@ -394,6 +144,7 @@
def emit_operation(self, op):
self.next_optimization.propagate_forward(op)
+ # FIXME: Move some of these here?
def getvalue(self, box):
return self.optimizer.getvalue(box)
@@ -406,6 +157,21 @@
def make_equal_to(self, box, value):
return self.optimizer.make_equal_to(box, value)
+ def get_constant_box(self, box):
+ return self.optimizer.get_constant_box(box)
+
+ def new_box(self, fieldofs):
+ return self.optimizer.new_box(fieldofs)
+
+ def new_const(self, fieldofs):
+ return self.optimizer.new_const(fieldofs)
+
+ def new_box_item(self, arraydescr):
+ return self.optimizer.new_box_item(arraydescr)
+
+ def new_const_item(self, arraydescr):
+ return self.optimizer.new_const_item(arraydescr)
+
def pure(self, opnum, args, result):
op = ResOperation(opnum, args, result)
self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op
@@ -416,20 +182,23 @@
def skip_nextop(self):
self.optimizer.i += 1
+ def setup(self, virtuals):
+ pass
+
class Optimizer(Optimization):
- def __init__(self, metainterp_sd, loop, optimizations=[]):
+ def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True):
self.metainterp_sd = metainterp_sd
self.cpu = metainterp_sd.cpu
self.loop = loop
self.values = {}
self.interned_refs = self.cpu.ts.new_ref_dict()
self.resumedata_memo = resume.ResumeDataLoopMemo(metainterp_sd)
- self.heap_op_optimizer = HeapOpOptimizer(self)
self.bool_boxes = {}
self.loop_invariant_results = {}
self.pure_operations = args_dict()
self.producer = {}
+ self.pendingfields = []
if len(optimizations) == 0:
self.first_optimization = self
@@ -440,6 +209,7 @@
optimizations[-1].next_optimization = self
for o in optimizations:
o.optimizer = self
+ o.setup(virtuals)
def forget_numberings(self, virtualbox):
self.metainterp_sd.profiler.count(jitprof.OPT_FORCINGS)
@@ -488,21 +258,6 @@
def make_constant_int(self, box, intvalue):
self.make_constant(box, ConstInt(intvalue))
- def make_virtual(self, known_class, box, source_op=None):
- vvalue = VirtualValue(self, known_class, box, source_op)
- self.make_equal_to(box, vvalue)
- return vvalue
-
- def make_varray(self, arraydescr, size, box, source_op=None):
- vvalue = VArrayValue(self, arraydescr, size, box, source_op)
- self.make_equal_to(box, vvalue)
- return vvalue
-
- def make_vstruct(self, structdescr, box, source_op=None):
- vvalue = VStructValue(self, structdescr, box, source_op)
- self.make_equal_to(box, vvalue)
- return vvalue
-
def new_ptr_box(self):
return self.cpu.ts.BoxRef()
@@ -538,25 +293,13 @@
else:
return CVAL_ZERO
- # ----------
-
- def setup_virtuals_and_constants(self):
- inputargs = self.loop.inputargs
- specnodes = self.loop.token.specnodes
- assert len(inputargs) == len(specnodes)
- newinputargs = []
- for i in range(len(inputargs)):
- specnodes[i].setup_virtual_node(self, inputargs[i], newinputargs)
- self.loop.inputargs = newinputargs
-
- # ----------
-
def propagate_all_forward(self):
self.exception_might_have_happened = False
self.newoperations = []
self.i = 0
while self.i < len(self.loop.operations):
op = self.loop.operations[self.i]
+ #print "OP: %s" % op
self.first_optimization.propagate_forward(op)
self.i += 1
self.loop.operations = self.newoperations
@@ -572,10 +315,11 @@
break
else:
self.optimize_default(op)
+ #print '\n'.join([str(o) for o in self.newoperations]) + '\n---\n'
def emit_operation(self, op):
- self.heap_op_optimizer.emitting_operation(op)
+ ###self.heap_op_optimizer.emitting_operation(op)
self._emit_operation(op)
def _emit_operation(self, op):
@@ -595,11 +339,11 @@
self.newoperations.append(op)
def store_final_boxes_in_guard(self, op):
- pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard()
+ ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard()
descr = op.descr
assert isinstance(descr, compile.ResumeGuardDescr)
modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo)
- newboxes = modifier.finish(self.values, pendingfields)
+ newboxes = modifier.finish(self.values, self.pendingfields)
if len(newboxes) > self.metainterp_sd.options.failargs_limit: # XXX be careful here
compile.giveup()
descr.store_final_boxes(op, newboxes)
@@ -667,125 +411,11 @@
# otherwise, the operation remains
self.emit_operation(op)
- def optimize_JUMP(self, op):
- orgop = self.loop.operations[-1]
- exitargs = []
- target_loop_token = orgop.descr
- assert isinstance(target_loop_token, LoopToken)
- specnodes = target_loop_token.specnodes
- assert len(op.args) == len(specnodes)
- for i in range(len(specnodes)):
- value = self.getvalue(op.args[i])
- specnodes[i].teardown_virtual_node(self, value, exitargs)
- op.args = exitargs[:]
- self.emit_operation(op)
-
- def optimize_guard(self, op, constbox, emit_operation=True):
- value = self.getvalue(op.args[0])
- if value.is_constant():
- box = value.box
- assert isinstance(box, Const)
- if not box.same_constant(constbox):
- raise InvalidLoop
- return
- if emit_operation:
- self.emit_operation(op)
- value.make_constant(constbox)
-
- def optimize_GUARD_ISNULL(self, op):
- value = self.getvalue(op.args[0])
- if value.is_null():
- return
- elif value.is_nonnull():
- raise InvalidLoop
- self.emit_operation(op)
- value.make_constant(self.cpu.ts.CONST_NULL)
-
- def optimize_GUARD_NONNULL(self, op):
- value = self.getvalue(op.args[0])
- if value.is_nonnull():
- return
- elif value.is_null():
- raise InvalidLoop
- self.emit_operation(op)
- value.make_nonnull(len(self.newoperations) - 1)
-
- def optimize_GUARD_VALUE(self, op):
- value = self.getvalue(op.args[0])
- emit_operation = True
- if value.last_guard_index != -1:
- # there already has been a guard_nonnull or guard_class or
- # guard_nonnull_class on this value, which is rather silly.
- # replace the original guard with a guard_value
- old_guard_op = self.newoperations[value.last_guard_index]
- old_opnum = old_guard_op.opnum
- old_guard_op.opnum = rop.GUARD_VALUE
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
- # hack hack hack. Change the guard_opnum on
- # old_guard_op.descr so that when resuming,
- # the operation is not skipped by pyjitpl.py.
- descr = old_guard_op.descr
- assert isinstance(descr, compile.ResumeGuardDescr)
- descr.guard_opnum = rop.GUARD_VALUE
- descr.make_a_counter_per_value(old_guard_op)
- emit_operation = False
- constbox = op.args[1]
- assert isinstance(constbox, Const)
- self.optimize_guard(op, constbox, emit_operation)
-
- def optimize_GUARD_TRUE(self, op):
- self.optimize_guard(op, CONST_1)
-
- def optimize_GUARD_FALSE(self, op):
- self.optimize_guard(op, CONST_0)
-
- def optimize_GUARD_CLASS(self, op):
- value = self.getvalue(op.args[0])
- expectedclassbox = op.args[1]
- assert isinstance(expectedclassbox, Const)
- realclassbox = value.get_constant_class(self.cpu)
- if realclassbox is not None:
- # the following assert should always be true for now,
- # because invalid loops that would fail it are detected
- # earlier, in optimizefindnode.py.
- assert realclassbox.same_constant(expectedclassbox)
- return
- emit_operation = True
- if value.last_guard_index != -1:
- # there already has been a guard_nonnull or guard_class or
- # guard_nonnull_class on this value.
- old_guard_op = self.newoperations[value.last_guard_index]
- if old_guard_op.opnum == rop.GUARD_NONNULL:
- # it was a guard_nonnull, which we replace with a
- # guard_nonnull_class.
- old_guard_op.opnum = rop.GUARD_NONNULL_CLASS
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
- # hack hack hack. Change the guard_opnum on
- # old_guard_op.descr so that when resuming,
- # the operation is not skipped by pyjitpl.py.
- descr = old_guard_op.descr
- assert isinstance(descr, compile.ResumeGuardDescr)
- descr.guard_opnum = rop.GUARD_NONNULL_CLASS
- emit_operation = False
- if emit_operation:
- self.emit_operation(op)
- last_guard_index = len(self.newoperations) - 1
- else:
- last_guard_index = value.last_guard_index
- value.make_constant_class(expectedclassbox, last_guard_index)
-
- def optimize_GUARD_NO_EXCEPTION(self, op):
- if not self.exception_might_have_happened:
- return
- self.emit_operation(op)
- self.exception_might_have_happened = False
-
def optimize_GUARD_NO_OVERFLOW(self, op):
# otherwise the default optimizer will clear fields, which is unwanted
# in this case
self.emit_operation(op)
-
def _optimize_nullness(self, op, box, expect_nonnull):
value = self.getvalue(box)
if value.is_nonnull():
@@ -838,150 +468,6 @@
def optimize_PTR_EQ(self, op):
self._optimize_oois_ooisnot(op, False)
- def optimize_VIRTUAL_REF(self, op):
- indexbox = op.args[1]
- #
- # get some constants
- vrefinfo = self.metainterp_sd.virtualref_info
- c_cls = vrefinfo.jit_virtual_ref_const_class
- descr_virtual_token = vrefinfo.descr_virtual_token
- descr_virtualref_index = vrefinfo.descr_virtualref_index
- #
- # Replace the VIRTUAL_REF operation with a virtual structure of type
- # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon,
- # but the point is that doing so does not force the original structure.
- op = ResOperation(rop.NEW_WITH_VTABLE, [c_cls], op.result)
- vrefvalue = self.make_virtual(c_cls, op.result, op)
- tokenbox = BoxInt()
- self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox))
- vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox))
- vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox))
-
- def optimize_VIRTUAL_REF_FINISH(self, op):
- # Set the 'forced' field of the virtual_ref.
- # In good cases, this is all virtual, so has no effect.
- # Otherwise, this forces the real object -- but only now, as
- # opposed to much earlier. This is important because the object is
- # typically a PyPy PyFrame, and now is the end of its execution, so
- # forcing it now does not have catastrophic effects.
- vrefinfo = self.metainterp_sd.virtualref_info
- # op.args[1] should really never point to null here
- # - set 'forced' to point to the real object
- op1 = ResOperation(rop.SETFIELD_GC, op.args, None,
- descr = vrefinfo.descr_forced)
- self.optimize_SETFIELD_GC(op1)
- # - set 'virtual_token' to TOKEN_NONE
- args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)]
- op1 = ResOperation(rop.SETFIELD_GC, args, None,
- descr = vrefinfo.descr_virtual_token)
- self.optimize_SETFIELD_GC(op1)
- # Note that in some cases the virtual in op.args[1] has been forced
- # already. This is fine. In that case, and *if* a residual
- # CALL_MAY_FORCE suddenly turns out to access it, then it will
- # trigger a ResumeGuardForcedDescr.handle_async_forcing() which
- # will work too (but just be a little pointless, as the structure
- # was already forced).
-
- def optimize_GETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
- if value.is_virtual():
- # optimizefindnode should ensure that fieldvalue is found
- assert isinstance(value, AbstractVirtualValue)
- fieldvalue = value.getfield(op.descr, None)
- assert fieldvalue is not None
- self.make_equal_to(op.result, fieldvalue)
- else:
- value.ensure_nonnull()
- self.heap_op_optimizer.optimize_GETFIELD_GC(op, value)
-
- # note: the following line does not mean that the two operations are
- # completely equivalent, because GETFIELD_GC_PURE is_always_pure().
- optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC
-
- def optimize_SETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[1])
- if value.is_virtual():
- value.setfield(op.descr, fieldvalue)
- else:
- value.ensure_nonnull()
- self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue)
-
- def optimize_NEW_WITH_VTABLE(self, op):
- self.make_virtual(op.args[0], op.result, op)
-
- def optimize_NEW(self, op):
- self.make_vstruct(op.descr, op.result, op)
-
- def optimize_NEW_ARRAY(self, op):
- sizebox = self.get_constant_box(op.args[0])
- if sizebox is not None:
- # if the original 'op' did not have a ConstInt as argument,
- # build a new one with the ConstInt argument
- if not isinstance(op.args[0], ConstInt):
- op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result,
- descr=op.descr)
- self.make_varray(op.descr, sizebox.getint(), op.result, op)
- else:
- self.optimize_default(op)
-
- def optimize_ARRAYLEN_GC(self, op):
- value = self.getvalue(op.args[0])
- if value.is_virtual():
- self.make_constant_int(op.result, value.getlength())
- else:
- value.ensure_nonnull()
- self.optimize_default(op)
-
- def optimize_GETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
- if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
- if indexbox is not None:
- itemvalue = value.getitem(indexbox.getint())
- self.make_equal_to(op.result, itemvalue)
- return
- value.ensure_nonnull()
- self.heap_op_optimizer.optimize_GETARRAYITEM_GC(op, value)
-
- # note: the following line does not mean that the two operations are
- # completely equivalent, because GETARRAYITEM_GC_PURE is_always_pure().
- optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC
-
- def optimize_SETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
- if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
- if indexbox is not None:
- value.setitem(indexbox.getint(), self.getvalue(op.args[2]))
- return
- value.ensure_nonnull()
- fieldvalue = self.getvalue(op.args[2])
- self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
-
- def optimize_ARRAYCOPY(self, op):
- source_value = self.getvalue(op.args[2])
- dest_value = self.getvalue(op.args[3])
- source_start_box = self.get_constant_box(op.args[4])
- dest_start_box = self.get_constant_box(op.args[5])
- length = self.get_constant_box(op.args[6])
- if (source_value.is_virtual() and source_start_box and dest_start_box
- and length and dest_value.is_virtual()):
- # XXX optimize the case where dest value is not virtual,
- # but we still can avoid a mess
- source_start = source_start_box.getint()
- dest_start = dest_start_box.getint()
- for index in range(length.getint()):
- val = source_value.getitem(index + source_start)
- dest_value.setitem(index + dest_start, val)
- return
- if length and length.getint() == 0:
- return # 0-length arraycopy
- descr = op.args[0]
- assert isinstance(descr, AbstractDescr)
- self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
- descr))
-
def optimize_INSTANCEOF(self, op):
value = self.getvalue(op.args[0])
realclassbox = value.get_constant_class(self.cpu)
@@ -1013,254 +499,7 @@
resvalue = self.getvalue(op.result)
self.loop_invariant_results[key] = resvalue
- def optimize_CALL_PURE(self, op):
- for arg in op.args:
- if self.get_constant_box(arg) is None:
- break
- else:
- # all constant arguments: constant-fold away
- self.make_constant(op.result, op.args[0])
- return
- # replace CALL_PURE with just CALL
- self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
- op.descr))
-
optimize_ops = _findall(Optimizer, 'optimize_')
-class CachedArrayItems(object):
- def __init__(self):
- self.fixed_index_items = {}
- self.var_index_item = None
- self.var_index_indexvalue = None
-
-
-class HeapOpOptimizer(object):
- def __init__(self, optimizer):
- self.optimizer = optimizer
- # cached fields: {descr: {OptValue_instance: OptValue_fieldvalue}}
- self.cached_fields = {}
- # cached array items: {descr: CachedArrayItems}
- self.cached_arrayitems = {}
- # lazily written setfields (at most one per descr): {descr: op}
- self.lazy_setfields = {}
- self.lazy_setfields_descrs = [] # keys (at least) of previous dict
-
- def clean_caches(self):
- self.cached_fields.clear()
- self.cached_arrayitems.clear()
-
- def cache_field_value(self, descr, value, fieldvalue, write=False):
- if write:
- # when seeing a setfield, we have to clear the cache for the same
- # field on any other structure, just in case they are aliasing
- # each other
- d = self.cached_fields[descr] = {}
- else:
- d = self.cached_fields.setdefault(descr, {})
- d[value] = fieldvalue
-
- def read_cached_field(self, descr, value):
- # XXX self.cached_fields and self.lazy_setfields should probably
- # be merged somehow
- d = self.cached_fields.get(descr, None)
- if d is None:
- op = self.lazy_setfields.get(descr, None)
- if op is None:
- return None
- return self.optimizer.getvalue(op.args[1])
- return d.get(value, None)
-
- def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False):
- d = self.cached_arrayitems.get(descr, None)
- if d is None:
- d = self.cached_arrayitems[descr] = {}
- cache = d.get(value, None)
- if cache is None:
- cache = d[value] = CachedArrayItems()
- indexbox = self.optimizer.get_constant_box(indexvalue.box)
- if indexbox is not None:
- index = indexbox.getint()
- if write:
- for value, othercache in d.iteritems():
- # fixed index, clean the variable index cache, in case the
- # index is the same
- othercache.var_index_indexvalue = None
- othercache.var_index_item = None
- try:
- del othercache.fixed_index_items[index]
- except KeyError:
- pass
- cache.fixed_index_items[index] = fieldvalue
- else:
- if write:
- for value, othercache in d.iteritems():
- # variable index, clear all caches for this descr
- othercache.var_index_indexvalue = None
- othercache.var_index_item = None
- othercache.fixed_index_items.clear()
- cache.var_index_indexvalue = indexvalue
- cache.var_index_item = fieldvalue
-
- def read_cached_arrayitem(self, descr, value, indexvalue):
- d = self.cached_arrayitems.get(descr, None)
- if d is None:
- return None
- cache = d.get(value, None)
- if cache is None:
- return None
- indexbox = self.optimizer.get_constant_box(indexvalue.box)
- if indexbox is not None:
- return cache.fixed_index_items.get(indexbox.getint(), None)
- elif cache.var_index_indexvalue is indexvalue:
- return cache.var_index_item
- return None
-
- def emitting_operation(self, op):
- if op.has_no_side_effect():
- return
- if op.is_ovf():
- return
- if op.is_guard():
- return
- opnum = op.opnum
- if (opnum == rop.SETFIELD_GC or
- opnum == rop.SETARRAYITEM_GC or
- opnum == rop.DEBUG_MERGE_POINT):
- return
- assert opnum != rop.CALL_PURE
- if (opnum == rop.CALL or
- opnum == rop.CALL_MAY_FORCE or
- opnum == rop.CALL_ASSEMBLER):
- if opnum == rop.CALL_ASSEMBLER:
- effectinfo = None
- else:
- effectinfo = op.descr.get_extra_info()
- if effectinfo is not None:
- # XXX we can get the wrong complexity here, if the lists
- # XXX stored on effectinfo are large
- for fielddescr in effectinfo.readonly_descrs_fields:
- self.force_lazy_setfield(fielddescr)
- for fielddescr in effectinfo.write_descrs_fields:
- self.force_lazy_setfield(fielddescr)
- try:
- del self.cached_fields[fielddescr]
- except KeyError:
- pass
- for arraydescr in effectinfo.write_descrs_arrays:
- try:
- del self.cached_arrayitems[arraydescr]
- except KeyError:
- pass
- if effectinfo.check_forces_virtual_or_virtualizable():
- vrefinfo = self.optimizer.metainterp_sd.virtualref_info
- self.force_lazy_setfield(vrefinfo.descr_forced)
- # ^^^ we only need to force this field; the other fields
- # of virtualref_info and virtualizable_info are not gcptrs.
- return
- self.force_all_lazy_setfields()
- elif op.is_final() or (not we_are_translated() and
- op.opnum < 0): # escape() operations
- self.force_all_lazy_setfields()
- self.clean_caches()
-
- def force_lazy_setfield(self, descr, before_guard=False):
- try:
- op = self.lazy_setfields[descr]
- except KeyError:
- return
- del self.lazy_setfields[descr]
- self.optimizer._emit_operation(op)
- #
- # hackish: reverse the order of the last two operations if it makes
- # sense to avoid a situation like "int_eq/setfield_gc/guard_true",
- # which the backend (at least the x86 backend) does not handle well.
- newoperations = self.optimizer.newoperations
- if before_guard and len(newoperations) >= 2:
- lastop = newoperations[-1]
- prevop = newoperations[-2]
- # - is_comparison() for cases like "int_eq/setfield_gc/guard_true"
- # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced"
- # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow"
- opnum = prevop.opnum
- if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE
- or prevop.is_ovf())
- and prevop.result not in lastop.args):
- newoperations[-2] = lastop
- newoperations[-1] = prevop
-
- def force_all_lazy_setfields(self):
- if len(self.lazy_setfields_descrs) > 0:
- for descr in self.lazy_setfields_descrs:
- self.force_lazy_setfield(descr)
- del self.lazy_setfields_descrs[:]
-
- def force_lazy_setfields_for_guard(self):
- pendingfields = []
- for descr in self.lazy_setfields_descrs:
- try:
- op = self.lazy_setfields[descr]
- except KeyError:
- continue
- # the only really interesting case that we need to handle in the
- # guards' resume data is that of a virtual object that is stored
- # into a field of a non-virtual object.
- value = self.optimizer.getvalue(op.args[0])
- assert not value.is_virtual() # it must be a non-virtual
- fieldvalue = self.optimizer.getvalue(op.args[1])
- if fieldvalue.is_virtual():
- # this is the case that we leave to resume.py
- pendingfields.append((descr, value.box,
- fieldvalue.get_key_box()))
- else:
- self.force_lazy_setfield(descr, before_guard=True)
- return pendingfields
-
- def force_lazy_setfield_if_necessary(self, op, value, write=False):
- try:
- op1 = self.lazy_setfields[op.descr]
- except KeyError:
- if write:
- self.lazy_setfields_descrs.append(op.descr)
- else:
- if self.optimizer.getvalue(op1.args[0]) is not value:
- self.force_lazy_setfield(op.descr)
-
- def optimize_GETFIELD_GC(self, op, value):
- self.force_lazy_setfield_if_necessary(op, value)
- # check if the field was read from another getfield_gc just before
- # or has been written to recently
- fieldvalue = self.read_cached_field(op.descr, value)
- if fieldvalue is not None:
- self.optimizer.make_equal_to(op.result, fieldvalue)
- return
- # default case: produce the operation
- value.ensure_nonnull()
- self.optimizer.optimize_default(op)
- # then remember the result of reading the field
- fieldvalue = self.optimizer.getvalue(op.result)
- self.cache_field_value(op.descr, value, fieldvalue)
-
- def optimize_SETFIELD_GC(self, op, value, fieldvalue):
- self.force_lazy_setfield_if_necessary(op, value, write=True)
- self.lazy_setfields[op.descr] = op
- # remember the result of future reads of the field
- self.cache_field_value(op.descr, value, fieldvalue, write=True)
-
- def optimize_GETARRAYITEM_GC(self, op, value):
- indexvalue = self.optimizer.getvalue(op.args[1])
- fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue)
- if fieldvalue is not None:
- self.optimizer.make_equal_to(op.result, fieldvalue)
- return
- self.optimizer.optimize_default(op)
- fieldvalue = self.optimizer.getvalue(op.result)
- self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue)
-
- def optimize_SETARRAYITEM_GC(self, op, value, fieldvalue):
- self.optimizer.emit_operation(op)
- indexvalue = self.optimizer.getvalue(op.args[1])
- self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue,
- write=True)
-
Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py
==============================================================================
--- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py (original)
+++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/rewrite.py Sat Sep 4 12:16:13 2010
@@ -1,12 +1,12 @@
-from optimizer import Optimization, CONST_1, CONST_0
+from optimizer import *
from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex
from pypy.jit.metainterp.history import ConstInt
from pypy.jit.metainterp.optimizeutil import _findall
from pypy.jit.metainterp.resoperation import rop, ResOperation
class OptRewrite(Optimization):
- """Rewrite operations into equvivialent, already executed operations
- or constants.
+ """Rewrite operations into equvivialent, cheeper operations.
+ This includes already executed operations and constants.
"""
def propagate_forward(self, op):
@@ -127,6 +127,116 @@
else:
self.emit_operation(op)
+ def optimize_CALL_PURE(self, op):
+ for arg in op.args:
+ if self.get_constant_box(arg) is None:
+ break
+ else:
+ # all constant arguments: constant-fold away
+ self.make_constant(op.result, op.args[0])
+ return
+ # replace CALL_PURE with just CALL
+ self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
+ op.descr))
+ def optimize_guard(self, op, constbox, emit_operation=True):
+ value = self.getvalue(op.args[0])
+ if value.is_constant():
+ box = value.box
+ assert isinstance(box, Const)
+ if not box.same_constant(constbox):
+ raise InvalidLoop
+ return
+ if emit_operation:
+ self.emit_operation(op)
+ value.make_constant(constbox)
+
+ def optimize_GUARD_ISNULL(self, op):
+ value = self.getvalue(op.args[0])
+ if value.is_null():
+ return
+ elif value.is_nonnull():
+ raise InvalidLoop
+ self.emit_operation(op)
+ value.make_constant(self.optimizer.cpu.ts.CONST_NULL)
+
+ def optimize_GUARD_NONNULL(self, op):
+ value = self.getvalue(op.args[0])
+ if value.is_nonnull():
+ return
+ elif value.is_null():
+ raise InvalidLoop
+ self.emit_operation(op)
+ value.make_nonnull(len(self.optimizer.newoperations) - 1)
+
+ def optimize_GUARD_VALUE(self, op):
+ value = self.getvalue(op.args[0])
+ emit_operation = True
+ if value.last_guard_index != -1:
+ # there already has been a guard_nonnull or guard_class or
+ # guard_nonnull_class on this value, which is rather silly.
+ # replace the original guard with a guard_value
+ old_guard_op = self.optimizer.newoperations[value.last_guard_index]
+ old_opnum = old_guard_op.opnum
+ old_guard_op.opnum = rop.GUARD_VALUE
+ old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ # hack hack hack. Change the guard_opnum on
+ # old_guard_op.descr so that when resuming,
+ # the operation is not skipped by pyjitpl.py.
+ descr = old_guard_op.descr
+ assert isinstance(descr, compile.ResumeGuardDescr)
+ descr.guard_opnum = rop.GUARD_VALUE
+ descr.make_a_counter_per_value(old_guard_op)
+ emit_operation = False
+ constbox = op.args[1]
+ assert isinstance(constbox, Const)
+ self.optimize_guard(op, constbox, emit_operation)
+
+ def optimize_GUARD_TRUE(self, op):
+ self.optimize_guard(op, CONST_1)
+
+ def optimize_GUARD_FALSE(self, op):
+ self.optimize_guard(op, CONST_0)
+
+ def optimize_GUARD_CLASS(self, op):
+ value = self.getvalue(op.args[0])
+ expectedclassbox = op.args[1]
+ assert isinstance(expectedclassbox, Const)
+ realclassbox = value.get_constant_class(self.optimizer.cpu)
+ if realclassbox is not None:
+ # the following assert should always be true for now,
+ # because invalid loops that would fail it are detected
+ # earlier, in optimizefindnode.py.
+ assert realclassbox.same_constant(expectedclassbox)
+ return
+ emit_operation = True
+ if value.last_guard_index != -1:
+ # there already has been a guard_nonnull or guard_class or
+ # guard_nonnull_class on this value.
+ old_guard_op = self.optimizer.newoperations[value.last_guard_index]
+ if old_guard_op.opnum == rop.GUARD_NONNULL:
+ # it was a guard_nonnull, which we replace with a
+ # guard_nonnull_class.
+ old_guard_op.opnum = rop.GUARD_NONNULL_CLASS
+ old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ # hack hack hack. Change the guard_opnum on
+ # old_guard_op.descr so that when resuming,
+ # the operation is not skipped by pyjitpl.py.
+ descr = old_guard_op.descr
+ assert isinstance(descr, compile.ResumeGuardDescr)
+ descr.guard_opnum = rop.GUARD_NONNULL_CLASS
+ emit_operation = False
+ if emit_operation:
+ self.emit_operation(op)
+ last_guard_index = len(self.optimizer.newoperations) - 1
+ else:
+ last_guard_index = value.last_guard_index
+ value.make_constant_class(expectedclassbox, last_guard_index)
+
+ def optimize_GUARD_NO_EXCEPTION(self, op):
+ if not self.optimizer.exception_might_have_happened:
+ return
+ self.emit_operation(op)
+ self.optimizer.exception_might_have_happened = False
optimize_ops = _findall(OptRewrite, 'optimize_')
Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py
==============================================================================
--- pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py (original)
+++ pypy/branch/jit-bounds/pypy/jit/metainterp/optimizeopt/virtualize.py Sat Sep 4 12:16:13 2010
@@ -1,6 +1,456 @@
-from optimizer import Optimization
+from pypy.jit.metainterp.specnode import SpecNode, NotSpecNode, ConstantSpecNode
+from pypy.jit.metainterp.specnode import AbstractVirtualStructSpecNode
+from pypy.jit.metainterp.specnode import VirtualInstanceSpecNode
+from pypy.jit.metainterp.specnode import VirtualArraySpecNode
+from pypy.jit.metainterp.specnode import VirtualStructSpecNode
+from pypy.jit.metainterp.resoperation import rop, ResOperation
+from pypy.jit.metainterp.optimizeutil import _findall
+from pypy.rlib.objectmodel import we_are_translated
+from optimizer import *
-class Virtualize(Optimization):
+
+class AbstractVirtualValue(OptValue):
+ _attrs_ = ('optimizer', 'keybox', 'source_op', '_cached_vinfo')
+ box = None
+ level = LEVEL_NONNULL
+ _cached_vinfo = None
+
+ def __init__(self, optimizer, keybox, source_op=None):
+ self.optimizer = optimizer
+ self.keybox = keybox # only used as a key in dictionaries
+ self.source_op = source_op # the NEW_WITH_VTABLE/NEW_ARRAY operation
+ # that builds this box
+
+ def get_key_box(self):
+ if self.box is None:
+ return self.keybox
+ return self.box
+
+ def force_box(self):
+ if self.box is None:
+ self.optimizer.forget_numberings(self.keybox)
+ self._really_force()
+ return self.box
+
+ def make_virtual_info(self, modifier, fieldnums):
+ vinfo = self._cached_vinfo
+ if vinfo is not None and vinfo.equals(fieldnums):
+ return vinfo
+ vinfo = self._make_virtual(modifier)
+ vinfo.set_content(fieldnums)
+ self._cached_vinfo = vinfo
+ return vinfo
+
+ def _make_virtual(self, modifier):
+ raise NotImplementedError("abstract base")
+
+ def _really_force(self):
+ raise NotImplementedError("abstract base")
+
+def get_fielddescrlist_cache(cpu):
+ if not hasattr(cpu, '_optimizeopt_fielddescrlist_cache'):
+ result = descrlist_dict()
+ cpu._optimizeopt_fielddescrlist_cache = result
+ return result
+ return cpu._optimizeopt_fielddescrlist_cache
+get_fielddescrlist_cache._annspecialcase_ = "specialize:memo"
+
+class AbstractVirtualStructValue(AbstractVirtualValue):
+ _attrs_ = ('_fields', '_cached_sorted_fields')
+
+ def __init__(self, optimizer, keybox, source_op=None):
+ AbstractVirtualValue.__init__(self, optimizer, keybox, source_op)
+ self._fields = {}
+ self._cached_sorted_fields = None
+
+ def getfield(self, ofs, default):
+ return self._fields.get(ofs, default)
+
+ def setfield(self, ofs, fieldvalue):
+ assert isinstance(fieldvalue, OptValue)
+ self._fields[ofs] = fieldvalue
+
+ def _really_force(self):
+ assert self.source_op is not None
+ # ^^^ This case should not occur any more (see test_bug_3).
+ #
+ newoperations = self.optimizer.newoperations
+ newoperations.append(self.source_op)
+ self.box = box = self.source_op.result
+ #
+ iteritems = self._fields.iteritems()
+ if not we_are_translated(): #random order is fine, except for tests
+ iteritems = list(iteritems)
+ iteritems.sort(key = lambda (x,y): x.sort_key())
+ for ofs, value in iteritems:
+ if value.is_null():
+ continue
+ subbox = value.force_box()
+ op = ResOperation(rop.SETFIELD_GC, [box, subbox], None,
+ descr=ofs)
+ newoperations.append(op)
+ self._fields = None
+
+ def _get_field_descr_list(self):
+ _cached_sorted_fields = self._cached_sorted_fields
+ if (_cached_sorted_fields is not None and
+ len(self._fields) == len(_cached_sorted_fields)):
+ lst = self._cached_sorted_fields
+ else:
+ lst = self._fields.keys()
+ sort_descrs(lst)
+ cache = get_fielddescrlist_cache(self.optimizer.cpu)
+ result = cache.get(lst, None)
+ if result is None:
+ cache[lst] = lst
+ else:
+ lst = result
+ # store on self, to not have to repeatedly get it from the global
+ # cache, which involves sorting
+ self._cached_sorted_fields = lst
+ return lst
+
+ def get_args_for_fail(self, modifier):
+ if self.box is None and not modifier.already_seen_virtual(self.keybox):
+ # checks for recursion: it is False unless
+ # we have already seen the very same keybox
+ lst = self._get_field_descr_list()
+ fieldboxes = [self._fields[ofs].get_key_box() for ofs in lst]
+ modifier.register_virtual_fields(self.keybox, fieldboxes)
+ for ofs in lst:
+ fieldvalue = self._fields[ofs]
+ fieldvalue.get_args_for_fail(modifier)
+
+
+class VirtualValue(AbstractVirtualStructValue):
+ level = LEVEL_KNOWNCLASS
+
+ def __init__(self, optimizer, known_class, keybox, source_op=None):
+ AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op)
+ assert isinstance(known_class, Const)
+ self.known_class = known_class
+
+ def _make_virtual(self, modifier):
+ fielddescrs = self._get_field_descr_list()
+ return modifier.make_virtual(self.known_class, fielddescrs)
+
+class VStructValue(AbstractVirtualStructValue):
+
+ def __init__(self, optimizer, structdescr, keybox, source_op=None):
+ AbstractVirtualStructValue.__init__(self, optimizer, keybox, source_op)
+ self.structdescr = structdescr
+
+ def _make_virtual(self, modifier):
+ fielddescrs = self._get_field_descr_list()
+ return modifier.make_vstruct(self.structdescr, fielddescrs)
+
+class VArrayValue(AbstractVirtualValue):
+
+ def __init__(self, optimizer, arraydescr, size, keybox, source_op=None):
+ AbstractVirtualValue.__init__(self, optimizer, keybox, source_op)
+ self.arraydescr = arraydescr
+ self.constvalue = optimizer.new_const_item(arraydescr)
+ self._items = [self.constvalue] * size
+
+ def getlength(self):
+ return len(self._items)
+
+ def getitem(self, index):
+ res = self._items[index]
+ return res
+
+ def setitem(self, index, itemvalue):
+ assert isinstance(itemvalue, OptValue)
+ self._items[index] = itemvalue
+
+ def _really_force(self):
+ assert self.source_op is not None
+ newoperations = self.optimizer.newoperations
+ newoperations.append(self.source_op)
+ self.box = box = self.source_op.result
+ for index in range(len(self._items)):
+ subvalue = self._items[index]
+ if subvalue is not self.constvalue:
+ if subvalue.is_null():
+ continue
+ subbox = subvalue.force_box()
+ op = ResOperation(rop.SETARRAYITEM_GC,
+ [box, ConstInt(index), subbox], None,
+ descr=self.arraydescr)
+ newoperations.append(op)
+
+ def get_args_for_fail(self, modifier):
+ if self.box is None and not modifier.already_seen_virtual(self.keybox):
+ # checks for recursion: it is False unless
+ # we have already seen the very same keybox
+ itemboxes = []
+ for itemvalue in self._items:
+ itemboxes.append(itemvalue.get_key_box())
+ modifier.register_virtual_fields(self.keybox, itemboxes)
+ for itemvalue in self._items:
+ if itemvalue is not self.constvalue:
+ itemvalue.get_args_for_fail(modifier)
+
+ def _make_virtual(self, modifier):
+ return modifier.make_varray(self.arraydescr)
+
+class __extend__(SpecNode):
+ def setup_virtual_node(self, optimizer, box, newinputargs):
+ raise NotImplementedError
+ def teardown_virtual_node(self, optimizer, value, newexitargs):
+ raise NotImplementedError
+
+class __extend__(NotSpecNode):
+ def setup_virtual_node(self, optimizer, box, newinputargs):
+ newinputargs.append(box)
+ def teardown_virtual_node(self, optimizer, value, newexitargs):
+ newexitargs.append(value.force_box())
+
+class __extend__(ConstantSpecNode):
+ def setup_virtual_node(self, optimizer, box, newinputargs):
+ optimizer.make_constant(box, self.constbox)
+ def teardown_virtual_node(self, optimizer, value, newexitargs):
+ pass
+
+class __extend__(AbstractVirtualStructSpecNode):
+ def setup_virtual_node(self, optimizer, box, newinputargs):
+ vvalue = self._setup_virtual_node_1(optimizer, box)
+ for ofs, subspecnode in self.fields:
+ subbox = optimizer.new_box(ofs)
+ subspecnode.setup_virtual_node(optimizer, subbox, newinputargs)
+ vvaluefield = optimizer.getvalue(subbox)
+ vvalue.setfield(ofs, vvaluefield)
+ def _setup_virtual_node_1(self, optimizer, box):
+ raise NotImplementedError
+ def teardown_virtual_node(self, optimizer, value, newexitargs):
+ assert value.is_virtual()
+ for ofs, subspecnode in self.fields:
+ subvalue = value.getfield(ofs, optimizer.new_const(ofs))
+ subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs)
+
+class __extend__(VirtualInstanceSpecNode):
+ def _setup_virtual_node_1(self, optimizer, box):
+ return optimizer.make_virtual(self.known_class, box)
+
+class __extend__(VirtualStructSpecNode):
+ def _setup_virtual_node_1(self, optimizer, box):
+ return optimizer.make_vstruct(self.typedescr, box)
+
+class __extend__(VirtualArraySpecNode):
+ def setup_virtual_node(self, optimizer, box, newinputargs):
+ vvalue = optimizer.make_varray(self.arraydescr, len(self.items), box)
+ for index in range(len(self.items)):
+ subbox = optimizer.new_box_item(self.arraydescr)
+ subspecnode = self.items[index]
+ subspecnode.setup_virtual_node(optimizer, subbox, newinputargs)
+ vvalueitem = optimizer.getvalue(subbox)
+ vvalue.setitem(index, vvalueitem)
+ def teardown_virtual_node(self, optimizer, value, newexitargs):
+ assert value.is_virtual()
+ for index in range(len(self.items)):
+ subvalue = value.getitem(index)
+ subspecnode = self.items[index]
+ subspecnode.teardown_virtual_node(optimizer, subvalue, newexitargs)
+
+class OptVirtualize(Optimization):
"Virtualize objects until they escape."
- # FIXME: Move here
-
+
+ def setup(self, virtuals):
+ if not virtuals:
+ return
+
+ inputargs = self.optimizer.loop.inputargs
+ specnodes = self.optimizer.loop.token.specnodes
+ assert len(inputargs) == len(specnodes)
+ newinputargs = []
+ for i in range(len(inputargs)):
+ specnodes[i].setup_virtual_node(self, inputargs[i], newinputargs)
+ self.optimizer.loop.inputargs = newinputargs
+
+ def make_virtual(self, known_class, box, source_op=None):
+ vvalue = VirtualValue(self.optimizer, known_class, box, source_op)
+ self.make_equal_to(box, vvalue)
+ return vvalue
+
+ def make_varray(self, arraydescr, size, box, source_op=None):
+ vvalue = VArrayValue(self.optimizer, arraydescr, size, box, source_op)
+ self.make_equal_to(box, vvalue)
+ return vvalue
+
+ def make_vstruct(self, structdescr, box, source_op=None):
+ vvalue = VStructValue(self.optimizer, structdescr, box, source_op)
+ self.make_equal_to(box, vvalue)
+ return vvalue
+
+ def optimize_JUMP(self, op):
+ orgop = self.optimizer.loop.operations[-1]
+ exitargs = []
+ target_loop_token = orgop.descr
+ assert isinstance(target_loop_token, LoopToken)
+ specnodes = target_loop_token.specnodes
+ assert len(op.args) == len(specnodes)
+ for i in range(len(specnodes)):
+ value = self.getvalue(op.args[i])
+ specnodes[i].teardown_virtual_node(self, value, exitargs)
+ op.args = exitargs[:]
+ self.emit_operation(op)
+
+ def optimize_VIRTUAL_REF(self, op):
+ indexbox = op.args[1]
+ #
+ # get some constants
+ vrefinfo = self.optimizer.metainterp_sd.virtualref_info
+ c_cls = vrefinfo.jit_virtual_ref_const_class
+ descr_virtual_token = vrefinfo.descr_virtual_token
+ descr_virtualref_index = vrefinfo.descr_virtualref_index
+ #
+ # Replace the VIRTUAL_REF operation with a virtual structure of type
+ # 'jit_virtual_ref'. The jit_virtual_ref structure may be forced soon,
+ # but the point is that doing so does not force the original structure.
+ op = ResOperation(rop.NEW_WITH_VTABLE, [c_cls], op.result)
+ vrefvalue = self.make_virtual(c_cls, op.result, op)
+ tokenbox = BoxInt()
+ self.emit_operation(ResOperation(rop.FORCE_TOKEN, [], tokenbox))
+ vrefvalue.setfield(descr_virtual_token, self.getvalue(tokenbox))
+ vrefvalue.setfield(descr_virtualref_index, self.getvalue(indexbox))
+
+ def optimize_VIRTUAL_REF_FINISH(self, op):
+ # Set the 'forced' field of the virtual_ref.
+ # In good cases, this is all virtual, so has no effect.
+ # Otherwise, this forces the real object -- but only now, as
+ # opposed to much earlier. This is important because the object is
+ # typically a PyPy PyFrame, and now is the end of its execution, so
+ # forcing it now does not have catastrophic effects.
+ vrefinfo = self.optimizer.metainterp_sd.virtualref_info
+ # op.args[1] should really never point to null here
+ # - set 'forced' to point to the real object
+ # (routed through optimize_SETFIELD_GC, so if the vref is still
+ # virtual the store is merely recorded, not emitted)
+ op1 = ResOperation(rop.SETFIELD_GC, op.args, None,
+ descr = vrefinfo.descr_forced)
+ self.optimize_SETFIELD_GC(op1)
+ # - set 'virtual_token' to TOKEN_NONE
+ args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)]
+ op1 = ResOperation(rop.SETFIELD_GC, args, None,
+ descr = vrefinfo.descr_virtual_token)
+ self.optimize_SETFIELD_GC(op1)
+ # Note that in some cases the virtual in op.args[1] has been forced
+ # already. This is fine. In that case, and *if* a residual
+ # CALL_MAY_FORCE suddenly turns out to access it, then it will
+ # trigger a ResumeGuardForcedDescr.handle_async_forcing() which
+ # will work too (but just be a little pointless, as the structure
+ # was already forced).
+
+ def optimize_GETFIELD_GC(self, op):
+ # Field read: if the source object is virtual, the field's value is
+ # already tracked, so alias op.result to it and emit nothing.
+ value = self.getvalue(op.args[0])
+ if value.is_virtual():
+ # optimizefindnode should ensure that fieldvalue is found
+ assert isinstance(value, AbstractVirtualValue)
+ fieldvalue = value.getfield(op.descr, None)
+ assert fieldvalue is not None
+ self.make_equal_to(op.result, fieldvalue)
+ else:
+ # not virtual: pass the operation through unchanged
+ # (ensure_nonnull presumably marks the box as known non-null
+ # for later guards -- see OptValue)
+ value.ensure_nonnull()
+ ###self.heap_op_optimizer.optimize_GETFIELD_GC(op, value)
+ self.emit_operation(op)
+
+ # note: the following line does not mean that the two operations are
+ # completely equivalent, because GETFIELD_GC_PURE is_always_pure().
+ optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC
+
+ def optimize_SETFIELD_GC(self, op):
+ # Field write: a store into a virtual is recorded on the virtual
+ # value (no residual operation); otherwise the op passes through.
+ value = self.getvalue(op.args[0])
+ fieldvalue = self.getvalue(op.args[1])
+ if value.is_virtual():
+ value.setfield(op.descr, fieldvalue)
+ else:
+ value.ensure_nonnull()
+ ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue)
+ self.emit_operation(op)
+
+ def optimize_NEW_WITH_VTABLE(self, op):
+ # The allocation becomes a virtual (keyed on its vtable constant,
+ # op.args[0]); nothing is emitted unless it is forced later.
+ self.make_virtual(op.args[0], op.result, op)
+
+ def optimize_NEW(self, op):
+ # Plain struct allocation becomes a virtual struct (by descr).
+ self.make_vstruct(op.descr, op.result, op)
+
+ def optimize_NEW_ARRAY(self, op):
+ # Only arrays whose size is a compile-time constant can become
+ # virtual arrays; otherwise the allocation is emitted as-is.
+ sizebox = self.get_constant_box(op.args[0])
+ if sizebox is not None:
+ # if the original 'op' did not have a ConstInt as argument,
+ # build a new one with the ConstInt argument
+ if not isinstance(op.args[0], ConstInt):
+ op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result,
+ descr=op.descr)
+ self.make_varray(op.descr, sizebox.getint(), op.result, op)
+ else:
+ ###self.optimize_default(op)
+ self.emit_operation(op)
+
+ def optimize_ARRAYLEN_GC(self, op):
+ # The length of a virtual array is known statically, so the result
+ # is folded to a constant; non-virtuals pass through.
+ value = self.getvalue(op.args[0])
+ if value.is_virtual():
+ self.make_constant_int(op.result, value.getlength())
+ else:
+ value.ensure_nonnull()
+ ###self.optimize_default(op)
+ self.emit_operation(op)
+
+ def optimize_GETARRAYITEM_GC(self, op):
+ # Read from a virtual array with a constant index is resolved at
+ # optimization time. NOTE(review): a virtual array with a
+ # non-constant index falls through and re-emits the op, which
+ # presumably forces the array elsewhere -- confirm in emit_operation.
+ value = self.getvalue(op.args[0])
+ if value.is_virtual():
+ indexbox = self.get_constant_box(op.args[1])
+ if indexbox is not None:
+ itemvalue = value.getitem(indexbox.getint())
+ self.make_equal_to(op.result, itemvalue)
+ return
+ value.ensure_nonnull()
+ ###self.heap_op_optimizer.optimize_GETARRAYITEM_GC(op, value)
+ self.emit_operation(op)
+
+ # note: the following line does not mean that the two operations are
+ # completely equivalent, because GETARRAYITEM_GC_PURE is_always_pure().
+ optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC
+
+ def optimize_SETARRAYITEM_GC(self, op):
+ # Write into a virtual array with a constant index is recorded on
+ # the virtual; any other case re-emits the operation unchanged.
+ value = self.getvalue(op.args[0])
+ if value.is_virtual():
+ indexbox = self.get_constant_box(op.args[1])
+ if indexbox is not None:
+ value.setitem(indexbox.getint(), self.getvalue(op.args[2]))
+ return
+ value.ensure_nonnull()
+ ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
+ self.emit_operation(op)
+
+ def optimize_ARRAYCOPY(self, op):
+ # Three cases:
+ # 1. both arrays virtual and start/length constant: perform the
+ # copy element-by-element at optimization time, emit nothing;
+ # 2. constant zero length: drop the operation entirely;
+ # 3. otherwise: emit a residual CALL using op.args[0] as the
+ # call descr (op.args[1] is presumably the function box).
+ source_value = self.getvalue(op.args[2])
+ dest_value = self.getvalue(op.args[3])
+ source_start_box = self.get_constant_box(op.args[4])
+ dest_start_box = self.get_constant_box(op.args[5])
+ length = self.get_constant_box(op.args[6])
+ if (source_value.is_virtual() and source_start_box and dest_start_box
+ and length and dest_value.is_virtual()):
+ # XXX optimize the case where dest value is not virtual,
+ # but we still can avoid a mess
+ source_start = source_start_box.getint()
+ dest_start = dest_start_box.getint()
+ for index in range(length.getint()):
+ val = source_value.getitem(index + source_start)
+ dest_value.setitem(index + dest_start, val)
+ return
+ if length and length.getint() == 0:
+ return # 0-length arraycopy
+ descr = op.args[0]
+ assert isinstance(descr, AbstractDescr)
+ self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
+ descr))
+
+ def propagate_forward(self, op):
+ # Dispatch 'op' to the optimize_XXX handler whose opnum matches,
+ # scanning the (opnum, function) pairs built by _findall below;
+ # ops with no handler are passed through unchanged.
+ opnum = op.opnum
+ for value, func in optimize_ops:
+ if opnum == value:
+ func(self, op)
+ break
+ else:
+ # for/else: no handler matched
+ self.emit_operation(op)
+
+optimize_ops = _findall(OptVirtualize, 'optimize_')
Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py
==============================================================================
--- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py (original)
+++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_optimizeopt.py Sat Sep 4 12:16:13 2010
@@ -5,6 +5,7 @@
BaseTest)
from pypy.jit.metainterp.optimizefindnode import PerfectSpecializationFinder
import pypy.jit.metainterp.optimizeopt.optimizer as optimizeopt
+import pypy.jit.metainterp.optimizeopt.virtualize as virtualize
from pypy.jit.metainterp.optimizeopt import optimize_loop_1
from pypy.jit.metainterp.optimizeutil import InvalidLoop
from pypy.jit.metainterp.history import AbstractDescr, ConstInt, BoxInt
@@ -64,7 +65,7 @@
class cpu(object):
pass
opt = FakeOptimizer()
- virt1 = optimizeopt.AbstractVirtualStructValue(opt, None)
+ virt1 = virtualize.AbstractVirtualStructValue(opt, None)
lst1 = virt1._get_field_descr_list()
assert lst1 == []
lst2 = virt1._get_field_descr_list()
@@ -75,7 +76,7 @@
lst4 = virt1._get_field_descr_list()
assert lst3 is lst4
- virt2 = optimizeopt.AbstractVirtualStructValue(opt, None)
+ virt2 = virtualize.AbstractVirtualStructValue(opt, None)
lst5 = virt2._get_field_descr_list()
assert lst5 is lst1
virt2.setfield(LLtypeMixin.valuedescr, optimizeopt.OptValue(None))
@@ -88,7 +89,7 @@
self.fieldnums = fieldnums
def equals(self, fieldnums):
return self.fieldnums == fieldnums
- class FakeVirtualValue(optimizeopt.AbstractVirtualValue):
+ class FakeVirtualValue(virtualize.AbstractVirtualValue):
def _make_virtual(self, *args):
return FakeVInfo()
v1 = FakeVirtualValue(None, None, None)
@@ -257,6 +258,7 @@
optimize_loop_1(metainterp_sd, loop)
#
expected = self.parse(optops)
+ print '\n'.join([str(o) for o in loop.operations])
self.assert_equal(loop, expected)
def test_simple(self):
Modified: pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py
==============================================================================
--- pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py (original)
+++ pypy/branch/jit-bounds/pypy/jit/metainterp/test/test_resume.py Sat Sep 4 12:16:13 2010
@@ -1,7 +1,7 @@
import py
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
-from pypy.jit.metainterp.optimizeopt.optimizer import VirtualValue, OptValue, VArrayValue
-from pypy.jit.metainterp.optimizeopt.optimizer import VStructValue
+from pypy.jit.metainterp.optimizeopt.virtualize import VirtualValue, OptValue, VArrayValue
+from pypy.jit.metainterp.optimizeopt.virtualize import VStructValue
from pypy.jit.metainterp.resume import *
from pypy.jit.metainterp.history import BoxInt, BoxPtr, ConstInt
from pypy.jit.metainterp.history import ConstPtr, ConstFloat
More information about the Pypy-commit
mailing list