[pypy-svn] r77265 - in pypy/trunk/pypy: jit/backend/cli jit/backend/llgraph jit/backend/llsupport jit/backend/llsupport/test jit/backend/llvm jit/backend/test jit/backend/x86 jit/backend/x86/test jit/metainterp jit/metainterp/optimizeopt jit/metainterp/test jit/tool module/array/benchmark module/array/test
antocuni at codespeak.net
Wed Sep 22 14:17:19 CEST 2010
Author: antocuni
Date: Wed Sep 22 14:17:16 2010
New Revision: 77265
Added:
pypy/trunk/pypy/jit/metainterp/test/test_resoperation.py
- copied unchanged from r77260, pypy/branch/resoperation-refactoring/pypy/jit/metainterp/test/test_resoperation.py
Modified:
pypy/trunk/pypy/jit/backend/cli/method.py
pypy/trunk/pypy/jit/backend/cli/runner.py
pypy/trunk/pypy/jit/backend/llgraph/runner.py
pypy/trunk/pypy/jit/backend/llsupport/gc.py
pypy/trunk/pypy/jit/backend/llsupport/regalloc.py
pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py
pypy/trunk/pypy/jit/backend/llvm/compile.py
pypy/trunk/pypy/jit/backend/test/runner_test.py
pypy/trunk/pypy/jit/backend/test/test_ll_random.py
pypy/trunk/pypy/jit/backend/test/test_random.py
pypy/trunk/pypy/jit/backend/x86/assembler.py
pypy/trunk/pypy/jit/backend/x86/regalloc.py
pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py
pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py
pypy/trunk/pypy/jit/backend/x86/test/test_runner.py
pypy/trunk/pypy/jit/metainterp/compile.py
pypy/trunk/pypy/jit/metainterp/graphpage.py
pypy/trunk/pypy/jit/metainterp/history.py
pypy/trunk/pypy/jit/metainterp/logger.py
pypy/trunk/pypy/jit/metainterp/optimize.py
pypy/trunk/pypy/jit/metainterp/optimizefindnode.py
pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py
pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py
pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (contents, props changed)
pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py
pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py
pypy/trunk/pypy/jit/metainterp/pyjitpl.py
pypy/trunk/pypy/jit/metainterp/resoperation.py
pypy/trunk/pypy/jit/metainterp/simple_optimize.py
pypy/trunk/pypy/jit/metainterp/test/oparser.py
pypy/trunk/pypy/jit/metainterp/test/test_basic.py
pypy/trunk/pypy/jit/metainterp/test/test_logger.py
pypy/trunk/pypy/jit/metainterp/test/test_loop.py
pypy/trunk/pypy/jit/metainterp/test/test_oparser.py
pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py
pypy/trunk/pypy/jit/metainterp/test/test_recursive.py
pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py
pypy/trunk/pypy/jit/tool/showstats.py
pypy/trunk/pypy/module/array/benchmark/Makefile (props changed)
pypy/trunk/pypy/module/array/benchmark/intimg.c (props changed)
pypy/trunk/pypy/module/array/benchmark/intimgtst.c (props changed)
pypy/trunk/pypy/module/array/benchmark/intimgtst.py (props changed)
pypy/trunk/pypy/module/array/benchmark/loop.c (props changed)
pypy/trunk/pypy/module/array/benchmark/sum.c (props changed)
pypy/trunk/pypy/module/array/benchmark/sumtst.c (props changed)
pypy/trunk/pypy/module/array/benchmark/sumtst.py (props changed)
pypy/trunk/pypy/module/array/test/test_array_old.py (props changed)
Log:
Merge the resoperation-refactoring branch.
The idea is to reduce the memory needed by ResOperations, by putting certain
fields (e.g. a variable-sized list of arguments, or descr, or fail_args) only
on the operations that actually need them.
The benchmarks don't show any speedup or slowdown, but it's still worth
merging because it saves a bit of memory: lists of operations are currently
thrown away immediately, but we might want to keep them in the future.
Modified: pypy/trunk/pypy/jit/backend/cli/method.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/cli/method.py (original)
+++ pypy/trunk/pypy/jit/backend/cli/method.py Wed Sep 22 14:17:16 2010
@@ -207,9 +207,9 @@
def _collect_types(self, operations, box2classes):
for op in operations:
- if op.opnum in (rop.GETFIELD_GC, rop.SETFIELD_GC):
+ if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC):
box = op.args[0]
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.FieldDescr)
box2classes.setdefault(box, []).append(descr.selfclass)
if op in self.cliloop.guard2ops:
@@ -335,7 +335,7 @@
while self.i < N:
op = oplist[self.i]
self.emit_debug(op.repr())
- func = self.operations[op.opnum]
+ func = self.operations[op.getopnum()]
assert func is not None
func(self, op)
self.i += 1
@@ -357,10 +357,10 @@
assert op.is_guard()
if op in self.cliloop.guard2ops:
inputargs, suboperations = self.cliloop.guard2ops[op]
- self.match_var_fox_boxes(op.fail_args, inputargs)
+ self.match_var_fox_boxes(op.getfailargs(), inputargs)
self.emit_operations(suboperations)
else:
- self.emit_return_failed_op(op, op.fail_args)
+ self.emit_return_failed_op(op, op.getfailargs())
def emit_end(self):
assert self.branches == []
@@ -410,7 +410,7 @@
def emit_ovf_op(self, op, emit_op):
next_op = self.oplist[self.i+1]
- if next_op.opnum == rop.GUARD_NO_OVERFLOW:
+ if next_op.getopnum() == rop.GUARD_NO_OVERFLOW:
self.i += 1
self.emit_ovf_op_and_guard(op, next_op, emit_op)
return
@@ -544,7 +544,7 @@
self.emit_guard_overflow_impl(op, OpCodes.Brfalse)
def emit_op_jump(self, op):
- target_token = op.descr
+ target_token = op.getdescr()
assert isinstance(target_token, LoopToken)
if target_token.cliloop is self.cliloop:
# jump to the beginning of the loop
@@ -586,7 +586,7 @@
self.store_result(op)
def emit_op_instanceof(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_clitype()
op.args[0].load(self)
@@ -604,7 +604,7 @@
self.store_result(op)
def emit_op_call_impl(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.StaticMethDescr)
delegate_type = descr.get_delegate_clitype()
meth_invoke = descr.get_meth_info()
@@ -619,7 +619,7 @@
emit_op_call_pure = emit_op_call
def emit_op_oosend(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.MethDescr)
clitype = descr.get_self_clitype()
methinfo = descr.get_meth_info()
@@ -639,7 +639,7 @@
self.store_result(op)
def emit_op_getfield_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.FieldDescr)
clitype = descr.get_self_clitype()
fieldinfo = descr.get_field_info()
@@ -653,7 +653,7 @@
emit_op_getfield_gc_pure = emit_op_getfield_gc
def emit_op_setfield_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.FieldDescr)
clitype = descr.get_self_clitype()
fieldinfo = descr.get_field_info()
@@ -665,7 +665,7 @@
self.il.Emit(OpCodes.Stfld, fieldinfo)
def emit_op_getarrayitem_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_array_clitype()
itemtype = descr.get_clitype()
@@ -678,7 +678,7 @@
emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc
def emit_op_setarrayitem_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_array_clitype()
itemtype = descr.get_clitype()
@@ -689,7 +689,7 @@
self.il.Emit(OpCodes.Stelem, itemtype)
def emit_op_arraylen_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_array_clitype()
op.args[0].load(self)
@@ -698,7 +698,7 @@
self.store_result(op)
def emit_op_new_array(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
item_clitype = descr.get_clitype()
if item_clitype is None:
Modified: pypy/trunk/pypy/jit/backend/cli/runner.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/cli/runner.py (original)
+++ pypy/trunk/pypy/jit/backend/cli/runner.py Wed Sep 22 14:17:16 2010
@@ -105,7 +105,7 @@
def _attach_token_to_faildescrs(self, token, operations):
for op in operations:
if op.is_guard():
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, AbstractFailDescr)
descr._loop_token = token
descr._guard_op = op
@@ -136,7 +136,7 @@
func = cliloop.funcbox.holder.GetFunc()
func(self.get_inputargs())
op = self.failing_ops[self.inputargs.get_failed_op()]
- return op.descr
+ return op.getdescr()
def set_future_value_int(self, index, intvalue):
self.get_inputargs().set_int(index, intvalue)
Modified: pypy/trunk/pypy/jit/backend/llgraph/runner.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/llgraph/runner.py (original)
+++ pypy/trunk/pypy/jit/backend/llgraph/runner.py Wed Sep 22 14:17:16 2010
@@ -151,16 +151,17 @@
def _compile_operations(self, c, operations, var2index):
for op in operations:
- llimpl.compile_add(c, op.opnum)
- descr = op.descr
+ llimpl.compile_add(c, op.getopnum())
+ descr = op.getdescr()
if isinstance(descr, Descr):
llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo)
- if isinstance(descr, history.LoopToken) and op.opnum != rop.JUMP:
+ if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP:
llimpl.compile_add_loop_token(c, descr)
if self.is_oo and isinstance(descr, (OODescr, MethDescr)):
# hack hack, not rpython
- c._obj.externalobj.operations[-1].descr = descr
- for x in op.args:
+ c._obj.externalobj.operations[-1].setdescr(descr)
+ for i in range(op.numargs()):
+ x = op.getarg(i)
if isinstance(x, history.Box):
llimpl.compile_add_var(c, var2index[x])
elif isinstance(x, history.ConstInt):
@@ -173,10 +174,10 @@
raise Exception("'%s' args contain: %r" % (op.getopname(),
x))
if op.is_guard():
- faildescr = op.descr
+ faildescr = op.getdescr()
assert isinstance(faildescr, history.AbstractFailDescr)
faildescr._fail_args_types = []
- for box in op.fail_args:
+ for box in op.getfailargs():
if box is None:
type = history.HOLE
else:
@@ -185,7 +186,7 @@
fail_index = self.get_fail_descr_number(faildescr)
index = llimpl.compile_add_fail(c, fail_index)
faildescr._compiled_fail = c, index
- for box in op.fail_args:
+ for box in op.getfailargs():
if box is not None:
llimpl.compile_add_fail_arg(c, var2index[box])
else:
@@ -203,13 +204,13 @@
x))
op = operations[-1]
assert op.is_final()
- if op.opnum == rop.JUMP:
- targettoken = op.descr
+ if op.getopnum() == rop.JUMP:
+ targettoken = op.getdescr()
assert isinstance(targettoken, history.LoopToken)
compiled_version = targettoken._llgraph_compiled_version
llimpl.compile_add_jump_target(c, compiled_version)
- elif op.opnum == rop.FINISH:
- faildescr = op.descr
+ elif op.getopnum() == rop.FINISH:
+ faildescr = op.getdescr()
index = self.get_fail_descr_number(faildescr)
llimpl.compile_add_fail(c, index)
else:
@@ -280,7 +281,7 @@
def __init__(self, *args, **kwds):
BaseCPU.__init__(self, *args, **kwds)
self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr')
-
+
def fielddescrof(self, S, fieldname):
ofs, size = symbolic.get_field_token(S, fieldname)
token = history.getkind(getattr(S, fieldname))
@@ -504,7 +505,7 @@
return ootype.cast_to_object(e)
else:
return ootype.NULL
-
+
def get_exc_value(self):
if llimpl._last_exception:
earg = llimpl._last_exception.args[1]
@@ -580,7 +581,7 @@
x = descr.callmeth(selfbox, argboxes)
# XXX: return None if METH.RESULT is Void
return x
-
+
def make_getargs(ARGS):
argsiter = unrolling_iterable(ARGS)
@@ -612,7 +613,7 @@
class KeyManager(object):
"""
Helper class to convert arbitrary dictionary keys to integers.
- """
+ """
def __init__(self):
self.keys = {}
@@ -695,7 +696,7 @@
self.ARRAY = ARRAY = ootype.Array(TYPE)
def create():
return boxresult(TYPE, ootype.new(TYPE))
-
+
def create_array(lengthbox):
n = lengthbox.getint()
return boxresult(ARRAY, ootype.oonewarray(ARRAY, n))
@@ -757,7 +758,7 @@
obj = objbox.getref(TYPE)
value = unwrap(T, valuebox)
setattr(obj, fieldname, value)
-
+
self.getfield = getfield
self.setfield = setfield
self._is_pointer_field = (history.getkind(T) == 'ref')
Modified: pypy/trunk/pypy/jit/backend/llsupport/gc.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/llsupport/gc.py (original)
+++ pypy/trunk/pypy/jit/backend/llsupport/gc.py Wed Sep 22 14:17:16 2010
@@ -559,12 +559,12 @@
#
newops = []
for op in operations:
- if op.opnum == rop.DEBUG_MERGE_POINT:
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
continue
# ---------- replace ConstPtrs with GETFIELD_RAW ----------
# xxx some performance issue here
- for i in range(len(op.args)):
- v = op.args[i]
+ for i in range(op.numargs()):
+ v = op.getarg(i)
if isinstance(v, ConstPtr) and bool(v.value):
addr = self.gcrefs.get_address_of_gcref(v.value)
# ^^^even for non-movable objects, to record their presence
@@ -574,23 +574,21 @@
newops.append(ResOperation(rop.GETFIELD_RAW,
[ConstInt(addr)], box,
self.single_gcref_descr))
- op.args[i] = box
+ op.setarg(i, box)
# ---------- write barrier for SETFIELD_GC ----------
- if op.opnum == rop.SETFIELD_GC:
- v = op.args[1]
+ if op.getopnum() == rop.SETFIELD_GC:
+ v = op.getarg(1)
if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
bool(v.value)): # store a non-NULL
- self._gen_write_barrier(newops, op.args[0], v)
- op = ResOperation(rop.SETFIELD_RAW, op.args, None,
- descr=op.descr)
+ self._gen_write_barrier(newops, op.getarg(0), v)
+ op = op.copy_and_change(rop.SETFIELD_RAW)
# ---------- write barrier for SETARRAYITEM_GC ----------
- if op.opnum == rop.SETARRAYITEM_GC:
- v = op.args[2]
+ if op.getopnum() == rop.SETARRAYITEM_GC:
+ v = op.getarg(2)
if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
bool(v.value)): # store a non-NULL
- self._gen_write_barrier(newops, op.args[0], v)
- op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None,
- descr=op.descr)
+ self._gen_write_barrier(newops, op.getarg(0), v)
+ op = op.copy_and_change(rop.SETARRAYITEM_RAW)
# ----------
newops.append(op)
del operations[:]
Modified: pypy/trunk/pypy/jit/backend/llsupport/regalloc.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/llsupport/regalloc.py (original)
+++ pypy/trunk/pypy/jit/backend/llsupport/regalloc.py Wed Sep 22 14:17:16 2010
@@ -81,6 +81,10 @@
for v in vars:
self.possibly_free_var(v)
+ def possibly_free_vars_for_op(self, op):
+ for i in range(op.numargs()):
+ self.possibly_free_var(op.getarg(i))
+
def _check_invariants(self):
if not we_are_translated():
# make sure no duplicates
Modified: pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py (original)
+++ pypy/trunk/pypy/jit/backend/llsupport/test/test_gc.py Wed Sep 22 14:17:16 2010
@@ -258,18 +258,18 @@
gc_ll_descr._gen_write_barrier(newops, v_base, v_value)
assert llop1.record == []
assert len(newops) == 1
- assert newops[0].opnum == rop.COND_CALL_GC_WB
- assert newops[0].args[0] == v_base
- assert newops[0].args[1] == v_value
+ assert newops[0].getopnum() == rop.COND_CALL_GC_WB
+ assert newops[0].getarg(0) == v_base
+ assert newops[0].getarg(1) == v_value
assert newops[0].result is None
- wbdescr = newops[0].descr
+ wbdescr = newops[0].getdescr()
assert isinstance(wbdescr.jit_wb_if_flag, int)
assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int)
assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int)
def test_get_rid_of_debug_merge_point(self):
operations = [
- ResOperation(rop.DEBUG_MERGE_POINT, [], None),
+ ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None),
]
gc_ll_descr = self.gc_ll_descr
gc_ll_descr.rewrite_assembler(None, operations)
@@ -298,13 +298,14 @@
gc_ll_descr.gcrefs = MyFakeGCRefList()
gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations)
assert len(operations) == 2
- assert operations[0].opnum == rop.GETFIELD_RAW
- assert operations[0].args == [ConstInt(43)]
- assert operations[0].descr == gc_ll_descr.single_gcref_descr
+ assert operations[0].getopnum() == rop.GETFIELD_RAW
+ assert operations[0].getarg(0) == ConstInt(43)
+ assert operations[0].getdescr() == gc_ll_descr.single_gcref_descr
v_box = operations[0].result
assert isinstance(v_box, BoxPtr)
- assert operations[1].opnum == rop.PTR_EQ
- assert operations[1].args == [v_random_box, v_box]
+ assert operations[1].getopnum() == rop.PTR_EQ
+ assert operations[1].getarg(0) == v_random_box
+ assert operations[1].getarg(1) == v_box
assert operations[1].result == v_result
def test_rewrite_assembler_1_cannot_move(self):
@@ -336,8 +337,9 @@
finally:
rgc.can_move = old_can_move
assert len(operations) == 1
- assert operations[0].opnum == rop.PTR_EQ
- assert operations[0].args == [v_random_box, ConstPtr(s_gcref)]
+ assert operations[0].getopnum() == rop.PTR_EQ
+ assert operations[0].getarg(0) == v_random_box
+ assert operations[0].getarg(1) == ConstPtr(s_gcref)
assert operations[0].result == v_result
# check that s_gcref gets added to the list anyway, to make sure
# that the GC sees it
@@ -356,14 +358,15 @@
gc_ll_descr.rewrite_assembler(self.fake_cpu, operations)
assert len(operations) == 2
#
- assert operations[0].opnum == rop.COND_CALL_GC_WB
- assert operations[0].args[0] == v_base
- assert operations[0].args[1] == v_value
+ assert operations[0].getopnum() == rop.COND_CALL_GC_WB
+ assert operations[0].getarg(0) == v_base
+ assert operations[0].getarg(1) == v_value
assert operations[0].result is None
#
- assert operations[1].opnum == rop.SETFIELD_RAW
- assert operations[1].args == [v_base, v_value]
- assert operations[1].descr == field_descr
+ assert operations[1].getopnum() == rop.SETFIELD_RAW
+ assert operations[1].getarg(0) == v_base
+ assert operations[1].getarg(1) == v_value
+ assert operations[1].getdescr() == field_descr
def test_rewrite_assembler_3(self):
# check write barriers before SETARRAYITEM_GC
@@ -379,11 +382,13 @@
gc_ll_descr.rewrite_assembler(self.fake_cpu, operations)
assert len(operations) == 2
#
- assert operations[0].opnum == rop.COND_CALL_GC_WB
- assert operations[0].args[0] == v_base
- assert operations[0].args[1] == v_value
+ assert operations[0].getopnum() == rop.COND_CALL_GC_WB
+ assert operations[0].getarg(0) == v_base
+ assert operations[0].getarg(1) == v_value
assert operations[0].result is None
#
- assert operations[1].opnum == rop.SETARRAYITEM_RAW
- assert operations[1].args == [v_base, v_index, v_value]
- assert operations[1].descr == array_descr
+ assert operations[1].getopnum() == rop.SETARRAYITEM_RAW
+ assert operations[1].getarg(0) == v_base
+ assert operations[1].getarg(1) == v_index
+ assert operations[1].getarg(2) == v_value
+ assert operations[1].getdescr() == array_descr
Modified: pypy/trunk/pypy/jit/backend/llvm/compile.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/llvm/compile.py (original)
+++ pypy/trunk/pypy/jit/backend/llvm/compile.py Wed Sep 22 14:17:16 2010
@@ -107,7 +107,7 @@
# store away the exception into self.backup_exc_xxx, *unless* the
# branch starts with a further GUARD_EXCEPTION/GUARD_NO_EXCEPTION.
if exc:
- opnum = operations[0].opnum
+ opnum = operations[0].getopnum()
if opnum not in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION):
self._store_away_exception()
# Normal handling of the operations follows.
@@ -115,7 +115,7 @@
self._generate_op(op)
def _generate_op(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for i, name in all_operations:
if opnum == i:
meth = getattr(self, name)
@@ -475,7 +475,7 @@
return location
def generate_GETFIELD_GC(self, op):
- loc = self._generate_field_gep(op.args[0], op.descr)
+ loc = self._generate_field_gep(op.args[0], op.getdescr())
self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "")
generate_GETFIELD_GC_PURE = generate_GETFIELD_GC
@@ -483,7 +483,7 @@
generate_GETFIELD_RAW_PURE = generate_GETFIELD_GC
def generate_SETFIELD_GC(self, op):
- fielddescr = op.descr
+ fielddescr = op.getdescr()
loc = self._generate_field_gep(op.args[0], fielddescr)
assert isinstance(fielddescr, FieldDescr)
getarg = self.cpu.getarg_by_index[fielddescr.size_index]
@@ -491,7 +491,7 @@
llvm_rffi.LLVMBuildStore(self.builder, value_ref, loc, "")
def generate_CALL(self, op):
- calldescr = op.descr
+ calldescr = op.getdescr()
assert isinstance(calldescr, CallDescr)
ty_function_ptr = self.cpu.get_calldescr_ty_function_ptr(calldescr)
v = op.args[0]
@@ -579,7 +579,7 @@
self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "")
def generate_ARRAYLEN_GC(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
self._generate_len(op, arraydescr.ty_array_ptr,
self.cpu.const_array_index_length)
@@ -598,7 +598,7 @@
return location
def _generate_array_gep(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
location = self._generate_gep(op, arraydescr.ty_array_ptr,
self.cpu.const_array_index_array)
@@ -612,7 +612,7 @@
def generate_SETARRAYITEM_GC(self, op):
loc = self._generate_array_gep(op)
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
getarg = self.cpu.getarg_by_index[arraydescr.itemsize_index]
value_ref = getarg(self, op.args[2])
@@ -660,7 +660,7 @@
return res
def generate_NEW(self, op):
- sizedescr = op.descr
+ sizedescr = op.getdescr()
assert isinstance(sizedescr, SizeDescr)
res = self._generate_new(self.cpu._make_const_int(sizedescr.size))
self.vars[op.result] = res
@@ -695,7 +695,7 @@
self.vars[op.result] = res
def generate_NEW_ARRAY(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
self._generate_new_array(op, arraydescr.ty_array_ptr,
self.cpu._make_const_int(arraydescr.itemsize),
Modified: pypy/trunk/pypy/jit/backend/test/runner_test.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/test/runner_test.py (original)
+++ pypy/trunk/pypy/jit/backend/test/runner_test.py Wed Sep 22 14:17:16 2010
@@ -1,5 +1,6 @@
import py, sys, random, os, struct, operator
from pypy.jit.metainterp.history import (AbstractFailDescr,
+ AbstractDescr,
BasicFailDescr,
BoxInt, Box, BoxPtr,
LoopToken,
@@ -39,7 +40,7 @@
else:
raise NotImplementedError(box)
res = self.cpu.execute_token(looptoken)
- if res is operations[-1].descr:
+ if res is operations[-1].getdescr():
self.guard_failed = False
else:
self.guard_failed = True
@@ -74,10 +75,11 @@
ResOperation(rop.FINISH, results, None,
descr=BasicFailDescr(0))]
if operations[0].is_guard():
- operations[0].fail_args = []
+ operations[0].setfailargs([])
if not descr:
descr = BasicFailDescr(1)
- operations[0].descr = descr
+ if descr is not None:
+ operations[0].setdescr(descr)
inputargs = []
for box in valueboxes:
if isinstance(box, Box) and box not in inputargs:
@@ -116,7 +118,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [i1]
+ operations[2].setfailargs([i1])
self.cpu.compile_loop(inputargs, operations, looptoken)
self.cpu.set_future_value_int(0, 2)
@@ -137,7 +139,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [None, None, i1, None]
+ operations[2].setfailargs([None, None, i1, None])
self.cpu.compile_loop(inputargs, operations, looptoken)
self.cpu.set_future_value_int(0, 2)
@@ -160,7 +162,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [i1]
+ operations[2].setfailargs([i1])
wr_i1 = weakref.ref(i1)
wr_guard = weakref.ref(operations[2])
self.cpu.compile_loop(inputargs, operations, looptoken)
@@ -184,7 +186,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [i1]
+ operations[2].setfailargs([i1])
self.cpu.compile_loop(inputargs, operations, looptoken)
i1b = BoxInt()
@@ -194,7 +196,7 @@
ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
]
- bridge[1].fail_args = [i1b]
+ bridge[1].setfailargs([i1b])
self.cpu.compile_bridge(faildescr1, [i1b], bridge)
@@ -218,7 +220,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [None, i1, None]
+ operations[2].setfailargs([None, i1, None])
self.cpu.compile_loop(inputargs, operations, looptoken)
i1b = BoxInt()
@@ -228,7 +230,7 @@
ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
]
- bridge[1].fail_args = [i1b]
+ bridge[1].setfailargs([i1b])
self.cpu.compile_bridge(faildescr1, [i1b], bridge)
@@ -251,7 +253,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [None, i1, None]
+ operations[2].setfailargs([None, i1, None])
self.cpu.compile_loop(inputargs, operations, looptoken)
self.cpu.set_future_value_int(0, 2)
@@ -317,7 +319,7 @@
descr=BasicFailDescr()),
ResOperation(rop.JUMP, [z, t], None, descr=looptoken),
]
- operations[-2].fail_args = [t, z]
+ operations[-2].setfailargs([t, z])
cpu.compile_loop([x, y], operations, looptoken)
self.cpu.set_future_value_int(0, 0)
self.cpu.set_future_value_int(1, 10)
@@ -363,7 +365,7 @@
ResOperation(rop.FINISH, [v_res], None,
descr=BasicFailDescr(2)),
]
- ops[1].fail_args = []
+ ops[1].setfailargs([])
else:
v_exc = self.cpu.ts.BoxRef()
ops = [
@@ -372,7 +374,7 @@
descr=BasicFailDescr(1)),
ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)),
]
- ops[1].fail_args = [v_res]
+ ops[1].setfailargs([v_res])
#
looptoken = LoopToken()
self.cpu.compile_loop([v1, v2], ops, looptoken)
@@ -909,8 +911,8 @@
ResOperation(rop.GUARD_TRUE, [i2], None),
ResOperation(rop.JUMP, jumpargs, None, descr=looptoken),
]
- operations[2].fail_args = inputargs[:]
- operations[2].descr = faildescr
+ operations[2].setfailargs(inputargs[:])
+ operations[2].setdescr(faildescr)
#
self.cpu.compile_loop(inputargs, operations, looptoken)
#
@@ -975,7 +977,7 @@
ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1),
ResOperation(rop.FINISH, fboxes, None, descr=faildescr2),
]
- operations[-2].fail_args = fboxes
+ operations[-2].setfailargs(fboxes)
looptoken = LoopToken()
self.cpu.compile_loop(fboxes, operations, looptoken)
@@ -1098,7 +1100,7 @@
descr=BasicFailDescr(4)),
ResOperation(rop.FINISH, [], None,
descr=BasicFailDescr(5))]
- operations[1].fail_args = []
+ operations[1].setfailargs([])
looptoken = LoopToken()
# Use "set" to unique-ify inputargs
unique_testcase_list = list(set(testcase))
@@ -1412,7 +1414,7 @@
FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void)
func_ptr = llhelper(lltype.Ptr(FUNC), func_void)
funcbox = self.get_funcbox(self.cpu, func_ptr)
- class WriteBarrierDescr:
+ class WriteBarrierDescr(AbstractDescr):
jit_wb_if_flag = 4096
jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10')
jit_wb_if_flag_singlebyte = 0x10
@@ -1462,7 +1464,7 @@
ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0))
]
- ops[2].fail_args = [i1, i0]
+ ops[2].setfailargs([i1, i0])
looptoken = LoopToken()
self.cpu.compile_loop([i0, i1], ops, looptoken)
self.cpu.set_future_value_int(0, 20)
@@ -1506,7 +1508,7 @@
ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0))
]
- ops[2].fail_args = [i1, i2, i0]
+ ops[2].setfailargs([i1, i2, i0])
looptoken = LoopToken()
self.cpu.compile_loop([i0, i1], ops, looptoken)
self.cpu.set_future_value_int(0, 20)
@@ -1551,7 +1553,7 @@
ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0))
]
- ops[2].fail_args = [i1, f2, i0]
+ ops[2].setfailargs([i1, f2, i0])
looptoken = LoopToken()
self.cpu.compile_loop([i0, i1], ops, looptoken)
self.cpu.set_future_value_int(0, 20)
@@ -1824,7 +1826,7 @@
f2 = float_add(f0, f1)
finish(f2)'''
loop = parse(ops)
- done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr)
+ done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr())
looptoken = LoopToken()
looptoken.outermost_jitdriver_sd = FakeJitDriverSD()
self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
Modified: pypy/trunk/pypy/jit/backend/test/test_ll_random.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/test/test_ll_random.py (original)
+++ pypy/trunk/pypy/jit/backend/test/test_ll_random.py Wed Sep 22 14:17:16 2010
@@ -464,7 +464,7 @@
self.put(builder, args, descr)
op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None,
descr=BasicFailDescr())
- op.fail_args = fail_subset
+ op.setfailargs(fail_subset)
builder.loop.operations.append(op)
# 5. Non raising-call and GUARD_EXCEPTION
@@ -486,7 +486,7 @@
exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu)
op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
descr=BasicFailDescr())
- op.fail_args = builder.subset_of_intvars(r)
+ op.setfailargs(builder.subset_of_intvars(r))
op._exc_box = None
builder.should_fail_by = op
builder.guard_op = op
@@ -507,7 +507,7 @@
exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
descr=BasicFailDescr())
- op.fail_args = fail_subset
+ op.setfailargs(fail_subset)
builder.loop.operations.append(op)
# 4. raising call and guard_no_exception
@@ -524,7 +524,7 @@
op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(),
descr=BasicFailDescr())
op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
- op.fail_args = builder.subset_of_intvars(r)
+ op.setfailargs(builder.subset_of_intvars(r))
builder.should_fail_by = op
builder.guard_op = op
builder.loop.operations.append(op)
@@ -548,7 +548,7 @@
op = ResOperation(rop.GUARD_EXCEPTION, [other_box], BoxPtr(),
descr=BasicFailDescr())
op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
- op.fail_args = builder.subset_of_intvars(r)
+ op.setfailargs(builder.subset_of_intvars(r))
builder.should_fail_by = op
builder.guard_op = op
builder.loop.operations.append(op)
Modified: pypy/trunk/pypy/jit/backend/test/test_random.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/test/test_random.py (original)
+++ pypy/trunk/pypy/jit/backend/test/test_random.py Wed Sep 22 14:17:16 2010
@@ -86,7 +86,7 @@
def process_operation(self, s, op, names, subops):
args = []
- for v in op.args:
+ for v in op.getarglist():
if v in names:
args.append(names[v])
## elif isinstance(v, ConstAddr):
@@ -105,11 +105,11 @@
args.append('ConstInt(%d)' % v.value)
else:
raise NotImplementedError(v)
- if op.descr is None:
+ if op.getdescr() is None:
descrstr = ''
else:
try:
- descrstr = ', ' + op.descr._random_info
+ descrstr = ', ' + op.getdescr()._random_info
except AttributeError:
descrstr = ', descr=...'
print >>s, ' ResOperation(rop.%s, [%s], %s%s),' % (
@@ -129,7 +129,7 @@
def print_loop_prebuilt(ops):
for op in ops:
- for arg in op.args:
+ for arg in op.getarglist():
if isinstance(arg, ConstPtr):
if arg not in names:
writevar(arg, 'const_ptr')
@@ -191,7 +191,7 @@
if self.should_fail_by is None:
fail_args = self.loop.operations[-1].args
else:
- fail_args = self.should_fail_by.fail_args
+ fail_args = self.should_fail_by.getfailargs()
for i, v in enumerate(fail_args):
if isinstance(v, (BoxFloat, ConstFloat)):
print >>s, (' assert cpu.get_latest_value_float(%d) == %r'
@@ -284,8 +284,8 @@
builder.intvars[:] = original_intvars
else:
op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None)
- op.descr = BasicFailDescr()
- op.fail_args = fail_subset
+ op.setdescr(BasicFailDescr())
+ op.setfailargs(fail_subset)
builder.loop.operations.append(op)
class BinaryOvfOperation(AbstractOvfOperation, BinaryOperation):
@@ -345,8 +345,8 @@
def produce_into(self, builder, r):
op, passing = self.gen_guard(builder, r)
builder.loop.operations.append(op)
- op.descr = BasicFailDescr()
- op.fail_args = builder.subset_of_intvars(r)
+ op.setdescr(BasicFailDescr())
+ op.setfailargs(builder.subset_of_intvars(r))
if not passing:
builder.should_fail_by = op
builder.guard_op = op
@@ -553,7 +553,7 @@
endvars = []
used_later = {}
for op in loop.operations:
- for v in op.args:
+ for v in op.getarglist():
used_later[v] = True
for v in startvars:
if v not in used_later:
@@ -577,11 +577,11 @@
def get_fail_args(self):
if self.should_fail_by.is_guard():
- assert self.should_fail_by.fail_args is not None
- return self.should_fail_by.fail_args
+ assert self.should_fail_by.getfailargs() is not None
+ return self.should_fail_by.getfailargs()
else:
- assert self.should_fail_by.opnum == rop.FINISH
- return self.should_fail_by.args
+ assert self.should_fail_by.getopnum() == rop.FINISH
+ return self.should_fail_by.getarglist()
def clear_state(self):
for v, S, fields in self.prebuilt_ptr_consts:
@@ -606,7 +606,7 @@
else:
raise NotImplementedError(box)
fail = cpu.execute_token(self.loop.token)
- assert fail is self.should_fail_by.descr
+ assert fail is self.should_fail_by.getdescr()
for i, v in enumerate(self.get_fail_args()):
if isinstance(v, (BoxFloat, ConstFloat)):
value = cpu.get_latest_value_float(i)
@@ -620,7 +620,7 @@
exc = cpu.grab_exc_value()
if (self.guard_op is not None and
self.guard_op.is_guard_exception()):
- if self.guard_op.opnum == rop.GUARD_NO_EXCEPTION:
+ if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION:
assert exc
else:
assert not exc
@@ -633,26 +633,26 @@
else:
op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box],
BoxPtr())
- op.descr = BasicFailDescr()
- op.fail_args = []
+ op.setdescr(BasicFailDescr())
+ op.setfailargs([])
return op
if self.dont_generate_more:
return False
r = self.r
guard_op = self.guard_op
- fail_args = guard_op.fail_args
- fail_descr = guard_op.descr
+ fail_args = guard_op.getfailargs()
+ fail_descr = guard_op.getdescr()
op = self.should_fail_by
- if not op.fail_args:
+ if not op.getfailargs():
return False
# generate the branch: a sequence of operations that ends in a FINISH
subloop = DummyLoop([])
if guard_op.is_guard_exception():
subloop.operations.append(exc_handling(guard_op))
bridge_builder = self.builder.fork(self.builder.cpu, subloop,
- op.fail_args[:])
- self.generate_ops(bridge_builder, r, subloop, op.fail_args[:])
+ op.getfailargs()[:])
+ self.generate_ops(bridge_builder, r, subloop, op.getfailargs()[:])
# note that 'self.guard_op' now points to the guard that will fail in
# this new bridge, while 'guard_op' still points to the guard that
# has just failed.
Modified: pypy/trunk/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/x86/assembler.py (original)
+++ pypy/trunk/pypy/jit/backend/x86/assembler.py Wed Sep 22 14:17:16 2010
@@ -390,8 +390,8 @@
def _find_debug_merge_point(self, operations):
for op in operations:
- if op.opnum == rop.DEBUG_MERGE_POINT:
- funcname = op.args[0]._get_str()
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
+ funcname = op.getarg(0)._get_str()
break
else:
funcname = "<loop %d>" % len(self.loop_run_counters)
@@ -684,25 +684,25 @@
self.mc.POP(loc)
def regalloc_perform(self, op, arglocs, resloc):
- genop_list[op.opnum](self, op, arglocs, resloc)
+ genop_list[op.getopnum()](self, op, arglocs, resloc)
def regalloc_perform_discard(self, op, arglocs):
- genop_discard_list[op.opnum](self, op, arglocs)
+ genop_discard_list[op.getopnum()](self, op, arglocs)
def regalloc_perform_with_guard(self, op, guard_op, faillocs,
arglocs, resloc, current_depths):
- faildescr = guard_op.descr
+ faildescr = guard_op.getdescr()
assert isinstance(faildescr, AbstractFailDescr)
faildescr._x86_current_depths = current_depths
- failargs = guard_op.fail_args
- guard_opnum = guard_op.opnum
+ failargs = guard_op.getfailargs()
+ guard_opnum = guard_op.getopnum()
guard_token = self.implement_guard_recovery(guard_opnum,
faildescr, failargs,
faillocs)
if op is None:
dispatch_opnum = guard_opnum
else:
- dispatch_opnum = op.opnum
+ dispatch_opnum = op.getopnum()
res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token,
arglocs, resloc)
faildescr._x86_adr_jump_offset = res
@@ -728,7 +728,7 @@
def _cmpop(cond, rev_cond):
def genop_cmp(self, op, arglocs, result_loc):
rl = result_loc.lowest8bits()
- if isinstance(op.args[0], Const):
+ if isinstance(op.getarg(0), Const):
self.mc.CMP(arglocs[1], arglocs[0])
self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value)
else:
@@ -758,8 +758,8 @@
def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond):
def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc):
- guard_opnum = guard_op.opnum
- if isinstance(op.args[0], Const):
+ guard_opnum = guard_op.getopnum()
+ if isinstance(op.getarg(0), Const):
self.mc.CMP(arglocs[1], arglocs[0])
if guard_opnum == rop.GUARD_FALSE:
return self.implement_guard(guard_token, rev_cond)
@@ -776,7 +776,7 @@
def _cmpop_guard_float(cond, false_cond, need_jp):
def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs,
result_loc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.UCOMISD(arglocs[0], arglocs[1])
# 16 is enough space for the rel8 jumps below and the rel32
# jump in implement_guard
@@ -945,7 +945,7 @@
genop_guard_float_ge = _cmpop_guard_float("AE", "B", False)
def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.UCOMISD(arglocs[0], arglocs[1])
# 16 is enough space for the rel8 jumps below and the rel32
# jump in implement_guard
@@ -973,7 +973,7 @@
self.mc.CVTSI2SD(resloc, arglocs[0])
def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.CMP(arglocs[0], imm(0))
if guard_opnum == rop.GUARD_TRUE:
return self.implement_guard(guard_token, 'Z')
@@ -987,7 +987,7 @@
self.mc.MOVZX8(resloc, rl)
def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, resloc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.CMP(arglocs[0], imm(0))
if guard_opnum == rop.GUARD_TRUE:
return self.implement_guard(guard_token, 'NZ')
@@ -1123,7 +1123,7 @@
assert isinstance(baseofs, ImmedLoc)
assert isinstance(scale_loc, ImmedLoc)
dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value)
- if op.args[2].type == FLOAT:
+ if op.getarg(2).type == FLOAT:
self.mc.MOVSD(dest_addr, value_loc)
else:
if IS_X86_64 and scale_loc.value == 3:
@@ -1219,7 +1219,7 @@
return addr
def _gen_guard_overflow(self, guard_op, guard_token):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
if guard_opnum == rop.GUARD_NO_OVERFLOW:
return self.implement_guard(guard_token, 'O')
elif guard_opnum == rop.GUARD_OVERFLOW:
@@ -1247,8 +1247,8 @@
genop_guard_guard_isnull = genop_guard_guard_false
def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2):
- if guard_op.args[0].type == FLOAT:
- assert guard_op.args[1].type == FLOAT
+ if guard_op.getarg(0).type == FLOAT:
+ assert guard_op.getarg(1).type == FLOAT
self.mc.UCOMISD(locs[0], locs[1])
else:
self.mc.CMP(locs[0], locs[1])
@@ -1639,8 +1639,8 @@
assert isinstance(sizeloc, ImmedLoc)
size = sizeloc.value
- if isinstance(op.args[0], Const):
- x = imm(op.args[0].getint())
+ if isinstance(op.getarg(0), Const):
+ x = imm(op.getarg(0).getint())
else:
x = arglocs[1]
if x is eax:
@@ -1659,7 +1659,7 @@
def genop_guard_call_may_force(self, op, guard_op, guard_token,
arglocs, result_loc):
- faildescr = guard_op.descr
+ faildescr = guard_op.getdescr()
fail_index = self.cpu.get_fail_descr_number(faildescr)
self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
self.genop_call(op, arglocs, result_loc)
@@ -1668,10 +1668,10 @@
def genop_guard_call_assembler(self, op, guard_op, guard_token,
arglocs, result_loc):
- faildescr = guard_op.descr
+ faildescr = guard_op.getdescr()
fail_index = self.cpu.get_fail_descr_number(faildescr)
self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, LoopToken)
assert len(arglocs) - 2 == len(descr._x86_arglocs[0])
#
@@ -1756,7 +1756,7 @@
def genop_discard_cond_call_gc_wb(self, op, arglocs):
# use 'mc._mc' directly instead of 'mc', to avoid
# bad surprizes if the code buffer is mostly full
- descr = op.descr
+ descr = op.getdescr()
if we_are_translated():
cls = self.cpu.gc_ll_descr.has_write_barrier_class()
assert cls is not None and isinstance(descr, cls)
Modified: pypy/trunk/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/x86/regalloc.py (original)
+++ pypy/trunk/pypy/jit/backend/x86/regalloc.py Wed Sep 22 14:17:16 2010
@@ -234,6 +234,12 @@
else:
self.rm.possibly_free_var(var)
+ def possibly_free_vars_for_op(self, op):
+ for i in range(op.numargs()):
+ var = op.getarg(i)
+ if var is not None: # xxx kludgy
+ self.possibly_free_var(var)
+
def possibly_free_vars(self, vars):
for var in vars:
if var is not None: # xxx kludgy
@@ -262,12 +268,12 @@
selected_reg, need_lower_byte)
def _compute_loop_consts(self, inputargs, jump, looptoken):
- if jump.opnum != rop.JUMP or jump.descr is not looptoken:
+ if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken:
loop_consts = {}
else:
loop_consts = {}
for i in range(len(inputargs)):
- if inputargs[i] is jump.args[i]:
+ if inputargs[i] is jump.getarg(i):
loop_consts[inputargs[i]] = i
return loop_consts
@@ -312,7 +318,7 @@
self.assembler.regalloc_perform(op, arglocs, result_loc)
def locs_for_fail(self, guard_op):
- return [self.loc(v) for v in guard_op.fail_args]
+ return [self.loc(v) for v in guard_op.getfailargs()]
def perform_with_guard(self, op, guard_op, arglocs, result_loc):
faillocs = self.locs_for_fail(guard_op)
@@ -324,7 +330,7 @@
current_depths)
if op.result is not None:
self.possibly_free_var(op.result)
- self.possibly_free_vars(guard_op.fail_args)
+ self.possibly_free_vars(guard_op.getfailargs())
def perform_guard(self, guard_op, arglocs, result_loc):
faillocs = self.locs_for_fail(guard_op)
@@ -338,7 +344,7 @@
self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
result_loc,
current_depths)
- self.possibly_free_vars(guard_op.fail_args)
+ self.possibly_free_vars(guard_op.getfailargs())
def PerformDiscard(self, op, arglocs):
if not we_are_translated():
@@ -346,24 +352,24 @@
self.assembler.regalloc_perform_discard(op, arglocs)
def can_merge_with_next_guard(self, op, i, operations):
- if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER:
- assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED
+ if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER:
+ assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED
return True
if not op.is_comparison():
if op.is_ovf():
- if (operations[i + 1].opnum != rop.GUARD_NO_OVERFLOW and
- operations[i + 1].opnum != rop.GUARD_OVERFLOW):
+ if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and
+ operations[i + 1].getopnum() != rop.GUARD_OVERFLOW):
print "int_xxx_ovf not followed by guard_(no)_overflow"
raise AssertionError
return True
return False
- if (operations[i + 1].opnum != rop.GUARD_TRUE and
- operations[i + 1].opnum != rop.GUARD_FALSE):
+ if (operations[i + 1].getopnum() != rop.GUARD_TRUE and
+ operations[i + 1].getopnum() != rop.GUARD_FALSE):
return False
- if operations[i + 1].args[0] is not op.result:
+ if operations[i + 1].getarg(0) is not op.result:
return False
if (self.longevity[op.result][1] > i + 1 or
- op.result in operations[i + 1].fail_args):
+ op.result in operations[i + 1].getfailargs()):
return False
return True
@@ -376,13 +382,13 @@
self.xrm.position = i
if op.has_no_side_effect() and op.result not in self.longevity:
i += 1
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
continue
if self.can_merge_with_next_guard(op, i, operations):
- oplist_with_guard[op.opnum](self, op, operations[i + 1])
+ oplist_with_guard[op.getopnum()](self, op, operations[i + 1])
i += 1
else:
- oplist[op.opnum](self, op)
+ oplist[op.getopnum()](self, op)
if op.result is not None:
self.possibly_free_var(op.result)
self.rm._check_invariants()
@@ -402,19 +408,20 @@
op = operations[i]
if op.result is not None:
start_live[op.result] = i
- for arg in op.args:
+ for j in range(op.numargs()):
+ arg = op.getarg(j)
if isinstance(arg, Box):
if arg not in start_live:
- print "Bogus arg in operation %d at %d" % (op.opnum, i)
+ print "Bogus arg in operation %d at %d" % (op.getopnum(), i)
raise AssertionError
longevity[arg] = (start_live[arg], i)
if op.is_guard():
- for arg in op.fail_args:
+ for arg in op.getfailargs():
if arg is None: # hole
continue
assert isinstance(arg, Box)
if arg not in start_live:
- print "Bogus arg in guard %d at %d" % (op.opnum, i)
+ print "Bogus arg in guard %d at %d" % (op.getopnum(), i)
raise AssertionError
longevity[arg] = (start_live[arg], i)
for arg in inputargs:
@@ -432,9 +439,9 @@
return self.rm.loc(v)
def _consider_guard(self, op):
- loc = self.rm.make_sure_var_in_reg(op.args[0])
+ loc = self.rm.make_sure_var_in_reg(op.getarg(0))
self.perform_guard(op, [loc], None)
- self.rm.possibly_free_var(op.args[0])
+ self.rm.possibly_free_var(op.getarg(0))
consider_guard_true = _consider_guard
consider_guard_false = _consider_guard
@@ -442,52 +449,54 @@
consider_guard_isnull = _consider_guard
def consider_finish(self, op):
- locs = [self.loc(v) for v in op.args]
- locs_are_ref = [v.type == REF for v in op.args]
- fail_index = self.assembler.cpu.get_fail_descr_number(op.descr)
+ locs = [self.loc(op.getarg(i)) for i in range(op.numargs())]
+ locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())]
+ fail_index = self.assembler.cpu.get_fail_descr_number(op.getdescr())
self.assembler.generate_failure(fail_index, locs, self.exc,
locs_are_ref)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
def consider_guard_no_exception(self, op):
self.perform_guard(op, [], None)
def consider_guard_exception(self, op):
- loc = self.rm.make_sure_var_in_reg(op.args[0])
+ loc = self.rm.make_sure_var_in_reg(op.getarg(0))
box = TempBox()
- loc1 = self.rm.force_allocate_reg(box, op.args)
+ args = op.getarglist()
+ loc1 = self.rm.force_allocate_reg(box, args)
if op.result in self.longevity:
# this means, is it ever used
- resloc = self.rm.force_allocate_reg(op.result, op.args + [box])
+ resloc = self.rm.force_allocate_reg(op.result, args + [box])
else:
resloc = None
self.perform_guard(op, [loc, loc1], resloc)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
self.rm.possibly_free_var(box)
consider_guard_no_overflow = consider_guard_no_exception
consider_guard_overflow = consider_guard_no_exception
def consider_guard_value(self, op):
- x = self.make_sure_var_in_reg(op.args[0])
- y = self.loc(op.args[1])
+ x = self.make_sure_var_in_reg(op.getarg(0))
+ y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
def consider_guard_class(self, op):
- assert isinstance(op.args[0], Box)
- x = self.rm.make_sure_var_in_reg(op.args[0])
- y = self.loc(op.args[1])
+ assert isinstance(op.getarg(0), Box)
+ x = self.rm.make_sure_var_in_reg(op.getarg(0))
+ y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
consider_guard_nonnull_class = consider_guard_class
def _consider_binop_part(self, op):
- x = op.args[0]
- argloc = self.loc(op.args[1])
- loc = self.rm.force_result_in_reg(op.result, x, op.args)
- self.rm.possibly_free_var(op.args[1])
+ x = op.getarg(0)
+ argloc = self.loc(op.getarg(1))
+ args = op.getarglist()
+ loc = self.rm.force_result_in_reg(op.result, x, args)
+ self.rm.possibly_free_var(op.getarg(1))
return loc, argloc
def _consider_binop(self, op):
@@ -510,26 +519,27 @@
consider_int_add_ovf = _consider_binop_with_guard
def consider_int_neg(self, op):
- res = self.rm.force_result_in_reg(op.result, op.args[0])
+ res = self.rm.force_result_in_reg(op.result, op.getarg(0))
self.Perform(op, [res], res)
consider_int_invert = consider_int_neg
def consider_int_lshift(self, op):
- if isinstance(op.args[1], Const):
- loc2 = self.rm.convert_to_imm(op.args[1])
+ if isinstance(op.getarg(1), Const):
+ loc2 = self.rm.convert_to_imm(op.getarg(1))
else:
- loc2 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx)
- loc1 = self.rm.force_result_in_reg(op.result, op.args[0], op.args)
+ loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
+ args = op.getarglist()
+ loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), args)
self.Perform(op, [loc1, loc2], loc1)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
consider_int_rshift = consider_int_lshift
consider_uint_rshift = consider_int_lshift
def _consider_int_div_or_mod(self, op, resultreg, trashreg):
- l0 = self.rm.make_sure_var_in_reg(op.args[0], selected_reg=eax)
- l1 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx)
+ l0 = self.rm.make_sure_var_in_reg(op.getarg(0), selected_reg=eax)
+ l1 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
l2 = self.rm.force_allocate_reg(op.result, selected_reg=resultreg)
# the register (eax or edx) not holding what we are looking for
# will be just trash after that operation
@@ -538,7 +548,7 @@
assert l0 is eax
assert l1 is ecx
assert l2 is resultreg
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
self.rm.possibly_free_var(tmpvar)
def consider_int_mod(self, op):
@@ -552,17 +562,18 @@
consider_uint_floordiv = consider_int_floordiv
def _consider_compop(self, op, guard_op):
- vx = op.args[0]
- vy = op.args[1]
+ vx = op.getarg(0)
+ vy = op.getarg(1)
arglocs = [self.loc(vx), self.loc(vy)]
if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or
isinstance(vx, Const) or isinstance(vy, Const)):
pass
else:
arglocs[0] = self.rm.make_sure_var_in_reg(vx)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ self.rm.possibly_free_vars(args)
if guard_op is None:
- loc = self.rm.force_allocate_reg(op.result, op.args,
+ loc = self.rm.force_allocate_reg(op.result, args,
need_lower_byte=True)
self.Perform(op, arglocs, loc)
else:
@@ -582,10 +593,11 @@
consider_ptr_ne = _consider_compop
def _consider_float_op(self, op):
- loc1 = self.xrm.loc(op.args[1])
- loc0 = self.xrm.force_result_in_reg(op.result, op.args[0], op.args)
+ loc1 = self.xrm.loc(op.getarg(1))
+ args = op.getarglist()
+ loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), args)
self.Perform(op, [loc0, loc1], loc0)
- self.xrm.possibly_free_vars(op.args)
+ self.xrm.possibly_free_vars_for_op(op)
consider_float_add = _consider_float_op
consider_float_sub = _consider_float_op
@@ -593,11 +605,12 @@
consider_float_truediv = _consider_float_op
def _consider_float_cmp(self, op, guard_op):
- loc0 = self.xrm.make_sure_var_in_reg(op.args[0], op.args,
+ args = op.getarglist()
+ loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args,
imm_fine=False)
- loc1 = self.xrm.loc(op.args[1])
+ loc1 = self.xrm.loc(op.getarg(1))
arglocs = [loc0, loc1]
- self.xrm.possibly_free_vars(op.args)
+ self.xrm.possibly_free_vars_for_op(op)
if guard_op is None:
res = self.rm.force_allocate_reg(op.result, need_lower_byte=True)
self.Perform(op, arglocs, res)
@@ -612,26 +625,26 @@
consider_float_ge = _consider_float_cmp
def consider_float_neg(self, op):
- loc0 = self.xrm.force_result_in_reg(op.result, op.args[0])
+ loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0))
self.Perform(op, [loc0], loc0)
- self.xrm.possibly_free_var(op.args[0])
+ self.xrm.possibly_free_var(op.getarg(0))
def consider_float_abs(self, op):
- loc0 = self.xrm.force_result_in_reg(op.result, op.args[0])
+ loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0))
self.Perform(op, [loc0], loc0)
- self.xrm.possibly_free_var(op.args[0])
+ self.xrm.possibly_free_var(op.getarg(0))
def consider_cast_float_to_int(self, op):
- loc0 = self.xrm.make_sure_var_in_reg(op.args[0], imm_fine=False)
+ loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), imm_fine=False)
loc1 = self.rm.force_allocate_reg(op.result)
self.Perform(op, [loc0], loc1)
- self.xrm.possibly_free_var(op.args[0])
+ self.xrm.possibly_free_var(op.getarg(0))
def consider_cast_int_to_float(self, op):
- loc0 = self.rm.loc(op.args[0])
+ loc0 = self.rm.loc(op.getarg(0))
loc1 = self.xrm.force_allocate_reg(op.result)
self.Perform(op, [loc0], loc1)
- self.rm.possibly_free_var(op.args[0])
+ self.rm.possibly_free_var(op.getarg(0))
def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None):
save_all_regs = guard_not_forced_op is not None
@@ -650,11 +663,11 @@
self.Perform(op, arglocs, resloc)
def _consider_call(self, op, guard_not_forced_op=None):
- calldescr = op.descr
+ calldescr = op.getdescr()
assert isinstance(calldescr, BaseCallDescr)
- assert len(calldescr.arg_classes) == len(op.args) - 1
+ assert len(calldescr.arg_classes) == op.numargs() - 1
size = calldescr.get_result_size(self.translate_support_code)
- self._call(op, [imm(size)] + [self.loc(arg) for arg in op.args],
+ self._call(op, [imm(size)] + [self.loc(op.getarg(i)) for i in range(op.numargs())],
guard_not_forced_op=guard_not_forced_op)
def consider_call(self, op):
@@ -665,28 +678,29 @@
self._consider_call(op, guard_op)
def consider_call_assembler(self, op, guard_op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, LoopToken)
jd = descr.outermost_jitdriver_sd
assert jd is not None
size = jd.portal_calldescr.get_result_size(self.translate_support_code)
vable_index = jd.index_of_virtualizable
if vable_index >= 0:
- self.rm._sync_var(op.args[vable_index])
- vable = self.fm.loc(op.args[vable_index])
+ self.rm._sync_var(op.getarg(vable_index))
+ vable = self.fm.loc(op.getarg(vable_index))
else:
vable = imm(0)
self._call(op, [imm(size), vable] +
- [self.loc(arg) for arg in op.args],
+ [self.loc(op.getarg(i)) for i in range(op.numargs())],
guard_not_forced_op=guard_op)
def consider_cond_call_gc_wb(self, op):
assert op.result is None
- loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args)
+ args = op.getarglist()
+ loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args)
# ^^^ we force loc_newvalue in a reg (unless it's a Const),
# because it will be needed anyway by the following setfield_gc.
# It avoids loading it twice from the memory.
- loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args,
+ loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args,
imm_fine=False)
arglocs = [loc_base, loc_newvalue]
# add eax, ecx and edx as extra "arguments" to ensure they are
@@ -700,7 +714,7 @@
and self.rm.stays_alive(v)):
arglocs.append(reg)
self.PerformDiscard(op, arglocs)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
def _fastpath_malloc(self, op, descr):
assert isinstance(descr, BaseSizeDescr)
@@ -725,15 +739,15 @@
def consider_new(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
- if gc_ll_descr.can_inline_malloc(op.descr):
- self._fastpath_malloc(op, op.descr)
+ if gc_ll_descr.can_inline_malloc(op.getdescr()):
+ self._fastpath_malloc(op, op.getdescr())
else:
- args = gc_ll_descr.args_for_new(op.descr)
+ args = gc_ll_descr.args_for_new(op.getdescr())
arglocs = [imm(x) for x in args]
return self._call(op, arglocs)
def consider_new_with_vtable(self, op):
- classint = op.args[0].getint()
+ classint = op.getarg(0).getint()
descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint)
if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize):
self._fastpath_malloc(op, descrsize)
@@ -742,34 +756,34 @@
else:
args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize)
arglocs = [imm(x) for x in args]
- arglocs.append(self.loc(op.args[0]))
+ arglocs.append(self.loc(op.getarg(0)))
return self._call(op, arglocs)
def consider_newstr(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if gc_ll_descr.get_funcptr_for_newstr is not None:
# framework GC
- loc = self.loc(op.args[0])
+ loc = self.loc(op.getarg(0))
return self._call(op, [loc])
# boehm GC (XXX kill the following code at some point)
ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code)
assert itemsize == 1
- return self._malloc_varsize(ofs_items, ofs, 0, op.args[0],
+ return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0),
op.result)
def consider_newunicode(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if gc_ll_descr.get_funcptr_for_newunicode is not None:
# framework GC
- loc = self.loc(op.args[0])
+ loc = self.loc(op.getarg(0))
return self._call(op, [loc])
# boehm GC (XXX kill the following code at some point)
ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code)
if itemsize == 4:
- return self._malloc_varsize(ofs_items, ofs, 2, op.args[0],
+ return self._malloc_varsize(ofs_items, ofs, 2, op.getarg(0),
op.result)
elif itemsize == 2:
- return self._malloc_varsize(ofs_items, ofs, 1, op.args[0],
+ return self._malloc_varsize(ofs_items, ofs, 1, op.getarg(0),
op.result)
else:
assert False, itemsize
@@ -784,7 +798,7 @@
else:
tempbox = None
other_loc = imm(ofs_items + (v.getint() << scale))
- self._call(ResOperation(rop.NEW, [v], res_v),
+ self._call(ResOperation(rop.NEW, [], res_v),
[other_loc], [v])
loc = self.rm.make_sure_var_in_reg(v, [res_v])
assert self.loc(res_v) == eax
@@ -792,22 +806,22 @@
self.rm.possibly_free_var(v)
if tempbox is not None:
self.rm.possibly_free_var(tempbox)
- self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [], None),
+ self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None),
[eax, imm(ofs_length), imm(WORD), loc])
def consider_new_array(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if gc_ll_descr.get_funcptr_for_newarray is not None:
# framework GC
- args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr)
+ args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr())
arglocs = [imm(x) for x in args]
- arglocs.append(self.loc(op.args[0]))
+ arglocs.append(self.loc(op.getarg(0)))
return self._call(op, arglocs)
# boehm GC (XXX kill the following code at some point)
scale_of_field, basesize, ofs_length, _ = (
- self._unpack_arraydescr(op.descr))
+ self._unpack_arraydescr(op.getdescr()))
return self._malloc_varsize(basesize, ofs_length, scale_of_field,
- op.args[0], op.result)
+ op.getarg(0), op.result)
def _unpack_arraydescr(self, arraydescr):
assert isinstance(arraydescr, BaseArrayDescr)
@@ -829,50 +843,54 @@
return imm(ofs), imm(size), ptr
def consider_setfield_gc(self, op):
- ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr)
+ ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.getdescr())
assert isinstance(size_loc, ImmedLoc)
if size_loc.value == 1:
need_lower_byte = True
else:
need_lower_byte = False
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- value_loc = self.make_sure_var_in_reg(op.args[1], op.args,
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ value_loc = self.make_sure_var_in_reg(op.getarg(1), args,
need_lower_byte=need_lower_byte)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars(args)
self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc])
consider_setfield_raw = consider_setfield_gc
def consider_strsetitem(self, op):
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- value_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args,
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=True)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
self.PerformDiscard(op, [base_loc, ofs_loc, value_loc])
consider_unicodesetitem = consider_strsetitem
def consider_setarrayitem_gc(self, op):
- scale, ofs, _, ptr = self._unpack_arraydescr(op.descr)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
+ scale, ofs, _, ptr = self._unpack_arraydescr(op.getdescr())
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
if scale == 0:
need_lower_byte = True
else:
need_lower_byte = False
- value_loc = self.make_sure_var_in_reg(op.args[2], op.args,
+ value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- self.possibly_free_vars(op.args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ self.possibly_free_vars(args)
self.PerformDiscard(op, [base_loc, ofs_loc, value_loc,
imm(scale), imm(ofs)])
consider_setarrayitem_raw = consider_setarrayitem_gc
def consider_getfield_gc(self, op):
- ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- self.rm.possibly_free_vars(op.args)
+ ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr())
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ self.rm.possibly_free_vars(args)
result_loc = self.force_allocate_reg(op.result)
self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc)
@@ -881,10 +899,11 @@
consider_getfield_gc_pure = consider_getfield_gc
def consider_getarrayitem_gc(self, op):
- scale, ofs, _, _ = self._unpack_arraydescr(op.descr)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- self.rm.possibly_free_vars(op.args)
+ scale, ofs, _, _ = self._unpack_arraydescr(op.getdescr())
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.force_allocate_reg(op.result)
self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc)
@@ -893,8 +912,8 @@
def consider_int_is_true(self, op, guard_op):
# doesn't need arg to be in a register
- argloc = self.loc(op.args[0])
- self.rm.possibly_free_var(op.args[0])
+ argloc = self.loc(op.getarg(0))
+ self.rm.possibly_free_var(op.getarg(0))
if guard_op is not None:
self.perform_with_guard(op, guard_op, [argloc], None)
else:
@@ -904,33 +923,36 @@
consider_int_is_zero = consider_int_is_true
def consider_same_as(self, op):
- argloc = self.loc(op.args[0])
- self.possibly_free_var(op.args[0])
+ argloc = self.loc(op.getarg(0))
+ self.possibly_free_var(op.getarg(0))
resloc = self.force_allocate_reg(op.result)
self.Perform(op, [argloc], resloc)
#consider_cast_ptr_to_int = consider_same_as
def consider_strlen(self, op):
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.rm.force_allocate_reg(op.result)
self.Perform(op, [base_loc], result_loc)
consider_unicodelen = consider_strlen
def consider_arraylen_gc(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, BaseArrayDescr)
ofs = arraydescr.get_ofs_length(self.translate_support_code)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.rm.force_allocate_reg(op.result)
self.Perform(op, [base_loc, imm(ofs)], result_loc)
def consider_strgetitem(self, op):
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.rm.force_allocate_reg(op.result)
self.Perform(op, [base_loc, ofs_loc], result_loc)
@@ -939,7 +961,7 @@
def consider_jump(self, op):
assembler = self.assembler
assert self.jump_target_descr is None
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, LoopToken)
self.jump_target_descr = descr
nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr)
@@ -951,17 +973,20 @@
xmmtmp = X86XMMRegisterManager.all_regs[0]
xmmtmploc = self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp)
# Part about non-floats
- src_locations = [self.loc(arg) for arg in op.args if arg.type != FLOAT]
+ # XXX we don't need a copy, we only need the original list
+ src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs())
+ if op.getarg(i).type != FLOAT]
assert tmploc not in nonfloatlocs
dst_locations = [loc for loc in nonfloatlocs if loc is not None]
remap_frame_layout(assembler, src_locations, dst_locations, tmploc)
# Part about floats
- src_locations = [self.loc(arg) for arg in op.args if arg.type == FLOAT]
+ src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs())
+ if op.getarg(i).type == FLOAT]
dst_locations = [loc for loc in floatlocs if loc is not None]
remap_frame_layout(assembler, src_locations, dst_locations, xmmtmp)
self.rm.possibly_free_var(box)
self.xrm.possibly_free_var(box1)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
assembler.closing_jump(self.jump_target_descr)
def consider_debug_merge_point(self, op):
@@ -1002,12 +1027,21 @@
def add_none_argument(fn):
return lambda self, op: fn(self, op, None)
+def is_comparison_or_ovf_op(opnum):
+ from pypy.jit.metainterp.resoperation import opclasses, AbstractResOp
+ cls = opclasses[opnum]
+ # hack hack: in theory they are instance methods, but they don't use
+ # any instance field, so we can use a fake object
+ class Fake(cls):
+ pass
+ op = Fake(None)
+ return op.is_comparison() or op.is_ovf()
+
for name, value in RegAlloc.__dict__.iteritems():
if name.startswith('consider_'):
name = name[len('consider_'):]
num = getattr(rop, name.upper())
- if (ResOperation(num, [], None).is_comparison()
- or ResOperation(num, [], None).is_ovf()
+ if (is_comparison_or_ovf_op(num)
or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER):
oplist_with_guard[num] = value
oplist[num] = add_none_argument(value)
Modified: pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py (original)
+++ pypy/trunk/pypy/jit/backend/x86/test/test_recompilation.py Wed Sep 22 14:17:16 2010
@@ -47,7 +47,7 @@
finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2)
'''
bridge = self.attach_bridge(ops, loop, -2)
- descr = loop.operations[2].descr
+ descr = loop.operations[2].getdescr()
new = descr._x86_bridge_frame_depth
assert descr._x86_bridge_param_depth == 0
# XXX: Maybe add enough ops to force stack on 64-bit as well?
@@ -114,8 +114,8 @@
assert loop.token._x86_param_depth == 0
# XXX: Maybe add enough ops to force stack on 64-bit as well?
if IS_X86_32:
- assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth
- assert guard_op.descr._x86_bridge_param_depth == 0
+ assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
+ assert guard_op.getdescr()._x86_bridge_param_depth == 0
self.cpu.set_future_value_int(0, 0)
self.cpu.set_future_value_int(1, 0)
self.cpu.set_future_value_int(2, 0)
Modified: pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py (original)
+++ pypy/trunk/pypy/jit/backend/x86/test/test_regalloc.py Wed Sep 22 14:17:16 2010
@@ -9,7 +9,7 @@
from pypy.jit.backend.llsupport.descr import GcCache
from pypy.jit.backend.detect_cpu import getcpuclass
from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\
- FloatConstants
+ FloatConstants, is_comparison_or_ovf_op
from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64
from pypy.jit.metainterp.test.oparser import parse
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
@@ -17,6 +17,11 @@
from pypy.rpython.lltypesystem import rclass, rstr
from pypy.jit.backend.x86.rx86 import *
+def test_is_comparison_or_ovf_op():
+ assert not is_comparison_or_ovf_op(rop.INT_ADD)
+ assert is_comparison_or_ovf_op(rop.INT_ADD_OVF)
+ assert is_comparison_or_ovf_op(rop.INT_EQ)
+
CPU = getcpuclass()
class MockGcDescr(GcCache):
def get_funcptr_for_new(self):
@@ -159,8 +164,8 @@
assert guard_op.is_guard()
bridge = self.parse(ops, **kwds)
assert ([box.type for box in bridge.inputargs] ==
- [box.type for box in guard_op.fail_args])
- faildescr = guard_op.descr
+ [box.type for box in guard_op.getfailargs()])
+ faildescr = guard_op.getdescr()
self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations)
return bridge
@@ -607,7 +612,7 @@
'''
bridge = self.attach_bridge(ops, loop, -2)
- assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2)
+ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
self.cpu.set_future_value_int(0, 4)
self.cpu.set_future_value_int(1, 7)
@@ -630,7 +635,7 @@
'''
bridge = self.attach_bridge(ops, loop, -2)
- assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2)
+ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
self.cpu.set_future_value_int(0, 4)
self.cpu.set_future_value_int(1, 7)
Modified: pypy/trunk/pypy/jit/backend/x86/test/test_runner.py
==============================================================================
--- pypy/trunk/pypy/jit/backend/x86/test/test_runner.py (original)
+++ pypy/trunk/pypy/jit/backend/x86/test/test_runner.py Wed Sep 22 14:17:16 2010
@@ -265,7 +265,7 @@
ResOperation(rop.FINISH, [ConstInt(0)], None,
descr=BasicFailDescr()),
]
- ops[-2].fail_args = [i1]
+ ops[-2].setfailargs([i1])
looptoken = LoopToken()
self.cpu.compile_loop([b], ops, looptoken)
if op == rop.INT_IS_TRUE:
@@ -314,7 +314,7 @@
ResOperation(rop.FINISH, [ConstInt(0)], None,
descr=BasicFailDescr()),
]
- ops[-2].fail_args = [i1]
+ ops[-2].setfailargs([i1])
inputargs = [i for i in (a, b) if isinstance(i, Box)]
looptoken = LoopToken()
self.cpu.compile_loop(inputargs, ops, looptoken)
@@ -353,7 +353,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[3].fail_args = [i1]
+ operations[3].setfailargs([i1])
self.cpu.compile_loop(inputargs, operations, looptoken)
name, loopaddress, loopsize = agent.functions[0]
assert name == "Loop # 0: hello"
@@ -368,7 +368,7 @@
ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye")], None),
ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
]
- bridge[1].fail_args = [i1b]
+ bridge[1].setfailargs([i1b])
self.cpu.compile_bridge(faildescr1, [i1b], bridge)
name, address, size = agent.functions[1]
@@ -462,7 +462,7 @@
cmp_result = BoxInt()
ops.append(ResOperation(float_op, args, cmp_result))
ops.append(ResOperation(guard_op, [cmp_result], None, descr=BasicFailDescr()))
- ops[-1].fail_args = [failed]
+ ops[-1].setfailargs([failed])
ops.append(ResOperation(rop.FINISH, [finished], None, descr=BasicFailDescr()))
Modified: pypy/trunk/pypy/jit/metainterp/compile.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/compile.py (original)
+++ pypy/trunk/pypy/jit/metainterp/compile.py Wed Sep 22 14:17:16 2010
@@ -51,7 +51,7 @@
def compile_new_loop(metainterp, old_loop_tokens, greenkey, start):
"""Try to compile a new loop by closing the current history back
to the first operation.
- """
+ """
history = metainterp.history
loop = create_empty_loop(metainterp)
loop.greenkey = greenkey
@@ -65,7 +65,7 @@
jitdriver_sd = metainterp.jitdriver_sd
loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd)
loop.token = loop_token
- loop.operations[-1].descr = loop_token # patch the target of the JUMP
+ loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP
try:
old_loop_token = jitdriver_sd.warmstate.optimize_loop(
metainterp_sd, old_loop_tokens, loop)
@@ -133,7 +133,7 @@
metainterp_sd.profiler.end_backend()
if not we_are_translated():
metainterp_sd.stats.compiled()
- metainterp_sd.log("compiled new bridge")
+ metainterp_sd.log("compiled new bridge")
# ____________________________________________________________
@@ -177,7 +177,7 @@
class TerminatingLoopToken(LoopToken):
terminating = True
-
+
def __init__(self, nargs, finishdescr):
self.specnodes = [prebuiltNotSpecNode]*nargs
self.finishdescr = finishdescr
@@ -233,14 +233,14 @@
self.metainterp_sd = metainterp_sd
def store_final_boxes(self, guard_op, boxes):
- guard_op.fail_args = boxes
- self.guard_opnum = guard_op.opnum
+ guard_op.setfailargs(boxes)
+ self.guard_opnum = guard_op.getopnum()
def make_a_counter_per_value(self, guard_value_op):
- assert guard_value_op.opnum == rop.GUARD_VALUE
- box = guard_value_op.args[0]
+ assert guard_value_op.getopnum() == rop.GUARD_VALUE
+ box = guard_value_op.getarg(0)
try:
- i = guard_value_op.fail_args.index(box)
+ i = guard_value_op.getfailargs().index(box)
except ValueError:
return # xxx probably very rare
else:
@@ -508,7 +508,7 @@
def compile_new_bridge(metainterp, old_loop_tokens, resumekey):
"""Try to compile a new bridge leading from the beginning of the history
to some existing place.
- """
+ """
# The history contains new operations to attach as the code for the
# failure of 'resumekey.guard_op'.
#
@@ -540,13 +540,14 @@
op = new_loop.operations[-1]
if not isinstance(target_loop_token, TerminatingLoopToken):
# normal case
- op.descr = target_loop_token # patch the jump target
+ op.setdescr(target_loop_token) # patch the jump target
else:
# The target_loop_token is a pseudo loop token,
# e.g. loop_tokens_done_with_this_frame_void[0]
# Replace the operation with the real operation we want, i.e. a FINISH
descr = target_loop_token.finishdescr
- new_op = ResOperation(rop.FINISH, op.args, None, descr=descr)
+ args = op.getarglist()
+ new_op = ResOperation(rop.FINISH, args, None, descr=descr)
new_loop.operations[-1] = new_op
# ____________________________________________________________
@@ -597,6 +598,6 @@
ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr),
ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken)
]
- operations[1].fail_args = []
+ operations[1].setfailargs([])
cpu.compile_loop(inputargs, operations, loop_token)
return loop_token
Modified: pypy/trunk/pypy/jit/metainterp/graphpage.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/graphpage.py (original)
+++ pypy/trunk/pypy/jit/metainterp/graphpage.py Wed Sep 22 14:17:16 2010
@@ -17,13 +17,13 @@
for graph, highlight in graphs:
for op in graph.get_operations():
if is_interesting_guard(op):
- graphs.append((SubGraph(op.descr._debug_suboperations),
+ graphs.append((SubGraph(op.getdescr()._debug_suboperations),
highlight))
graphpage = ResOpGraphPage(graphs, errmsg)
graphpage.display()
def is_interesting_guard(op):
- return hasattr(op.descr, '_debug_suboperations')
+ return hasattr(op.getdescr(), '_debug_suboperations')
class ResOpGraphPage(GraphPage):
@@ -76,7 +76,7 @@
for i, op in enumerate(graph.get_operations()):
if is_interesting_guard(op):
self.mark_starter(graphindex, i+1)
- if op.opnum == rop.DEBUG_MERGE_POINT:
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
if not last_was_mergepoint:
last_was_mergepoint = True
self.mark_starter(graphindex, i)
@@ -155,7 +155,7 @@
op = operations[opindex]
lines.append(repr(op))
if is_interesting_guard(op):
- tgt = op.descr._debug_suboperations[0]
+ tgt = op.getdescr()._debug_suboperations[0]
tgt_g, tgt_i = self.all_operations[tgt]
self.genedge((graphindex, opstartindex),
(tgt_g, tgt_i),
@@ -167,8 +167,8 @@
self.genedge((graphindex, opstartindex),
(graphindex, opindex))
break
- if op.opnum == rop.JUMP:
- tgt = op.descr
+ if op.getopnum() == rop.JUMP:
+ tgt = op.getdescr()
tgt_g = -1
if tgt is None:
tgt_g = graphindex
@@ -191,7 +191,9 @@
def getlinks(self):
boxes = {}
for op in self.all_operations:
- for box in op.args + [op.result]:
+ args = op.getarglist()
+ args.append(op.result)
+ for box in args:
if getattr(box, 'is_box', False):
boxes[box] = True
links = {}
Modified: pypy/trunk/pypy/jit/metainterp/history.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/history.py (original)
+++ pypy/trunk/pypy/jit/metainterp/history.py Wed Sep 22 14:17:16 2010
@@ -532,7 +532,7 @@
class BoxFloat(Box):
type = FLOAT
_attrs_ = ('value',)
-
+
def __init__(self, floatval=0.0):
assert isinstance(floatval, float)
self.value = floatval
@@ -759,33 +759,34 @@
assert len(seen) == len(inputargs), (
"duplicate Box in the Loop.inputargs")
TreeLoop.check_consistency_of_branch(operations, seen)
-
+
@staticmethod
def check_consistency_of_branch(operations, seen):
"NOT_RPYTHON"
for op in operations:
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
if isinstance(box, Box):
assert box in seen
if op.is_guard():
- assert op.descr is not None
- if hasattr(op.descr, '_debug_suboperations'):
- ops = op.descr._debug_suboperations
+ assert op.getdescr() is not None
+ if hasattr(op.getdescr(), '_debug_suboperations'):
+ ops = op.getdescr()._debug_suboperations
TreeLoop.check_consistency_of_branch(ops, seen.copy())
- for box in op.fail_args or []:
+ for box in op.getfailargs() or []:
if box is not None:
assert isinstance(box, Box)
assert box in seen
else:
- assert op.fail_args is None
+ assert op.getfailargs() is None
box = op.result
if box is not None:
assert isinstance(box, Box)
assert box not in seen
seen[box] = True
assert operations[-1].is_final()
- if operations[-1].opnum == rop.JUMP:
- target = operations[-1].descr
+ if operations[-1].getopnum() == rop.JUMP:
+ target = operations[-1].getdescr()
if target is not None:
assert isinstance(target, LoopToken)
@@ -793,7 +794,8 @@
# RPython-friendly
print '%r: inputargs =' % self, self._dump_args(self.inputargs)
for op in self.operations:
- print '\t', op.getopname(), self._dump_args(op.args), \
+ args = op.getarglist()
+ print '\t', op.getopname(), self._dump_args(args), \
self._dump_box(op.result)
def _dump_args(self, boxes):
@@ -809,14 +811,14 @@
return '<%s>' % (self.name,)
def _list_all_operations(result, operations, omit_finish=True):
- if omit_finish and operations[-1].opnum == rop.FINISH:
+ if omit_finish and operations[-1].getopnum() == rop.FINISH:
# xxx obscure
return
result.extend(operations)
for op in operations:
- if op.is_guard() and op.descr:
- if hasattr(op.descr, '_debug_suboperations'):
- ops = op.descr._debug_suboperations
+ if op.is_guard() and op.getdescr():
+ if hasattr(op.getdescr(), '_debug_suboperations'):
+ ops = op.getdescr()._debug_suboperations
_list_all_operations(result, ops, omit_finish)
# ____________________________________________________________
@@ -885,7 +887,7 @@
self.aborted_count += 1
def entered(self):
- self.enter_count += 1
+ self.enter_count += 1
def compiled(self):
self.compiled_count += 1
@@ -898,7 +900,7 @@
def add_new_loop(self, loop):
self.loops.append(loop)
-
+
# test read interface
def get_all_loops(self):
Modified: pypy/trunk/pypy/jit/metainterp/logger.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/logger.py (original)
+++ pypy/trunk/pypy/jit/metainterp/logger.py Wed Sep 22 14:17:16 2010
@@ -79,27 +79,27 @@
debug_print('[' + args + ']')
for i in range(len(operations)):
op = operations[i]
- if op.opnum == rop.DEBUG_MERGE_POINT:
- loc = op.args[0]._get_str()
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
+ loc = op.getarg(0)._get_str()
debug_print("debug_merge_point('%s')" % (loc,))
continue
- args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args])
+ args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())])
if op.result is not None:
res = self.repr_of_arg(memo, op.result) + " = "
else:
res = ""
is_guard = op.is_guard()
- if op.descr is not None:
- descr = op.descr
+ if op.getdescr() is not None:
+ descr = op.getdescr()
if is_guard and self.guard_number:
index = self.metainterp_sd.cpu.get_fail_descr_number(descr)
r = "<Guard%d>" % index
else:
r = self.repr_of_descr(descr)
args += ', descr=' + r
- if is_guard and op.fail_args is not None:
+ if is_guard and op.getfailargs() is not None:
fail_args = ' [' + ", ".join([self.repr_of_arg(memo, arg)
- for arg in op.fail_args]) + ']'
+ for arg in op.getfailargs()]) + ']'
else:
fail_args = ''
debug_print(res + op.getopname() +
Modified: pypy/trunk/pypy/jit/metainterp/optimize.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/optimize.py (original)
+++ pypy/trunk/pypy/jit/metainterp/optimize.py Wed Sep 22 14:17:16 2010
@@ -43,7 +43,7 @@
finder.find_nodes_bridge(bridge)
for old_loop_token in old_loop_tokens:
if finder.bridge_matches(old_loop_token.specnodes):
- bridge.operations[-1].descr = old_loop_token # patch jump target
+ bridge.operations[-1].setdescr(old_loop_token) # patch jump target
optimize_bridge_1(metainterp_sd, bridge)
return old_loop_token
return None
Modified: pypy/trunk/pypy/jit/metainterp/optimizefindnode.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/optimizefindnode.py (original)
+++ pypy/trunk/pypy/jit/metainterp/optimizefindnode.py Wed Sep 22 14:17:16 2010
@@ -144,7 +144,7 @@
def find_nodes(self, operations):
for op in operations:
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in find_nodes_ops:
if opnum == value:
func(self, op)
@@ -154,18 +154,20 @@
def find_nodes_default(self, op):
if op.is_always_pure():
- for arg in op.args:
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if self.get_constant_box(arg) is None:
break
else:
# all constant arguments: we can constant-fold
- argboxes = [self.get_constant_box(arg) for arg in op.args]
+ argboxes = [self.get_constant_box(op.getarg(i))
+ for i in range(op.numargs())]
resbox = execute_nonspec(self.cpu, None,
- op.opnum, argboxes, op.descr)
+ op.getopnum(), argboxes, op.getdescr())
self.set_constant_node(op.result, resbox.constbox())
# default case: mark the arguments as escaping
- for box in op.args:
- self.getnode(box).mark_escaped()
+ for i in range(op.numargs()):
+ self.getnode(op.getarg(i)).mark_escaped()
def find_nodes_no_escape(self, op):
pass # for operations that don't escape their arguments
@@ -178,53 +180,53 @@
def find_nodes_NEW_WITH_VTABLE(self, op):
instnode = InstanceNode()
- box = op.args[0]
+ box = op.getarg(0)
assert isinstance(box, Const)
instnode.knownclsbox = box
self.nodes[op.result] = instnode
def find_nodes_NEW(self, op):
instnode = InstanceNode()
- instnode.structdescr = op.descr
+ instnode.structdescr = op.getdescr()
self.nodes[op.result] = instnode
def find_nodes_NEW_ARRAY(self, op):
- lengthbox = op.args[0]
+ lengthbox = op.getarg(0)
lengthbox = self.get_constant_box(lengthbox)
if lengthbox is None:
return # var-sized arrays are not virtual
arraynode = InstanceNode()
arraynode.arraysize = lengthbox.getint()
- arraynode.arraydescr = op.descr
+ arraynode.arraydescr = op.getdescr()
self.nodes[op.result] = arraynode
def find_nodes_ARRAYLEN_GC(self, op):
- arraynode = self.getnode(op.args[0])
+ arraynode = self.getnode(op.getarg(0))
if arraynode.arraydescr is not None:
resbox = ConstInt(arraynode.arraysize)
self.set_constant_node(op.result, resbox)
def find_nodes_GUARD_CLASS(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.fromstart: # only useful (and safe) in this case
- box = op.args[1]
+ box = op.getarg(1)
assert isinstance(box, Const)
instnode.knownclsbox = box
def find_nodes_GUARD_VALUE(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.fromstart: # only useful (and safe) in this case
- box = op.args[1]
+ box = op.getarg(1)
assert isinstance(box, Const)
instnode.knownvaluebox = box
def find_nodes_SETFIELD_GC(self, op):
- instnode = self.getnode(op.args[0])
- fieldnode = self.getnode(op.args[1])
+ instnode = self.getnode(op.getarg(0))
+ fieldnode = self.getnode(op.getarg(1))
if instnode.escaped:
fieldnode.mark_escaped()
return # nothing to be gained from tracking the field
- field = op.descr
+ field = op.getdescr()
assert isinstance(field, AbstractValue)
if instnode.curfields is None:
instnode.curfields = {}
@@ -232,10 +234,10 @@
instnode.add_escape_dependency(fieldnode)
def find_nodes_GETFIELD_GC(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.escaped:
return # nothing to be gained from tracking the field
- field = op.descr
+ field = op.getdescr()
assert isinstance(field, AbstractValue)
if instnode.curfields is not None and field in instnode.curfields:
fieldnode = instnode.curfields[field]
@@ -254,13 +256,13 @@
find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC
def find_nodes_SETARRAYITEM_GC(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
indexbox = self.get_constant_box(indexbox)
if indexbox is None:
self.find_nodes_default(op) # not a Const index
return
- arraynode = self.getnode(op.args[0])
- itemnode = self.getnode(op.args[2])
+ arraynode = self.getnode(op.getarg(0))
+ itemnode = self.getnode(op.getarg(2))
if arraynode.escaped:
itemnode.mark_escaped()
return # nothing to be gained from tracking the item
@@ -270,12 +272,12 @@
arraynode.add_escape_dependency(itemnode)
def find_nodes_GETARRAYITEM_GC(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
indexbox = self.get_constant_box(indexbox)
if indexbox is None:
self.find_nodes_default(op) # not a Const index
return
- arraynode = self.getnode(op.args[0])
+ arraynode = self.getnode(op.getarg(0))
if arraynode.escaped:
return # nothing to be gained from tracking the item
index = indexbox.getint()
@@ -298,13 +300,15 @@
def find_nodes_JUMP(self, op):
# only set up the 'unique' field of the InstanceNodes;
# real handling comes later (build_result_specnodes() for loops).
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
self.getnode(box).set_unique_nodes()
def find_nodes_FINISH(self, op):
# only for bridges, and only for the ones that end in a 'return'
# or 'raise'; all other cases end with a JUMP.
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
self.getnode(box).unique = UNIQUE_NO
find_nodes_ops = _findall(NodeFinder, 'find_nodes_')
@@ -324,7 +328,7 @@
def show(self):
from pypy.jit.metainterp.viewnode import viewnodes, view
op = self._loop.operations[-1]
- assert op.opnum == rop.JUMP
+ assert op.getopnum() == rop.JUMP
exitnodes = [self.getnode(arg) for arg in op.args]
viewnodes(self.inputnodes, exitnodes)
if hasattr(self._loop.token, "specnodes"):
@@ -343,14 +347,14 @@
# Build the list of specnodes based on the result
# computed by NodeFinder.find_nodes().
op = loop.operations[-1]
- assert op.opnum == rop.JUMP
- assert len(self.inputnodes) == len(op.args)
+ assert op.getopnum() == rop.JUMP
+ assert len(self.inputnodes) == op.numargs()
while True:
self.restart_needed = False
specnodes = []
- for i in range(len(op.args)):
+ for i in range(op.numargs()):
inputnode = self.inputnodes[i]
- exitnode = self.getnode(op.args[i])
+ exitnode = self.getnode(op.getarg(i))
specnodes.append(self.intersect(inputnode, exitnode))
if not self.restart_needed:
break
@@ -562,9 +566,9 @@
def bridge_matches(self, nextloop_specnodes):
jump_op = self.jump_op
- assert len(jump_op.args) == len(nextloop_specnodes)
+ assert jump_op.numargs() == len(nextloop_specnodes)
for i in range(len(nextloop_specnodes)):
- exitnode = self.getnode(jump_op.args[i])
+ exitnode = self.getnode(jump_op.getarg(i))
if not nextloop_specnodes[i].matches_instance_node(exitnode):
return False
return True
Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py (original)
+++ pypy/trunk/pypy/jit/metainterp/optimizeopt/heap.py Wed Sep 22 14:17:16 2010
@@ -45,7 +45,7 @@
op = self.lazy_setfields.get(descr, None)
if op is None:
return None
- return self.getvalue(op.args[1])
+ return self.getvalue(op.getarg(1))
return d.get(value, None)
def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False):
@@ -105,7 +105,7 @@
if op.is_guard():
self.optimizer.pendingfields = self.force_lazy_setfields_for_guard()
return
- opnum = op.opnum
+ opnum = op.getopnum()
if (opnum == rop.SETFIELD_GC or
opnum == rop.SETARRAYITEM_GC or
opnum == rop.DEBUG_MERGE_POINT):
@@ -117,7 +117,7 @@
if opnum == rop.CALL_ASSEMBLER:
effectinfo = None
else:
- effectinfo = op.descr.get_extra_info()
+ effectinfo = op.getdescr().get_extra_info()
if effectinfo is not None:
# XXX we can get the wrong complexity here, if the lists
# XXX stored on effectinfo are large
@@ -142,7 +142,7 @@
return
self.force_all_lazy_setfields()
elif op.is_final() or (not we_are_translated() and
- op.opnum < 0): # escape() operations
+ op.getopnum() < 0): # escape() operations
self.force_all_lazy_setfields()
self.clean_caches()
@@ -166,10 +166,11 @@
# - is_comparison() for cases like "int_eq/setfield_gc/guard_true"
# - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced"
# - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow"
- opnum = prevop.opnum
+ opnum = prevop.getopnum()
+ lastop_args = lastop.getarglist()
if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE
or prevop.is_ovf())
- and prevop.result not in lastop.args):
+ and prevop.result not in lastop_args):
newoperations[-2] = lastop
newoperations[-1] = prevop
@@ -189,9 +190,9 @@
# the only really interesting case that we need to handle in the
# guards' resume data is that of a virtual object that is stored
# into a field of a non-virtual object.
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
assert not value.is_virtual() # it must be a non-virtual
- fieldvalue = self.getvalue(op.args[1])
+ fieldvalue = self.getvalue(op.getarg(1))
if fieldvalue.is_virtual():
# this is the case that we leave to resume.py
pendingfields.append((descr, value.box,
@@ -202,20 +203,20 @@
def force_lazy_setfield_if_necessary(self, op, value, write=False):
try:
- op1 = self.lazy_setfields[op.descr]
+ op1 = self.lazy_setfields[op.getdescr()]
except KeyError:
if write:
- self.lazy_setfields_descrs.append(op.descr)
+ self.lazy_setfields_descrs.append(op.getdescr())
else:
- if self.getvalue(op1.args[0]) is not value:
- self.force_lazy_setfield(op.descr)
+ if self.getvalue(op1.getarg(0)) is not value:
+ self.force_lazy_setfield(op.getdescr())
def optimize_GETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
self.force_lazy_setfield_if_necessary(op, value)
# check if the field was read from another getfield_gc just before
# or has been written to recently
- fieldvalue = self.read_cached_field(op.descr, value)
+ fieldvalue = self.read_cached_field(op.getdescr(), value)
if fieldvalue is not None:
self.make_equal_to(op.result, fieldvalue)
return
@@ -225,38 +226,38 @@
self.emit_operation(op) # FIXME: These might need constant propagation?
# then remember the result of reading the field
fieldvalue = self.getvalue(op.result)
- self.cache_field_value(op.descr, value, fieldvalue)
+ self.cache_field_value(op.getdescr(), value, fieldvalue)
def optimize_SETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[1])
+ value = self.getvalue(op.getarg(0))
+ fieldvalue = self.getvalue(op.getarg(1))
self.force_lazy_setfield_if_necessary(op, value, write=True)
- self.lazy_setfields[op.descr] = op
+ self.lazy_setfields[op.getdescr()] = op
# remember the result of future reads of the field
- self.cache_field_value(op.descr, value, fieldvalue, write=True)
+ self.cache_field_value(op.getdescr(), value, fieldvalue, write=True)
def optimize_GETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
- indexvalue = self.getvalue(op.args[1])
- fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue)
+ value = self.getvalue(op.getarg(0))
+ indexvalue = self.getvalue(op.getarg(1))
+ fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue)
if fieldvalue is not None:
self.make_equal_to(op.result, fieldvalue)
return
###self.optimizer.optimize_default(op)
self.emit_operation(op) # FIXME: These might need constant propagation?
fieldvalue = self.getvalue(op.result)
- self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue)
+ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue)
def optimize_SETARRAYITEM_GC(self, op):
self.emit_operation(op)
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[2])
- indexvalue = self.getvalue(op.args[1])
- self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue,
+ value = self.getvalue(op.getarg(0))
+ fieldvalue = self.getvalue(op.getarg(2))
+ indexvalue = self.getvalue(op.getarg(1))
+ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue,
write=True)
def propagate_forward(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py (original)
+++ pypy/trunk/pypy/jit/metainterp/optimizeopt/intbounds.py Wed Sep 22 14:17:16 2010
@@ -10,7 +10,7 @@
remove redundant guards"""
def propagate_forward(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
@@ -31,7 +31,7 @@
op = self.optimizer.producer[box]
except KeyError:
return
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in propagate_bounds_ops:
if opnum == value:
func(self, op)
@@ -39,14 +39,14 @@
def optimize_GUARD_TRUE(self, op):
self.emit_operation(op)
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
optimize_GUARD_FALSE = optimize_GUARD_TRUE
optimize_GUARD_VALUE = optimize_GUARD_TRUE
def optimize_INT_AND(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
@@ -60,74 +60,74 @@
r.intbound.intersect(IntBound(0,val))
def optimize_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.sub_bound(v2.intbound))
def optimize_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.add_bound(v2.intbound))
def optimize_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.mul_bound(v2.intbound))
def optimize_INT_ADD_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.add_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
- self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
# Transform into INT_ADD and remove guard
- op.opnum = rop.INT_ADD
+ op = op.copy_and_change(rop.INT_ADD)
self.skip_nextop()
- self.optimize_INT_ADD(op)
+ self.optimize_INT_ADD(op) # emit the op
else:
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
def optimize_INT_SUB_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.sub_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
- self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
# Transform into INT_SUB and remove guard
- op.opnum = rop.INT_SUB
+ op = op.copy_and_change(rop.INT_SUB)
self.skip_nextop()
- self.optimize_INT_SUB(op)
+ self.optimize_INT_SUB(op) # emit the op
else:
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
def optimize_INT_MUL_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.mul_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
- self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
# Transform into INT_MUL and remove guard
- op.opnum = rop.INT_MUL
+ op = op.copy_and_change(rop.INT_MUL)
self.skip_nextop()
- self.optimize_INT_MUL(op)
+ self.optimize_INT_MUL(op) # emit the op
else:
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
def optimize_INT_LT(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_lt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_ge(v2.intbound):
@@ -136,8 +136,8 @@
self.emit_operation(op)
def optimize_INT_GT(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_le(v2.intbound):
@@ -146,8 +146,8 @@
self.emit_operation(op)
def optimize_INT_LE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_le(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_gt(v2.intbound):
@@ -156,8 +156,8 @@
self.emit_operation(op)
def optimize_INT_GE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_ge(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_lt(v2.intbound):
@@ -166,8 +166,8 @@
self.emit_operation(op)
def optimize_INT_EQ(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 0)
elif v1.intbound.known_lt(v2.intbound):
@@ -176,8 +176,8 @@
self.emit_operation(op)
def optimize_INT_NE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_lt(v2.intbound):
@@ -192,115 +192,114 @@
optimize_STRLEN = optimize_ARRAYLEN_GC
- def make_int_lt(self, args):
- v1 = self.getvalue(args[0])
- v2 = self.getvalue(args[1])
+ def make_int_lt(self, box1, box2):
+ v1 = self.getvalue(box1)
+ v2 = self.getvalue(box2)
if v1.intbound.make_lt(v2.intbound):
- self.propagate_bounds_backward(args[0])
+ self.propagate_bounds_backward(box1)
if v2.intbound.make_gt(v1.intbound):
- self.propagate_bounds_backward(args[1])
+ self.propagate_bounds_backward(box2)
-
- def make_int_le(self, args):
- v1 = self.getvalue(args[0])
- v2 = self.getvalue(args[1])
+ def make_int_le(self, box1, box2):
+ v1 = self.getvalue(box1)
+ v2 = self.getvalue(box2)
if v1.intbound.make_le(v2.intbound):
- self.propagate_bounds_backward(args[0])
+ self.propagate_bounds_backward(box1)
if v2.intbound.make_ge(v1.intbound):
- self.propagate_bounds_backward(args[1])
+ self.propagate_bounds_backward(box2)
- def make_int_gt(self, args):
- self.make_int_lt([args[1], args[0]])
+ def make_int_gt(self, box1, box2):
+ self.make_int_lt(box2, box1)
- def make_int_ge(self, args):
- self.make_int_le([args[1], args[0]])
+ def make_int_ge(self, box1, box2):
+ self.make_int_le(box2, box1)
def propagate_bounds_INT_LT(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_lt(op.args)
+ self.make_int_lt(op.getarg(0), op.getarg(1))
else:
- self.make_int_ge(op.args)
+ self.make_int_ge(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_GT(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_gt(op.args)
+ self.make_int_gt(op.getarg(0), op.getarg(1))
else:
- self.make_int_le(op.args)
+ self.make_int_le(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_LE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_le(op.args)
+ self.make_int_le(op.getarg(0), op.getarg(1))
else:
- self.make_int_gt(op.args)
+ self.make_int_gt(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_GE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_ge(op.args)
+ self.make_int_ge(op.getarg(0), op.getarg(1))
else:
- self.make_int_lt(op.args)
+ self.make_int_lt(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_EQ(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.intersect(v2.intbound):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
if v2.intbound.intersect(v1.intbound):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_NE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_0):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.intersect(v2.intbound):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
if v2.intbound.intersect(v1.intbound):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.sub_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.sub_bound(v1.intbound)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.add_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.sub_bound(v1.intbound).mul(-1)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.div_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.div_bound(v1.intbound)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py (original)
+++ pypy/trunk/pypy/jit/metainterp/optimizeopt/optimizer.py Wed Sep 22 14:17:16 2010
@@ -16,12 +16,12 @@
LEVEL_UNKNOWN = '\x00'
LEVEL_NONNULL = '\x01'
LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays
-LEVEL_CONSTANT = '\x03'
+LEVEL_CONSTANT = '\x03'
import sys
MAXINT = sys.maxint
MININT = -sys.maxint - 1
-
+
class OptValue(object):
_attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound')
last_guard_index = -1
@@ -36,7 +36,7 @@
if isinstance(box, Const):
self.make_constant(box)
# invariant: box is a Const if and only if level == LEVEL_CONSTANT
-
+
def force_box(self):
return self.box
@@ -171,7 +171,7 @@
def new_const_item(self, arraydescr):
return self.optimizer.new_const_item(arraydescr)
-
+
def pure(self, opnum, args, result):
op = ResOperation(opnum, args, result)
self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op
@@ -184,7 +184,7 @@
def setup(self, virtuals):
pass
-
+
class Optimizer(Optimization):
def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True):
@@ -308,7 +308,7 @@
def propagate_forward(self, op):
self.producer[op.result] = op
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
@@ -323,15 +323,15 @@
self._emit_operation(op)
def _emit_operation(self, op):
- for i in range(len(op.args)):
- arg = op.args[i]
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if arg in self.values:
box = self.values[arg].force_box()
- op.args[i] = box
+ op.setarg(i, box)
self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
if op.is_guard():
self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
- self.store_final_boxes_in_guard(op)
+ op = self.store_final_boxes_in_guard(op)
elif op.can_raise():
self.exception_might_have_happened = True
elif op.returns_bool_result():
@@ -340,7 +340,7 @@
def store_final_boxes_in_guard(self, op):
###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard()
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo)
newboxes = modifier.finish(self.values, self.pendingfields)
@@ -348,49 +348,54 @@
compile.giveup()
descr.store_final_boxes(op, newboxes)
#
- if op.opnum == rop.GUARD_VALUE:
- if self.getvalue(op.args[0]) in self.bool_boxes:
+ if op.getopnum() == rop.GUARD_VALUE:
+ if self.getvalue(op.getarg(0)) in self.bool_boxes:
# Hack: turn guard_value(bool) into guard_true/guard_false.
# This is done after the operation is emitted to let
# store_final_boxes_in_guard set the guard_opnum field of the
# descr to the original rop.GUARD_VALUE.
- constvalue = op.args[1].getint()
+ constvalue = op.getarg(1).getint()
if constvalue == 0:
opnum = rop.GUARD_FALSE
elif constvalue == 1:
opnum = rop.GUARD_TRUE
else:
raise AssertionError("uh?")
- op.opnum = opnum
- op.args = [op.args[0]]
+ newop = ResOperation(opnum, [op.getarg(0)], op.result, descr)
+ newop.setfailargs(op.getfailargs())
+ return newop
else:
# a real GUARD_VALUE. Make it use one counter per value.
descr.make_a_counter_per_value(op)
+ return op
def make_args_key(self, op):
- args = op.args[:]
- for i in range(len(args)):
- arg = args[i]
+ args = []
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if arg in self.values:
- args[i] = self.values[arg].get_key_box()
- args.append(ConstInt(op.opnum))
+ args.append(self.values[arg].get_key_box())
+ else:
+ args.append(arg)
+ args.append(ConstInt(op.getopnum()))
return args
-
+
def optimize_default(self, op):
canfold = op.is_always_pure()
is_ovf = op.is_ovf()
if is_ovf:
nextop = self.loop.operations[self.i + 1]
- canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW
+ canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW
if canfold:
- for arg in op.args:
- if self.get_constant_box(arg) is None:
+ for i in range(op.numargs()):
+ if self.get_constant_box(op.getarg(i)) is None:
break
else:
# all constant arguments: constant-fold away
- argboxes = [self.get_constant_box(arg) for arg in op.args]
+ argboxes = [self.get_constant_box(op.getarg(i))
+ for i in range(op.numargs())]
resbox = execute_nonspec(self.cpu, None,
- op.opnum, argboxes, op.descr)
+ op.getopnum(), argboxes, op.getdescr())
self.make_constant(op.result, resbox.constbox())
if is_ovf:
self.i += 1 # skip next operation, it is the unneeded guard
@@ -399,8 +404,8 @@
# did we do the exact same operation already?
args = self.make_args_key(op)
oldop = self.pure_operations.get(args, None)
- if oldop is not None and oldop.descr is op.descr:
- assert oldop.opnum == op.opnum
+ if oldop is not None and oldop.getdescr() is op.getdescr():
+ assert oldop.getopnum() == op.getopnum()
self.make_equal_to(op.result, self.getvalue(oldop.result))
if is_ovf:
self.i += 1 # skip next operation, it is the unneeded guard
Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py (original)
+++ pypy/trunk/pypy/jit/metainterp/optimizeopt/rewrite.py Wed Sep 22 14:17:16 2010
@@ -14,7 +14,7 @@
if self.find_rewritable_bool(op, args):
return
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
@@ -24,7 +24,7 @@
def try_boolinvers(self, op, targs):
oldop = self.optimizer.pure_operations.get(targs, None)
- if oldop is not None and oldop.descr is op.descr:
+ if oldop is not None and oldop.getdescr() is op.getdescr():
value = self.getvalue(oldop.result)
if value.is_constant():
if value.box.same_constant(CONST_1):
@@ -39,7 +39,7 @@
def find_rewritable_bool(self, op, args):
try:
- oldopnum = opboolinvers[op.opnum]
+ oldopnum = opboolinvers[op.getopnum()]
targs = [args[0], args[1], ConstInt(oldopnum)]
if self.try_boolinvers(op, targs):
return True
@@ -47,17 +47,17 @@
pass
try:
- oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL
+ oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL
targs = [args[1], args[0], ConstInt(oldopnum)]
oldop = self.optimizer.pure_operations.get(targs, None)
- if oldop is not None and oldop.descr is op.descr:
+ if oldop is not None and oldop.getdescr() is op.getdescr():
self.make_equal_to(op.result, self.getvalue(oldop.result))
return True
except KeyError:
pass
try:
- oldopnum = opboolinvers[opboolreflex[op.opnum]]
+ oldopnum = opboolinvers[opboolreflex[op.getopnum()]]
targs = [args[1], args[0], ConstInt(oldopnum)]
if self.try_boolinvers(op, targs):
return True
@@ -67,16 +67,16 @@
return False
def optimize_INT_AND(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.is_null() or v2.is_null():
self.make_constant_int(op.result, 0)
else:
self.emit_operation(op)
def optimize_INT_OR(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.is_null():
self.make_equal_to(op.result, v2)
elif v2.is_null():
@@ -85,20 +85,20 @@
self.emit_operation(op)
def optimize_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v2.is_constant() and v2.box.getint() == 0:
self.make_equal_to(op.result, v1)
else:
self.emit_operation(op)
# Synthesize the reverse ops for optimize_default to reuse
- self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0])
- self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1])
+ self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0))
+ self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1))
def optimize_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
# If one side of the op is 0 the result is the other side.
if v1.is_constant() and v1.box.getint() == 0:
@@ -109,12 +109,12 @@
self.emit_operation(op)
# Synthesize the reverse op for optimize_default to reuse
- self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0])
- self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1])
+ self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0))
+ self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1))
def optimize_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
# If one side of the op is 1 the result is the other side.
if v1.is_constant() and v1.box.getint() == 1:
@@ -128,18 +128,20 @@
self.emit_operation(op)
def optimize_CALL_PURE(self, op):
- for arg in op.args:
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if self.get_constant_box(arg) is None:
break
else:
# all constant arguments: constant-fold away
- self.make_constant(op.result, op.args[0])
+ self.make_constant(op.result, op.getarg(0))
return
# replace CALL_PURE with just CALL
- self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
- op.descr))
+ args = op.getarglist()[1:]
+ self.emit_operation(ResOperation(rop.CALL, args, op.result,
+ op.getdescr()))
def optimize_guard(self, op, constbox, emit_operation=True):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_constant():
box = value.box
assert isinstance(box, Const)
@@ -151,7 +153,7 @@
value.make_constant(constbox)
def optimize_GUARD_ISNULL(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_null():
return
elif value.is_nonnull():
@@ -160,7 +162,7 @@
value.make_constant(self.optimizer.cpu.ts.CONST_NULL)
def optimize_GUARD_NONNULL(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_nonnull():
return
elif value.is_null():
@@ -169,25 +171,25 @@
value.make_nonnull(len(self.optimizer.newoperations) - 1)
def optimize_GUARD_VALUE(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
emit_operation = True
if value.last_guard_index != -1:
# there already has been a guard_nonnull or guard_class or
# guard_nonnull_class on this value, which is rather silly.
# replace the original guard with a guard_value
old_guard_op = self.optimizer.newoperations[value.last_guard_index]
- old_opnum = old_guard_op.opnum
- old_guard_op.opnum = rop.GUARD_VALUE
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE,
+ args = [old_guard_op.getarg(0), op.getarg(1)])
+ self.optimizer.newoperations[value.last_guard_index] = new_guard_op
# hack hack hack. Change the guard_opnum on
- # old_guard_op.descr so that when resuming,
+ # new_guard_op.getdescr() so that when resuming,
# the operation is not skipped by pyjitpl.py.
- descr = old_guard_op.descr
+ descr = new_guard_op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
descr.guard_opnum = rop.GUARD_VALUE
- descr.make_a_counter_per_value(old_guard_op)
+ descr.make_a_counter_per_value(new_guard_op)
emit_operation = False
- constbox = op.args[1]
+ constbox = op.getarg(1)
assert isinstance(constbox, Const)
self.optimize_guard(op, constbox, emit_operation)
@@ -198,8 +200,8 @@
self.optimize_guard(op, CONST_0)
def optimize_GUARD_CLASS(self, op):
- value = self.getvalue(op.args[0])
- expectedclassbox = op.args[1]
+ value = self.getvalue(op.getarg(0))
+ expectedclassbox = op.getarg(1)
assert isinstance(expectedclassbox, Const)
realclassbox = value.get_constant_class(self.optimizer.cpu)
if realclassbox is not None:
@@ -213,15 +215,16 @@
# there already has been a guard_nonnull or guard_class or
# guard_nonnull_class on this value.
old_guard_op = self.optimizer.newoperations[value.last_guard_index]
- if old_guard_op.opnum == rop.GUARD_NONNULL:
+ if old_guard_op.getopnum() == rop.GUARD_NONNULL:
# it was a guard_nonnull, which we replace with a
# guard_nonnull_class.
- old_guard_op.opnum = rop.GUARD_NONNULL_CLASS
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS,
+ args = [old_guard_op.getarg(0), op.getarg(1)])
+ self.optimizer.newoperations[value.last_guard_index] = new_guard_op
# hack hack hack. Change the guard_opnum on
- # old_guard_op.descr so that when resuming,
+ # new_guard_op.getdescr() so that when resuming,
# the operation is not skipped by pyjitpl.py.
- descr = old_guard_op.descr
+ descr = new_guard_op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
descr.guard_opnum = rop.GUARD_NONNULL_CLASS
emit_operation = False
@@ -239,18 +242,18 @@
self.optimizer.exception_might_have_happened = False
def optimize_CALL_LOOPINVARIANT(self, op):
- funcvalue = self.getvalue(op.args[0])
+ funcvalue = self.getvalue(op.getarg(0))
if not funcvalue.is_constant():
self.emit_operation(op)
return
- key = make_hashable_int(op.args[0].getint())
+ key = make_hashable_int(op.getarg(0).getint())
resvalue = self.optimizer.loop_invariant_results.get(key, None)
if resvalue is not None:
self.make_equal_to(op.result, resvalue)
return
# change the op to be a normal call, from the backend's point of view
# there is no reason to have a separate operation for this
- op.opnum = rop.CALL
+ op = op.copy_and_change(rop.CALL)
self.emit_operation(op)
resvalue = self.getvalue(op.result)
self.optimizer.loop_invariant_results[key] = resvalue
@@ -265,17 +268,17 @@
self.emit_operation(op)
def optimize_INT_IS_TRUE(self, op):
- if self.getvalue(op.args[0]) in self.optimizer.bool_boxes:
- self.make_equal_to(op.result, self.getvalue(op.args[0]))
+ if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes:
+ self.make_equal_to(op.result, self.getvalue(op.getarg(0)))
return
- self._optimize_nullness(op, op.args[0], True)
+ self._optimize_nullness(op, op.getarg(0), True)
def optimize_INT_IS_ZERO(self, op):
- self._optimize_nullness(op, op.args[0], False)
+ self._optimize_nullness(op, op.getarg(0), False)
def _optimize_oois_ooisnot(self, op, expect_isnot):
- value0 = self.getvalue(op.args[0])
- value1 = self.getvalue(op.args[1])
+ value0 = self.getvalue(op.getarg(0))
+ value1 = self.getvalue(op.getarg(1))
if value0.is_virtual():
if value1.is_virtual():
intres = (value0 is value1) ^ expect_isnot
@@ -285,9 +288,9 @@
elif value1.is_virtual():
self.make_constant_int(op.result, expect_isnot)
elif value1.is_null():
- self._optimize_nullness(op, op.args[0], expect_isnot)
+ self._optimize_nullness(op, op.getarg(0), expect_isnot)
elif value0.is_null():
- self._optimize_nullness(op, op.args[1], expect_isnot)
+ self._optimize_nullness(op, op.getarg(1), expect_isnot)
elif value0 is value1:
self.make_constant_int(op.result, not expect_isnot)
else:
@@ -308,10 +311,10 @@
self._optimize_oois_ooisnot(op, False)
def optimize_INSTANCEOF(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
realclassbox = value.get_constant_class(self.optimizer.cpu)
if realclassbox is not None:
- checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr)
+ checkclassbox = self.optimizer.cpu.typedescr2classbox(op.getdescr())
result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu,
realclassbox,
checkclassbox)
Modified: pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py (original)
+++ pypy/trunk/pypy/jit/metainterp/optimizeopt/virtualize.py Wed Sep 22 14:17:16 2010
@@ -258,7 +258,7 @@
def setup(self, virtuals):
if not virtuals:
return
-
+
inputargs = self.optimizer.loop.inputargs
specnodes = self.optimizer.loop.token.specnodes
assert len(inputargs) == len(specnodes)
@@ -285,18 +285,18 @@
def optimize_JUMP(self, op):
orgop = self.optimizer.loop.operations[-1]
exitargs = []
- target_loop_token = orgop.descr
+ target_loop_token = orgop.getdescr()
assert isinstance(target_loop_token, LoopToken)
specnodes = target_loop_token.specnodes
- assert len(op.args) == len(specnodes)
+ assert op.numargs() == len(specnodes)
for i in range(len(specnodes)):
- value = self.getvalue(op.args[i])
+ value = self.getvalue(op.getarg(i))
specnodes[i].teardown_virtual_node(self, value, exitargs)
- op.args = exitargs[:]
+ op = op.copy_and_change(op.getopnum(), args=exitargs[:])
self.emit_operation(op)
def optimize_VIRTUAL_REF(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
#
# get some constants
vrefinfo = self.optimizer.metainterp_sd.virtualref_info
@@ -322,17 +322,17 @@
# typically a PyPy PyFrame, and now is the end of its execution, so
# forcing it now does not have catastrophic effects.
vrefinfo = self.optimizer.metainterp_sd.virtualref_info
- # op.args[1] should really never point to null here
+ # op.getarg(1) should really never point to null here
# - set 'forced' to point to the real object
- op1 = ResOperation(rop.SETFIELD_GC, op.args, None,
+ op1 = ResOperation(rop.SETFIELD_GC, op.getarglist(), None,
descr = vrefinfo.descr_forced)
self.optimize_SETFIELD_GC(op1)
# - set 'virtual_token' to TOKEN_NONE
- args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)]
+ args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)]
op1 = ResOperation(rop.SETFIELD_GC, args, None,
descr = vrefinfo.descr_virtual_token)
self.optimize_SETFIELD_GC(op1)
- # Note that in some cases the virtual in op.args[1] has been forced
+ # Note that in some cases the virtual in op.getarg(1) has been forced
# already. This is fine. In that case, and *if* a residual
# CALL_MAY_FORCE suddenly turns out to access it, then it will
# trigger a ResumeGuardForcedDescr.handle_async_forcing() which
@@ -340,11 +340,11 @@
# was already forced).
def optimize_GETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
# optimizefindnode should ensure that fieldvalue is found
assert isinstance(value, AbstractVirtualValue)
- fieldvalue = value.getfield(op.descr, None)
+ fieldvalue = value.getfield(op.getdescr(), None)
assert fieldvalue is not None
self.make_equal_to(op.result, fieldvalue)
else:
@@ -357,36 +357,36 @@
optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC
def optimize_SETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[1])
+ value = self.getvalue(op.getarg(0))
+ fieldvalue = self.getvalue(op.getarg(1))
if value.is_virtual():
- value.setfield(op.descr, fieldvalue)
+ value.setfield(op.getdescr(), fieldvalue)
else:
value.ensure_nonnull()
###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue)
self.emit_operation(op)
def optimize_NEW_WITH_VTABLE(self, op):
- self.make_virtual(op.args[0], op.result, op)
+ self.make_virtual(op.getarg(0), op.result, op)
def optimize_NEW(self, op):
- self.make_vstruct(op.descr, op.result, op)
+ self.make_vstruct(op.getdescr(), op.result, op)
def optimize_NEW_ARRAY(self, op):
- sizebox = self.get_constant_box(op.args[0])
+ sizebox = self.get_constant_box(op.getarg(0))
if sizebox is not None:
# if the original 'op' did not have a ConstInt as argument,
# build a new one with the ConstInt argument
- if not isinstance(op.args[0], ConstInt):
+ if not isinstance(op.getarg(0), ConstInt):
op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result,
- descr=op.descr)
- self.make_varray(op.descr, sizebox.getint(), op.result, op)
+ descr=op.getdescr())
+ self.make_varray(op.getdescr(), sizebox.getint(), op.result, op)
else:
###self.optimize_default(op)
self.emit_operation(op)
def optimize_ARRAYLEN_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
self.make_constant_int(op.result, value.getlength())
else:
@@ -395,9 +395,9 @@
self.emit_operation(op)
def optimize_GETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
+ indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
itemvalue = value.getitem(indexbox.getint())
self.make_equal_to(op.result, itemvalue)
@@ -411,22 +411,22 @@
optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC
def optimize_SETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
+ indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
- value.setitem(indexbox.getint(), self.getvalue(op.args[2]))
+ value.setitem(indexbox.getint(), self.getvalue(op.getarg(2)))
return
value.ensure_nonnull()
###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
self.emit_operation(op)
def optimize_ARRAYCOPY(self, op):
- source_value = self.getvalue(op.args[2])
- dest_value = self.getvalue(op.args[3])
- source_start_box = self.get_constant_box(op.args[4])
- dest_start_box = self.get_constant_box(op.args[5])
- length = self.get_constant_box(op.args[6])
+ source_value = self.getvalue(op.getarg(2))
+ dest_value = self.getvalue(op.getarg(3))
+ source_start_box = self.get_constant_box(op.getarg(4))
+ dest_start_box = self.get_constant_box(op.getarg(5))
+ length = self.get_constant_box(op.getarg(6))
if (source_value.is_virtual() and source_start_box and dest_start_box
and length and dest_value.is_virtual()):
# XXX optimize the case where dest value is not virtual,
@@ -439,13 +439,14 @@
return
if length and length.getint() == 0:
return # 0-length arraycopy
- descr = op.args[0]
+ descr = op.getarg(0)
assert isinstance(descr, AbstractDescr)
- self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
+ args = op.getarglist()[1:]
+ self.emit_operation(ResOperation(rop.CALL, args, op.result,
descr))
def propagate_forward(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
Modified: pypy/trunk/pypy/jit/metainterp/pyjitpl.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/pyjitpl.py (original)
+++ pypy/trunk/pypy/jit/metainterp/pyjitpl.py Wed Sep 22 14:17:16 2010
@@ -159,7 +159,7 @@
if got_type == history.INT:
self.registers_i[target_index] = resultbox
elif got_type == history.REF:
- #debug_print(' ->',
+ #debug_print(' ->',
# llmemory.cast_ptr_to_adr(resultbox.getref_base()))
self.registers_r[target_index] = resultbox
elif got_type == history.FLOAT:
@@ -446,7 +446,7 @@
def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
sizebox):
sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
- self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
+ self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
sbox, sizebox)
abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
sizebox)
@@ -1004,7 +1004,7 @@
resumedescr = compile.ResumeGuardDescr(metainterp_sd,
original_greenkey)
guard_op = metainterp.history.record(opnum, moreargs, None,
- descr=resumedescr)
+ descr=resumedescr)
virtualizable_boxes = None
if metainterp.jitdriver_sd.virtualizable_info is not None:
virtualizable_boxes = metainterp.virtualizable_boxes
@@ -1463,7 +1463,7 @@
resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes)
return resbox
- def _record_helper_pure(self, opnum, resbox, descr, *argboxes):
+ def _record_helper_pure(self, opnum, resbox, descr, *argboxes):
canfold = self._all_constants(*argboxes)
if canfold:
resbox = resbox.constbox() # ensure it is a Const
@@ -1472,7 +1472,7 @@
resbox = resbox.nonconstbox() # ensure it is a Box
return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
- def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
+ def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
canfold = self._all_constants_varargs(argboxes)
if canfold:
resbox = resbox.constbox() # ensure it is a Const
@@ -1485,7 +1485,7 @@
assert resbox is None or isinstance(resbox, Box)
# record the operation
profiler = self.staticdata.profiler
- profiler.count_ops(opnum, RECORDED_OPS)
+ profiler.count_ops(opnum, RECORDED_OPS)
op = self.history.record(opnum, argboxes, resbox, descr)
self.attach_debug_info(op)
return resbox
@@ -1667,7 +1667,7 @@
# Search in current_merge_points for original_boxes with compatible
# green keys, representing the beginning of the same loop as the one
- # we end now.
+ # we end now.
num_green_args = self.jitdriver_sd.num_green_args
for j in range(len(self.current_merge_points)-1, -1, -1):
@@ -1922,7 +1922,7 @@
vrefbox = self.virtualref_boxes[i+1]
# record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE
call_may_force_op = self.history.operations.pop()
- assert call_may_force_op.opnum == rop.CALL_MAY_FORCE
+ assert call_may_force_op.getopnum() == rop.CALL_MAY_FORCE
self.history.record(rop.VIRTUAL_REF_FINISH,
[vrefbox, virtualbox], None)
self.history.operations.append(call_may_force_op)
@@ -2088,10 +2088,10 @@
""" Patch a CALL into a CALL_PURE.
"""
op = self.history.operations[-1]
- assert op.opnum == rop.CALL
+ assert op.getopnum() == rop.CALL
resbox_as_const = resbox.constbox()
- for arg in op.args:
- if not isinstance(arg, Const):
+ for i in range(op.numargs()):
+ if not isinstance(op.getarg(i), Const):
break
else:
# all-constants: remove the CALL operation now and propagate a
@@ -2100,8 +2100,8 @@
return resbox_as_const
# not all constants (so far): turn CALL into CALL_PURE, which might
# be either removed later by optimizeopt or turned back into CALL.
- op.opnum = rop.CALL_PURE
- op.args = [resbox_as_const] + op.args
+ newop = op.copy_and_change(rop.CALL_PURE, args=[resbox_as_const]+op.getarglist())
+ self.history.operations[-1] = newop
return resbox
def direct_assembler_call(self, targetjitdriver_sd):
@@ -2109,10 +2109,11 @@
patching the CALL_MAY_FORCE that occurred just now.
"""
op = self.history.operations.pop()
- assert op.opnum == rop.CALL_MAY_FORCE
+ assert op.getopnum() == rop.CALL_MAY_FORCE
num_green_args = targetjitdriver_sd.num_green_args
- greenargs = op.args[1:num_green_args+1]
- args = op.args[num_green_args+1:]
+ arglist = op.getarglist()
+ greenargs = arglist[1:num_green_args+1]
+ args = arglist[num_green_args+1:]
assert len(args) == targetjitdriver_sd.num_red_args
vinfo = targetjitdriver_sd.virtualizable_info
if vinfo is not None:
@@ -2122,9 +2123,7 @@
# ^^^ and not "+=", which makes 'args' a resizable list
warmrunnerstate = targetjitdriver_sd.warmstate
token = warmrunnerstate.get_assembler_token(greenargs, args)
- op.opnum = rop.CALL_ASSEMBLER
- op.args = args
- op.descr = token
+ op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token)
self.history.operations.append(op)
# ____________________________________________________________
Modified: pypy/trunk/pypy/jit/metainterp/resoperation.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/resoperation.py (original)
+++ pypy/trunk/pypy/jit/metainterp/resoperation.py Wed Sep 22 14:17:16 2010
@@ -1,42 +1,90 @@
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.debug import make_sure_not_resized
-class ResOperation(object):
- """The central ResOperation class, representing one operation."""
+def ResOperation(opnum, args, result, descr=None):
+ cls = opclasses[opnum]
+ op = cls(result)
+ op.initarglist(args)
+ if descr is not None:
+ assert isinstance(op, ResOpWithDescr)
+ op.setdescr(descr)
+ return op
+
- # for 'guard_*'
- fail_args = None
+class AbstractResOp(object):
+ """The central ResOperation class, representing one operation."""
# debug
name = ""
pc = 0
- def __init__(self, opnum, args, result, descr=None):
- make_sure_not_resized(args)
- assert isinstance(opnum, int)
- self.opnum = opnum
- self.args = list(args)
- make_sure_not_resized(self.args)
- assert not isinstance(result, list)
+ def __init__(self, result):
self.result = result
- self.setdescr(descr)
+
+ # methods implemented by each concrete class
+ # ------------------------------------------
+
+ def getopnum(self):
+ raise NotImplementedError
+
+ # methods implemented by the arity mixins
+ # ---------------------------------------
+
+ def initarglist(self, args):
+ "This is supposed to be called only just after the ResOp has been created"
+ raise NotImplementedError
+
+ def getarglist(self):
+ raise NotImplementedError
+
+ def getarg(self, i):
+ raise NotImplementedError
+
+ def setarg(self, i, box):
+ raise NotImplementedError
+
+ def numargs(self):
+ raise NotImplementedError
+
+
+ # methods implemented by GuardResOp
+ # ---------------------------------
+
+ def getfailargs(self):
+ return None
+
+ def setfailargs(self, fail_args):
+ raise NotImplementedError
+
+ # methods implemented by ResOpWithDescr
+ # -------------------------------------
+
+ def getdescr(self):
+ return None
def setdescr(self, descr):
- # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt
- # instance provided by the backend holding details about the type
- # of the operation. It must inherit from AbstractDescr. The
- # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(),
- # cpu.calldescrof(), and cpu.typedescrof().
- from pypy.jit.metainterp.history import check_descr
- check_descr(descr)
- self.descr = descr
+ raise NotImplementedError
+
+ # common methods
+ # --------------
+
+ def copy_and_change(self, opnum, args=None, result=None, descr=None):
+ "shallow copy: the returned operation is meant to be used in place of self"
+ if args is None:
+ args = self.getarglist()
+ if result is None:
+ result = self.result
+ if descr is None:
+ descr = self.getdescr()
+ newop = ResOperation(opnum, args, result, descr)
+ return newop
def clone(self):
- descr = self.descr
+ args = self.getarglist()
+ descr = self.getdescr()
if descr is not None:
descr = descr.clone_if_mutable()
- op = ResOperation(self.opnum, self.args, self.result, descr)
- op.fail_args = self.fail_args
+ op = ResOperation(self.getopnum(), args, self.result, descr)
if not we_are_translated():
op.name = self.name
op.pc = self.pc
@@ -55,82 +103,271 @@
prefix = "%s:%s " % (self.name, self.pc)
else:
prefix = ""
- if self.descr is None or we_are_translated():
+ args = self.getarglist()
+ descr = self.getdescr()
+ if descr is None or we_are_translated():
return '%s%s%s(%s)' % (prefix, sres, self.getopname(),
- ', '.join([str(a) for a in self.args]))
+ ', '.join([str(a) for a in args]))
else:
return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(),
- ', '.join([str(a) for a in self.args]), self.descr)
+ ', '.join([str(a) for a in args]), descr)
def getopname(self):
try:
- return opname[self.opnum].lower()
+ return opname[self.getopnum()].lower()
except KeyError:
- return '<%d>' % self.opnum
+ return '<%d>' % self.getopnum()
def is_guard(self):
- return rop._GUARD_FIRST <= self.opnum <= rop._GUARD_LAST
+ return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST
def is_foldable_guard(self):
- return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST
+ return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST
def is_guard_exception(self):
- return (self.opnum == rop.GUARD_EXCEPTION or
- self.opnum == rop.GUARD_NO_EXCEPTION)
+ return (self.getopnum() == rop.GUARD_EXCEPTION or
+ self.getopnum() == rop.GUARD_NO_EXCEPTION)
def is_guard_overflow(self):
- return (self.opnum == rop.GUARD_OVERFLOW or
- self.opnum == rop.GUARD_NO_OVERFLOW)
+ return (self.getopnum() == rop.GUARD_OVERFLOW or
+ self.getopnum() == rop.GUARD_NO_OVERFLOW)
def is_always_pure(self):
- return rop._ALWAYS_PURE_FIRST <= self.opnum <= rop._ALWAYS_PURE_LAST
+ return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST
def has_no_side_effect(self):
- return rop._NOSIDEEFFECT_FIRST <= self.opnum <= rop._NOSIDEEFFECT_LAST
+ return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST
def can_raise(self):
- return rop._CANRAISE_FIRST <= self.opnum <= rop._CANRAISE_LAST
+ return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST
def is_ovf(self):
- return rop._OVF_FIRST <= self.opnum <= rop._OVF_LAST
+ return rop._OVF_FIRST <= self.getopnum() <= rop._OVF_LAST
def is_comparison(self):
return self.is_always_pure() and self.returns_bool_result()
def is_final(self):
- return rop._FINAL_FIRST <= self.opnum <= rop._FINAL_LAST
+ return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST
def returns_bool_result(self):
- opnum = self.opnum
+ opnum = self.getopnum()
if we_are_translated():
assert opnum >= 0
elif opnum < 0:
return False # for tests
return opboolresult[opnum]
+
+# ====================
+# Top of the hierarchy
+# ====================
+
+class PlainResOp(AbstractResOp):
+ pass
+
+class ResOpWithDescr(AbstractResOp):
+
+ _descr = None
+
+ def getdescr(self):
+ return self._descr
+
+ def setdescr(self, descr):
+ # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt
+ # instance provided by the backend holding details about the type
+ # of the operation. It must inherit from AbstractDescr. The
+ # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(),
+ # cpu.calldescrof(), and cpu.typedescrof().
+ from pypy.jit.metainterp.history import check_descr
+ check_descr(descr)
+ self._descr = descr
+
+class GuardResOp(ResOpWithDescr):
+
+ _fail_args = None
+
+ def getfailargs(self):
+ return self._fail_args
+
+ def setfailargs(self, fail_args):
+ self._fail_args = fail_args
+
+ def copy_and_change(self, opnum, args=None, result=None, descr=None):
+ newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr)
+ newop.setfailargs(self.getfailargs())
+ return newop
+
+ def clone(self):
+ newop = AbstractResOp.clone(self)
+ newop.setfailargs(self.getfailargs())
+ return newop
+
+
+# ============
+# arity mixins
+# ============
+
+class NullaryOp(object):
+ _mixin_ = True
+
+ def initarglist(self, args):
+ assert len(args) == 0
+
+ def getarglist(self):
+ return []
+
+ def numargs(self):
+ return 0
+
+ def getarg(self, i):
+ raise IndexError
+
+ def setarg(self, i, box):
+ raise IndexError
+
+
+class UnaryOp(object):
+ _mixin_ = True
+ _arg0 = None
+
+ def initarglist(self, args):
+ assert len(args) == 1
+ self._arg0, = args
+
+ def getarglist(self):
+ return [self._arg0]
+
+ def numargs(self):
+ return 1
+
+ def getarg(self, i):
+ if i == 0:
+ return self._arg0
+ else:
+ raise IndexError
+
+ def setarg(self, i, box):
+ if i == 0:
+ self._arg0 = box
+ else:
+ raise IndexError
+
+
+class BinaryOp(object):
+ _mixin_ = True
+ _arg0 = None
+ _arg1 = None
+
+ def initarglist(self, args):
+ assert len(args) == 2
+ self._arg0, self._arg1 = args
+
+ def getarglist(self):
+ return [self._arg0, self._arg1]
+
+ def numargs(self):
+ return 2
+
+ def getarg(self, i):
+ if i == 0:
+ return self._arg0
+ elif i == 1:
+ return self._arg1
+ else:
+ raise IndexError
+
+ def setarg(self, i, box):
+ if i == 0:
+ self._arg0 = box
+ elif i == 1:
+ self._arg1 = box
+ else:
+ raise IndexError
+
+ def getarglist(self):
+ return [self._arg0, self._arg1]
+
+
+class TernaryOp(object):
+ _mixin_ = True
+ _arg0 = None
+ _arg1 = None
+ _arg2 = None
+
+ def initarglist(self, args):
+ assert len(args) == 3
+ self._arg0, self._arg1, self._arg2 = args
+
+ def getarglist(self):
+ return [self._arg0, self._arg1, self._arg2]
+
+ def numargs(self):
+ return 3
+
+ def getarg(self, i):
+ if i == 0:
+ return self._arg0
+ elif i == 1:
+ return self._arg1
+ elif i == 2:
+ return self._arg2
+ else:
+ raise IndexError
+
+ def setarg(self, i, box):
+ if i == 0:
+ self._arg0 = box
+ elif i == 1:
+ self._arg1 = box
+ elif i == 2:
+ self._arg2 = box
+ else:
+ raise IndexError
+
+class N_aryOp(object):
+ _mixin_ = True
+ _args = None
+
+ def initarglist(self, args):
+ self._args = args
+
+ def getarglist(self):
+ return self._args
+
+ def numargs(self):
+ return len(self._args)
+
+ def getarg(self, i):
+ return self._args[i]
+
+ def setarg(self, i, box):
+ self._args[i] = box
+
+
# ____________________________________________________________
_oplist = [
'_FINAL_FIRST',
- 'JUMP',
- 'FINISH',
+ 'JUMP/*d',
+ 'FINISH/*d',
'_FINAL_LAST',
'_GUARD_FIRST',
'_GUARD_FOLDABLE_FIRST',
- 'GUARD_TRUE',
- 'GUARD_FALSE',
- 'GUARD_VALUE',
- 'GUARD_CLASS',
- 'GUARD_NONNULL',
- 'GUARD_ISNULL',
- 'GUARD_NONNULL_CLASS',
+ 'GUARD_TRUE/1d',
+ 'GUARD_FALSE/1d',
+ 'GUARD_VALUE/2d',
+ 'GUARD_CLASS/2d',
+ 'GUARD_NONNULL/1d',
+ 'GUARD_ISNULL/1d',
+ 'GUARD_NONNULL_CLASS/2d',
'_GUARD_FOLDABLE_LAST',
- 'GUARD_NO_EXCEPTION',
- 'GUARD_EXCEPTION',
- 'GUARD_NO_OVERFLOW',
- 'GUARD_OVERFLOW',
- 'GUARD_NOT_FORCED',
+ 'GUARD_NO_EXCEPTION/0d',
+ 'GUARD_EXCEPTION/1d',
+ 'GUARD_NO_OVERFLOW/0d',
+ 'GUARD_OVERFLOW/0d',
+ 'GUARD_NOT_FORCED/0d',
'_GUARD_LAST', # ----- end of guard operations -----
'_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations -----
@@ -218,19 +455,19 @@
'STRSETITEM/3',
'UNICODESETITEM/3',
'NEWUNICODE/1',
- #'RUNTIMENEW/1', # ootype operation
- 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier)
+ #'RUNTIMENEW/1', # ootype operation
+ 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier)
'DEBUG_MERGE_POINT/1', # debugging only
'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend
'_CANRAISE_FIRST', # ----- start of can_raise operations -----
- 'CALL',
- 'CALL_ASSEMBLER',
- 'CALL_MAY_FORCE',
- 'CALL_LOOPINVARIANT',
+ 'CALL/*d',
+ 'CALL_ASSEMBLER/*d',
+ 'CALL_MAY_FORCE/*d',
+ 'CALL_LOOPINVARIANT/*d',
#'OOSEND', # ootype operation
#'OOSEND_PURE', # ootype operation
- 'CALL_PURE', # removed before it's passed to the backend
+ 'CALL_PURE/*d', # removed before it's passed to the backend
# CALL_PURE(result, func, arg_1,..,arg_n)
'_CANRAISE_LAST', # ----- end of can_raise operations -----
@@ -247,6 +484,7 @@
class rop(object):
pass
+opclasses = [] # mapping numbers to the concrete ResOp class
opname = {} # mapping numbers to the original names, for debugging
oparity = [] # mapping numbers to the arity of the operation or -1
opwithdescr = [] # mapping numbers to a flag "takes a descr"
@@ -261,16 +499,62 @@
name, arity = name.split('/')
withdescr = 'd' in arity
boolresult = 'b' in arity
- arity = int(arity.rstrip('db'))
+ arity = arity.rstrip('db')
+ if arity == '*':
+ arity = -1
+ else:
+ arity = int(arity)
else:
arity, withdescr, boolresult = -1, True, False # default
setattr(rop, name, i)
if not name.startswith('_'):
opname[i] = name
+ cls = create_class_for_op(name, i, arity, withdescr)
+ else:
+ cls = None
+ opclasses.append(cls)
oparity.append(arity)
opwithdescr.append(withdescr)
opboolresult.append(boolresult)
- assert len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist)
+ assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist)
+
+def get_base_class(mixin, base):
+ try:
+ return get_base_class.cache[(mixin, base)]
+ except KeyError:
+ arity_name = mixin.__name__[:-2] # remove the trailing "Op"
+ name = arity_name + base.__name__ # something like BinaryPlainResOp
+ bases = (mixin, base)
+ cls = type(name, bases, {})
+ get_base_class.cache[(mixin, base)] = cls
+ return cls
+get_base_class.cache = {}
+
+def create_class_for_op(name, opnum, arity, withdescr):
+ arity2mixin = {
+ 0: NullaryOp,
+ 1: UnaryOp,
+ 2: BinaryOp,
+ 3: TernaryOp
+ }
+
+ is_guard = name.startswith('GUARD')
+ if is_guard:
+ assert withdescr
+ baseclass = GuardResOp
+ elif withdescr:
+ baseclass = ResOpWithDescr
+ else:
+ baseclass = PlainResOp
+ mixin = arity2mixin.get(arity, N_aryOp)
+
+ def getopnum(self):
+ return opnum
+
+ cls_name = '%s_OP' % name
+ bases = (get_base_class(mixin, baseclass),)
+ dic = {'getopnum': getopnum}
+ return type(cls_name, bases, dic)
setup(__name__ == '__main__') # print out the table when run directly
del _oplist
Modified: pypy/trunk/pypy/jit/metainterp/simple_optimize.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/simple_optimize.py (original)
+++ pypy/trunk/pypy/jit/metainterp/simple_optimize.py Wed Sep 22 14:17:16 2010
@@ -11,15 +11,17 @@
from pypy.jit.metainterp.history import AbstractDescr
# change ARRAYCOPY to call, so we don't have to pass around
# unnecessary information to the backend. Do the same with VIRTUAL_REF_*.
- if op.opnum == rop.ARRAYCOPY:
- descr = op.args[0]
+ if op.getopnum() == rop.ARRAYCOPY:
+ descr = op.getarg(0)
assert isinstance(descr, AbstractDescr)
- op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr)
- elif op.opnum == rop.CALL_PURE:
- op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr)
- elif op.opnum == rop.VIRTUAL_REF:
- op = ResOperation(rop.SAME_AS, [op.args[0]], op.result)
- elif op.opnum == rop.VIRTUAL_REF_FINISH:
+ args = op.getarglist()[1:]
+ op = ResOperation(rop.CALL, args, op.result, descr=descr)
+ elif op.getopnum() == rop.CALL_PURE:
+ args = op.getarglist()[1:]
+ op = ResOperation(rop.CALL, args, op.result, op.getdescr())
+ elif op.getopnum() == rop.VIRTUAL_REF:
+ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result)
+ elif op.getopnum() == rop.VIRTUAL_REF_FINISH:
return []
return [op]
@@ -36,7 +38,7 @@
newoperations = []
for op in loop.operations:
if op.is_guard():
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
modifier = resume.ResumeDataVirtualAdder(descr, memo)
newboxes = modifier.finish(EMPTY_VALUES)
Modified: pypy/trunk/pypy/jit/metainterp/test/oparser.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/oparser.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/oparser.py Wed Sep 22 14:17:16 2010
@@ -6,7 +6,7 @@
from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\
ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\
LoopToken
-from pypy.jit.metainterp.resoperation import rop, ResOperation
+from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp
from pypy.jit.metainterp.typesystem import llhelper
from pypy.jit.codewriter.heaptracker import adr2int
from pypy.rpython.lltypesystem import lltype, llmemory
@@ -16,17 +16,29 @@
class ParseError(Exception):
pass
-
class Boxes(object):
pass
+class ESCAPE_OP(N_aryOp, ResOpWithDescr):
+
+ OPNUM = -123
+
+ def __init__(self, opnum, args, result, descr=None):
+ assert opnum == self.OPNUM
+ self.result = result
+ self.initarglist(args)
+ self.setdescr(descr)
+
+ def getopnum(self):
+ return self.OPNUM
+
class ExtendedTreeLoop(TreeLoop):
def getboxes(self):
def opboxes(operations):
for op in operations:
yield op.result
- for box in op.args:
+ for box in op.getarglist():
yield box
def allboxes():
for box in self.inputargs:
@@ -171,7 +183,7 @@
opnum = getattr(rop, opname.upper())
except AttributeError:
if opname == 'escape':
- opnum = -123
+ opnum = ESCAPE_OP.OPNUM
else:
raise ParseError("unknown op: %s" % opname)
endnum = line.rfind(')')
@@ -228,6 +240,12 @@
descr = self.looptoken
return opnum, args, descr, fail_args
+ def create_op(self, opnum, args, result, descr):
+ if opnum == ESCAPE_OP.OPNUM:
+ return ESCAPE_OP(opnum, args, result, descr)
+ else:
+ return ResOperation(opnum, args, result, descr)
+
def parse_result_op(self, line):
res, op = line.split("=", 1)
res = res.strip()
@@ -237,14 +255,16 @@
raise ParseError("Double assign to var %s in line: %s" % (res, line))
rvar = self.box_for_var(res)
self.vars[res] = rvar
- res = ResOperation(opnum, args, rvar, descr)
- res.fail_args = fail_args
+ res = self.create_op(opnum, args, rvar, descr)
+ if fail_args is not None:
+ res.setfailargs(fail_args)
return res
def parse_op_no_result(self, line):
opnum, args, descr, fail_args = self.parse_op(line)
- res = ResOperation(opnum, args, None, descr)
- res.fail_args = fail_args
+ res = self.create_op(opnum, args, None, descr)
+ if fail_args is not None:
+ res.setfailargs(fail_args)
return res
def parse_next_op(self, line):
Modified: pypy/trunk/pypy/jit/metainterp/test/test_basic.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/test_basic.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/test_basic.py Wed Sep 22 14:17:16 2010
@@ -296,7 +296,7 @@
found = 0
for op in get_stats().loops[0]._all_operations():
if op.getopname() == 'guard_true':
- liveboxes = op.fail_args
+ liveboxes = op.getfailargs()
assert len(liveboxes) == 3
for box in liveboxes:
assert isinstance(box, history.BoxInt)
Modified: pypy/trunk/pypy/jit/metainterp/test/test_logger.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/test_logger.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/test_logger.py Wed Sep 22 14:17:16 2010
@@ -100,8 +100,8 @@
debug_merge_point("info")
'''
loop, oloop = self.reparse(inp, check_equal=False)
- assert loop.operations[0].args[0]._get_str() == 'info'
- assert oloop.operations[0].args[0]._get_str() == 'info'
+ assert loop.operations[0].getarg(0)._get_str() == 'info'
+ assert oloop.operations[0].getarg(0)._get_str() == 'info'
def test_floats(self):
inp = '''
Modified: pypy/trunk/pypy/jit/metainterp/test/test_loop.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/test_loop.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/test_loop.py Wed Sep 22 14:17:16 2010
@@ -178,7 +178,7 @@
found = 0
for op in get_stats().loops[0]._all_operations():
if op.getopname() == 'guard_true':
- liveboxes = op.fail_args
+ liveboxes = op.getfailargs()
assert len(liveboxes) == 2 # x, y (in some order)
assert isinstance(liveboxes[0], history.BoxInt)
assert isinstance(liveboxes[1], history.BoxInt)
Modified: pypy/trunk/pypy/jit/metainterp/test/test_oparser.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/test_oparser.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/test_oparser.py Wed Sep 22 14:17:16 2010
@@ -16,10 +16,10 @@
"""
loop = parse(x)
assert len(loop.operations) == 3
- assert [op.opnum for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB,
+ assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB,
rop.FINISH]
assert len(loop.inputargs) == 2
- assert loop.operations[-1].descr
+ assert loop.operations[-1].getdescr()
def test_const_ptr_subops():
x = """
@@ -30,8 +30,8 @@
vtable = lltype.nullptr(S)
loop = parse(x, None, locals())
assert len(loop.operations) == 1
- assert loop.operations[0].descr
- assert loop.operations[0].fail_args == []
+ assert loop.operations[0].getdescr()
+ assert loop.operations[0].getfailargs() == []
def test_descr():
class Xyz(AbstractDescr):
@@ -43,7 +43,7 @@
"""
stuff = Xyz()
loop = parse(x, None, locals())
- assert loop.operations[0].descr is stuff
+ assert loop.operations[0].getdescr() is stuff
def test_after_fail():
x = """
@@ -64,7 +64,7 @@
"""
stuff = Xyz()
loop = parse(x, None, locals())
- assert loop.operations[0].descr is stuff
+ assert loop.operations[0].getdescr() is stuff
def test_boxname():
x = """
@@ -111,7 +111,7 @@
TP = lltype.GcArray(lltype.Signed)
NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP))
loop = parse(x, None, {'func_ptr' : NULL})
- assert loop.operations[0].args[0].value == NULL
+ assert loop.operations[0].getarg(0).value == NULL
def test_jump_target():
x = '''
@@ -119,7 +119,7 @@
jump()
'''
loop = parse(x)
- assert loop.operations[0].descr is loop.token
+ assert loop.operations[0].getdescr() is loop.token
def test_jump_target_other():
looptoken = LoopToken()
@@ -128,7 +128,7 @@
jump(descr=looptoken)
'''
loop = parse(x, namespace=locals())
- assert loop.operations[0].descr is looptoken
+ assert loop.operations[0].getdescr() is looptoken
def test_floats():
x = '''
@@ -136,7 +136,7 @@
f1 = float_add(f0, 3.5)
'''
loop = parse(x)
- assert isinstance(loop.operations[0].args[0], BoxFloat)
+ assert isinstance(loop.operations[0].getarg(0), BoxFloat)
def test_debug_merge_point():
x = '''
@@ -147,10 +147,10 @@
debug_merge_point('(stuff) #1')
'''
loop = parse(x)
- assert loop.operations[0].args[0]._get_str() == 'info'
- assert loop.operations[1].args[0]._get_str() == 'info'
- assert loop.operations[2].args[0]._get_str() == "<some ('other,')> info"
- assert loop.operations[3].args[0]._get_str() == "(stuff) #1"
+ assert loop.operations[0].getarg(0)._get_str() == 'info'
+ assert loop.operations[1].getarg(0)._get_str() == 'info'
+ assert loop.operations[2].getarg(0)._get_str() == "<some ('other,')> info"
+ assert loop.operations[3].getarg(0)._get_str() == "(stuff) #1"
def test_descr_with_obj_print():
Modified: pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/test_optimizeopt.py Wed Sep 22 14:17:16 2010
@@ -42,7 +42,7 @@
opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu),
None)
fdescr = ResumeGuardDescr(None, None)
- op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr)
+ op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr)
# setup rd data
fi0 = resume.FrameInfo(None, "code0", 11)
fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33)
@@ -50,11 +50,11 @@
fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1])
#
opt.store_final_boxes_in_guard(op)
- if op.fail_args == [b0, b1]:
+ if op.getfailargs() == [b0, b1]:
assert fdescr.rd_numb.nums == [tag(1, TAGBOX)]
assert fdescr.rd_numb.prev.nums == [tag(0, TAGBOX)]
else:
- assert op.fail_args == [b1, b0]
+ assert op.getfailargs() == [b1, b0]
assert fdescr.rd_numb.nums == [tag(0, TAGBOX)]
assert fdescr.rd_numb.prev.nums == [tag(1, TAGBOX)]
assert fdescr.rd_virtuals is None
@@ -140,24 +140,26 @@
print '%-39s| %s' % (txt1[:39], txt2[:39])
txt1 = txt1[39:]
txt2 = txt2[39:]
- assert op1.opnum == op2.opnum
- assert len(op1.args) == len(op2.args)
- for x, y in zip(op1.args, op2.args):
+ assert op1.getopnum() == op2.getopnum()
+ assert op1.numargs() == op2.numargs()
+ for i in range(op1.numargs()):
+ x = op1.getarg(i)
+ y = op2.getarg(i)
assert x == remap.get(y, y)
if op2.result in remap:
assert op1.result == remap[op2.result]
else:
remap[op2.result] = op1.result
- if op1.opnum != rop.JUMP: # xxx obscure
- assert op1.descr == op2.descr
- if op1.fail_args or op2.fail_args:
- assert len(op1.fail_args) == len(op2.fail_args)
+ if op1.getopnum() != rop.JUMP: # xxx obscure
+ assert op1.getdescr() == op2.getdescr()
+ if op1.getfailargs() or op2.getfailargs():
+ assert len(op1.getfailargs()) == len(op2.getfailargs())
if strict_fail_args:
- for x, y in zip(op1.fail_args, op2.fail_args):
+ for x, y in zip(op1.getfailargs(), op2.getfailargs()):
assert x == remap.get(y, y)
else:
- fail_args1 = set(op1.fail_args)
- fail_args2 = set([remap.get(y, y) for y in op2.fail_args])
+ fail_args1 = set(op1.getfailargs())
+ fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()])
assert fail_args1 == fail_args2
assert len(oplist1) == len(oplist2)
print '-'*57
@@ -209,7 +211,7 @@
self.metainterp_sd = metainterp_sd
self.original_greenkey = original_greenkey
def store_final_boxes(self, op, boxes):
- op.fail_args = boxes
+ op.setfailargs(boxes)
def __eq__(self, other):
return type(self) is type(other) # xxx obscure
@@ -2361,8 +2363,8 @@
from pypy.jit.metainterp.test.test_resume import ResumeDataFakeReader
from pypy.jit.metainterp.test.test_resume import MyMetaInterp
guard_op, = [op for op in self.loop.operations if op.is_guard()]
- fail_args = guard_op.fail_args
- fdescr = guard_op.descr
+ fail_args = guard_op.getfailargs()
+ fdescr = guard_op.getdescr()
assert fdescr.guard_opnum == guard_opnum
reader = ResumeDataFakeReader(fdescr, fail_args,
MyMetaInterp(self.cpu))
Modified: pypy/trunk/pypy/jit/metainterp/test/test_recursive.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/test_recursive.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/test_recursive.py Wed Sep 22 14:17:16 2010
@@ -319,8 +319,8 @@
for loop in get_stats().loops:
assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode
for op in loop.operations:
- if op.is_guard() and hasattr(op.descr, '_debug_suboperations'):
- assert len(op.descr._debug_suboperations) <= length + 5
+ if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'):
+ assert len(op.getdescr()._debug_suboperations) <= length + 5
def test_inline_trace_limit(self):
myjitdriver = JitDriver(greens=[], reds=['n'])
Modified: pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py
==============================================================================
--- pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py (original)
+++ pypy/trunk/pypy/jit/metainterp/test/test_virtualref.py Wed Sep 22 14:17:16 2010
@@ -71,11 +71,11 @@
#
ops = self.metainterp.staticdata.stats.loops[0].operations
[guard_op] = [op for op in ops
- if op.opnum == rop.GUARD_NOT_FORCED]
- bxs1 = [box for box in guard_op.fail_args
+ if op.getopnum() == rop.GUARD_NOT_FORCED]
+ bxs1 = [box for box in guard_op.getfailargs()
if str(box._getrepr_()).endswith('.X')]
assert len(bxs1) == 1
- bxs2 = [box for box in guard_op.fail_args
+ bxs2 = [box for box in guard_op.getfailargs()
if str(box._getrepr_()).endswith('JitVirtualRef')]
assert len(bxs2) == 1
JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF
@@ -84,11 +84,11 @@
# try reloading from blackhole.py's point of view
from pypy.jit.metainterp.resume import ResumeDataDirectReader
cpu = self.metainterp.cpu
- cpu.get_latest_value_count = lambda : len(guard_op.fail_args)
- cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint()
- cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base()
+ cpu.get_latest_value_count = lambda : len(guard_op.getfailargs())
+ cpu.get_latest_value_int = lambda i:guard_op.getfailargs()[i].getint()
+ cpu.get_latest_value_ref = lambda i:guard_op.getfailargs()[i].getref_base()
cpu.clear_latest_values = lambda count: None
- resumereader = ResumeDataDirectReader(cpu, guard_op.descr)
+ resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr())
vrefinfo = self.metainterp.staticdata.virtualref_info
lst = []
vrefinfo.continue_tracing = lambda vref, virtual: \
@@ -100,7 +100,7 @@
lst[0][0]) # assert correct type
#
# try reloading from pyjitpl's point of view
- self.metainterp.rebuild_state_after_failure(guard_op.descr)
+ self.metainterp.rebuild_state_after_failure(guard_op.getdescr())
assert len(self.metainterp.framestack) == 1
assert len(self.metainterp.virtualref_boxes) == 2
assert self.metainterp.virtualref_boxes[0].value == bxs1[0].value
Modified: pypy/trunk/pypy/jit/tool/showstats.py
==============================================================================
--- pypy/trunk/pypy/jit/tool/showstats.py (original)
+++ pypy/trunk/pypy/jit/tool/showstats.py Wed Sep 22 14:17:16 2010
@@ -17,7 +17,7 @@
num_dmp = 0
num_guards = 0
for op in loop.operations:
- if op.opnum == rop.DEBUG_MERGE_POINT:
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
num_dmp += 1
else:
num_ops += 1
More information about the Pypy-commit
mailing list