[pypy-commit] pypy optresult: hack enough to import optimizeopt tests
fijal
noreply at buildbot.pypy.org
Thu Nov 13 14:23:38 CET 2014
Author: Maciej Fijalkowski <fijall at gmail.com>
Branch: optresult
Changeset: r74500:64caf45b4229
Date: 2014-11-12 13:54 +0200
http://bitbucket.org/pypy/pypy/changeset/64caf45b4229/
Log: hack enough to import optimizeopt tests
diff --git a/rpython/jit/metainterp/executor.py b/rpython/jit/metainterp/executor.py
--- a/rpython/jit/metainterp/executor.py
+++ b/rpython/jit/metainterp/executor.py
@@ -75,32 +75,48 @@
return None
raise AssertionError("bad rettype")
-do_call_loopinvariant = do_call
-do_call_may_force = do_call
+do_call_r = do_call
+do_call_i = do_call
+do_call_f = do_call
+do_call_n = do_call
+do_call_loopinvariant_r = do_call
+do_call_loopinvariant_i = do_call
+do_call_loopinvariant_f = do_call
+do_call_loopinvariant_n = do_call
+do_call_may_force_r = do_call
+do_call_may_force_i = do_call
+do_call_may_force_f = do_call
+do_call_may_force_n = do_call
def do_cond_call(cpu, metainterp, argboxes, descr):
condbox = argboxes[0]
if condbox.getint():
- do_call(cpu, metainterp, argboxes[1:], descr)
+ do_call_n(cpu, metainterp, argboxes[1:], descr)
-def do_getarrayitem_gc(cpu, _, arraybox, indexbox, arraydescr):
+def do_getarrayitem_gc_i(cpu, _, arraybox, indexbox, arraydescr):
array = arraybox.getref_base()
index = indexbox.getint()
- if arraydescr.is_array_of_pointers():
- return BoxPtr(cpu.bh_getarrayitem_gc_r(array, index, arraydescr))
- elif arraydescr.is_array_of_floats():
- return BoxFloat(cpu.bh_getarrayitem_gc_f(array, index, arraydescr))
- else:
- return BoxInt(cpu.bh_getarrayitem_gc_i(array, index, arraydescr))
+ return cpu.bh_getarrayitem_gc_i(array, index, arraydescr)
-def do_getarrayitem_raw(cpu, _, arraybox, indexbox, arraydescr):
+def do_getarrayitem_gc_r(cpu, _, arraybox, indexbox, arraydescr):
+ array = arraybox.getref_base()
+ index = indexbox.getint()
+ return cpu.bh_getarrayitem_gc_r(array, index, arraydescr)
+
+def do_getarrayitem_gc_f(cpu, _, arraybox, indexbox, arraydescr):
+ array = arraybox.getref_base()
+ index = indexbox.getint()
+ return cpu.bh_getarrayitem_gc_f(array, index, arraydescr)
+
+def do_getarrayitem_raw_i(cpu, _, arraybox, indexbox, arraydescr):
array = arraybox.getint()
index = indexbox.getint()
- assert not arraydescr.is_array_of_pointers()
- if arraydescr.is_array_of_floats():
- return BoxFloat(cpu.bh_getarrayitem_raw_f(array, index, arraydescr))
- else:
- return BoxInt(cpu.bh_getarrayitem_raw_i(array, index, arraydescr))
+ return cpu.bh_getarrayitem_raw_i(array, index, arraydescr)
+
+def do_getarrayitem_raw_f(cpu, _, arraybox, indexbox, arraydescr):
+ array = arraybox.getint()
+ index = indexbox.getint()
+ return cpu.bh_getarrayitem_raw_f(array, index, arraydescr)
def do_setarrayitem_gc(cpu, _, arraybox, indexbox, itembox, arraydescr):
array = arraybox.getref_base()
@@ -146,14 +162,17 @@
else:
cpu.bh_setinteriorfield_gc_i(array, index, valuebox.getint(), descr)
-def do_getfield_gc(cpu, _, structbox, fielddescr):
+def do_getfield_gc_i(cpu, _, structbox, fielddescr):
struct = structbox.getref_base()
- if fielddescr.is_pointer_field():
- return BoxPtr(cpu.bh_getfield_gc_r(struct, fielddescr))
- elif fielddescr.is_float_field():
- return BoxFloat(cpu.bh_getfield_gc_f(struct, fielddescr))
- else:
- return BoxInt(cpu.bh_getfield_gc_i(struct, fielddescr))
+ return cpu.bh_getfield_gc_i(struct, fielddescr)
+
+def do_getfield_gc_r(cpu, _, structbox, fielddescr):
+ struct = structbox.getref_base()
+ return cpu.bh_getfield_gc_r(struct, fielddescr)
+
+def do_getfield_gc_f(cpu, _, structbox, fielddescr):
+ struct = structbox.getref_base()
+ return cpu.bh_getfield_gc_f(struct, fielddescr)
def do_getfield_raw(cpu, _, structbox, fielddescr):
check_descr(fielddescr)
@@ -246,8 +265,14 @@
z = 0
return BoxInt(z)
-def do_same_as(cpu, _, box):
- return box.clonebox()
+def do_same_as_i(cpu, _, v):
+ return v
+
+def do_same_as_r(cpu, _, v):
+ return v
+
+def do_same_as_f(cpu, _, v):
+ return v
def do_copystrcontent(cpu, _, srcbox, dstbox,
srcstartbox, dststartbox, lengthbox):
@@ -301,8 +326,8 @@
continue
#
# Maybe the same without the _PURE suffix?
- if key.endswith('_PURE'):
- key = key[:-5]
+ if key[-7:-2] == '_PURE':
+ key = key[:-7] + key[-2:]
name = 'do_' + key.lower()
if name in globals():
execute[value] = globals()[name]
@@ -321,7 +346,10 @@
execute[value] = func
continue
if value in (rop.FORCE_TOKEN,
- rop.CALL_ASSEMBLER,
+ rop.CALL_ASSEMBLER_R,
+ rop.CALL_ASSEMBLER_F,
+ rop.CALL_ASSEMBLER_I,
+ rop.CALL_ASSEMBLER_N,
rop.INCREMENT_DEBUG_COUNTER,
rop.COND_CALL_GC_WB,
rop.COND_CALL_GC_WB_ARRAY,
@@ -331,7 +359,10 @@
rop.JIT_DEBUG,
rop.SETARRAYITEM_RAW,
rop.SETINTERIORFIELD_RAW,
- rop.CALL_RELEASE_GIL,
+ rop.CALL_RELEASE_GIL_I,
+ rop.CALL_RELEASE_GIL_R,
+ rop.CALL_RELEASE_GIL_F,
+ rop.CALL_RELEASE_GIL_N,
rop.QUASIIMMUT_FIELD,
rop.CALL_MALLOC_GC,
rop.CALL_MALLOC_NURSERY,
@@ -352,10 +383,7 @@
return None
if list(func.argtypes).count('d') > 1:
return None
- if func.resulttype not in ('i', 'r', 'f', None):
- return None
argtypes = unrolling_iterable(func.argtypes)
- resulttype = func.resulttype
#
def do(cpu, _, *argboxes):
newargs = ()
@@ -375,12 +403,7 @@
newargs = newargs + (value,)
assert not argboxes
#
- result = func(*newargs)
- #
- if resulttype == 'i': return BoxInt(result)
- if resulttype == 'r': return BoxPtr(result)
- if resulttype == 'f': return BoxFloat(result)
- return None
+ return func(*newargs)
#
do.func_name = 'do_' + name
return do
@@ -408,6 +431,7 @@
def execute(cpu, metainterp, opnum, descr, *argboxes):
+ xxx
# only for opnums with a fixed arity
num_args = len(argboxes)
withdescr = has_descr(opnum)
@@ -422,6 +446,7 @@
execute._annspecialcase_ = 'specialize:arg(2)'
def execute_varargs(cpu, metainterp, opnum, argboxes, descr):
+ xxxx
# only for opnums with a variable arity (calls, typically)
check_descr(descr)
func = get_execute_function(opnum, -1, True)
@@ -430,6 +455,7 @@
def execute_nonspec(cpu, metainterp, opnum, argboxes, descr=None):
+ xxxx
arity = resoperation.oparity[opnum]
assert arity == -1 or len(argboxes) == arity
if resoperation.opwithdescr[opnum]:
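The executor hunk above replaces the single untyped do_call (and the pointer/float/int branching inside the do_get* helpers) with one helper per result type, and the table-building loop now strips an inner '_PURE' from names such as CALL_PURE_I before looking up do_call_i in globals(). A minimal sketch of that naming convention, with invented opnames and a dummy handler:

    # Sketch only: mirrors the "do_<opname lowercased>" lookup used to build
    # the execute table; the opnames and the handler body here are made up.
    def do_call_i(cpu, metainterp, argboxes, descr):
        return 42                       # placeholder integer result

    OPNAMES = ['CALL_I', 'CALL_PURE_I', 'FORCE_TOKEN']

    def build_execute_table(namespace):
        execute = {}
        for opname in OPNAMES:
            key = opname
            if key[-7:-2] == '_PURE':   # CALL_PURE_I -> CALL_I, keep the suffix
                key = key[:-7] + key[-2:]
            name = 'do_' + key.lower()
            if name in namespace:
                execute[opname] = namespace[name]
        return execute

    table = build_execute_table(globals())
    assert table['CALL_I'] is do_call_i
    assert table['CALL_PURE_I'] is do_call_i   # _PURE reuses the plain handler
    assert 'FORCE_TOKEN' not in table          # no do_force_token defined here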
diff --git a/rpython/jit/metainterp/heapcache.py b/rpython/jit/metainterp/heapcache.py
--- a/rpython/jit/metainterp/heapcache.py
+++ b/rpython/jit/metainterp/heapcache.py
@@ -76,7 +76,8 @@
self.dependencies.setdefault(box, []).append(valuebox)
else:
self._escape(valuebox)
- elif (opnum == rop.CALL and
+ elif ((opnum == rop.CALL_R or opnum == rop.CALL_I or
+ opnum == rop.CALL_N or opnum == rop.CALL_F) and
descr.get_extra_info().oopspecindex == descr.get_extra_info().OS_ARRAYCOPY and
isinstance(argboxes[3], ConstInt) and
isinstance(argboxes[4], ConstInt) and
@@ -87,8 +88,12 @@
pass
# GETFIELD_GC, MARK_OPAQUE_PTR, PTR_EQ, and PTR_NE don't escape their
# arguments
- elif (opnum != rop.GETFIELD_GC and
- opnum != rop.GETFIELD_GC_PURE and
+ elif (opnum != rop.GETFIELD_GC_R and
+ opnum != rop.GETFIELD_GC_I and
+ opnum != rop.GETFIELD_GC_F and
+ opnum != rop.GETFIELD_GC_PURE_R and
+ opnum != rop.GETFIELD_GC_PURE_I and
+ opnum != rop.GETFIELD_GC_PURE_F and
opnum != rop.MARK_OPAQUE_PTR and
opnum != rop.PTR_EQ and
opnum != rop.PTR_NE and
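With one opnum per result type, the single opnum == rop.CALL test in heapcache becomes a chain over CALL_R, CALL_I, CALL_N and CALL_F, and likewise for GETFIELD_GC and GETFIELD_GC_PURE. The chained comparisons are what the branch actually uses; purely as an illustration of an equivalent membership check, with made-up opnum values:

    # Illustration only: rop constants are really generated from the _oplist
    # table in resoperation.py; the numbers here are arbitrary.
    class rop(object):
        CALL_I, CALL_R, CALL_F, CALL_N = 1, 2, 3, 4
        GETFIELD_GC_I, GETFIELD_GC_R, GETFIELD_GC_F = 5, 6, 7

    CALL_OPS = frozenset([rop.CALL_I, rop.CALL_R, rop.CALL_F, rop.CALL_N])

    def is_plain_call(opnum):
        # stands in for "opnum == rop.CALL_R or opnum == rop.CALL_I or ..." chains
        return opnum in CALL_OPS

    assert is_plain_call(rop.CALL_F)
    assert not is_plain_call(rop.GETFIELD_GC_R)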
diff --git a/rpython/jit/metainterp/optimizeopt/heap.py b/rpython/jit/metainterp/optimizeopt/heap.py
--- a/rpython/jit/metainterp/optimizeopt/heap.py
+++ b/rpython/jit/metainterp/optimizeopt/heap.py
@@ -294,7 +294,7 @@
self.force_all_lazy_setfields_and_arrayitems()
self.clean_caches()
- def optimize_CALL(self, op):
+ def optimize_CALL_I(self, op):
# dispatch based on 'oopspecindex' to a method that handles
# specifically the given oopspec call. For non-oopspec calls,
# oopspecindex is just zero.
@@ -304,6 +304,9 @@
if self._optimize_CALL_DICT_LOOKUP(op):
return
self.emit_operation(op)
+ optimize_CALL_F = optimize_CALL_I
+ optimize_CALL_R = optimize_CALL_I
+ optimize_CALL_N = optimize_CALL_I
def _optimize_CALL_DICT_LOOKUP(self, op):
descrs = op.getdescr().get_extra_info().extradescrs
@@ -432,7 +435,7 @@
cf.force_lazy_setfield(self)
return pendingfields
- def optimize_GETFIELD_GC(self, op):
+ def optimize_GETFIELD_GC_I(self, op):
structvalue = self.getvalue(op.getarg(0))
cf = self.field_cache(op.getdescr())
fieldvalue = cf.getfield_from_cache(self, structvalue)
@@ -445,8 +448,10 @@
# then remember the result of reading the field
fieldvalue = self.getvalue(op.result)
cf.remember_field_value(structvalue, fieldvalue, op)
+ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I
+ optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I
- def optimize_GETFIELD_GC_PURE(self, op):
+ def optimize_GETFIELD_GC_PURE_I(self, op):
structvalue = self.getvalue(op.getarg(0))
cf = self.field_cache(op.getdescr())
fieldvalue = cf.getfield_from_cache(self, structvalue)
@@ -456,6 +461,8 @@
# default case: produce the operation
structvalue.ensure_nonnull()
self.emit_operation(op)
+ optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I
+ optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I
def optimize_SETFIELD_GC(self, op):
if self.has_pure_result(rop.GETFIELD_GC_PURE, [op.getarg(0)],
@@ -467,7 +474,7 @@
cf = self.field_cache(op.getdescr())
cf.do_setfield(self, op)
- def optimize_GETARRAYITEM_GC(self, op):
+ def optimize_GETARRAYITEM_GC_I(self, op):
arrayvalue = self.getvalue(op.getarg(0))
indexvalue = self.getvalue(op.getarg(1))
cf = None
@@ -489,8 +496,10 @@
if cf is not None:
fieldvalue = self.getvalue(op.result)
cf.remember_field_value(arrayvalue, fieldvalue, op)
+ optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I
+ optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I
- def optimize_GETARRAYITEM_GC_PURE(self, op):
+ def optimize_GETARRAYITEM_GC_PURE_I(self, op):
arrayvalue = self.getvalue(op.getarg(0))
indexvalue = self.getvalue(op.getarg(1))
cf = None
@@ -509,6 +518,9 @@
arrayvalue.ensure_nonnull()
self.emit_operation(op)
+ optimize_GETARRAYITEM_GC_PURE_R = optimize_GETARRAYITEM_GC_PURE_I
+ optimize_GETARRAYITEM_GC_PURE_F = optimize_GETARRAYITEM_GC_PURE_I
+
def optimize_SETARRAYITEM_GC(self, op):
if self.has_pure_result(rop.GETARRAYITEM_GC_PURE, [op.getarg(0),
op.getarg(1)],
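Across the optimizer passes the typed variants are handled by writing one method for the _I case and binding the _R/_F (and, for calls, _N) names to it at class level, so the name-based dispatcher finds every variant without duplicating code. A self-contained sketch of the pattern, with an invented class name:

    class OptHeapSketch(object):
        def optimize_GETFIELD_GC_I(self, op):
            # one shared implementation, whatever the result type
            return 'handled %s' % (op,)

        # class-level aliases: the _R and _F spellings resolve to the same
        # underlying function
        optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I
        optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I

    opt = OptHeapSketch()
    assert opt.optimize_GETFIELD_GC_R('GETFIELD_GC_R') == 'handled GETFIELD_GC_R'
    assert (OptHeapSketch.__dict__['optimize_GETFIELD_GC_F'] is
            OptHeapSketch.__dict__['optimize_GETFIELD_GC_I'])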
diff --git a/rpython/jit/metainterp/optimizeopt/intbounds.py b/rpython/jit/metainterp/optimizeopt/intbounds.py
--- a/rpython/jit/metainterp/optimizeopt/intbounds.py
+++ b/rpython/jit/metainterp/optimizeopt/intbounds.py
@@ -372,7 +372,7 @@
v1.intbound.make_ge(IntLowerBound(0))
v1.intbound.make_lt(IntUpperBound(256))
- def optimize_GETFIELD_RAW(self, op):
+ def optimize_GETFIELD_RAW_I(self, op):
self.emit_operation(op)
descr = op.getdescr()
if descr.is_integer_bounded():
@@ -380,11 +380,16 @@
v1.intbound.make_ge(IntLowerBound(descr.get_integer_min()))
v1.intbound.make_le(IntUpperBound(descr.get_integer_max()))
- optimize_GETFIELD_GC = optimize_GETFIELD_RAW
+ optimize_GETFIELD_RAW_F = optimize_GETFIELD_RAW_I
+ optimize_GETFIELD_GC_I = optimize_GETFIELD_RAW_I
+ optimize_GETFIELD_GC_R = optimize_GETFIELD_RAW_I
+ optimize_GETFIELD_GC_F = optimize_GETFIELD_RAW_I
- optimize_GETINTERIORFIELD_GC = optimize_GETFIELD_RAW
+ optimize_GETINTERIORFIELD_GC_I = optimize_GETFIELD_RAW_I
+ optimize_GETINTERIORFIELD_GC_R = optimize_GETFIELD_RAW_I
+ optimize_GETINTERIORFIELD_GC_F = optimize_GETFIELD_RAW_I
- def optimize_GETARRAYITEM_RAW(self, op):
+ def optimize_GETARRAYITEM_RAW_I(self, op):
self.emit_operation(op)
descr = op.getdescr()
if descr and descr.is_item_integer_bounded():
@@ -392,7 +397,10 @@
v1.intbound.make_ge(IntLowerBound(descr.get_item_integer_min()))
v1.intbound.make_le(IntUpperBound(descr.get_item_integer_max()))
- optimize_GETARRAYITEM_GC = optimize_GETARRAYITEM_RAW
+ optimize_GETARRAYITEM_RAW_F = optimize_GETARRAYITEM_RAW_I
+ optimize_GETARRAYITEM_GC_I = optimize_GETARRAYITEM_RAW_I
+ optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_RAW_I
+ optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_RAW_I
def optimize_UNICODEGETITEM(self, op):
self.emit_operation(op)
diff --git a/rpython/jit/metainterp/optimizeopt/optimizer.py b/rpython/jit/metainterp/optimizeopt/optimizer.py
--- a/rpython/jit/metainterp/optimizeopt/optimizer.py
+++ b/rpython/jit/metainterp/optimizeopt/optimizer.py
@@ -677,8 +677,10 @@
# These are typically removed already by OptRewrite, but it can be
# dissabled and unrolling emits some SAME_AS ops to setup the
# optimizier state. These needs to always be optimized out.
- def optimize_SAME_AS(self, op):
+ def optimize_SAME_AS_I(self, op):
self.make_equal_to(op.result, self.getvalue(op.getarg(0)))
+ optimize_SAME_AS_R = optimize_SAME_AS_I
+ optimize_SAME_AS_F = optimize_SAME_AS_I
def optimize_MARK_OPAQUE_PTR(self, op):
value = self.getvalue(op.getarg(0))
diff --git a/rpython/jit/metainterp/optimizeopt/pure.py b/rpython/jit/metainterp/optimizeopt/pure.py
--- a/rpython/jit/metainterp/optimizeopt/pure.py
+++ b/rpython/jit/metainterp/optimizeopt/pure.py
@@ -56,7 +56,7 @@
if nextop:
self.emit_operation(nextop)
- def optimize_CALL_PURE(self, op):
+ def optimize_CALL_PURE_I(self, op):
# Step 1: check if all arguments are constant
result = self._can_optimize_call_pure(op)
if result is not None:
@@ -84,6 +84,9 @@
args = op.getarglist()
self.emit_operation(ResOperation(rop.CALL, args, op.result,
op.getdescr()))
+ optimize_CALL_PURE_R = optimize_CALL_PURE_I
+ optimize_CALL_PURE_F = optimize_CALL_PURE_I
+ optimize_CALL_PURE_N = optimize_CALL_PURE_I
def optimize_GUARD_NO_EXCEPTION(self, op):
if self.last_emitted_operation is REMOVED:
diff --git a/rpython/jit/metainterp/optimizeopt/rewrite.py b/rpython/jit/metainterp/optimizeopt/rewrite.py
--- a/rpython/jit/metainterp/optimizeopt/rewrite.py
+++ b/rpython/jit/metainterp/optimizeopt/rewrite.py
@@ -389,7 +389,7 @@
'always fail' % r)
self.optimize_GUARD_CLASS(op)
- def optimize_CALL_LOOPINVARIANT(self, op):
+ def optimize_CALL_LOOPINVARIANT_I(self, op):
arg = op.getarg(0)
# 'arg' must be a Const, because residual_call in codewriter
# expects a compile-time constant
@@ -408,6 +408,9 @@
self.emit_operation(op)
resvalue = self.getvalue(op.result)
self.loop_invariant_results[key] = resvalue
+ optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I
+ optimize_CALL_LOOPINVARIANT_F = optimize_CALL_LOOPINVARIANT_I
+ optimize_CALL_LOOPINVARIANT_N = optimize_CALL_LOOPINVARIANT_I
def optimize_COND_CALL(self, op):
arg = op.getarg(0)
@@ -478,7 +481,7 @@
def optimize_INSTANCE_PTR_NE(self, op):
self._optimize_oois_ooisnot(op, True, True)
- def optimize_CALL(self, op):
+ def optimize_CALL_N(self, op):
# dispatch based on 'oopspecindex' to a method that handles
# specifically the given oopspec call. For non-oopspec calls,
# oopspecindex is just zero.
@@ -538,7 +541,7 @@
return True # 0-length arraycopy
return False
- def optimize_CALL_PURE(self, op):
+ def optimize_CALL_PURE_I(self, op):
# this removes a CALL_PURE with all constant arguments.
# Note that it's also done in pure.py. For now we need both...
result = self._can_optimize_call_pure(op)
@@ -547,6 +550,9 @@
self.last_emitted_operation = REMOVED
return
self.emit_operation(op)
+ optimize_CALL_PURE_R = optimize_CALL_PURE_I
+ optimize_CALL_PURE_F = optimize_CALL_PURE_I
+ optimize_CALL_PURE_N = optimize_CALL_PURE_I
def optimize_GUARD_NO_EXCEPTION(self, op):
if self.last_emitted_operation is REMOVED:
@@ -583,8 +589,10 @@
self.pure(rop.CAST_PTR_TO_INT, [op.result], op.getarg(0))
self.emit_operation(op)
- def optimize_SAME_AS(self, op):
+ def optimize_SAME_AS_i(self, op):
self.make_equal_to(op.result, self.getvalue(op.getarg(0)))
+ optimize_SAME_AS_r = optimize_SAME_AS_i
+ optimize_SAME_AS_f = optimize_SAME_AS_i
dispatch_opt = make_dispatcher_method(OptRewrite, 'optimize_',
default=OptRewrite.emit_operation)
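The dispatch_opt line above is what ties the optimize_* methods to opnums by name. The real make_dispatcher_method lives in optimizeopt/util.py; the following is a simplified, hypothetical version showing only the idea of a per-opnum table with a default fallback:

    def make_dispatcher_sketch(cls, prefix, opnames, default):
        # build a table indexed by position in `opnames`, falling back to
        # `default` when the class has no matching method
        table = [getattr(cls, prefix + name, default) for name in opnames]
        def dispatch(self, opnum, op):
            return table[opnum](self, op)
        return dispatch

    class OptRewriteSketch(object):
        def optimize_SAME_AS_I(self, op):
            return 'folded'
        def emit_operation(self, op):
            return 'emitted'

    OPNAMES = ['SAME_AS_I', 'SETFIELD_GC']
    dispatch_opt = make_dispatcher_sketch(OptRewriteSketch, 'optimize_', OPNAMES,
                                          default=OptRewriteSketch.emit_operation)
    o = OptRewriteSketch()
    assert dispatch_opt(o, 0, None) == 'folded'
    assert dispatch_opt(o, 1, None) == 'emitted'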
diff --git a/rpython/jit/metainterp/optimizeopt/simplify.py b/rpython/jit/metainterp/optimizeopt/simplify.py
--- a/rpython/jit/metainterp/optimizeopt/simplify.py
+++ b/rpython/jit/metainterp/optimizeopt/simplify.py
@@ -14,14 +14,20 @@
self.optimizer.pendingfields = []
Optimization.emit_operation(self, op)
- def optimize_CALL_PURE(self, op):
+ def optimize_CALL_PURE_I(self, op):
args = op.getarglist()
self.emit_operation(ResOperation(rop.CALL, args, op.result,
op.getdescr()))
+ optimize_CALL_PURE_R = optimize_CALL_PURE_I
+ optimize_CALL_PURE_F = optimize_CALL_PURE_I
+ optimize_CALL_PURE_N = optimize_CALL_PURE_I
- def optimize_CALL_LOOPINVARIANT(self, op):
+ def optimize_CALL_LOOPINVARIANT_I(self, op):
op = op.copy_and_change(rop.CALL)
self.emit_operation(op)
+ optimize_CALL_LOOPINVARIANT_R = optimize_CALL_LOOPINVARIANT_I
+ optimize_CALL_LOOPINVARIANT_F = optimize_CALL_LOOPINVARIANT_I
+ optimize_CALL_LOOPINVARIANT_N = optimize_CALL_LOOPINVARIANT_I
def optimize_VIRTUAL_REF_FINISH(self, op):
pass
diff --git a/rpython/jit/metainterp/optimizeopt/virtualize.py b/rpython/jit/metainterp/optimizeopt/virtualize.py
--- a/rpython/jit/metainterp/optimizeopt/virtualize.py
+++ b/rpython/jit/metainterp/optimizeopt/virtualize.py
@@ -571,13 +571,16 @@
else:
self.emit_operation(op)
- def optimize_CALL_MAY_FORCE(self, op):
+ def optimize_CALL_MAY_FORCE_I(self, op):
effectinfo = op.getdescr().get_extra_info()
oopspecindex = effectinfo.oopspecindex
if oopspecindex == EffectInfo.OS_JIT_FORCE_VIRTUAL:
if self._optimize_JIT_FORCE_VIRTUAL(op):
return
self.emit_operation(op)
+ optimize_CALL_MAY_FORCE_R = optimize_CALL_MAY_FORCE_I
+ optimize_CALL_MAY_FORCE_F = optimize_CALL_MAY_FORCE_I
+ optimize_CALL_MAY_FORCE_N = optimize_CALL_MAY_FORCE_I
def optimize_COND_CALL(self, op):
effectinfo = op.getdescr().get_extra_info()
@@ -653,7 +656,7 @@
return True
return False
- def optimize_GETFIELD_GC(self, op):
+ def optimize_GETFIELD_GC_I(self, op):
value = self.getvalue(op.getarg(0))
# If this is an immutable field (as indicated by op.is_always_pure())
# then it's safe to reuse the virtual's field, even if it has been
@@ -672,10 +675,14 @@
else:
value.ensure_nonnull()
self.emit_operation(op)
+ optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I
+ optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I
# note: the following line does not mean that the two operations are
# completely equivalent, because GETFIELD_GC_PURE is_always_pure().
- optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC
+ optimize_GETFIELD_GC_PURE_I = optimize_GETFIELD_GC_I
+ optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_I
+ optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_I
def optimize_SETFIELD_GC(self, op):
value = self.getvalue(op.getarg(0))
@@ -707,7 +714,7 @@
else:
self.emit_operation(op)
- def optimize_CALL(self, op):
+ def optimize_CALL_N(self, op):
effectinfo = op.getdescr().get_extra_info()
if effectinfo.oopspecindex == EffectInfo.OS_RAW_MALLOC_VARSIZE_CHAR:
self.do_RAW_MALLOC_VARSIZE_CHAR(op)
@@ -720,6 +727,7 @@
return
else:
self.emit_operation(op)
+ optimize_CALL_R = optimize_CALL_N
def do_RAW_MALLOC_VARSIZE_CHAR(self, op):
sizebox = self.get_constant_box(op.getarg(1))
@@ -761,7 +769,7 @@
value.ensure_nonnull()
self.emit_operation(op)
- def optimize_GETARRAYITEM_GC(self, op):
+ def optimize_GETARRAYITEM_GC_I(self, op):
value = self.getvalue(op.getarg(0))
if value.is_virtual():
assert isinstance(value, VArrayValue)
@@ -774,10 +782,14 @@
return
value.ensure_nonnull()
self.emit_operation(op)
+ optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I
+ optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I
# note: the following line does not mean that the two operations are
# completely equivalent, because GETARRAYITEM_GC_PURE is_always_pure().
- optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC
+ optimize_GETARRAYITEM_GC_PURE_I = optimize_GETARRAYITEM_GC_I
+ optimize_GETARRAYITEM_GC_PURE_R = optimize_GETARRAYITEM_GC_I
+ optimize_GETARRAYITEM_GC_PURE_F = optimize_GETARRAYITEM_GC_I
def optimize_SETARRAYITEM_GC(self, op):
value = self.getvalue(op.getarg(0))
@@ -797,7 +809,7 @@
offset = basesize + (itemsize*index)
return offset, itemsize, descr
- def optimize_GETARRAYITEM_RAW(self, op):
+ def optimize_GETARRAYITEM_RAW_I(self, op):
value = self.getvalue(op.getarg(0))
if value.is_virtual():
indexbox = self.get_constant_box(op.getarg(1))
@@ -814,6 +826,7 @@
return
value.ensure_nonnull()
self.emit_operation(op)
+ optimize_GETARRAYITEM_RAW_F = optimize_GETARRAYITEM_RAW_I
def optimize_SETARRAYITEM_RAW(self, op):
value = self.getvalue(op.getarg(0))
@@ -839,7 +852,7 @@
itemsize = cpu.unpack_arraydescr_size(descr)[1]
return offset, itemsize, descr
- def optimize_RAW_LOAD(self, op):
+ def optimize_RAW_LOAD_I(self, op):
value = self.getvalue(op.getarg(0))
if value.is_virtual():
offsetbox = self.get_constant_box(op.getarg(1))
@@ -856,6 +869,7 @@
return
value.ensure_nonnull()
self.emit_operation(op)
+ optimize_RAW_LOAD_F = optimize_RAW_LOAD_I
def optimize_RAW_STORE(self, op):
value = self.getvalue(op.getarg(0))
@@ -874,7 +888,7 @@
value.ensure_nonnull()
self.emit_operation(op)
- def optimize_GETINTERIORFIELD_GC(self, op):
+ def optimize_GETINTERIORFIELD_GC_I(self, op):
value = self.getvalue(op.getarg(0))
if value.is_virtual():
indexbox = self.get_constant_box(op.getarg(1))
@@ -889,6 +903,8 @@
return
value.ensure_nonnull()
self.emit_operation(op)
+ optimize_GETINTERIORFIELD_GC_R = optimize_GETINTERIORFIELD_GC_I
+ optimize_GETINTERIORFIELD_GC_F = optimize_GETINTERIORFIELD_GC_I
def optimize_SETINTERIORFIELD_GC(self, op):
value = self.getvalue(op.getarg(0))
diff --git a/rpython/jit/metainterp/optimizeopt/vstring.py b/rpython/jit/metainterp/optimizeopt/vstring.py
--- a/rpython/jit/metainterp/optimizeopt/vstring.py
+++ b/rpython/jit/metainterp/optimizeopt/vstring.py
@@ -541,7 +541,7 @@
mode, need_next_offset=False
)
- def optimize_CALL(self, op):
+ def optimize_CALL_I(self, op):
# dispatch based on 'oopspecindex' to a method that handles
# specifically the given oopspec call. For non-oopspec calls,
# oopspecindex is just zero.
@@ -565,8 +565,13 @@
if self.opt_call_SHRINK_ARRAY(op):
return
self.emit_operation(op)
-
- optimize_CALL_PURE = optimize_CALL
+ optimize_CALL_R = optimize_CALL_I
+ optimize_CALL_F = optimize_CALL_I
+ optimize_CALL_N = optimize_CALL_I
+ optimize_CALL_PURE_I = optimize_CALL_I
+ optimize_CALL_PURE_R = optimize_CALL_I
+ optimize_CALL_PURE_F = optimize_CALL_I
+ optimize_CALL_PURE_N = optimize_CALL_I
def optimize_GUARD_NO_EXCEPTION(self, op):
if self.last_emitted_operation is REMOVED:
diff --git a/rpython/jit/metainterp/pyjitpl.py b/rpython/jit/metainterp/pyjitpl.py
--- a/rpython/jit/metainterp/pyjitpl.py
+++ b/rpython/jit/metainterp/pyjitpl.py
@@ -12,7 +12,8 @@
from rpython.jit.metainterp.jitprof import EmptyProfiler
from rpython.jit.metainterp.logger import Logger
from rpython.jit.metainterp.optimizeopt.util import args_dict
-from rpython.jit.metainterp.resoperation import rop
+from rpython.jit.metainterp.resoperation import rop, InputArgInt,\
+ InputArgFloat, InputArgRef
from rpython.rlib import nonconst, rstack
from rpython.rlib.debug import debug_start, debug_stop, debug_print
from rpython.rlib.debug import have_debug_prints, make_sure_not_resized
diff --git a/rpython/jit/metainterp/quasiimmut.py b/rpython/jit/metainterp/quasiimmut.py
--- a/rpython/jit/metainterp/quasiimmut.py
+++ b/rpython/jit/metainterp/quasiimmut.py
@@ -1,7 +1,8 @@
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper import rclass
from rpython.rtyper.annlowlevel import cast_base_ptr_to_instance
-from rpython.jit.metainterp.history import AbstractDescr
+from rpython.jit.metainterp.history import AbstractDescr, ConstPtr, ConstInt,\
+ ConstFloat
from rpython.rlib.objectmodel import we_are_translated
@@ -114,9 +115,18 @@
def get_current_constant_fieldvalue(self):
from rpython.jit.metainterp import executor
from rpython.jit.metainterp.resoperation import rop
- fieldbox = executor.execute(self.cpu, None, rop.GETFIELD_GC,
- self.fielddescr, self.structbox)
- return fieldbox.constbox()
+ if self.fielddescr.is_pointer_field():
+ return ConstPtr(executor.do_getfield_gc_r(self.cpu, None, rop.GETFIELD_GC_R,
+ self.fielddescr, self.structbox))
+ elif self.fielddescr.is_float_field():
+ return ConstFloat(executor.execute(self.cpu, None,
+ rop.GETFIELD_GC_F,
+ self.fielddescr, self.structbox))
+
+ else:
+ return ConstInt(executor.do_getfield_gc_i(self.cpu, None,
+ self.structbox,
+ self.fielddescr))
def is_still_valid_for(self, structconst):
assert self.structbox is not None
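get_current_constant_fieldvalue now has to pick the Const class itself, since the typed executor helpers return raw values rather than boxes that know how to produce a constbox(). A hedged sketch of that selection logic, with stand-in descr and Const classes (the real ones live in history.py):

    class ConstInt(object):
        def __init__(self, value):
            self.value = value
    class ConstFloat(object):
        def __init__(self, value):
            self.value = value
    class ConstPtr(object):
        def __init__(self, value):
            self.value = value

    class FieldDescrStub(object):
        def __init__(self, kind):
            self.kind = kind
        def is_pointer_field(self):
            return self.kind == 'r'
        def is_float_field(self):
            return self.kind == 'f'

    def wrap_constant(fielddescr, raw_value):
        # mirrors the pointer/float/int selection in the hunk above
        if fielddescr.is_pointer_field():
            return ConstPtr(raw_value)
        elif fielddescr.is_float_field():
            return ConstFloat(raw_value)
        else:
            return ConstInt(raw_value)

    assert isinstance(wrap_constant(FieldDescrStub('i'), 7), ConstInt)
    assert isinstance(wrap_constant(FieldDescrStub('f'), 1.5), ConstFloat)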
diff --git a/rpython/jit/metainterp/resoperation.py b/rpython/jit/metainterp/resoperation.py
--- a/rpython/jit/metainterp/resoperation.py
+++ b/rpython/jit/metainterp/resoperation.py
@@ -1,5 +1,7 @@
from rpython.rlib.objectmodel import we_are_translated, specialize
+class AbstractValue(object):
+ pass
@specialize.argtype(2)
def ResOperation(opnum, args, result, descr=None):
@@ -18,11 +20,13 @@
elif isinstance(result, float):
op._resfloat = result
else:
+ from rpython.rtyper.lltypesystem import lltype, llmemory
+ assert lltype.typeOf(result) == llmemory.GCREF
op._resref = result
return op
-class AbstractResOp(object):
+class AbstractResOp(AbstractValue):
"""The central ResOperation class, representing one operation."""
# debug
@@ -254,21 +258,30 @@
class FloatOp(object):
_mixin_ = True
- def getfloat(self):
+ def getfloatstorage(self):
return self._resfloat
- def setfloat(self, floatval):
+ def setfloatstorage(self, floatval):
self._resfloat = floatval
class RefOp(object):
_mixin_ = True
- def getref(self):
+ def getref_base(self):
return self._resref
- def setref(self, refval):
+ def setref_base(self, refval):
self._resref = refval
+class InputArgInt(IntOp, AbstractValue):
+ pass
+
+class InputArgFloat(FloatOp, AbstractValue):
+ pass
+
+class InputArgRef(FloatOp, AbstractValue):
+ pass
+
# ============
# arity mixins
# ============
@@ -415,39 +428,39 @@
""" All the operations are desribed like this:
-NAME/no-of-args-or-*[b][d]/type-of-result-or-none
+NAME/no-of-args-or-*[b][d]/types-of-result
if b is present it means the operation produces a boolean
if d is present it means there is a descr
-type of result can be one of r i f, * for anything, + for i or f or nothing
+type of result can be one or more of r i f n
"""
_oplist = [
'_FINAL_FIRST',
- 'JUMP/*d/',
- 'FINISH/*d/',
+ 'JUMP/*d/n',
+ 'FINISH/*d/n',
'_FINAL_LAST',
- 'LABEL/*d/',
+ 'LABEL/*d/n',
'_GUARD_FIRST',
'_GUARD_FOLDABLE_FIRST',
- 'GUARD_TRUE/1d/',
- 'GUARD_FALSE/1d/',
- 'GUARD_VALUE/2d/',
- 'GUARD_CLASS/2d/',
- 'GUARD_NONNULL/1d/',
- 'GUARD_ISNULL/1d/',
- 'GUARD_NONNULL_CLASS/2d/',
+ 'GUARD_TRUE/1d/n',
+ 'GUARD_FALSE/1d/n',
+ 'GUARD_VALUE/2d/n',
+ 'GUARD_CLASS/2d/n',
+ 'GUARD_NONNULL/1d/n',
+ 'GUARD_ISNULL/1d/n',
+ 'GUARD_NONNULL_CLASS/2d/n',
'_GUARD_FOLDABLE_LAST',
- 'GUARD_NO_EXCEPTION/0d/', # may be called with an exception currently set
+ 'GUARD_NO_EXCEPTION/0d/n', # may be called with an exception currently set
'GUARD_EXCEPTION/1d/r', # may be called with an exception currently set
- 'GUARD_NO_OVERFLOW/0d/',
- 'GUARD_OVERFLOW/0d/',
- 'GUARD_NOT_FORCED/0d/', # may be called with an exception currently set
- 'GUARD_NOT_FORCED_2/0d/', # same as GUARD_NOT_FORCED, but for finish()
- 'GUARD_NOT_INVALIDATED/0d/',
- 'GUARD_FUTURE_CONDITION/0d/',
+ 'GUARD_NO_OVERFLOW/0d/n',
+ 'GUARD_OVERFLOW/0d/n',
+ 'GUARD_NOT_FORCED/0d/n', # may be called with an exception currently set
+ 'GUARD_NOT_FORCED_2/0d/n', # same as GUARD_NOT_FORCED, but for finish()
+ 'GUARD_NOT_INVALIDATED/0d/n',
+ 'GUARD_FUTURE_CONDITION/0d/n',
# is removable, may be patched by an optimization
'_GUARD_LAST', # ----- end of guard operations -----
@@ -501,7 +514,7 @@
'INT_INVERT/1/i',
'INT_FORCE_GE_ZERO/1/i',
#
- 'SAME_AS/1/*', # gets a Const or a Box, turns it into another Box
+ 'SAME_AS/1/rfi', # gets a Const or a Box, turns it into another Box
'CAST_PTR_TO_INT/1/i',
'CAST_INT_TO_PTR/1/r',
#
@@ -513,21 +526,21 @@
'ARRAYLEN_GC/1d/i',
'STRLEN/1/i',
'STRGETITEM/2/i',
- 'GETFIELD_GC_PURE/1d/*',
- 'GETFIELD_RAW_PURE/1d/*',
- 'GETARRAYITEM_GC_PURE/2d/*',
- 'GETARRAYITEM_RAW_PURE/2d/*',
+ 'GETFIELD_GC_PURE/1d/rfi',
+ 'GETFIELD_RAW_PURE/1d/fi',
+ 'GETARRAYITEM_GC_PURE/2d/rfi',
+ 'GETARRAYITEM_RAW_PURE/2d/fi',
'UNICODELEN/1/i',
'UNICODEGETITEM/2/i',
#
'_ALWAYS_PURE_LAST', # ----- end of always_pure operations -----
- 'GETARRAYITEM_GC/2d/*',
- 'GETARRAYITEM_RAW/2d/+',
- 'GETINTERIORFIELD_GC/2d/*',
- 'RAW_LOAD/2d/+',
- 'GETFIELD_GC/1d/*',
- 'GETFIELD_RAW/1d/+',
+ 'GETARRAYITEM_GC/2d/rfi',
+ 'GETARRAYITEM_RAW/2d/fi',
+ 'GETINTERIORFIELD_GC/2d/rfi',
+ 'RAW_LOAD/2d/fi',
+ 'GETFIELD_GC/1d/rfi',
+ 'GETFIELD_RAW/1d/fi',
'_MALLOC_FIRST',
'NEW/0d/r', #-> GcStruct, gcptrs inside are zeroed (not the rest)
'NEW_WITH_VTABLE/1/r',#-> GcStruct with vtable, gcptrs inside are zeroed
@@ -538,45 +551,47 @@
'_MALLOC_LAST',
'FORCE_TOKEN/0/i',
'VIRTUAL_REF/2/r', # removed before it's passed to the backend
- 'MARK_OPAQUE_PTR/1b/',
+ 'MARK_OPAQUE_PTR/1b/n',
# this one has no *visible* side effect, since the virtualizable
# must be forced, however we need to execute it anyway
'_NOSIDEEFFECT_LAST', # ----- end of no_side_effect operations -----
- 'INCREMENT_DEBUG_COUNTER/1/',
- 'SETARRAYITEM_GC/3d/',
- 'SETARRAYITEM_RAW/3d/',
- 'SETINTERIORFIELD_GC/3d/',
- 'SETINTERIORFIELD_RAW/3d/', # right now, only used by tests
- 'RAW_STORE/3d/',
- 'SETFIELD_GC/2d/',
- 'ZERO_PTR_FIELD/2/', # only emitted by the rewrite, clears a pointer field
+ 'INCREMENT_DEBUG_COUNTER/1/n',
+ 'SETARRAYITEM_GC/3d/n',
+ 'SETARRAYITEM_RAW/3d/n',
+ 'SETINTERIORFIELD_GC/3d/n',
+ 'SETINTERIORFIELD_RAW/3d/n', # right now, only used by tests
+ 'RAW_STORE/3d/n',
+ 'SETFIELD_GC/2d/n',
+ 'ZERO_PTR_FIELD/2/n', # only emitted by the rewrite, clears a pointer field
# at a given constant offset, no descr
- 'ZERO_ARRAY/3d/', # only emitted by the rewrite, clears (part of) an array
+ 'ZERO_ARRAY/3d/n', # only emitted by the rewrite, clears (part of) an array
# [arraygcptr, firstindex, length], descr=ArrayDescr
- 'SETFIELD_RAW/2d/',
- 'STRSETITEM/3/',
- 'UNICODESETITEM/3/',
- 'COND_CALL_GC_WB/1d/', # [objptr] (for the write barrier)
- 'COND_CALL_GC_WB_ARRAY/2d/', # [objptr, arrayindex] (write barr. for array)
- 'DEBUG_MERGE_POINT/*/', # debugging only
- 'JIT_DEBUG/*/', # debugging only
- 'VIRTUAL_REF_FINISH/2/', # removed before it's passed to the backend
- 'COPYSTRCONTENT/5/', # src, dst, srcstart, dststart, length
- 'COPYUNICODECONTENT/5/',
- 'QUASIIMMUT_FIELD/1d/', # [objptr], descr=SlowMutateDescr
- 'RECORD_KNOWN_CLASS/2/', # [objptr, clsptr]
- 'KEEPALIVE/1/',
+ 'SETFIELD_RAW/2d/n',
+ 'STRSETITEM/3/n',
+ 'UNICODESETITEM/3/n',
+ 'COND_CALL_GC_WB/1d/n', # [objptr] (for the write barrier)
+ 'COND_CALL_GC_WB_ARRAY/2d/n', # [objptr, arrayindex] (write barr. for array)
+ 'DEBUG_MERGE_POINT/*/n', # debugging only
+ 'JIT_DEBUG/*/n', # debugging only
+ 'VIRTUAL_REF_FINISH/2/n', # removed before it's passed to the backend
+ 'COPYSTRCONTENT/5/n', # src, dst, srcstart, dststart, length
+ 'COPYUNICODECONTENT/5/n',
+ 'QUASIIMMUT_FIELD/1d/n', # [objptr], descr=SlowMutateDescr
+ 'RECORD_KNOWN_CLASS/2/n', # [objptr, clsptr]
+ 'KEEPALIVE/1/n',
'_CANRAISE_FIRST', # ----- start of can_raise operations -----
'_CALL_FIRST',
- 'CALL/*d/*',
- 'COND_CALL/*d/*', # a conditional call, with first argument as a condition
- 'CALL_ASSEMBLER/*d/*', # call already compiled assembler
- 'CALL_MAY_FORCE/*d/*',
- 'CALL_LOOPINVARIANT/*d/*',
- 'CALL_RELEASE_GIL/*d/*', # release the GIL and "close the stack" for asmgcc
- 'CALL_PURE/*d/*', # removed before it's passed to the backend
+ 'CALL/*d/rfin',
+ 'COND_CALL/*d/n',
+ # a conditional call, with first argument as a condition
+ 'CALL_ASSEMBLER/*d/rfin', # call already compiled assembler
+ 'CALL_MAY_FORCE/*d/rfin',
+ 'CALL_LOOPINVARIANT/*d/rfin',
+ 'CALL_RELEASE_GIL/*d/rfin',
+ # release the GIL and "close the stack" for asmgcc
+ 'CALL_PURE/*d/rfin', # removed before it's passed to the backend
'CALL_MALLOC_GC/*d/r', # like CALL, but NULL => propagate MemoryError
'CALL_MALLOC_NURSERY/1/r', # nursery malloc, const number of bytes, zeroed
'CALL_MALLOC_NURSERY_VARSIZE/3d/r',
@@ -620,15 +635,12 @@
arity = int(arity)
else:
arity, withdescr, boolresult, result = -1, True, False, None # default
- if result == '*':
- result = 'rfiN'
- elif result == '+':
- result = 'fiN'
- elif result == '':
- result = 'N'
if not name.startswith('_'):
for r in result:
- cls_name = name + '_' + r
+ if len(result) == 1:
+ cls_name = name
+ else:
+ cls_name = name + '_' + r.upper()
setattr(rop, cls_name, i)
opname[i] = cls_name
cls = create_class_for_op(cls_name, i, arity, withdescr, r)
@@ -639,6 +651,14 @@
if debug_print:
print '%30s = %d' % (cls_name, i)
i += 1
+ else:
+ setattr(rop, name, i)
+ opclasses.append(None)
+ oparity.append(-1)
+ opwithdescr.append(False)
+ if debug_print:
+ print '%30s = %d' % (name, i)
+ i += 1
def get_base_class(mixins, base):
try:
@@ -676,7 +696,7 @@
elif result_type == 'r':
mixins.append(RefOp)
else:
- assert result_type == 'N'
+ assert result_type == 'n'
cls_name = '%s_OP' % name
bases = (get_base_class(tuple(mixins), baseclass),)
@@ -687,51 +707,51 @@
del _oplist
opboolinvers = {
- rop.INT_EQ_i: rop.INT_NE_i,
- rop.INT_NE_i: rop.INT_EQ_i,
- rop.INT_LT_i: rop.INT_GE_i,
- rop.INT_GE_i: rop.INT_LT_i,
- rop.INT_GT_i: rop.INT_LE_i,
- rop.INT_LE_i: rop.INT_GT_i,
+ rop.INT_EQ: rop.INT_NE,
+ rop.INT_NE: rop.INT_EQ,
+ rop.INT_LT: rop.INT_GE,
+ rop.INT_GE: rop.INT_LT,
+ rop.INT_GT: rop.INT_LE,
+ rop.INT_LE: rop.INT_GT,
- rop.UINT_LT_i: rop.UINT_GE_i,
- rop.UINT_GE_i: rop.UINT_LT_i,
- rop.UINT_GT_i: rop.UINT_LE_i,
- rop.UINT_LE_i: rop.UINT_GT_i,
+ rop.UINT_LT: rop.UINT_GE,
+ rop.UINT_GE: rop.UINT_LT,
+ rop.UINT_GT: rop.UINT_LE,
+ rop.UINT_LE: rop.UINT_GT,
- rop.FLOAT_EQ_i: rop.FLOAT_NE_i,
- rop.FLOAT_NE_i: rop.FLOAT_EQ_i,
- rop.FLOAT_LT_i: rop.FLOAT_GE_i,
- rop.FLOAT_GE_i: rop.FLOAT_LT_i,
- rop.FLOAT_GT_i: rop.FLOAT_LE_i,
- rop.FLOAT_LE_i: rop.FLOAT_GT_i,
+ rop.FLOAT_EQ: rop.FLOAT_NE,
+ rop.FLOAT_NE: rop.FLOAT_EQ,
+ rop.FLOAT_LT: rop.FLOAT_GE,
+ rop.FLOAT_GE: rop.FLOAT_LT,
+ rop.FLOAT_GT: rop.FLOAT_LE,
+ rop.FLOAT_LE: rop.FLOAT_GT,
- rop.PTR_EQ_i: rop.PTR_NE_i,
- rop.PTR_NE_i: rop.PTR_EQ_i,
+ rop.PTR_EQ: rop.PTR_NE,
+ rop.PTR_NE: rop.PTR_EQ,
}
opboolreflex = {
- rop.INT_EQ_i: rop.INT_EQ_i,
- rop.INT_NE_i: rop.INT_NE_i,
- rop.INT_LT_i: rop.INT_GT_i,
- rop.INT_GE_i: rop.INT_LE_i,
- rop.INT_GT_i: rop.INT_LT_i,
- rop.INT_LE_i: rop.INT_GE_i,
+ rop.INT_EQ: rop.INT_EQ,
+ rop.INT_NE: rop.INT_NE,
+ rop.INT_LT: rop.INT_GT,
+ rop.INT_GE: rop.INT_LE,
+ rop.INT_GT: rop.INT_LT,
+ rop.INT_LE: rop.INT_GE,
- rop.UINT_LT_i: rop.UINT_GT_i,
- rop.UINT_GE_i: rop.UINT_LE_i,
- rop.UINT_GT_i: rop.UINT_LT_i,
- rop.UINT_LE_i: rop.UINT_GE_i,
+ rop.UINT_LT: rop.UINT_GT,
+ rop.UINT_GE: rop.UINT_LE,
+ rop.UINT_GT: rop.UINT_LT,
+ rop.UINT_LE: rop.UINT_GE,
- rop.FLOAT_EQ_i: rop.FLOAT_EQ_i,
- rop.FLOAT_NE_i: rop.FLOAT_NE_i,
- rop.FLOAT_LT_i: rop.FLOAT_GT_i,
- rop.FLOAT_GE_i: rop.FLOAT_LE_i,
- rop.FLOAT_GT_i: rop.FLOAT_LT_i,
- rop.FLOAT_LE_i: rop.FLOAT_GE_i,
+ rop.FLOAT_EQ: rop.FLOAT_EQ,
+ rop.FLOAT_NE: rop.FLOAT_NE,
+ rop.FLOAT_LT: rop.FLOAT_GT,
+ rop.FLOAT_GE: rop.FLOAT_LE,
+ rop.FLOAT_GT: rop.FLOAT_LT,
+ rop.FLOAT_LE: rop.FLOAT_GE,
- rop.PTR_EQ_i: rop.PTR_EQ_i,
- rop.PTR_NE_i: rop.PTR_NE_i,
+ rop.PTR_EQ: rop.PTR_EQ,
+ rop.PTR_NE: rop.PTR_NE,
}
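The _oplist entries now spell the result types out explicitly as 'NAME/arity[b][d]/types', where types is one or more of r, i, f, n. The registration loop creates one opnum per listed type and appends an _I/_R/_F/_N suffix only when more than one type is listed, which is why INT_EQ and friends keep their plain names in the opboolinvers and opboolreflex tables above. A small sketch of that expansion, independent of the real rop machinery:

    def expand_opnames(spec):
        # 'CALL/*d/rfin' -> ['CALL_R', 'CALL_F', 'CALL_I', 'CALL_N']
        # 'STRLEN/1/i'   -> ['STRLEN']        (a single type keeps the bare name)
        name, _arity, result = spec.split('/')
        if len(result) == 1:
            return [name]
        return [name + '_' + r.upper() for r in result]

    assert expand_opnames('CALL/*d/rfin') == ['CALL_R', 'CALL_F', 'CALL_I', 'CALL_N']
    assert expand_opnames('STRLEN/1/i') == ['STRLEN']
    assert expand_opnames('SETFIELD_GC/2d/n') == ['SETFIELD_GC']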