[pypy-svn] r77302 - in pypy/branch/jitffi/pypy: . config doc/discussion interpreter jit/backend/cli jit/backend/llgraph jit/backend/llsupport jit/backend/llsupport/test jit/backend/llvm jit/backend/test jit/backend/x86 jit/backend/x86/test jit/metainterp jit/metainterp/optimizeopt jit/metainterp/test jit/tl jit/tool module/__builtin__ module/__builtin__/test module/_ssl/test module/array module/array/benchmark module/array/test module/gc module/gc/test module/pypyjit/test module/select module/select/test module/sys rlib rlib/test rpython rpython/lltypesystem rpython/lltypesystem/test rpython/memory rpython/memory/gc rpython/memory/gc/test rpython/memory/gctransform rpython/memory/test rpython/numpy translator translator/c translator/c/gcc translator/c/src translator/c/test
antocuni at codespeak.net
antocuni at codespeak.net
Thu Sep 23 16:53:39 CEST 2010
Author: antocuni
Date: Thu Sep 23 16:53:32 2010
New Revision: 77302
Added:
pypy/branch/jitffi/pypy/jit/metainterp/test/test_resoperation.py
- copied unchanged from r77301, pypy/trunk/pypy/jit/metainterp/test/test_resoperation.py
pypy/branch/jitffi/pypy/module/gc/app_referents.py
- copied unchanged from r77301, pypy/trunk/pypy/module/gc/app_referents.py
pypy/branch/jitffi/pypy/module/gc/referents.py
- copied unchanged from r77301, pypy/trunk/pypy/module/gc/referents.py
pypy/branch/jitffi/pypy/module/gc/test/test_app_referents.py
- copied unchanged from r77301, pypy/trunk/pypy/module/gc/test/test_app_referents.py
pypy/branch/jitffi/pypy/module/gc/test/test_referents.py
- copied unchanged from r77301, pypy/trunk/pypy/module/gc/test/test_referents.py
pypy/branch/jitffi/pypy/rpython/memory/gc/inspect.py
- copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/inspect.py
pypy/branch/jitffi/pypy/rpython/memory/gc/minimark.py
- copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/minimark.py
pypy/branch/jitffi/pypy/rpython/memory/gc/minimarkpage.py
- copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/minimarkpage.py
pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_minimark.py
- copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/test/test_minimark.py
pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_minimarkpage.py
- copied unchanged from r77301, pypy/trunk/pypy/rpython/memory/gc/test/test_minimarkpage.py
Removed:
pypy/branch/jitffi/pypy/rpython/numpy/
Modified:
pypy/branch/jitffi/pypy/ (props changed)
pypy/branch/jitffi/pypy/config/translationoption.py
pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt
pypy/branch/jitffi/pypy/interpreter/baseobjspace.py
pypy/branch/jitffi/pypy/jit/backend/cli/method.py
pypy/branch/jitffi/pypy/jit/backend/cli/runner.py
pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py
pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py
pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py
pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py
pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py
pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py
pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py
pypy/branch/jitffi/pypy/jit/backend/test/test_random.py
pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py
pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py
pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py
pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py
pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py
pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py
pypy/branch/jitffi/pypy/jit/metainterp/compile.py
pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py
pypy/branch/jitffi/pypy/jit/metainterp/history.py
pypy/branch/jitffi/pypy/jit/metainterp/logger.py
pypy/branch/jitffi/pypy/jit/metainterp/optimize.py
pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/ (props changed)
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py (contents, props changed)
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py (contents, props changed)
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py (contents, props changed)
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intutils.py (props changed)
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py (contents, props changed)
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py (contents, props changed)
pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py (contents, props changed)
pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py
pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py
pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py
pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py
pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py
pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py
pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py
pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py
pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py
pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py
pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py
pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py
pypy/branch/jitffi/pypy/jit/tool/showstats.py
pypy/branch/jitffi/pypy/jit/tool/traceviewer.py
pypy/branch/jitffi/pypy/module/__builtin__/functional.py
pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py
pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py
pypy/branch/jitffi/pypy/module/array/benchmark/Makefile (props changed)
pypy/branch/jitffi/pypy/module/array/benchmark/intimg.c (props changed)
pypy/branch/jitffi/pypy/module/array/benchmark/intimgtst.c (props changed)
pypy/branch/jitffi/pypy/module/array/benchmark/intimgtst.py (props changed)
pypy/branch/jitffi/pypy/module/array/benchmark/loop.c (props changed)
pypy/branch/jitffi/pypy/module/array/benchmark/sum.c (props changed)
pypy/branch/jitffi/pypy/module/array/benchmark/sumtst.c (props changed)
pypy/branch/jitffi/pypy/module/array/benchmark/sumtst.py (props changed)
pypy/branch/jitffi/pypy/module/array/interp_array.py
pypy/branch/jitffi/pypy/module/array/test/test_array_old.py (props changed)
pypy/branch/jitffi/pypy/module/gc/__init__.py
pypy/branch/jitffi/pypy/module/gc/interp_gc.py
pypy/branch/jitffi/pypy/module/gc/test/test_gc.py
pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py
pypy/branch/jitffi/pypy/module/select/interp_select.py
pypy/branch/jitffi/pypy/module/select/test/test_select.py
pypy/branch/jitffi/pypy/module/sys/version.py
pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py
pypy/branch/jitffi/pypy/rlib/rarithmetic.py
pypy/branch/jitffi/pypy/rlib/rgc.py
pypy/branch/jitffi/pypy/rlib/rstring.py
pypy/branch/jitffi/pypy/rlib/rwin32.py
pypy/branch/jitffi/pypy/rlib/test/test_rgc.py
pypy/branch/jitffi/pypy/rpython/llinterp.py
pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py
pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py
pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py
pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py
pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py
pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py
pypy/branch/jitffi/pypy/rpython/memory/gc/base.py
pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py
pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py
pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py
pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py
pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py
pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py
pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py
pypy/branch/jitffi/pypy/rpython/memory/support.py
pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py
pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py
pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py
pypy/branch/jitffi/pypy/rpython/rptr.py
pypy/branch/jitffi/pypy/translator/c/funcgen.py
pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py
pypy/branch/jitffi/pypy/translator/c/genc.py
pypy/branch/jitffi/pypy/translator/c/src/mem.h
pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py
pypy/branch/jitffi/pypy/translator/exceptiontransform.py
Log:
merge from trunk: svn merge svn+ssh://codespeak.net/svn/pypy/trunk/pypy -r77083:HEAD \n in particular, it merges the resoperation refactoring
Modified: pypy/branch/jitffi/pypy/config/translationoption.py
==============================================================================
--- pypy/branch/jitffi/pypy/config/translationoption.py (original)
+++ pypy/branch/jitffi/pypy/config/translationoption.py Thu Sep 23 16:53:32 2010
@@ -52,7 +52,7 @@
# gc
ChoiceOption("gc", "Garbage Collection Strategy",
["boehm", "ref", "marksweep", "semispace", "statistics",
- "generation", "hybrid", "markcompact", "none"],
+ "generation", "hybrid", "markcompact", "minimark", "none"],
"ref", requires={
"ref": [("translation.rweakref", False), # XXX
("translation.gctransformer", "ref")],
@@ -65,6 +65,7 @@
"hybrid": [("translation.gctransformer", "framework")],
"boehm": [("translation.gctransformer", "boehm")],
"markcompact": [("translation.gctransformer", "framework")],
+ "minimark": [("translation.gctransformer", "framework")],
},
cmdline="--gc"),
ChoiceOption("gctransformer", "GC transformer that is used - internal",
Modified: pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt
==============================================================================
--- pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt (original)
+++ pypy/branch/jitffi/pypy/doc/discussion/finalizer-order.txt Thu Sep 23 16:53:32 2010
@@ -133,8 +133,8 @@
that doesn't change the state of an object, we don't follow its children
recursively.
-In practice we can encode the 4 states with a single extra bit in the
-header:
+In practice, in the SemiSpace, Generation and Hybrid GCs, we can encode
+the 4 states with a single extra bit in the header:
===== ============= ======== ====================
state is_forwarded? bit set? bit set in the copy?
@@ -150,3 +150,17 @@
bit in the copy at the end, to clean up before the next collection
(which means recursively bumping the state from 2 to 3 in the final
loop).
+
+In the MiniMark GC, the objects don't move (apart from when they are
+copied out of the nursery), but we use the flag GCFLAG_VISITED to mark
+objects that survive, so we can also have a single extra bit for
+finalizers:
+
+ ===== ============== ============================
+ state GCFLAG_VISITED GCFLAG_FINALIZATION_ORDERING
+ ===== ============== ============================
+ 0 no no
+ 1 no yes
+ 2 yes yes
+ 3 yes no
+ ===== ============== ============================
Modified: pypy/branch/jitffi/pypy/interpreter/baseobjspace.py
==============================================================================
--- pypy/branch/jitffi/pypy/interpreter/baseobjspace.py (original)
+++ pypy/branch/jitffi/pypy/interpreter/baseobjspace.py Thu Sep 23 16:53:32 2010
@@ -71,7 +71,8 @@
space.wrap("__class__ assignment: only for heap types"))
def user_setup(self, space, w_subtype):
- assert False, "only for interp-level user subclasses from typedef.py"
+ raise NotImplementedError("only for interp-level user subclasses "
+ "from typedef.py")
def getname(self, space, default):
try:
Modified: pypy/branch/jitffi/pypy/jit/backend/cli/method.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/cli/method.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/cli/method.py Thu Sep 23 16:53:32 2010
@@ -207,9 +207,9 @@
def _collect_types(self, operations, box2classes):
for op in operations:
- if op.opnum in (rop.GETFIELD_GC, rop.SETFIELD_GC):
+ if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC):
box = op.args[0]
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.FieldDescr)
box2classes.setdefault(box, []).append(descr.selfclass)
if op in self.cliloop.guard2ops:
@@ -335,7 +335,7 @@
while self.i < N:
op = oplist[self.i]
self.emit_debug(op.repr())
- func = self.operations[op.opnum]
+ func = self.operations[op.getopnum()]
assert func is not None
func(self, op)
self.i += 1
@@ -357,10 +357,10 @@
assert op.is_guard()
if op in self.cliloop.guard2ops:
inputargs, suboperations = self.cliloop.guard2ops[op]
- self.match_var_fox_boxes(op.fail_args, inputargs)
+ self.match_var_fox_boxes(op.getfailargs(), inputargs)
self.emit_operations(suboperations)
else:
- self.emit_return_failed_op(op, op.fail_args)
+ self.emit_return_failed_op(op, op.getfailargs())
def emit_end(self):
assert self.branches == []
@@ -410,7 +410,7 @@
def emit_ovf_op(self, op, emit_op):
next_op = self.oplist[self.i+1]
- if next_op.opnum == rop.GUARD_NO_OVERFLOW:
+ if next_op.getopnum() == rop.GUARD_NO_OVERFLOW:
self.i += 1
self.emit_ovf_op_and_guard(op, next_op, emit_op)
return
@@ -544,7 +544,7 @@
self.emit_guard_overflow_impl(op, OpCodes.Brfalse)
def emit_op_jump(self, op):
- target_token = op.descr
+ target_token = op.getdescr()
assert isinstance(target_token, LoopToken)
if target_token.cliloop is self.cliloop:
# jump to the beginning of the loop
@@ -586,7 +586,7 @@
self.store_result(op)
def emit_op_instanceof(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_clitype()
op.args[0].load(self)
@@ -604,7 +604,7 @@
self.store_result(op)
def emit_op_call_impl(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.StaticMethDescr)
delegate_type = descr.get_delegate_clitype()
meth_invoke = descr.get_meth_info()
@@ -619,7 +619,7 @@
emit_op_call_pure = emit_op_call
def emit_op_oosend(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.MethDescr)
clitype = descr.get_self_clitype()
methinfo = descr.get_meth_info()
@@ -639,7 +639,7 @@
self.store_result(op)
def emit_op_getfield_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.FieldDescr)
clitype = descr.get_self_clitype()
fieldinfo = descr.get_field_info()
@@ -653,7 +653,7 @@
emit_op_getfield_gc_pure = emit_op_getfield_gc
def emit_op_setfield_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.FieldDescr)
clitype = descr.get_self_clitype()
fieldinfo = descr.get_field_info()
@@ -665,7 +665,7 @@
self.il.Emit(OpCodes.Stfld, fieldinfo)
def emit_op_getarrayitem_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_array_clitype()
itemtype = descr.get_clitype()
@@ -678,7 +678,7 @@
emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc
def emit_op_setarrayitem_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_array_clitype()
itemtype = descr.get_clitype()
@@ -689,7 +689,7 @@
self.il.Emit(OpCodes.Stelem, itemtype)
def emit_op_arraylen_gc(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
clitype = descr.get_array_clitype()
op.args[0].load(self)
@@ -698,7 +698,7 @@
self.store_result(op)
def emit_op_new_array(self, op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, runner.TypeDescr)
item_clitype = descr.get_clitype()
if item_clitype is None:
Modified: pypy/branch/jitffi/pypy/jit/backend/cli/runner.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/cli/runner.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/cli/runner.py Thu Sep 23 16:53:32 2010
@@ -105,7 +105,7 @@
def _attach_token_to_faildescrs(self, token, operations):
for op in operations:
if op.is_guard():
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, AbstractFailDescr)
descr._loop_token = token
descr._guard_op = op
@@ -136,7 +136,7 @@
func = cliloop.funcbox.holder.GetFunc()
func(self.get_inputargs())
op = self.failing_ops[self.inputargs.get_failed_op()]
- return op.descr
+ return op.getdescr()
def set_future_value_int(self, index, intvalue):
self.get_inputargs().set_int(index, intvalue)
Modified: pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/llgraph/runner.py Thu Sep 23 16:53:32 2010
@@ -151,16 +151,17 @@
def _compile_operations(self, c, operations, var2index):
for op in operations:
- llimpl.compile_add(c, op.opnum)
- descr = op.descr
+ llimpl.compile_add(c, op.getopnum())
+ descr = op.getdescr()
if isinstance(descr, Descr):
llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo)
- if isinstance(descr, history.LoopToken) and op.opnum != rop.JUMP:
+ if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP:
llimpl.compile_add_loop_token(c, descr)
if self.is_oo and isinstance(descr, (OODescr, MethDescr)):
# hack hack, not rpython
- c._obj.externalobj.operations[-1].descr = descr
- for x in op.args:
+ c._obj.externalobj.operations[-1].setdescr(descr)
+ for i in range(op.numargs()):
+ x = op.getarg(i)
if isinstance(x, history.Box):
llimpl.compile_add_var(c, var2index[x])
elif isinstance(x, history.ConstInt):
@@ -173,10 +174,10 @@
raise Exception("'%s' args contain: %r" % (op.getopname(),
x))
if op.is_guard():
- faildescr = op.descr
+ faildescr = op.getdescr()
assert isinstance(faildescr, history.AbstractFailDescr)
faildescr._fail_args_types = []
- for box in op.fail_args:
+ for box in op.getfailargs():
if box is None:
type = history.HOLE
else:
@@ -185,7 +186,7 @@
fail_index = self.get_fail_descr_number(faildescr)
index = llimpl.compile_add_fail(c, fail_index)
faildescr._compiled_fail = c, index
- for box in op.fail_args:
+ for box in op.getfailargs():
if box is not None:
llimpl.compile_add_fail_arg(c, var2index[box])
else:
@@ -203,13 +204,13 @@
x))
op = operations[-1]
assert op.is_final()
- if op.opnum == rop.JUMP:
- targettoken = op.descr
+ if op.getopnum() == rop.JUMP:
+ targettoken = op.getdescr()
assert isinstance(targettoken, history.LoopToken)
compiled_version = targettoken._llgraph_compiled_version
llimpl.compile_add_jump_target(c, compiled_version)
- elif op.opnum == rop.FINISH:
- faildescr = op.descr
+ elif op.getopnum() == rop.FINISH:
+ faildescr = op.getdescr()
index = self.get_fail_descr_number(faildescr)
llimpl.compile_add_fail(c, index)
else:
@@ -280,7 +281,7 @@
def __init__(self, *args, **kwds):
BaseCPU.__init__(self, *args, **kwds)
self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr')
-
+
def fielddescrof(self, S, fieldname):
ofs, size = symbolic.get_field_token(S, fieldname)
token = history.getkind(getattr(S, fieldname))
@@ -504,7 +505,7 @@
return ootype.cast_to_object(e)
else:
return ootype.NULL
-
+
def get_exc_value(self):
if llimpl._last_exception:
earg = llimpl._last_exception.args[1]
@@ -580,7 +581,7 @@
x = descr.callmeth(selfbox, argboxes)
# XXX: return None if METH.RESULT is Void
return x
-
+
def make_getargs(ARGS):
argsiter = unrolling_iterable(ARGS)
@@ -612,7 +613,7 @@
class KeyManager(object):
"""
Helper class to convert arbitrary dictionary keys to integers.
- """
+ """
def __init__(self):
self.keys = {}
@@ -695,7 +696,7 @@
self.ARRAY = ARRAY = ootype.Array(TYPE)
def create():
return boxresult(TYPE, ootype.new(TYPE))
-
+
def create_array(lengthbox):
n = lengthbox.getint()
return boxresult(ARRAY, ootype.oonewarray(ARRAY, n))
@@ -757,7 +758,7 @@
obj = objbox.getref(TYPE)
value = unwrap(T, valuebox)
setattr(obj, fieldname, value)
-
+
self.getfield = getfield
self.setfield = setfield
self._is_pointer_field = (history.getkind(T) == 'ref')
Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/llsupport/gc.py Thu Sep 23 16:53:32 2010
@@ -41,9 +41,12 @@
moving_gc = False
gcrootmap = None
- def __init__(self, gcdescr, translator, rtyper):
- GcLLDescription.__init__(self, gcdescr, translator, rtyper)
- # grab a pointer to the Boehm 'malloc' function
+ @classmethod
+ def configure_boehm_once(cls):
+ """ Configure boehm only once, since we don't cache failures
+ """
+ if hasattr(cls, 'malloc_fn_ptr'):
+ return cls.malloc_fn_ptr
from pypy.rpython.tool import rffi_platform
compilation_info = rffi_platform.configure_boehm()
@@ -59,13 +62,20 @@
GC_MALLOC = "GC_local_malloc"
else:
GC_MALLOC = "GC_malloc"
-
malloc_fn_ptr = rffi.llexternal(GC_MALLOC,
[lltype.Signed], # size_t, but good enough
llmemory.GCREF,
compilation_info=compilation_info,
sandboxsafe=True,
_nowrapper=True)
+ cls.malloc_fn_ptr = malloc_fn_ptr
+ cls.compilation_info = compilation_info
+ return malloc_fn_ptr
+
+ def __init__(self, gcdescr, translator, rtyper):
+ GcLLDescription.__init__(self, gcdescr, translator, rtyper)
+ # grab a pointer to the Boehm 'malloc' function
+ malloc_fn_ptr = self.configure_boehm_once()
self.funcptr_for_new = malloc_fn_ptr
# on some platform GC_init is required before any other
@@ -73,7 +83,7 @@
# XXX move this to tests
init_fn_ptr = rffi.llexternal("GC_init",
[], lltype.Void,
- compilation_info=compilation_info,
+ compilation_info=self.compilation_info,
sandboxsafe=True,
_nowrapper=True)
@@ -559,12 +569,12 @@
#
newops = []
for op in operations:
- if op.opnum == rop.DEBUG_MERGE_POINT:
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
continue
# ---------- replace ConstPtrs with GETFIELD_RAW ----------
# xxx some performance issue here
- for i in range(len(op.args)):
- v = op.args[i]
+ for i in range(op.numargs()):
+ v = op.getarg(i)
if isinstance(v, ConstPtr) and bool(v.value):
addr = self.gcrefs.get_address_of_gcref(v.value)
# ^^^even for non-movable objects, to record their presence
@@ -574,23 +584,21 @@
newops.append(ResOperation(rop.GETFIELD_RAW,
[ConstInt(addr)], box,
self.single_gcref_descr))
- op.args[i] = box
+ op.setarg(i, box)
# ---------- write barrier for SETFIELD_GC ----------
- if op.opnum == rop.SETFIELD_GC:
- v = op.args[1]
+ if op.getopnum() == rop.SETFIELD_GC:
+ v = op.getarg(1)
if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
bool(v.value)): # store a non-NULL
- self._gen_write_barrier(newops, op.args[0], v)
- op = ResOperation(rop.SETFIELD_RAW, op.args, None,
- descr=op.descr)
+ self._gen_write_barrier(newops, op.getarg(0), v)
+ op = op.copy_and_change(rop.SETFIELD_RAW)
# ---------- write barrier for SETARRAYITEM_GC ----------
- if op.opnum == rop.SETARRAYITEM_GC:
- v = op.args[2]
+ if op.getopnum() == rop.SETARRAYITEM_GC:
+ v = op.getarg(2)
if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
bool(v.value)): # store a non-NULL
- self._gen_write_barrier(newops, op.args[0], v)
- op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None,
- descr=op.descr)
+ self._gen_write_barrier(newops, op.getarg(0), v)
+ op = op.copy_and_change(rop.SETARRAYITEM_RAW)
# ----------
newops.append(op)
del operations[:]
Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/llsupport/regalloc.py Thu Sep 23 16:53:32 2010
@@ -81,6 +81,10 @@
for v in vars:
self.possibly_free_var(v)
+ def possibly_free_vars_for_op(self, op):
+ for i in range(op.numargs()):
+ self.possibly_free_var(op.getarg(i))
+
def _check_invariants(self):
if not we_are_translated():
# make sure no duplicates
Modified: pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/llsupport/test/test_gc.py Thu Sep 23 16:53:32 2010
@@ -258,18 +258,18 @@
gc_ll_descr._gen_write_barrier(newops, v_base, v_value)
assert llop1.record == []
assert len(newops) == 1
- assert newops[0].opnum == rop.COND_CALL_GC_WB
- assert newops[0].args[0] == v_base
- assert newops[0].args[1] == v_value
+ assert newops[0].getopnum() == rop.COND_CALL_GC_WB
+ assert newops[0].getarg(0) == v_base
+ assert newops[0].getarg(1) == v_value
assert newops[0].result is None
- wbdescr = newops[0].descr
+ wbdescr = newops[0].getdescr()
assert isinstance(wbdescr.jit_wb_if_flag, int)
assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int)
assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int)
def test_get_rid_of_debug_merge_point(self):
operations = [
- ResOperation(rop.DEBUG_MERGE_POINT, [], None),
+ ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None),
]
gc_ll_descr = self.gc_ll_descr
gc_ll_descr.rewrite_assembler(None, operations)
@@ -298,13 +298,14 @@
gc_ll_descr.gcrefs = MyFakeGCRefList()
gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations)
assert len(operations) == 2
- assert operations[0].opnum == rop.GETFIELD_RAW
- assert operations[0].args == [ConstInt(43)]
- assert operations[0].descr == gc_ll_descr.single_gcref_descr
+ assert operations[0].getopnum() == rop.GETFIELD_RAW
+ assert operations[0].getarg(0) == ConstInt(43)
+ assert operations[0].getdescr() == gc_ll_descr.single_gcref_descr
v_box = operations[0].result
assert isinstance(v_box, BoxPtr)
- assert operations[1].opnum == rop.PTR_EQ
- assert operations[1].args == [v_random_box, v_box]
+ assert operations[1].getopnum() == rop.PTR_EQ
+ assert operations[1].getarg(0) == v_random_box
+ assert operations[1].getarg(1) == v_box
assert operations[1].result == v_result
def test_rewrite_assembler_1_cannot_move(self):
@@ -336,8 +337,9 @@
finally:
rgc.can_move = old_can_move
assert len(operations) == 1
- assert operations[0].opnum == rop.PTR_EQ
- assert operations[0].args == [v_random_box, ConstPtr(s_gcref)]
+ assert operations[0].getopnum() == rop.PTR_EQ
+ assert operations[0].getarg(0) == v_random_box
+ assert operations[0].getarg(1) == ConstPtr(s_gcref)
assert operations[0].result == v_result
# check that s_gcref gets added to the list anyway, to make sure
# that the GC sees it
@@ -356,14 +358,15 @@
gc_ll_descr.rewrite_assembler(self.fake_cpu, operations)
assert len(operations) == 2
#
- assert operations[0].opnum == rop.COND_CALL_GC_WB
- assert operations[0].args[0] == v_base
- assert operations[0].args[1] == v_value
+ assert operations[0].getopnum() == rop.COND_CALL_GC_WB
+ assert operations[0].getarg(0) == v_base
+ assert operations[0].getarg(1) == v_value
assert operations[0].result is None
#
- assert operations[1].opnum == rop.SETFIELD_RAW
- assert operations[1].args == [v_base, v_value]
- assert operations[1].descr == field_descr
+ assert operations[1].getopnum() == rop.SETFIELD_RAW
+ assert operations[1].getarg(0) == v_base
+ assert operations[1].getarg(1) == v_value
+ assert operations[1].getdescr() == field_descr
def test_rewrite_assembler_3(self):
# check write barriers before SETARRAYITEM_GC
@@ -379,11 +382,13 @@
gc_ll_descr.rewrite_assembler(self.fake_cpu, operations)
assert len(operations) == 2
#
- assert operations[0].opnum == rop.COND_CALL_GC_WB
- assert operations[0].args[0] == v_base
- assert operations[0].args[1] == v_value
+ assert operations[0].getopnum() == rop.COND_CALL_GC_WB
+ assert operations[0].getarg(0) == v_base
+ assert operations[0].getarg(1) == v_value
assert operations[0].result is None
#
- assert operations[1].opnum == rop.SETARRAYITEM_RAW
- assert operations[1].args == [v_base, v_index, v_value]
- assert operations[1].descr == array_descr
+ assert operations[1].getopnum() == rop.SETARRAYITEM_RAW
+ assert operations[1].getarg(0) == v_base
+ assert operations[1].getarg(1) == v_index
+ assert operations[1].getarg(2) == v_value
+ assert operations[1].getdescr() == array_descr
Modified: pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/llvm/compile.py Thu Sep 23 16:53:32 2010
@@ -107,7 +107,7 @@
# store away the exception into self.backup_exc_xxx, *unless* the
# branch starts with a further GUARD_EXCEPTION/GUARD_NO_EXCEPTION.
if exc:
- opnum = operations[0].opnum
+ opnum = operations[0].getopnum()
if opnum not in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION):
self._store_away_exception()
# Normal handling of the operations follows.
@@ -115,7 +115,7 @@
self._generate_op(op)
def _generate_op(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for i, name in all_operations:
if opnum == i:
meth = getattr(self, name)
@@ -475,7 +475,7 @@
return location
def generate_GETFIELD_GC(self, op):
- loc = self._generate_field_gep(op.args[0], op.descr)
+ loc = self._generate_field_gep(op.args[0], op.getdescr())
self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "")
generate_GETFIELD_GC_PURE = generate_GETFIELD_GC
@@ -483,7 +483,7 @@
generate_GETFIELD_RAW_PURE = generate_GETFIELD_GC
def generate_SETFIELD_GC(self, op):
- fielddescr = op.descr
+ fielddescr = op.getdescr()
loc = self._generate_field_gep(op.args[0], fielddescr)
assert isinstance(fielddescr, FieldDescr)
getarg = self.cpu.getarg_by_index[fielddescr.size_index]
@@ -491,7 +491,7 @@
llvm_rffi.LLVMBuildStore(self.builder, value_ref, loc, "")
def generate_CALL(self, op):
- calldescr = op.descr
+ calldescr = op.getdescr()
assert isinstance(calldescr, CallDescr)
ty_function_ptr = self.cpu.get_calldescr_ty_function_ptr(calldescr)
v = op.args[0]
@@ -579,7 +579,7 @@
self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "")
def generate_ARRAYLEN_GC(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
self._generate_len(op, arraydescr.ty_array_ptr,
self.cpu.const_array_index_length)
@@ -598,7 +598,7 @@
return location
def _generate_array_gep(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
location = self._generate_gep(op, arraydescr.ty_array_ptr,
self.cpu.const_array_index_array)
@@ -612,7 +612,7 @@
def generate_SETARRAYITEM_GC(self, op):
loc = self._generate_array_gep(op)
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
getarg = self.cpu.getarg_by_index[arraydescr.itemsize_index]
value_ref = getarg(self, op.args[2])
@@ -660,7 +660,7 @@
return res
def generate_NEW(self, op):
- sizedescr = op.descr
+ sizedescr = op.getdescr()
assert isinstance(sizedescr, SizeDescr)
res = self._generate_new(self.cpu._make_const_int(sizedescr.size))
self.vars[op.result] = res
@@ -695,7 +695,7 @@
self.vars[op.result] = res
def generate_NEW_ARRAY(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, ArrayDescr)
self._generate_new_array(op, arraydescr.ty_array_ptr,
self.cpu._make_const_int(arraydescr.itemsize),
Modified: pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/test/runner_test.py Thu Sep 23 16:53:32 2010
@@ -1,5 +1,6 @@
import py, sys, random, os, struct, operator
from pypy.jit.metainterp.history import (AbstractFailDescr,
+ AbstractDescr,
BasicFailDescr,
BoxInt, Box, BoxPtr,
LoopToken,
@@ -39,7 +40,7 @@
else:
raise NotImplementedError(box)
res = self.cpu.execute_token(looptoken)
- if res is operations[-1].descr:
+ if res is operations[-1].getdescr():
self.guard_failed = False
else:
self.guard_failed = True
@@ -74,10 +75,11 @@
ResOperation(rop.FINISH, results, None,
descr=BasicFailDescr(0))]
if operations[0].is_guard():
- operations[0].fail_args = []
+ operations[0].setfailargs([])
if not descr:
descr = BasicFailDescr(1)
- operations[0].descr = descr
+ if descr is not None:
+ operations[0].setdescr(descr)
inputargs = []
for box in valueboxes:
if isinstance(box, Box) and box not in inputargs:
@@ -116,7 +118,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [i1]
+ operations[2].setfailargs([i1])
self.cpu.compile_loop(inputargs, operations, looptoken)
self.cpu.set_future_value_int(0, 2)
@@ -137,7 +139,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [None, None, i1, None]
+ operations[2].setfailargs([None, None, i1, None])
self.cpu.compile_loop(inputargs, operations, looptoken)
self.cpu.set_future_value_int(0, 2)
@@ -160,7 +162,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [i1]
+ operations[2].setfailargs([i1])
wr_i1 = weakref.ref(i1)
wr_guard = weakref.ref(operations[2])
self.cpu.compile_loop(inputargs, operations, looptoken)
@@ -184,7 +186,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [i1]
+ operations[2].setfailargs([i1])
self.cpu.compile_loop(inputargs, operations, looptoken)
i1b = BoxInt()
@@ -194,7 +196,7 @@
ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
]
- bridge[1].fail_args = [i1b]
+ bridge[1].setfailargs([i1b])
self.cpu.compile_bridge(faildescr1, [i1b], bridge)
@@ -218,7 +220,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [None, i1, None]
+ operations[2].setfailargs([None, i1, None])
self.cpu.compile_loop(inputargs, operations, looptoken)
i1b = BoxInt()
@@ -228,7 +230,7 @@
ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
]
- bridge[1].fail_args = [i1b]
+ bridge[1].setfailargs([i1b])
self.cpu.compile_bridge(faildescr1, [i1b], bridge)
@@ -251,7 +253,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[2].fail_args = [None, i1, None]
+ operations[2].setfailargs([None, i1, None])
self.cpu.compile_loop(inputargs, operations, looptoken)
self.cpu.set_future_value_int(0, 2)
@@ -317,7 +319,7 @@
descr=BasicFailDescr()),
ResOperation(rop.JUMP, [z, t], None, descr=looptoken),
]
- operations[-2].fail_args = [t, z]
+ operations[-2].setfailargs([t, z])
cpu.compile_loop([x, y], operations, looptoken)
self.cpu.set_future_value_int(0, 0)
self.cpu.set_future_value_int(1, 10)
@@ -363,7 +365,7 @@
ResOperation(rop.FINISH, [v_res], None,
descr=BasicFailDescr(2)),
]
- ops[1].fail_args = []
+ ops[1].setfailargs([])
else:
v_exc = self.cpu.ts.BoxRef()
ops = [
@@ -372,7 +374,7 @@
descr=BasicFailDescr(1)),
ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)),
]
- ops[1].fail_args = [v_res]
+ ops[1].setfailargs([v_res])
#
looptoken = LoopToken()
self.cpu.compile_loop([v1, v2], ops, looptoken)
@@ -909,8 +911,8 @@
ResOperation(rop.GUARD_TRUE, [i2], None),
ResOperation(rop.JUMP, jumpargs, None, descr=looptoken),
]
- operations[2].fail_args = inputargs[:]
- operations[2].descr = faildescr
+ operations[2].setfailargs(inputargs[:])
+ operations[2].setdescr(faildescr)
#
self.cpu.compile_loop(inputargs, operations, looptoken)
#
@@ -975,7 +977,7 @@
ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1),
ResOperation(rop.FINISH, fboxes, None, descr=faildescr2),
]
- operations[-2].fail_args = fboxes
+ operations[-2].setfailargs(fboxes)
looptoken = LoopToken()
self.cpu.compile_loop(fboxes, operations, looptoken)
@@ -1098,7 +1100,7 @@
descr=BasicFailDescr(4)),
ResOperation(rop.FINISH, [], None,
descr=BasicFailDescr(5))]
- operations[1].fail_args = []
+ operations[1].setfailargs([])
looptoken = LoopToken()
# Use "set" to unique-ify inputargs
unique_testcase_list = list(set(testcase))
@@ -1412,7 +1414,7 @@
FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void)
func_ptr = llhelper(lltype.Ptr(FUNC), func_void)
funcbox = self.get_funcbox(self.cpu, func_ptr)
- class WriteBarrierDescr:
+ class WriteBarrierDescr(AbstractDescr):
jit_wb_if_flag = 4096
jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10')
jit_wb_if_flag_singlebyte = 0x10
@@ -1462,7 +1464,7 @@
ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0))
]
- ops[2].fail_args = [i1, i0]
+ ops[2].setfailargs([i1, i0])
looptoken = LoopToken()
self.cpu.compile_loop([i0, i1], ops, looptoken)
self.cpu.set_future_value_int(0, 20)
@@ -1506,7 +1508,7 @@
ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0))
]
- ops[2].fail_args = [i1, i2, i0]
+ ops[2].setfailargs([i1, i2, i0])
looptoken = LoopToken()
self.cpu.compile_loop([i0, i1], ops, looptoken)
self.cpu.set_future_value_int(0, 20)
@@ -1551,7 +1553,7 @@
ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0))
]
- ops[2].fail_args = [i1, f2, i0]
+ ops[2].setfailargs([i1, f2, i0])
looptoken = LoopToken()
self.cpu.compile_loop([i0, i1], ops, looptoken)
self.cpu.set_future_value_int(0, 20)
@@ -1824,7 +1826,7 @@
f2 = float_add(f0, f1)
finish(f2)'''
loop = parse(ops)
- done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr)
+ done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr())
looptoken = LoopToken()
looptoken.outermost_jitdriver_sd = FakeJitDriverSD()
self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
Modified: pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/test/test_ll_random.py Thu Sep 23 16:53:32 2010
@@ -464,7 +464,7 @@
self.put(builder, args, descr)
op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None,
descr=BasicFailDescr())
- op.fail_args = fail_subset
+ op.setfailargs(fail_subset)
builder.loop.operations.append(op)
# 5. Non raising-call and GUARD_EXCEPTION
@@ -486,7 +486,7 @@
exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu)
op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
descr=BasicFailDescr())
- op.fail_args = builder.subset_of_intvars(r)
+ op.setfailargs(builder.subset_of_intvars(r))
op._exc_box = None
builder.should_fail_by = op
builder.guard_op = op
@@ -507,7 +507,7 @@
exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
descr=BasicFailDescr())
- op.fail_args = fail_subset
+ op.setfailargs(fail_subset)
builder.loop.operations.append(op)
# 4. raising call and guard_no_exception
@@ -524,7 +524,7 @@
op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(),
descr=BasicFailDescr())
op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
- op.fail_args = builder.subset_of_intvars(r)
+ op.setfailargs(builder.subset_of_intvars(r))
builder.should_fail_by = op
builder.guard_op = op
builder.loop.operations.append(op)
@@ -548,7 +548,7 @@
op = ResOperation(rop.GUARD_EXCEPTION, [other_box], BoxPtr(),
descr=BasicFailDescr())
op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
- op.fail_args = builder.subset_of_intvars(r)
+ op.setfailargs(builder.subset_of_intvars(r))
builder.should_fail_by = op
builder.guard_op = op
builder.loop.operations.append(op)
Modified: pypy/branch/jitffi/pypy/jit/backend/test/test_random.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/test/test_random.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/test/test_random.py Thu Sep 23 16:53:32 2010
@@ -86,7 +86,7 @@
def process_operation(self, s, op, names, subops):
args = []
- for v in op.args:
+ for v in op.getarglist():
if v in names:
args.append(names[v])
## elif isinstance(v, ConstAddr):
@@ -105,11 +105,11 @@
args.append('ConstInt(%d)' % v.value)
else:
raise NotImplementedError(v)
- if op.descr is None:
+ if op.getdescr() is None:
descrstr = ''
else:
try:
- descrstr = ', ' + op.descr._random_info
+ descrstr = ', ' + op.getdescr()._random_info
except AttributeError:
descrstr = ', descr=...'
print >>s, ' ResOperation(rop.%s, [%s], %s%s),' % (
@@ -129,7 +129,7 @@
def print_loop_prebuilt(ops):
for op in ops:
- for arg in op.args:
+ for arg in op.getarglist():
if isinstance(arg, ConstPtr):
if arg not in names:
writevar(arg, 'const_ptr')
@@ -191,7 +191,7 @@
if self.should_fail_by is None:
fail_args = self.loop.operations[-1].args
else:
- fail_args = self.should_fail_by.fail_args
+ fail_args = self.should_fail_by.getfailargs()
for i, v in enumerate(fail_args):
if isinstance(v, (BoxFloat, ConstFloat)):
print >>s, (' assert cpu.get_latest_value_float(%d) == %r'
@@ -284,8 +284,8 @@
builder.intvars[:] = original_intvars
else:
op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None)
- op.descr = BasicFailDescr()
- op.fail_args = fail_subset
+ op.setdescr(BasicFailDescr())
+ op.setfailargs(fail_subset)
builder.loop.operations.append(op)
class BinaryOvfOperation(AbstractOvfOperation, BinaryOperation):
@@ -345,8 +345,8 @@
def produce_into(self, builder, r):
op, passing = self.gen_guard(builder, r)
builder.loop.operations.append(op)
- op.descr = BasicFailDescr()
- op.fail_args = builder.subset_of_intvars(r)
+ op.setdescr(BasicFailDescr())
+ op.setfailargs(builder.subset_of_intvars(r))
if not passing:
builder.should_fail_by = op
builder.guard_op = op
@@ -553,7 +553,7 @@
endvars = []
used_later = {}
for op in loop.operations:
- for v in op.args:
+ for v in op.getarglist():
used_later[v] = True
for v in startvars:
if v not in used_later:
@@ -577,11 +577,11 @@
def get_fail_args(self):
if self.should_fail_by.is_guard():
- assert self.should_fail_by.fail_args is not None
- return self.should_fail_by.fail_args
+ assert self.should_fail_by.getfailargs() is not None
+ return self.should_fail_by.getfailargs()
else:
- assert self.should_fail_by.opnum == rop.FINISH
- return self.should_fail_by.args
+ assert self.should_fail_by.getopnum() == rop.FINISH
+ return self.should_fail_by.getarglist()
def clear_state(self):
for v, S, fields in self.prebuilt_ptr_consts:
@@ -606,7 +606,7 @@
else:
raise NotImplementedError(box)
fail = cpu.execute_token(self.loop.token)
- assert fail is self.should_fail_by.descr
+ assert fail is self.should_fail_by.getdescr()
for i, v in enumerate(self.get_fail_args()):
if isinstance(v, (BoxFloat, ConstFloat)):
value = cpu.get_latest_value_float(i)
@@ -620,7 +620,7 @@
exc = cpu.grab_exc_value()
if (self.guard_op is not None and
self.guard_op.is_guard_exception()):
- if self.guard_op.opnum == rop.GUARD_NO_EXCEPTION:
+ if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION:
assert exc
else:
assert not exc
@@ -633,26 +633,26 @@
else:
op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box],
BoxPtr())
- op.descr = BasicFailDescr()
- op.fail_args = []
+ op.setdescr(BasicFailDescr())
+ op.setfailargs([])
return op
if self.dont_generate_more:
return False
r = self.r
guard_op = self.guard_op
- fail_args = guard_op.fail_args
- fail_descr = guard_op.descr
+ fail_args = guard_op.getfailargs()
+ fail_descr = guard_op.getdescr()
op = self.should_fail_by
- if not op.fail_args:
+ if not op.getfailargs():
return False
# generate the branch: a sequence of operations that ends in a FINISH
subloop = DummyLoop([])
if guard_op.is_guard_exception():
subloop.operations.append(exc_handling(guard_op))
bridge_builder = self.builder.fork(self.builder.cpu, subloop,
- op.fail_args[:])
- self.generate_ops(bridge_builder, r, subloop, op.fail_args[:])
+ op.getfailargs()[:])
+ self.generate_ops(bridge_builder, r, subloop, op.getfailargs()[:])
# note that 'self.guard_op' now points to the guard that will fail in
# this new bridge, while 'guard_op' still points to the guard that
# has just failed.
Modified: pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/x86/assembler.py Thu Sep 23 16:53:32 2010
@@ -1,16 +1,17 @@
import sys, os
from pypy.jit.backend.llsupport import symbolic
from pypy.jit.metainterp.history import Const, Box, BoxInt, BoxPtr, BoxFloat
-from pypy.jit.metainterp.history import AbstractFailDescr, INT, REF, FLOAT,\
- LoopToken
+from pypy.jit.metainterp.history import (AbstractFailDescr, INT, REF, FLOAT,
+ LoopToken)
from pypy.rpython.lltypesystem import lltype, rffi, rstr, llmemory
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython.annlowlevel import llhelper
from pypy.tool.uid import fixid
-from pypy.jit.backend.x86.regalloc import RegAlloc, \
- X86RegisterManager, X86XMMRegisterManager, get_ebp_ofs
+from pypy.jit.backend.x86.regalloc import (RegAlloc, X86RegisterManager,
+ X86XMMRegisterManager, get_ebp_ofs)
-from pypy.jit.backend.x86.arch import FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD, IS_X86_32, IS_X86_64
+from pypy.jit.backend.x86.arch import (FRAME_FIXED_SIZE, FORCE_INDEX_OFS, WORD,
+ IS_X86_32, IS_X86_64)
from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx,
esp, ebp, esi, edi,
@@ -389,8 +390,8 @@
def _find_debug_merge_point(self, operations):
for op in operations:
- if op.opnum == rop.DEBUG_MERGE_POINT:
- funcname = op.args[0]._get_str()
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
+ funcname = op.getarg(0)._get_str()
break
else:
funcname = "<loop %d>" % len(self.loop_run_counters)
@@ -418,7 +419,6 @@
mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target)
mc.JMP_r(X86_64_SCRATCH_REG.value)
- mc.valgrind_invalidated()
mc.done()
def _inject_debugging_code(self, operations):
@@ -681,25 +681,25 @@
self.mc.POP(loc)
def regalloc_perform(self, op, arglocs, resloc):
- genop_list[op.opnum](self, op, arglocs, resloc)
+ genop_list[op.getopnum()](self, op, arglocs, resloc)
def regalloc_perform_discard(self, op, arglocs):
- genop_discard_list[op.opnum](self, op, arglocs)
+ genop_discard_list[op.getopnum()](self, op, arglocs)
def regalloc_perform_with_guard(self, op, guard_op, faillocs,
arglocs, resloc, current_depths):
- faildescr = guard_op.descr
+ faildescr = guard_op.getdescr()
assert isinstance(faildescr, AbstractFailDescr)
faildescr._x86_current_depths = current_depths
- failargs = guard_op.fail_args
- guard_opnum = guard_op.opnum
+ failargs = guard_op.getfailargs()
+ guard_opnum = guard_op.getopnum()
guard_token = self.implement_guard_recovery(guard_opnum,
faildescr, failargs,
faillocs)
if op is None:
dispatch_opnum = guard_opnum
else:
- dispatch_opnum = op.opnum
+ dispatch_opnum = op.getopnum()
res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token,
arglocs, resloc)
faildescr._x86_adr_jump_offset = res
@@ -725,7 +725,7 @@
def _cmpop(cond, rev_cond):
def genop_cmp(self, op, arglocs, result_loc):
rl = result_loc.lowest8bits()
- if isinstance(op.args[0], Const):
+ if isinstance(op.getarg(0), Const):
self.mc.CMP(arglocs[1], arglocs[0])
self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value)
else:
@@ -755,8 +755,8 @@
def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond):
def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc):
- guard_opnum = guard_op.opnum
- if isinstance(op.args[0], Const):
+ guard_opnum = guard_op.getopnum()
+ if isinstance(op.getarg(0), Const):
self.mc.CMP(arglocs[1], arglocs[0])
if guard_opnum == rop.GUARD_FALSE:
return self.implement_guard(guard_token, rev_cond)
@@ -773,7 +773,7 @@
def _cmpop_guard_float(cond, false_cond, need_jp):
def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs,
result_loc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.UCOMISD(arglocs[0], arglocs[1])
# 16 is enough space for the rel8 jumps below and the rel32
# jump in implement_guard
@@ -942,7 +942,7 @@
genop_guard_float_ge = _cmpop_guard_float("AE", "B", False)
def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.UCOMISD(arglocs[0], arglocs[1])
# 16 is enough space for the rel8 jumps below and the rel32
# jump in implement_guard
@@ -970,7 +970,7 @@
self.mc.CVTSI2SD(resloc, arglocs[0])
def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.CMP(arglocs[0], imm(0))
if guard_opnum == rop.GUARD_TRUE:
return self.implement_guard(guard_token, 'Z')
@@ -984,7 +984,7 @@
self.mc.MOVZX8(resloc, rl)
def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, resloc):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
self.mc.CMP(arglocs[0], imm(0))
if guard_opnum == rop.GUARD_TRUE:
return self.implement_guard(guard_token, 'NZ')
@@ -1120,7 +1120,7 @@
assert isinstance(baseofs, ImmedLoc)
assert isinstance(scale_loc, ImmedLoc)
dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value)
- if op.args[2].type == FLOAT:
+ if op.getarg(2).type == FLOAT:
self.mc.MOVSD(dest_addr, value_loc)
else:
if IS_X86_64 and scale_loc.value == 3:
@@ -1216,7 +1216,7 @@
return addr
def _gen_guard_overflow(self, guard_op, guard_token):
- guard_opnum = guard_op.opnum
+ guard_opnum = guard_op.getopnum()
if guard_opnum == rop.GUARD_NO_OVERFLOW:
return self.implement_guard(guard_token, 'O')
elif guard_opnum == rop.GUARD_OVERFLOW:
@@ -1244,8 +1244,8 @@
genop_guard_guard_isnull = genop_guard_guard_false
def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2):
- if guard_op.args[0].type == FLOAT:
- assert guard_op.args[1].type == FLOAT
+ if guard_op.getarg(0).type == FLOAT:
+ assert guard_op.getarg(1).type == FLOAT
self.mc.UCOMISD(locs[0], locs[1])
else:
self.mc.CMP(locs[0], locs[1])
@@ -1636,8 +1636,8 @@
assert isinstance(sizeloc, ImmedLoc)
size = sizeloc.value
- if isinstance(op.args[0], Const):
- x = imm(op.args[0].getint())
+ if isinstance(op.getarg(0), Const):
+ x = imm(op.getarg(0).getint())
else:
x = arglocs[1]
if x is eax:
@@ -1656,7 +1656,7 @@
def genop_guard_call_may_force(self, op, guard_op, guard_token,
arglocs, result_loc):
- faildescr = guard_op.descr
+ faildescr = guard_op.getdescr()
fail_index = self.cpu.get_fail_descr_number(faildescr)
self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
self.genop_call(op, arglocs, result_loc)
@@ -1665,10 +1665,10 @@
def genop_guard_call_assembler(self, op, guard_op, guard_token,
arglocs, result_loc):
- faildescr = guard_op.descr
+ faildescr = guard_op.getdescr()
fail_index = self.cpu.get_fail_descr_number(faildescr)
self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, LoopToken)
assert len(arglocs) - 2 == len(descr._x86_arglocs[0])
#
@@ -1753,7 +1753,7 @@
def genop_discard_cond_call_gc_wb(self, op, arglocs):
# use 'mc._mc' directly instead of 'mc', to avoid
# bad surprizes if the code buffer is mostly full
- descr = op.descr
+ descr = op.getdescr()
if we_are_translated():
cls = self.cpu.gc_ll_descr.has_write_barrier_class()
assert cls is not None and isinstance(descr, cls)
Modified: pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/x86/codebuf.py Thu Sep 23 16:53:32 2010
@@ -29,6 +29,9 @@
self._pos = 0
def overwrite(self, pos, listofchars):
+ """ Overwrite a specified position with a given list of chars
+ (position is relative to the start of the code block)
+ """
make_sure_not_resized(listofchars)
assert pos + len(listofchars) <= self._size
for c in listofchars:
@@ -49,35 +52,38 @@
self.writechar(chr(n))
def get_relative_pos(self):
+ """ Current position, relative to code start
+ """
return self._pos
def tell(self):
+ """ Return the current absolute address within the machine code block
+ """
baseaddr = rffi.cast(lltype.Signed, self._data)
return baseaddr + self._pos
- def seekback(self, count):
- pos = self._pos - count
- self._pos = pos
- self._last_dump_start = pos
-
def done(self):
- # normally, no special action is needed here
+ """ Called at the end of writing of each piece of machine code.
+ Even though this function doesn't do much, it's extremely important
+ to call this for all tools to work, like valgrind or machine code
+ dumping
+ """
+ self.valgrind_invalidated()
if machine_code_dumper.enabled:
machine_code_dumper.dump_range(self, self._last_dump_start,
self._pos)
self._last_dump_start = self._pos
- def redone(self, frm, to):
- if machine_code_dumper.enabled:
- baseaddr = rffi.cast(lltype.Signed, self._data)
- machine_code_dumper.dump_range(self, frm - baseaddr, to - baseaddr)
-
def log(self, msg):
+ """ Insert information into machine code dumper, if enabled
+ """
if machine_code_dumper.enabled:
machine_code_dumper.dump(self, 'LOG', self._pos, msg)
def valgrind_invalidated(self):
- # mark the range of the InMemoryCodeBuilder as invalidated for Valgrind
+ """ Mark the range of the InMemoryCodeBuilder as invalidated
+ for Valgrind
+ """
from pypy.jit.backend.x86 import valgrind
valgrind.discard_translations(self._data, self._size)
Modified: pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/x86/regalloc.py Thu Sep 23 16:53:32 2010
@@ -234,6 +234,12 @@
else:
self.rm.possibly_free_var(var)
+ def possibly_free_vars_for_op(self, op):
+ for i in range(op.numargs()):
+ var = op.getarg(i)
+ if var is not None: # xxx kludgy
+ self.possibly_free_var(var)
+
def possibly_free_vars(self, vars):
for var in vars:
if var is not None: # xxx kludgy
@@ -262,12 +268,12 @@
selected_reg, need_lower_byte)
def _compute_loop_consts(self, inputargs, jump, looptoken):
- if jump.opnum != rop.JUMP or jump.descr is not looptoken:
+ if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken:
loop_consts = {}
else:
loop_consts = {}
for i in range(len(inputargs)):
- if inputargs[i] is jump.args[i]:
+ if inputargs[i] is jump.getarg(i):
loop_consts[inputargs[i]] = i
return loop_consts
@@ -312,7 +318,7 @@
self.assembler.regalloc_perform(op, arglocs, result_loc)
def locs_for_fail(self, guard_op):
- return [self.loc(v) for v in guard_op.fail_args]
+ return [self.loc(v) for v in guard_op.getfailargs()]
def perform_with_guard(self, op, guard_op, arglocs, result_loc):
faillocs = self.locs_for_fail(guard_op)
@@ -324,7 +330,7 @@
current_depths)
if op.result is not None:
self.possibly_free_var(op.result)
- self.possibly_free_vars(guard_op.fail_args)
+ self.possibly_free_vars(guard_op.getfailargs())
def perform_guard(self, guard_op, arglocs, result_loc):
faillocs = self.locs_for_fail(guard_op)
@@ -338,7 +344,7 @@
self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
result_loc,
current_depths)
- self.possibly_free_vars(guard_op.fail_args)
+ self.possibly_free_vars(guard_op.getfailargs())
def PerformDiscard(self, op, arglocs):
if not we_are_translated():
@@ -346,24 +352,24 @@
self.assembler.regalloc_perform_discard(op, arglocs)
def can_merge_with_next_guard(self, op, i, operations):
- if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER:
- assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED
+ if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER:
+ assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED
return True
if not op.is_comparison():
if op.is_ovf():
- if (operations[i + 1].opnum != rop.GUARD_NO_OVERFLOW and
- operations[i + 1].opnum != rop.GUARD_OVERFLOW):
+ if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and
+ operations[i + 1].getopnum() != rop.GUARD_OVERFLOW):
print "int_xxx_ovf not followed by guard_(no)_overflow"
raise AssertionError
return True
return False
- if (operations[i + 1].opnum != rop.GUARD_TRUE and
- operations[i + 1].opnum != rop.GUARD_FALSE):
+ if (operations[i + 1].getopnum() != rop.GUARD_TRUE and
+ operations[i + 1].getopnum() != rop.GUARD_FALSE):
return False
- if operations[i + 1].args[0] is not op.result:
+ if operations[i + 1].getarg(0) is not op.result:
return False
if (self.longevity[op.result][1] > i + 1 or
- op.result in operations[i + 1].fail_args):
+ op.result in operations[i + 1].getfailargs()):
return False
return True
@@ -376,13 +382,13 @@
self.xrm.position = i
if op.has_no_side_effect() and op.result not in self.longevity:
i += 1
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
continue
if self.can_merge_with_next_guard(op, i, operations):
- oplist_with_guard[op.opnum](self, op, operations[i + 1])
+ oplist_with_guard[op.getopnum()](self, op, operations[i + 1])
i += 1
else:
- oplist[op.opnum](self, op)
+ oplist[op.getopnum()](self, op)
if op.result is not None:
self.possibly_free_var(op.result)
self.rm._check_invariants()
@@ -402,19 +408,20 @@
op = operations[i]
if op.result is not None:
start_live[op.result] = i
- for arg in op.args:
+ for j in range(op.numargs()):
+ arg = op.getarg(j)
if isinstance(arg, Box):
if arg not in start_live:
- print "Bogus arg in operation %d at %d" % (op.opnum, i)
+ print "Bogus arg in operation %d at %d" % (op.getopnum(), i)
raise AssertionError
longevity[arg] = (start_live[arg], i)
if op.is_guard():
- for arg in op.fail_args:
+ for arg in op.getfailargs():
if arg is None: # hole
continue
assert isinstance(arg, Box)
if arg not in start_live:
- print "Bogus arg in guard %d at %d" % (op.opnum, i)
+ print "Bogus arg in guard %d at %d" % (op.getopnum(), i)
raise AssertionError
longevity[arg] = (start_live[arg], i)
for arg in inputargs:
@@ -432,9 +439,9 @@
return self.rm.loc(v)
def _consider_guard(self, op):
- loc = self.rm.make_sure_var_in_reg(op.args[0])
+ loc = self.rm.make_sure_var_in_reg(op.getarg(0))
self.perform_guard(op, [loc], None)
- self.rm.possibly_free_var(op.args[0])
+ self.rm.possibly_free_var(op.getarg(0))
consider_guard_true = _consider_guard
consider_guard_false = _consider_guard
@@ -442,52 +449,54 @@
consider_guard_isnull = _consider_guard
def consider_finish(self, op):
- locs = [self.loc(v) for v in op.args]
- locs_are_ref = [v.type == REF for v in op.args]
- fail_index = self.assembler.cpu.get_fail_descr_number(op.descr)
+ locs = [self.loc(op.getarg(i)) for i in range(op.numargs())]
+ locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())]
+ fail_index = self.assembler.cpu.get_fail_descr_number(op.getdescr())
self.assembler.generate_failure(fail_index, locs, self.exc,
locs_are_ref)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
def consider_guard_no_exception(self, op):
self.perform_guard(op, [], None)
def consider_guard_exception(self, op):
- loc = self.rm.make_sure_var_in_reg(op.args[0])
+ loc = self.rm.make_sure_var_in_reg(op.getarg(0))
box = TempBox()
- loc1 = self.rm.force_allocate_reg(box, op.args)
+ args = op.getarglist()
+ loc1 = self.rm.force_allocate_reg(box, args)
if op.result in self.longevity:
# this means, is it ever used
- resloc = self.rm.force_allocate_reg(op.result, op.args + [box])
+ resloc = self.rm.force_allocate_reg(op.result, args + [box])
else:
resloc = None
self.perform_guard(op, [loc, loc1], resloc)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
self.rm.possibly_free_var(box)
consider_guard_no_overflow = consider_guard_no_exception
consider_guard_overflow = consider_guard_no_exception
def consider_guard_value(self, op):
- x = self.make_sure_var_in_reg(op.args[0])
- y = self.loc(op.args[1])
+ x = self.make_sure_var_in_reg(op.getarg(0))
+ y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
def consider_guard_class(self, op):
- assert isinstance(op.args[0], Box)
- x = self.rm.make_sure_var_in_reg(op.args[0])
- y = self.loc(op.args[1])
+ assert isinstance(op.getarg(0), Box)
+ x = self.rm.make_sure_var_in_reg(op.getarg(0))
+ y = self.loc(op.getarg(1))
self.perform_guard(op, [x, y], None)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
consider_guard_nonnull_class = consider_guard_class
def _consider_binop_part(self, op):
- x = op.args[0]
- argloc = self.loc(op.args[1])
- loc = self.rm.force_result_in_reg(op.result, x, op.args)
- self.rm.possibly_free_var(op.args[1])
+ x = op.getarg(0)
+ argloc = self.loc(op.getarg(1))
+ args = op.getarglist()
+ loc = self.rm.force_result_in_reg(op.result, x, args)
+ self.rm.possibly_free_var(op.getarg(1))
return loc, argloc
def _consider_binop(self, op):
@@ -510,26 +519,27 @@
consider_int_add_ovf = _consider_binop_with_guard
def consider_int_neg(self, op):
- res = self.rm.force_result_in_reg(op.result, op.args[0])
+ res = self.rm.force_result_in_reg(op.result, op.getarg(0))
self.Perform(op, [res], res)
consider_int_invert = consider_int_neg
def consider_int_lshift(self, op):
- if isinstance(op.args[1], Const):
- loc2 = self.rm.convert_to_imm(op.args[1])
+ if isinstance(op.getarg(1), Const):
+ loc2 = self.rm.convert_to_imm(op.getarg(1))
else:
- loc2 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx)
- loc1 = self.rm.force_result_in_reg(op.result, op.args[0], op.args)
+ loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
+ args = op.getarglist()
+ loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), args)
self.Perform(op, [loc1, loc2], loc1)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
consider_int_rshift = consider_int_lshift
consider_uint_rshift = consider_int_lshift
def _consider_int_div_or_mod(self, op, resultreg, trashreg):
- l0 = self.rm.make_sure_var_in_reg(op.args[0], selected_reg=eax)
- l1 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx)
+ l0 = self.rm.make_sure_var_in_reg(op.getarg(0), selected_reg=eax)
+ l1 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
l2 = self.rm.force_allocate_reg(op.result, selected_reg=resultreg)
# the register (eax or edx) not holding what we are looking for
# will be just trash after that operation
@@ -538,7 +548,7 @@
assert l0 is eax
assert l1 is ecx
assert l2 is resultreg
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
self.rm.possibly_free_var(tmpvar)
def consider_int_mod(self, op):
@@ -552,17 +562,18 @@
consider_uint_floordiv = consider_int_floordiv
def _consider_compop(self, op, guard_op):
- vx = op.args[0]
- vy = op.args[1]
+ vx = op.getarg(0)
+ vy = op.getarg(1)
arglocs = [self.loc(vx), self.loc(vy)]
if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or
isinstance(vx, Const) or isinstance(vy, Const)):
pass
else:
arglocs[0] = self.rm.make_sure_var_in_reg(vx)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ self.rm.possibly_free_vars(args)
if guard_op is None:
- loc = self.rm.force_allocate_reg(op.result, op.args,
+ loc = self.rm.force_allocate_reg(op.result, args,
need_lower_byte=True)
self.Perform(op, arglocs, loc)
else:
@@ -582,10 +593,11 @@
consider_ptr_ne = _consider_compop
def _consider_float_op(self, op):
- loc1 = self.xrm.loc(op.args[1])
- loc0 = self.xrm.force_result_in_reg(op.result, op.args[0], op.args)
+ loc1 = self.xrm.loc(op.getarg(1))
+ args = op.getarglist()
+ loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), args)
self.Perform(op, [loc0, loc1], loc0)
- self.xrm.possibly_free_vars(op.args)
+ self.xrm.possibly_free_vars_for_op(op)
consider_float_add = _consider_float_op
consider_float_sub = _consider_float_op
@@ -593,11 +605,12 @@
consider_float_truediv = _consider_float_op
def _consider_float_cmp(self, op, guard_op):
- loc0 = self.xrm.make_sure_var_in_reg(op.args[0], op.args,
+ args = op.getarglist()
+ loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args,
imm_fine=False)
- loc1 = self.xrm.loc(op.args[1])
+ loc1 = self.xrm.loc(op.getarg(1))
arglocs = [loc0, loc1]
- self.xrm.possibly_free_vars(op.args)
+ self.xrm.possibly_free_vars_for_op(op)
if guard_op is None:
res = self.rm.force_allocate_reg(op.result, need_lower_byte=True)
self.Perform(op, arglocs, res)
@@ -612,26 +625,26 @@
consider_float_ge = _consider_float_cmp
def consider_float_neg(self, op):
- loc0 = self.xrm.force_result_in_reg(op.result, op.args[0])
+ loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0))
self.Perform(op, [loc0], loc0)
- self.xrm.possibly_free_var(op.args[0])
+ self.xrm.possibly_free_var(op.getarg(0))
def consider_float_abs(self, op):
- loc0 = self.xrm.force_result_in_reg(op.result, op.args[0])
+ loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0))
self.Perform(op, [loc0], loc0)
- self.xrm.possibly_free_var(op.args[0])
+ self.xrm.possibly_free_var(op.getarg(0))
def consider_cast_float_to_int(self, op):
- loc0 = self.xrm.make_sure_var_in_reg(op.args[0], imm_fine=False)
+ loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), imm_fine=False)
loc1 = self.rm.force_allocate_reg(op.result)
self.Perform(op, [loc0], loc1)
- self.xrm.possibly_free_var(op.args[0])
+ self.xrm.possibly_free_var(op.getarg(0))
def consider_cast_int_to_float(self, op):
- loc0 = self.rm.loc(op.args[0])
+ loc0 = self.rm.loc(op.getarg(0))
loc1 = self.xrm.force_allocate_reg(op.result)
self.Perform(op, [loc0], loc1)
- self.rm.possibly_free_var(op.args[0])
+ self.rm.possibly_free_var(op.getarg(0))
def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None):
save_all_regs = guard_not_forced_op is not None
@@ -650,11 +663,11 @@
self.Perform(op, arglocs, resloc)
def _consider_call(self, op, guard_not_forced_op=None):
- calldescr = op.descr
+ calldescr = op.getdescr()
assert isinstance(calldescr, BaseCallDescr)
- assert len(calldescr.arg_classes) == len(op.args) - 1
+ assert len(calldescr.arg_classes) == op.numargs() - 1
size = calldescr.get_result_size(self.translate_support_code)
- self._call(op, [imm(size)] + [self.loc(arg) for arg in op.args],
+ self._call(op, [imm(size)] + [self.loc(op.getarg(i)) for i in range(op.numargs())],
guard_not_forced_op=guard_not_forced_op)
def consider_call(self, op):
@@ -665,28 +678,29 @@
self._consider_call(op, guard_op)
def consider_call_assembler(self, op, guard_op):
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, LoopToken)
jd = descr.outermost_jitdriver_sd
assert jd is not None
size = jd.portal_calldescr.get_result_size(self.translate_support_code)
vable_index = jd.index_of_virtualizable
if vable_index >= 0:
- self.rm._sync_var(op.args[vable_index])
- vable = self.fm.loc(op.args[vable_index])
+ self.rm._sync_var(op.getarg(vable_index))
+ vable = self.fm.loc(op.getarg(vable_index))
else:
vable = imm(0)
self._call(op, [imm(size), vable] +
- [self.loc(arg) for arg in op.args],
+ [self.loc(op.getarg(i)) for i in range(op.numargs())],
guard_not_forced_op=guard_op)
def consider_cond_call_gc_wb(self, op):
assert op.result is None
- loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args)
+ args = op.getarglist()
+ loc_newvalue = self.rm.make_sure_var_in_reg(op.getarg(1), args)
# ^^^ we force loc_newvalue in a reg (unless it's a Const),
# because it will be needed anyway by the following setfield_gc.
# It avoids loading it twice from the memory.
- loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args,
+ loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args,
imm_fine=False)
arglocs = [loc_base, loc_newvalue]
# add eax, ecx and edx as extra "arguments" to ensure they are
@@ -700,7 +714,7 @@
and self.rm.stays_alive(v)):
arglocs.append(reg)
self.PerformDiscard(op, arglocs)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
def _fastpath_malloc(self, op, descr):
assert isinstance(descr, BaseSizeDescr)
@@ -725,15 +739,15 @@
def consider_new(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
- if gc_ll_descr.can_inline_malloc(op.descr):
- self._fastpath_malloc(op, op.descr)
+ if gc_ll_descr.can_inline_malloc(op.getdescr()):
+ self._fastpath_malloc(op, op.getdescr())
else:
- args = gc_ll_descr.args_for_new(op.descr)
+ args = gc_ll_descr.args_for_new(op.getdescr())
arglocs = [imm(x) for x in args]
return self._call(op, arglocs)
def consider_new_with_vtable(self, op):
- classint = op.args[0].getint()
+ classint = op.getarg(0).getint()
descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint)
if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize):
self._fastpath_malloc(op, descrsize)
@@ -742,34 +756,34 @@
else:
args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize)
arglocs = [imm(x) for x in args]
- arglocs.append(self.loc(op.args[0]))
+ arglocs.append(self.loc(op.getarg(0)))
return self._call(op, arglocs)
def consider_newstr(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if gc_ll_descr.get_funcptr_for_newstr is not None:
# framework GC
- loc = self.loc(op.args[0])
+ loc = self.loc(op.getarg(0))
return self._call(op, [loc])
# boehm GC (XXX kill the following code at some point)
ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code)
assert itemsize == 1
- return self._malloc_varsize(ofs_items, ofs, 0, op.args[0],
+ return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0),
op.result)
def consider_newunicode(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if gc_ll_descr.get_funcptr_for_newunicode is not None:
# framework GC
- loc = self.loc(op.args[0])
+ loc = self.loc(op.getarg(0))
return self._call(op, [loc])
# boehm GC (XXX kill the following code at some point)
ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code)
if itemsize == 4:
- return self._malloc_varsize(ofs_items, ofs, 2, op.args[0],
+ return self._malloc_varsize(ofs_items, ofs, 2, op.getarg(0),
op.result)
elif itemsize == 2:
- return self._malloc_varsize(ofs_items, ofs, 1, op.args[0],
+ return self._malloc_varsize(ofs_items, ofs, 1, op.getarg(0),
op.result)
else:
assert False, itemsize
@@ -784,7 +798,7 @@
else:
tempbox = None
other_loc = imm(ofs_items + (v.getint() << scale))
- self._call(ResOperation(rop.NEW, [v], res_v),
+ self._call(ResOperation(rop.NEW, [], res_v),
[other_loc], [v])
loc = self.rm.make_sure_var_in_reg(v, [res_v])
assert self.loc(res_v) == eax
@@ -792,22 +806,22 @@
self.rm.possibly_free_var(v)
if tempbox is not None:
self.rm.possibly_free_var(tempbox)
- self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [], None),
+ self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None),
[eax, imm(ofs_length), imm(WORD), loc])
def consider_new_array(self, op):
gc_ll_descr = self.assembler.cpu.gc_ll_descr
if gc_ll_descr.get_funcptr_for_newarray is not None:
# framework GC
- args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr)
+ args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr())
arglocs = [imm(x) for x in args]
- arglocs.append(self.loc(op.args[0]))
+ arglocs.append(self.loc(op.getarg(0)))
return self._call(op, arglocs)
# boehm GC (XXX kill the following code at some point)
scale_of_field, basesize, ofs_length, _ = (
- self._unpack_arraydescr(op.descr))
+ self._unpack_arraydescr(op.getdescr()))
return self._malloc_varsize(basesize, ofs_length, scale_of_field,
- op.args[0], op.result)
+ op.getarg(0), op.result)
def _unpack_arraydescr(self, arraydescr):
assert isinstance(arraydescr, BaseArrayDescr)
@@ -829,50 +843,54 @@
return imm(ofs), imm(size), ptr
def consider_setfield_gc(self, op):
- ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr)
+ ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.getdescr())
assert isinstance(size_loc, ImmedLoc)
if size_loc.value == 1:
need_lower_byte = True
else:
need_lower_byte = False
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- value_loc = self.make_sure_var_in_reg(op.args[1], op.args,
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ value_loc = self.make_sure_var_in_reg(op.getarg(1), args,
need_lower_byte=need_lower_byte)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars(args)
self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc])
consider_setfield_raw = consider_setfield_gc
def consider_strsetitem(self, op):
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- value_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args,
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=True)
- self.rm.possibly_free_vars(op.args)
+ self.rm.possibly_free_vars_for_op(op)
self.PerformDiscard(op, [base_loc, ofs_loc, value_loc])
consider_unicodesetitem = consider_strsetitem
def consider_setarrayitem_gc(self, op):
- scale, ofs, _, ptr = self._unpack_arraydescr(op.descr)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
+ scale, ofs, _, ptr = self._unpack_arraydescr(op.getdescr())
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
if scale == 0:
need_lower_byte = True
else:
need_lower_byte = False
- value_loc = self.make_sure_var_in_reg(op.args[2], op.args,
+ value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
need_lower_byte=need_lower_byte)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- self.possibly_free_vars(op.args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ self.possibly_free_vars(args)
self.PerformDiscard(op, [base_loc, ofs_loc, value_loc,
imm(scale), imm(ofs)])
consider_setarrayitem_raw = consider_setarrayitem_gc
def consider_getfield_gc(self, op):
- ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- self.rm.possibly_free_vars(op.args)
+ ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr())
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ self.rm.possibly_free_vars(args)
result_loc = self.force_allocate_reg(op.result)
self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc)
@@ -881,10 +899,11 @@
consider_getfield_gc_pure = consider_getfield_gc
def consider_getarrayitem_gc(self, op):
- scale, ofs, _, _ = self._unpack_arraydescr(op.descr)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- self.rm.possibly_free_vars(op.args)
+ scale, ofs, _, _ = self._unpack_arraydescr(op.getdescr())
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.force_allocate_reg(op.result)
self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc)
@@ -893,8 +912,8 @@
def consider_int_is_true(self, op, guard_op):
# doesn't need arg to be in a register
- argloc = self.loc(op.args[0])
- self.rm.possibly_free_var(op.args[0])
+ argloc = self.loc(op.getarg(0))
+ self.rm.possibly_free_var(op.getarg(0))
if guard_op is not None:
self.perform_with_guard(op, guard_op, [argloc], None)
else:
@@ -904,33 +923,36 @@
consider_int_is_zero = consider_int_is_true
def consider_same_as(self, op):
- argloc = self.loc(op.args[0])
- self.possibly_free_var(op.args[0])
+ argloc = self.loc(op.getarg(0))
+ self.possibly_free_var(op.getarg(0))
resloc = self.force_allocate_reg(op.result)
self.Perform(op, [argloc], resloc)
#consider_cast_ptr_to_int = consider_same_as
def consider_strlen(self, op):
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.rm.force_allocate_reg(op.result)
self.Perform(op, [base_loc], result_loc)
consider_unicodelen = consider_strlen
def consider_arraylen_gc(self, op):
- arraydescr = op.descr
+ arraydescr = op.getdescr()
assert isinstance(arraydescr, BaseArrayDescr)
ofs = arraydescr.get_ofs_length(self.translate_support_code)
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.rm.force_allocate_reg(op.result)
self.Perform(op, [base_loc, imm(ofs)], result_loc)
def consider_strgetitem(self, op):
- base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
- ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
- self.rm.possibly_free_vars(op.args)
+ args = op.getarglist()
+ base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+ ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+ self.rm.possibly_free_vars_for_op(op)
result_loc = self.rm.force_allocate_reg(op.result)
self.Perform(op, [base_loc, ofs_loc], result_loc)
@@ -939,7 +961,7 @@
def consider_jump(self, op):
assembler = self.assembler
assert self.jump_target_descr is None
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, LoopToken)
self.jump_target_descr = descr
nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr)
@@ -951,17 +973,20 @@
xmmtmp = X86XMMRegisterManager.all_regs[0]
xmmtmploc = self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp)
# Part about non-floats
- src_locations = [self.loc(arg) for arg in op.args if arg.type != FLOAT]
+ # XXX we don't need a copy, we could just use the original list
+ src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs())
+ if op.getarg(i).type != FLOAT]
assert tmploc not in nonfloatlocs
dst_locations = [loc for loc in nonfloatlocs if loc is not None]
remap_frame_layout(assembler, src_locations, dst_locations, tmploc)
# Part about floats
- src_locations = [self.loc(arg) for arg in op.args if arg.type == FLOAT]
+ src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs())
+ if op.getarg(i).type == FLOAT]
dst_locations = [loc for loc in floatlocs if loc is not None]
remap_frame_layout(assembler, src_locations, dst_locations, xmmtmp)
self.rm.possibly_free_var(box)
self.xrm.possibly_free_var(box1)
- self.possibly_free_vars(op.args)
+ self.possibly_free_vars_for_op(op)
assembler.closing_jump(self.jump_target_descr)
def consider_debug_merge_point(self, op):
@@ -1002,12 +1027,21 @@
def add_none_argument(fn):
return lambda self, op: fn(self, op, None)
+def is_comparison_or_ovf_op(opnum):
+ from pypy.jit.metainterp.resoperation import opclasses, AbstractResOp
+ cls = opclasses[opnum]
+ # hack hack: in theory these are instance methods, but they don't use
+ # any instance field, so we can use a fake object
+ class Fake(cls):
+ pass
+ op = Fake(None)
+ return op.is_comparison() or op.is_ovf()
+
for name, value in RegAlloc.__dict__.iteritems():
if name.startswith('consider_'):
name = name[len('consider_'):]
num = getattr(rop, name.upper())
- if (ResOperation(num, [], None).is_comparison()
- or ResOperation(num, [], None).is_ovf()
+ if (is_comparison_or_ovf_op(num)
or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER):
oplist_with_guard[num] = value
oplist[num] = add_none_argument(value)
Modified: pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/x86/test/test_recompilation.py Thu Sep 23 16:53:32 2010
@@ -47,7 +47,7 @@
finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2)
'''
bridge = self.attach_bridge(ops, loop, -2)
- descr = loop.operations[2].descr
+ descr = loop.operations[2].getdescr()
new = descr._x86_bridge_frame_depth
assert descr._x86_bridge_param_depth == 0
# XXX: Maybe add enough ops to force stack on 64-bit as well?
@@ -114,8 +114,8 @@
assert loop.token._x86_param_depth == 0
# XXX: Maybe add enough ops to force stack on 64-bit as well?
if IS_X86_32:
- assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth
- assert guard_op.descr._x86_bridge_param_depth == 0
+ assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
+ assert guard_op.getdescr()._x86_bridge_param_depth == 0
self.cpu.set_future_value_int(0, 0)
self.cpu.set_future_value_int(1, 0)
self.cpu.set_future_value_int(2, 0)
Modified: pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/x86/test/test_regalloc.py Thu Sep 23 16:53:32 2010
@@ -9,7 +9,7 @@
from pypy.jit.backend.llsupport.descr import GcCache
from pypy.jit.backend.detect_cpu import getcpuclass
from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\
- FloatConstants
+ FloatConstants, is_comparison_or_ovf_op
from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64
from pypy.jit.metainterp.test.oparser import parse
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
@@ -17,6 +17,11 @@
from pypy.rpython.lltypesystem import rclass, rstr
from pypy.jit.backend.x86.rx86 import *
+def test_is_comparison_or_ovf_op():
+ assert not is_comparison_or_ovf_op(rop.INT_ADD)
+ assert is_comparison_or_ovf_op(rop.INT_ADD_OVF)
+ assert is_comparison_or_ovf_op(rop.INT_EQ)
+
CPU = getcpuclass()
class MockGcDescr(GcCache):
def get_funcptr_for_new(self):
@@ -159,8 +164,8 @@
assert guard_op.is_guard()
bridge = self.parse(ops, **kwds)
assert ([box.type for box in bridge.inputargs] ==
- [box.type for box in guard_op.fail_args])
- faildescr = guard_op.descr
+ [box.type for box in guard_op.getfailargs()])
+ faildescr = guard_op.getdescr()
self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations)
return bridge
@@ -607,7 +612,7 @@
'''
bridge = self.attach_bridge(ops, loop, -2)
- assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2)
+ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
self.cpu.set_future_value_int(0, 4)
self.cpu.set_future_value_int(1, 7)
@@ -630,7 +635,7 @@
'''
bridge = self.attach_bridge(ops, loop, -2)
- assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2)
+ assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
self.cpu.set_future_value_int(0, 4)
self.cpu.set_future_value_int(1, 7)
Modified: pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py (original)
+++ pypy/branch/jitffi/pypy/jit/backend/x86/test/test_runner.py Thu Sep 23 16:53:32 2010
@@ -265,7 +265,7 @@
ResOperation(rop.FINISH, [ConstInt(0)], None,
descr=BasicFailDescr()),
]
- ops[-2].fail_args = [i1]
+ ops[-2].setfailargs([i1])
looptoken = LoopToken()
self.cpu.compile_loop([b], ops, looptoken)
if op == rop.INT_IS_TRUE:
@@ -314,7 +314,7 @@
ResOperation(rop.FINISH, [ConstInt(0)], None,
descr=BasicFailDescr()),
]
- ops[-2].fail_args = [i1]
+ ops[-2].setfailargs([i1])
inputargs = [i for i in (a, b) if isinstance(i, Box)]
looptoken = LoopToken()
self.cpu.compile_loop(inputargs, ops, looptoken)
@@ -353,7 +353,7 @@
ResOperation(rop.JUMP, [i1], None, descr=looptoken),
]
inputargs = [i0]
- operations[3].fail_args = [i1]
+ operations[3].setfailargs([i1])
self.cpu.compile_loop(inputargs, operations, looptoken)
name, loopaddress, loopsize = agent.functions[0]
assert name == "Loop # 0: hello"
@@ -368,7 +368,7 @@
ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye")], None),
ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
]
- bridge[1].fail_args = [i1b]
+ bridge[1].setfailargs([i1b])
self.cpu.compile_bridge(faildescr1, [i1b], bridge)
name, address, size = agent.functions[1]
@@ -462,7 +462,7 @@
cmp_result = BoxInt()
ops.append(ResOperation(float_op, args, cmp_result))
ops.append(ResOperation(guard_op, [cmp_result], None, descr=BasicFailDescr()))
- ops[-1].fail_args = [failed]
+ ops[-1].setfailargs([failed])
ops.append(ResOperation(rop.FINISH, [finished], None, descr=BasicFailDescr()))
Modified: pypy/branch/jitffi/pypy/jit/metainterp/compile.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/compile.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/compile.py Thu Sep 23 16:53:32 2010
@@ -51,7 +51,7 @@
def compile_new_loop(metainterp, old_loop_tokens, greenkey, start):
"""Try to compile a new loop by closing the current history back
to the first operation.
- """
+ """
history = metainterp.history
loop = create_empty_loop(metainterp)
loop.greenkey = greenkey
@@ -65,7 +65,7 @@
jitdriver_sd = metainterp.jitdriver_sd
loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd)
loop.token = loop_token
- loop.operations[-1].descr = loop_token # patch the target of the JUMP
+ loop.operations[-1].setdescr(loop_token) # patch the target of the JUMP
try:
old_loop_token = jitdriver_sd.warmstate.optimize_loop(
metainterp_sd, old_loop_tokens, loop)
@@ -133,7 +133,7 @@
metainterp_sd.profiler.end_backend()
if not we_are_translated():
metainterp_sd.stats.compiled()
- metainterp_sd.log("compiled new bridge")
+ metainterp_sd.log("compiled new bridge")
# ____________________________________________________________
@@ -177,7 +177,7 @@
class TerminatingLoopToken(LoopToken):
terminating = True
-
+
def __init__(self, nargs, finishdescr):
self.specnodes = [prebuiltNotSpecNode]*nargs
self.finishdescr = finishdescr
@@ -233,14 +233,14 @@
self.metainterp_sd = metainterp_sd
def store_final_boxes(self, guard_op, boxes):
- guard_op.fail_args = boxes
- self.guard_opnum = guard_op.opnum
+ guard_op.setfailargs(boxes)
+ self.guard_opnum = guard_op.getopnum()
def make_a_counter_per_value(self, guard_value_op):
- assert guard_value_op.opnum == rop.GUARD_VALUE
- box = guard_value_op.args[0]
+ assert guard_value_op.getopnum() == rop.GUARD_VALUE
+ box = guard_value_op.getarg(0)
try:
- i = guard_value_op.fail_args.index(box)
+ i = guard_value_op.getfailargs().index(box)
except ValueError:
return # xxx probably very rare
else:
@@ -508,7 +508,7 @@
def compile_new_bridge(metainterp, old_loop_tokens, resumekey):
"""Try to compile a new bridge leading from the beginning of the history
to some existing place.
- """
+ """
# The history contains new operations to attach as the code for the
# failure of 'resumekey.guard_op'.
#
@@ -540,13 +540,14 @@
op = new_loop.operations[-1]
if not isinstance(target_loop_token, TerminatingLoopToken):
# normal case
- op.descr = target_loop_token # patch the jump target
+ op.setdescr(target_loop_token) # patch the jump target
else:
# The target_loop_token is a pseudo loop token,
# e.g. loop_tokens_done_with_this_frame_void[0]
# Replace the operation with the real operation we want, i.e. a FINISH
descr = target_loop_token.finishdescr
- new_op = ResOperation(rop.FINISH, op.args, None, descr=descr)
+ args = op.getarglist()
+ new_op = ResOperation(rop.FINISH, args, None, descr=descr)
new_loop.operations[-1] = new_op
# ____________________________________________________________
@@ -597,6 +598,6 @@
ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr),
ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken)
]
- operations[1].fail_args = []
+ operations[1].setfailargs([])
cpu.compile_loop(inputargs, operations, loop_token)
return loop_token
Modified: pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/graphpage.py Thu Sep 23 16:53:32 2010
@@ -17,13 +17,13 @@
for graph, highlight in graphs:
for op in graph.get_operations():
if is_interesting_guard(op):
- graphs.append((SubGraph(op.descr._debug_suboperations),
+ graphs.append((SubGraph(op.getdescr()._debug_suboperations),
highlight))
graphpage = ResOpGraphPage(graphs, errmsg)
graphpage.display()
def is_interesting_guard(op):
- return hasattr(op.descr, '_debug_suboperations')
+ return hasattr(op.getdescr(), '_debug_suboperations')
class ResOpGraphPage(GraphPage):
@@ -76,7 +76,7 @@
for i, op in enumerate(graph.get_operations()):
if is_interesting_guard(op):
self.mark_starter(graphindex, i+1)
- if op.opnum == rop.DEBUG_MERGE_POINT:
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
if not last_was_mergepoint:
last_was_mergepoint = True
self.mark_starter(graphindex, i)
@@ -155,7 +155,7 @@
op = operations[opindex]
lines.append(repr(op))
if is_interesting_guard(op):
- tgt = op.descr._debug_suboperations[0]
+ tgt = op.getdescr()._debug_suboperations[0]
tgt_g, tgt_i = self.all_operations[tgt]
self.genedge((graphindex, opstartindex),
(tgt_g, tgt_i),
@@ -167,8 +167,8 @@
self.genedge((graphindex, opstartindex),
(graphindex, opindex))
break
- if op.opnum == rop.JUMP:
- tgt = op.descr
+ if op.getopnum() == rop.JUMP:
+ tgt = op.getdescr()
tgt_g = -1
if tgt is None:
tgt_g = graphindex
@@ -191,7 +191,9 @@
def getlinks(self):
boxes = {}
for op in self.all_operations:
- for box in op.args + [op.result]:
+ args = op.getarglist()
+ args.append(op.result)
+ for box in args:
if getattr(box, 'is_box', False):
boxes[box] = True
links = {}
Modified: pypy/branch/jitffi/pypy/jit/metainterp/history.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/history.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/history.py Thu Sep 23 16:53:32 2010
@@ -532,7 +532,7 @@
class BoxFloat(Box):
type = FLOAT
_attrs_ = ('value',)
-
+
def __init__(self, floatval=0.0):
assert isinstance(floatval, float)
self.value = floatval
@@ -759,33 +759,34 @@
assert len(seen) == len(inputargs), (
"duplicate Box in the Loop.inputargs")
TreeLoop.check_consistency_of_branch(operations, seen)
-
+
@staticmethod
def check_consistency_of_branch(operations, seen):
"NOT_RPYTHON"
for op in operations:
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
if isinstance(box, Box):
assert box in seen
if op.is_guard():
- assert op.descr is not None
- if hasattr(op.descr, '_debug_suboperations'):
- ops = op.descr._debug_suboperations
+ assert op.getdescr() is not None
+ if hasattr(op.getdescr(), '_debug_suboperations'):
+ ops = op.getdescr()._debug_suboperations
TreeLoop.check_consistency_of_branch(ops, seen.copy())
- for box in op.fail_args or []:
+ for box in op.getfailargs() or []:
if box is not None:
assert isinstance(box, Box)
assert box in seen
else:
- assert op.fail_args is None
+ assert op.getfailargs() is None
box = op.result
if box is not None:
assert isinstance(box, Box)
assert box not in seen
seen[box] = True
assert operations[-1].is_final()
- if operations[-1].opnum == rop.JUMP:
- target = operations[-1].descr
+ if operations[-1].getopnum() == rop.JUMP:
+ target = operations[-1].getdescr()
if target is not None:
assert isinstance(target, LoopToken)
@@ -793,7 +794,8 @@
# RPython-friendly
print '%r: inputargs =' % self, self._dump_args(self.inputargs)
for op in self.operations:
- print '\t', op.getopname(), self._dump_args(op.args), \
+ args = op.getarglist()
+ print '\t', op.getopname(), self._dump_args(args), \
self._dump_box(op.result)
def _dump_args(self, boxes):
@@ -809,14 +811,14 @@
return '<%s>' % (self.name,)
def _list_all_operations(result, operations, omit_finish=True):
- if omit_finish and operations[-1].opnum == rop.FINISH:
+ if omit_finish and operations[-1].getopnum() == rop.FINISH:
# xxx obscure
return
result.extend(operations)
for op in operations:
- if op.is_guard() and op.descr:
- if hasattr(op.descr, '_debug_suboperations'):
- ops = op.descr._debug_suboperations
+ if op.is_guard() and op.getdescr():
+ if hasattr(op.getdescr(), '_debug_suboperations'):
+ ops = op.getdescr()._debug_suboperations
_list_all_operations(result, ops, omit_finish)
# ____________________________________________________________
@@ -885,7 +887,7 @@
self.aborted_count += 1
def entered(self):
- self.enter_count += 1
+ self.enter_count += 1
def compiled(self):
self.compiled_count += 1
@@ -898,7 +900,7 @@
def add_new_loop(self, loop):
self.loops.append(loop)
-
+
# test read interface
def get_all_loops(self):
Modified: pypy/branch/jitffi/pypy/jit/metainterp/logger.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/logger.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/logger.py Thu Sep 23 16:53:32 2010
@@ -79,27 +79,27 @@
debug_print('[' + args + ']')
for i in range(len(operations)):
op = operations[i]
- if op.opnum == rop.DEBUG_MERGE_POINT:
- loc = op.args[0]._get_str()
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
+ loc = op.getarg(0)._get_str()
debug_print("debug_merge_point('%s')" % (loc,))
continue
- args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args])
+ args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())])
if op.result is not None:
res = self.repr_of_arg(memo, op.result) + " = "
else:
res = ""
is_guard = op.is_guard()
- if op.descr is not None:
- descr = op.descr
+ if op.getdescr() is not None:
+ descr = op.getdescr()
if is_guard and self.guard_number:
index = self.metainterp_sd.cpu.get_fail_descr_number(descr)
r = "<Guard%d>" % index
else:
r = self.repr_of_descr(descr)
args += ', descr=' + r
- if is_guard and op.fail_args is not None:
+ if is_guard and op.getfailargs() is not None:
fail_args = ' [' + ", ".join([self.repr_of_arg(memo, arg)
- for arg in op.fail_args]) + ']'
+ for arg in op.getfailargs()]) + ']'
else:
fail_args = ''
debug_print(res + op.getopname() +
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimize.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimize.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimize.py Thu Sep 23 16:53:32 2010
@@ -43,7 +43,7 @@
finder.find_nodes_bridge(bridge)
for old_loop_token in old_loop_tokens:
if finder.bridge_matches(old_loop_token.specnodes):
- bridge.operations[-1].descr = old_loop_token # patch jump target
+ bridge.operations[-1].setdescr(old_loop_token) # patch jump target
optimize_bridge_1(metainterp_sd, bridge)
return old_loop_token
return None
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimizefindnode.py Thu Sep 23 16:53:32 2010
@@ -144,7 +144,7 @@
def find_nodes(self, operations):
for op in operations:
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in find_nodes_ops:
if opnum == value:
func(self, op)
@@ -154,18 +154,20 @@
def find_nodes_default(self, op):
if op.is_always_pure():
- for arg in op.args:
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if self.get_constant_box(arg) is None:
break
else:
# all constant arguments: we can constant-fold
- argboxes = [self.get_constant_box(arg) for arg in op.args]
+ argboxes = [self.get_constant_box(op.getarg(i))
+ for i in range(op.numargs())]
resbox = execute_nonspec(self.cpu, None,
- op.opnum, argboxes, op.descr)
+ op.getopnum(), argboxes, op.getdescr())
self.set_constant_node(op.result, resbox.constbox())
# default case: mark the arguments as escaping
- for box in op.args:
- self.getnode(box).mark_escaped()
+ for i in range(op.numargs()):
+ self.getnode(op.getarg(i)).mark_escaped()
def find_nodes_no_escape(self, op):
pass # for operations that don't escape their arguments
@@ -178,53 +180,53 @@
def find_nodes_NEW_WITH_VTABLE(self, op):
instnode = InstanceNode()
- box = op.args[0]
+ box = op.getarg(0)
assert isinstance(box, Const)
instnode.knownclsbox = box
self.nodes[op.result] = instnode
def find_nodes_NEW(self, op):
instnode = InstanceNode()
- instnode.structdescr = op.descr
+ instnode.structdescr = op.getdescr()
self.nodes[op.result] = instnode
def find_nodes_NEW_ARRAY(self, op):
- lengthbox = op.args[0]
+ lengthbox = op.getarg(0)
lengthbox = self.get_constant_box(lengthbox)
if lengthbox is None:
return # var-sized arrays are not virtual
arraynode = InstanceNode()
arraynode.arraysize = lengthbox.getint()
- arraynode.arraydescr = op.descr
+ arraynode.arraydescr = op.getdescr()
self.nodes[op.result] = arraynode
def find_nodes_ARRAYLEN_GC(self, op):
- arraynode = self.getnode(op.args[0])
+ arraynode = self.getnode(op.getarg(0))
if arraynode.arraydescr is not None:
resbox = ConstInt(arraynode.arraysize)
self.set_constant_node(op.result, resbox)
def find_nodes_GUARD_CLASS(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.fromstart: # only useful (and safe) in this case
- box = op.args[1]
+ box = op.getarg(1)
assert isinstance(box, Const)
instnode.knownclsbox = box
def find_nodes_GUARD_VALUE(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.fromstart: # only useful (and safe) in this case
- box = op.args[1]
+ box = op.getarg(1)
assert isinstance(box, Const)
instnode.knownvaluebox = box
def find_nodes_SETFIELD_GC(self, op):
- instnode = self.getnode(op.args[0])
- fieldnode = self.getnode(op.args[1])
+ instnode = self.getnode(op.getarg(0))
+ fieldnode = self.getnode(op.getarg(1))
if instnode.escaped:
fieldnode.mark_escaped()
return # nothing to be gained from tracking the field
- field = op.descr
+ field = op.getdescr()
assert isinstance(field, AbstractValue)
if instnode.curfields is None:
instnode.curfields = {}
@@ -232,10 +234,10 @@
instnode.add_escape_dependency(fieldnode)
def find_nodes_GETFIELD_GC(self, op):
- instnode = self.getnode(op.args[0])
+ instnode = self.getnode(op.getarg(0))
if instnode.escaped:
return # nothing to be gained from tracking the field
- field = op.descr
+ field = op.getdescr()
assert isinstance(field, AbstractValue)
if instnode.curfields is not None and field in instnode.curfields:
fieldnode = instnode.curfields[field]
@@ -254,13 +256,13 @@
find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC
def find_nodes_SETARRAYITEM_GC(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
indexbox = self.get_constant_box(indexbox)
if indexbox is None:
self.find_nodes_default(op) # not a Const index
return
- arraynode = self.getnode(op.args[0])
- itemnode = self.getnode(op.args[2])
+ arraynode = self.getnode(op.getarg(0))
+ itemnode = self.getnode(op.getarg(2))
if arraynode.escaped:
itemnode.mark_escaped()
return # nothing to be gained from tracking the item
@@ -270,12 +272,12 @@
arraynode.add_escape_dependency(itemnode)
def find_nodes_GETARRAYITEM_GC(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
indexbox = self.get_constant_box(indexbox)
if indexbox is None:
self.find_nodes_default(op) # not a Const index
return
- arraynode = self.getnode(op.args[0])
+ arraynode = self.getnode(op.getarg(0))
if arraynode.escaped:
return # nothing to be gained from tracking the item
index = indexbox.getint()
@@ -298,13 +300,15 @@
def find_nodes_JUMP(self, op):
# only set up the 'unique' field of the InstanceNodes;
# real handling comes later (build_result_specnodes() for loops).
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
self.getnode(box).set_unique_nodes()
def find_nodes_FINISH(self, op):
# only for bridges, and only for the ones that end in a 'return'
# or 'raise'; all other cases end with a JUMP.
- for box in op.args:
+ for i in range(op.numargs()):
+ box = op.getarg(i)
self.getnode(box).unique = UNIQUE_NO
find_nodes_ops = _findall(NodeFinder, 'find_nodes_')
@@ -324,7 +328,7 @@
def show(self):
from pypy.jit.metainterp.viewnode import viewnodes, view
op = self._loop.operations[-1]
- assert op.opnum == rop.JUMP
+ assert op.getopnum() == rop.JUMP
exitnodes = [self.getnode(arg) for arg in op.args]
viewnodes(self.inputnodes, exitnodes)
if hasattr(self._loop.token, "specnodes"):
@@ -343,14 +347,14 @@
# Build the list of specnodes based on the result
# computed by NodeFinder.find_nodes().
op = loop.operations[-1]
- assert op.opnum == rop.JUMP
- assert len(self.inputnodes) == len(op.args)
+ assert op.getopnum() == rop.JUMP
+ assert len(self.inputnodes) == op.numargs()
while True:
self.restart_needed = False
specnodes = []
- for i in range(len(op.args)):
+ for i in range(op.numargs()):
inputnode = self.inputnodes[i]
- exitnode = self.getnode(op.args[i])
+ exitnode = self.getnode(op.getarg(i))
specnodes.append(self.intersect(inputnode, exitnode))
if not self.restart_needed:
break
@@ -562,9 +566,9 @@
def bridge_matches(self, nextloop_specnodes):
jump_op = self.jump_op
- assert len(jump_op.args) == len(nextloop_specnodes)
+ assert jump_op.numargs() == len(nextloop_specnodes)
for i in range(len(nextloop_specnodes)):
- exitnode = self.getnode(jump_op.args[i])
+ exitnode = self.getnode(jump_op.getarg(i))
if not nextloop_specnodes[i].matches_instance_node(exitnode):
return False
return True
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/__init__.py Thu Sep 23 16:53:32 2010
@@ -1,8 +1,8 @@
-from optimizer import Optimizer
-from rewrite import OptRewrite
-from intbounds import OptIntBounds
-from virtualize import OptVirtualize
-from heap import OptHeap
+from pypy.jit.metainterp.optimizeopt.optimizer import Optimizer
+from pypy.jit.metainterp.optimizeopt.rewrite import OptRewrite
+from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds
+from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize
+from pypy.jit.metainterp.optimizeopt.heap import OptHeap
from ccall import OptCCall
def optimize_loop_1(metainterp_sd, loop, virtuals=True):
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/heap.py Thu Sep 23 16:53:32 2010
@@ -2,7 +2,7 @@
from pypy.jit.metainterp.resoperation import rop, ResOperation
from pypy.rlib.objectmodel import we_are_translated
-from optimizer import Optimization
+from pypy.jit.metainterp.optimizeopt.optimizer import Optimization
class CachedArrayItems(object):
def __init__(self):
@@ -45,7 +45,7 @@
op = self.lazy_setfields.get(descr, None)
if op is None:
return None
- return self.getvalue(op.args[1])
+ return self.getvalue(op.getarg(1))
return d.get(value, None)
def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False):
@@ -105,7 +105,7 @@
if op.is_guard():
self.optimizer.pendingfields = self.force_lazy_setfields_for_guard()
return
- opnum = op.opnum
+ opnum = op.getopnum()
if (opnum == rop.SETFIELD_GC or
opnum == rop.SETARRAYITEM_GC or
opnum == rop.DEBUG_MERGE_POINT):
@@ -117,7 +117,7 @@
if opnum == rop.CALL_ASSEMBLER:
effectinfo = None
else:
- effectinfo = op.descr.get_extra_info()
+ effectinfo = op.getdescr().get_extra_info()
if effectinfo is not None:
# XXX we can get the wrong complexity here, if the lists
# XXX stored on effectinfo are large
@@ -142,7 +142,7 @@
return
self.force_all_lazy_setfields()
elif op.is_final() or (not we_are_translated() and
- op.opnum < 0): # escape() operations
+ op.getopnum() < 0): # escape() operations
self.force_all_lazy_setfields()
self.clean_caches()
@@ -166,10 +166,11 @@
# - is_comparison() for cases like "int_eq/setfield_gc/guard_true"
# - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced"
# - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow"
- opnum = prevop.opnum
+ opnum = prevop.getopnum()
+ lastop_args = lastop.getarglist()
if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE
or prevop.is_ovf())
- and prevop.result not in lastop.args):
+ and prevop.result not in lastop_args):
newoperations[-2] = lastop
newoperations[-1] = prevop
@@ -189,9 +190,9 @@
# the only really interesting case that we need to handle in the
# guards' resume data is that of a virtual object that is stored
# into a field of a non-virtual object.
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
assert not value.is_virtual() # it must be a non-virtual
- fieldvalue = self.getvalue(op.args[1])
+ fieldvalue = self.getvalue(op.getarg(1))
if fieldvalue.is_virtual():
# this is the case that we leave to resume.py
pendingfields.append((descr, value.box,
@@ -202,20 +203,20 @@
def force_lazy_setfield_if_necessary(self, op, value, write=False):
try:
- op1 = self.lazy_setfields[op.descr]
+ op1 = self.lazy_setfields[op.getdescr()]
except KeyError:
if write:
- self.lazy_setfields_descrs.append(op.descr)
+ self.lazy_setfields_descrs.append(op.getdescr())
else:
- if self.getvalue(op1.args[0]) is not value:
- self.force_lazy_setfield(op.descr)
+ if self.getvalue(op1.getarg(0)) is not value:
+ self.force_lazy_setfield(op.getdescr())
def optimize_GETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
self.force_lazy_setfield_if_necessary(op, value)
# check if the field was read from another getfield_gc just before
# or has been written to recently
- fieldvalue = self.read_cached_field(op.descr, value)
+ fieldvalue = self.read_cached_field(op.getdescr(), value)
if fieldvalue is not None:
self.make_equal_to(op.result, fieldvalue)
return
@@ -225,38 +226,38 @@
self.emit_operation(op) # FIXME: These might need constant propagation?
# then remember the result of reading the field
fieldvalue = self.getvalue(op.result)
- self.cache_field_value(op.descr, value, fieldvalue)
+ self.cache_field_value(op.getdescr(), value, fieldvalue)
def optimize_SETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[1])
+ value = self.getvalue(op.getarg(0))
+ fieldvalue = self.getvalue(op.getarg(1))
self.force_lazy_setfield_if_necessary(op, value, write=True)
- self.lazy_setfields[op.descr] = op
+ self.lazy_setfields[op.getdescr()] = op
# remember the result of future reads of the field
- self.cache_field_value(op.descr, value, fieldvalue, write=True)
+ self.cache_field_value(op.getdescr(), value, fieldvalue, write=True)
def optimize_GETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
- indexvalue = self.getvalue(op.args[1])
- fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue)
+ value = self.getvalue(op.getarg(0))
+ indexvalue = self.getvalue(op.getarg(1))
+ fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue)
if fieldvalue is not None:
self.make_equal_to(op.result, fieldvalue)
return
###self.optimizer.optimize_default(op)
self.emit_operation(op) # FIXME: These might need constant propagation?
fieldvalue = self.getvalue(op.result)
- self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue)
+ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue)
def optimize_SETARRAYITEM_GC(self, op):
self.emit_operation(op)
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[2])
- indexvalue = self.getvalue(op.args[1])
- self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue,
+ value = self.getvalue(op.getarg(0))
+ fieldvalue = self.getvalue(op.getarg(2))
+ indexvalue = self.getvalue(op.getarg(1))
+ self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue,
write=True)
def propagate_forward(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/intbounds.py Thu Sep 23 16:53:32 2010
@@ -1,6 +1,7 @@
-from optimizer import Optimization, CONST_1, CONST_0
+from pypy.jit.metainterp.optimizeopt.optimizer import Optimization, CONST_1, CONST_0
from pypy.jit.metainterp.optimizeutil import _findall
-from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded
+from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded, \
+ IntLowerBound
from pypy.jit.metainterp.history import Const, ConstInt
from pypy.jit.metainterp.resoperation import rop, ResOperation
@@ -9,7 +10,7 @@
remove redundant guards"""
def propagate_forward(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
@@ -25,29 +26,29 @@
b = v.intbound
if b.has_lower and b.has_upper and b.lower == b.upper:
v.make_constant(ConstInt(b.lower))
-
+
try:
op = self.optimizer.producer[box]
except KeyError:
return
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in propagate_bounds_ops:
if opnum == value:
func(self, op)
break
-
+
def optimize_GUARD_TRUE(self, op):
self.emit_operation(op)
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
optimize_GUARD_FALSE = optimize_GUARD_TRUE
optimize_GUARD_VALUE = optimize_GUARD_TRUE
def optimize_INT_AND(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
-
+
r = self.getvalue(op.result)
if v2.is_constant():
val = v2.box.getint()
@@ -57,76 +58,76 @@
val = v1.box.getint()
if val >= 0:
r.intbound.intersect(IntBound(0,val))
-
+
def optimize_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.sub_bound(v2.intbound))
-
+
def optimize_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.add_bound(v2.intbound))
def optimize_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(v1.intbound.mul_bound(v2.intbound))
def optimize_INT_ADD_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.add_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
- self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
# Transform into INT_ADD and remove guard
- op.opnum = rop.INT_ADD
+ op = op.copy_and_change(rop.INT_ADD)
self.skip_nextop()
- self.optimize_INT_ADD(op)
+ self.optimize_INT_ADD(op) # emit the op
else:
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
-
+
def optimize_INT_SUB_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.sub_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
- self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
# Transform into INT_SUB and remove guard
- op.opnum = rop.INT_SUB
+ op = op.copy_and_change(rop.INT_SUB)
self.skip_nextop()
- self.optimize_INT_SUB(op)
+ self.optimize_INT_SUB(op) # emit the op
else:
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
def optimize_INT_MUL_OVF(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
resbound = v1.intbound.mul_bound(v2.intbound)
if resbound.has_lower and resbound.has_upper and \
- self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+ self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
# Transform into INT_MUL and remove guard
- op.opnum = rop.INT_MUL
+ op = op.copy_and_change(rop.INT_MUL)
self.skip_nextop()
- self.optimize_INT_MUL(op)
+ self.optimize_INT_MUL(op) # emit the op
else:
self.emit_operation(op)
r = self.getvalue(op.result)
r.intbound.intersect(resbound)
-
+
def optimize_INT_LT(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_lt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_ge(v2.intbound):
@@ -135,8 +136,8 @@
self.emit_operation(op)
def optimize_INT_GT(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_le(v2.intbound):
@@ -145,8 +146,8 @@
self.emit_operation(op)
def optimize_INT_LE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_le(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_gt(v2.intbound):
@@ -155,8 +156,8 @@
self.emit_operation(op)
def optimize_INT_GE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_ge(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_lt(v2.intbound):
@@ -165,134 +166,140 @@
self.emit_operation(op)
def optimize_INT_EQ(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 0)
elif v1.intbound.known_lt(v2.intbound):
self.make_constant_int(op.result, 0)
- else:
+ else:
self.emit_operation(op)
-
+
def optimize_INT_NE(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.known_gt(v2.intbound):
self.make_constant_int(op.result, 1)
elif v1.intbound.known_lt(v2.intbound):
self.make_constant_int(op.result, 1)
- else:
+ else:
self.emit_operation(op)
-
- def make_int_lt(self, args):
- v1 = self.getvalue(args[0])
- v2 = self.getvalue(args[1])
+
+ def optimize_ARRAYLEN_GC(self, op):
+ self.emit_operation(op)
+ v1 = self.getvalue(op.result)
+ v1.intbound.make_ge(IntLowerBound(0))
+
+ optimize_STRLEN = optimize_ARRAYLEN_GC
+
+ def make_int_lt(self, box1, box2):
+ v1 = self.getvalue(box1)
+ v2 = self.getvalue(box2)
if v1.intbound.make_lt(v2.intbound):
- self.propagate_bounds_backward(args[0])
+ self.propagate_bounds_backward(box1)
if v2.intbound.make_gt(v1.intbound):
- self.propagate_bounds_backward(args[1])
-
+ self.propagate_bounds_backward(box2)
- def make_int_le(self, args):
- v1 = self.getvalue(args[0])
- v2 = self.getvalue(args[1])
+ def make_int_le(self, box1, box2):
+ v1 = self.getvalue(box1)
+ v2 = self.getvalue(box2)
if v1.intbound.make_le(v2.intbound):
- self.propagate_bounds_backward(args[0])
+ self.propagate_bounds_backward(box1)
if v2.intbound.make_ge(v1.intbound):
- self.propagate_bounds_backward(args[1])
+ self.propagate_bounds_backward(box2)
- def make_int_gt(self, args):
- self.make_int_lt([args[1], args[0]])
+ def make_int_gt(self, box1, box2):
+ self.make_int_lt(box2, box1)
- def make_int_ge(self, args):
- self.make_int_le([args[1], args[0]])
+ def make_int_ge(self, box1, box2):
+ self.make_int_le(box2, box1)
def propagate_bounds_INT_LT(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_lt(op.args)
+ self.make_int_lt(op.getarg(0), op.getarg(1))
else:
- self.make_int_ge(op.args)
+ self.make_int_ge(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_GT(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_gt(op.args)
+ self.make_int_gt(op.getarg(0), op.getarg(1))
else:
- self.make_int_le(op.args)
+ self.make_int_le(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_LE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_le(op.args)
+ self.make_int_le(op.getarg(0), op.getarg(1))
else:
- self.make_int_gt(op.args)
+ self.make_int_gt(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_GE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- self.make_int_ge(op.args)
+ self.make_int_ge(op.getarg(0), op.getarg(1))
else:
- self.make_int_lt(op.args)
+ self.make_int_lt(op.getarg(0), op.getarg(1))
def propagate_bounds_INT_EQ(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_1):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.intersect(v2.intbound):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
if v2.intbound.intersect(v1.intbound):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_NE(self, op):
r = self.getvalue(op.result)
if r.is_constant():
if r.box.same_constant(CONST_0):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.intbound.intersect(v2.intbound):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
if v2.intbound.intersect(v1.intbound):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.sub_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.sub_bound(v1.intbound)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.add_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.sub_bound(v1.intbound).mul(-1)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
def propagate_bounds_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
r = self.getvalue(op.result)
b = r.intbound.div_bound(v2.intbound)
if v1.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[0])
+ self.propagate_bounds_backward(op.getarg(0))
b = r.intbound.div_bound(v1.intbound)
if v2.intbound.intersect(b):
- self.propagate_bounds_backward(op.args[1])
+ self.propagate_bounds_backward(op.getarg(1))
propagate_bounds_INT_ADD_OVF = propagate_bounds_INT_ADD
propagate_bounds_INT_SUB_OVF = propagate_bounds_INT_SUB
@@ -300,4 +307,3 @@
optimize_ops = _findall(OptIntBounds, 'optimize_')
propagate_bounds_ops = _findall(OptIntBounds, 'propagate_bounds_')
-
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/optimizer.py Thu Sep 23 16:53:32 2010
@@ -11,17 +11,17 @@
from pypy.jit.metainterp.typesystem import llhelper, oohelper
from pypy.rpython.lltypesystem import lltype
from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int
-from intutils import IntBound, IntUnbounded
+from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded
LEVEL_UNKNOWN = '\x00'
LEVEL_NONNULL = '\x01'
LEVEL_KNOWNCLASS = '\x02' # might also mean KNOWNARRAYDESCR, for arrays
-LEVEL_CONSTANT = '\x03'
+LEVEL_CONSTANT = '\x03'
import sys
MAXINT = sys.maxint
MININT = -sys.maxint - 1
-
+
class OptValue(object):
_attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound')
last_guard_index = -1
@@ -36,7 +36,7 @@
if isinstance(box, Const):
self.make_constant(box)
# invariant: box is a Const if and only if level == LEVEL_CONSTANT
-
+
def force_box(self):
return self.box
@@ -171,7 +171,7 @@
def new_const_item(self, arraydescr):
return self.optimizer.new_const_item(arraydescr)
-
+
def pure(self, opnum, args, result):
op = ResOperation(opnum, args, result)
self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op
@@ -184,10 +184,10 @@
def setup(self, virtuals):
pass
-
+
class Optimizer(Optimization):
- def __init__(self, metainterp_sd, loop, optimizations=[], virtuals=True):
+ def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True):
self.metainterp_sd = metainterp_sd
self.cpu = metainterp_sd.cpu
self.loop = loop
@@ -199,10 +199,8 @@
self.pure_operations = args_dict()
self.producer = {}
self.pendingfields = []
-
- if len(optimizations) == 0:
- self.first_optimization = self
- else:
+
+ if optimizations:
self.first_optimization = optimizations[0]
for i in range(1, len(optimizations)):
optimizations[i - 1].next_optimization = optimizations[i]
@@ -210,6 +208,8 @@
for o in optimizations:
o.optimizer = self
o.setup(virtuals)
+ else:
+ self.first_optimization = self
def forget_numberings(self, virtualbox):
self.metainterp_sd.profiler.count(jitprof.OPT_FORCINGS)
@@ -308,7 +308,7 @@
def propagate_forward(self, op):
self.producer[op.result] = op
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
@@ -323,15 +323,15 @@
self._emit_operation(op)
def _emit_operation(self, op):
- for i in range(len(op.args)):
- arg = op.args[i]
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if arg in self.values:
box = self.values[arg].force_box()
- op.args[i] = box
+ op.setarg(i, box)
self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
if op.is_guard():
self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
- self.store_final_boxes_in_guard(op)
+ op = self.store_final_boxes_in_guard(op)
elif op.can_raise():
self.exception_might_have_happened = True
elif op.returns_bool_result():
@@ -340,7 +340,7 @@
def store_final_boxes_in_guard(self, op):
###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard()
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo)
newboxes = modifier.finish(self.values, self.pendingfields)
@@ -348,49 +348,54 @@
compile.giveup()
descr.store_final_boxes(op, newboxes)
#
- if op.opnum == rop.GUARD_VALUE:
- if self.getvalue(op.args[0]) in self.bool_boxes:
+ if op.getopnum() == rop.GUARD_VALUE:
+ if self.getvalue(op.getarg(0)) in self.bool_boxes:
# Hack: turn guard_value(bool) into guard_true/guard_false.
- # This is done after the operation is emitted, to let
- # store_final_boxes_in_guard set the guard_opnum field
- # of the descr to the original rop.GUARD_VALUE.
- constvalue = op.args[1].getint()
+ # This is done after the operation is emitted to let
+ # store_final_boxes_in_guard set the guard_opnum field of the
+ # descr to the original rop.GUARD_VALUE.
+ constvalue = op.getarg(1).getint()
if constvalue == 0:
opnum = rop.GUARD_FALSE
elif constvalue == 1:
opnum = rop.GUARD_TRUE
else:
raise AssertionError("uh?")
- op.opnum = opnum
- op.args = [op.args[0]]
+ newop = ResOperation(opnum, [op.getarg(0)], op.result, descr)
+ newop.setfailargs(op.getfailargs())
+ return newop
else:
# a real GUARD_VALUE. Make it use one counter per value.
descr.make_a_counter_per_value(op)
+ return op
def make_args_key(self, op):
- args = op.args[:]
- for i in range(len(args)):
- arg = args[i]
+ args = []
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if arg in self.values:
- args[i] = self.values[arg].get_key_box()
- args.append(ConstInt(op.opnum))
+ args.append(self.values[arg].get_key_box())
+ else:
+ args.append(arg)
+ args.append(ConstInt(op.getopnum()))
return args
-
+
def optimize_default(self, op):
canfold = op.is_always_pure()
is_ovf = op.is_ovf()
if is_ovf:
nextop = self.loop.operations[self.i + 1]
- canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW
+ canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW
if canfold:
- for arg in op.args:
- if self.get_constant_box(arg) is None:
+ for i in range(op.numargs()):
+ if self.get_constant_box(op.getarg(i)) is None:
break
else:
# all constant arguments: constant-fold away
- argboxes = [self.get_constant_box(arg) for arg in op.args]
+ argboxes = [self.get_constant_box(op.getarg(i))
+ for i in range(op.numargs())]
resbox = execute_nonspec(self.cpu, None,
- op.opnum, argboxes, op.descr)
+ op.getopnum(), argboxes, op.getdescr())
self.make_constant(op.result, resbox.constbox())
if is_ovf:
self.i += 1 # skip next operation, it is the unneeded guard
@@ -399,8 +404,8 @@
# did we do the exact same operation already?
args = self.make_args_key(op)
oldop = self.pure_operations.get(args, None)
- if oldop is not None and oldop.descr is op.descr:
- assert oldop.opnum == op.opnum
+ if oldop is not None and oldop.getdescr() is op.getdescr():
+ assert oldop.getopnum() == op.getopnum()
self.make_equal_to(op.result, self.getvalue(oldop.result))
if is_ovf:
self.i += 1 # skip next operation, it is the unneeded guard
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/rewrite.py Thu Sep 23 16:53:32 2010
@@ -1,11 +1,11 @@
-from optimizer import *
+from pypy.jit.metainterp.optimizeopt.optimizer import *
from pypy.jit.metainterp.resoperation import opboolinvers, opboolreflex
from pypy.jit.metainterp.history import ConstInt
from pypy.jit.metainterp.optimizeutil import _findall
from pypy.jit.metainterp.resoperation import rop, ResOperation
class OptRewrite(Optimization):
- """Rewrite operations into equvivialent, cheeper operations.
+ """Rewrite operations into equivalent, cheaper operations.
This includes already executed operations and constants.
"""
@@ -14,7 +14,7 @@
if self.find_rewritable_bool(op, args):
return
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
@@ -24,7 +24,7 @@
def try_boolinvers(self, op, targs):
oldop = self.optimizer.pure_operations.get(targs, None)
- if oldop is not None and oldop.descr is op.descr:
+ if oldop is not None and oldop.getdescr() is op.getdescr():
value = self.getvalue(oldop.result)
if value.is_constant():
if value.box.same_constant(CONST_1):
@@ -39,7 +39,7 @@
def find_rewritable_bool(self, op, args):
try:
- oldopnum = opboolinvers[op.opnum]
+ oldopnum = opboolinvers[op.getopnum()]
targs = [args[0], args[1], ConstInt(oldopnum)]
if self.try_boolinvers(op, targs):
return True
@@ -47,17 +47,17 @@
pass
try:
- oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL
+ oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL
targs = [args[1], args[0], ConstInt(oldopnum)]
oldop = self.optimizer.pure_operations.get(targs, None)
- if oldop is not None and oldop.descr is op.descr:
+ if oldop is not None and oldop.getdescr() is op.getdescr():
self.make_equal_to(op.result, self.getvalue(oldop.result))
return True
except KeyError:
pass
try:
- oldopnum = opboolinvers[opboolreflex[op.opnum]]
+ oldopnum = opboolinvers[opboolreflex[op.getopnum()]]
targs = [args[1], args[0], ConstInt(oldopnum)]
if self.try_boolinvers(op, targs):
return True
@@ -67,16 +67,16 @@
return False
def optimize_INT_AND(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.is_null() or v2.is_null():
self.make_constant_int(op.result, 0)
else:
self.emit_operation(op)
def optimize_INT_OR(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v1.is_null():
self.make_equal_to(op.result, v2)
elif v2.is_null():
@@ -85,20 +85,20 @@
self.emit_operation(op)
def optimize_INT_SUB(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
if v2.is_constant() and v2.box.getint() == 0:
self.make_equal_to(op.result, v1)
else:
self.emit_operation(op)
# Synthesize the reverse ops for optimize_default to reuse
- self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0])
- self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1])
+ self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0))
+ self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1))
def optimize_INT_ADD(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
# If one side of the op is 0 the result is the other side.
if v1.is_constant() and v1.box.getint() == 0:
@@ -109,12 +109,12 @@
self.emit_operation(op)
# Synthesize the reverse op for optimize_default to reuse
- self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0])
- self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1])
+ self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0))
+ self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1))
def optimize_INT_MUL(self, op):
- v1 = self.getvalue(op.args[0])
- v2 = self.getvalue(op.args[1])
+ v1 = self.getvalue(op.getarg(0))
+ v2 = self.getvalue(op.getarg(1))
# If one side of the op is 1 the result is the other side.
if v1.is_constant() and v1.box.getint() == 1:
@@ -128,18 +128,20 @@
self.emit_operation(op)
def optimize_CALL_PURE(self, op):
- for arg in op.args:
+ for i in range(op.numargs()):
+ arg = op.getarg(i)
if self.get_constant_box(arg) is None:
break
else:
# all constant arguments: constant-fold away
- self.make_constant(op.result, op.args[0])
+ self.make_constant(op.result, op.getarg(0))
return
# replace CALL_PURE with just CALL
- self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
- op.descr))
+ args = op.getarglist()[1:]
+ self.emit_operation(ResOperation(rop.CALL, args, op.result,
+ op.getdescr()))
def optimize_guard(self, op, constbox, emit_operation=True):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_constant():
box = value.box
assert isinstance(box, Const)
@@ -151,7 +153,7 @@
value.make_constant(constbox)
def optimize_GUARD_ISNULL(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_null():
return
elif value.is_nonnull():
@@ -160,7 +162,7 @@
value.make_constant(self.optimizer.cpu.ts.CONST_NULL)
def optimize_GUARD_NONNULL(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_nonnull():
return
elif value.is_null():
@@ -169,25 +171,25 @@
value.make_nonnull(len(self.optimizer.newoperations) - 1)
def optimize_GUARD_VALUE(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
emit_operation = True
if value.last_guard_index != -1:
# there already has been a guard_nonnull or guard_class or
# guard_nonnull_class on this value, which is rather silly.
# replace the original guard with a guard_value
old_guard_op = self.optimizer.newoperations[value.last_guard_index]
- old_opnum = old_guard_op.opnum
- old_guard_op.opnum = rop.GUARD_VALUE
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE,
+ args = [old_guard_op.getarg(0), op.getarg(1)])
+ self.optimizer.newoperations[value.last_guard_index] = new_guard_op
# hack hack hack. Change the guard_opnum on
- # old_guard_op.descr so that when resuming,
+ # new_guard_op.getdescr() so that when resuming,
# the operation is not skipped by pyjitpl.py.
- descr = old_guard_op.descr
+ descr = new_guard_op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
descr.guard_opnum = rop.GUARD_VALUE
- descr.make_a_counter_per_value(old_guard_op)
+ descr.make_a_counter_per_value(new_guard_op)
emit_operation = False
- constbox = op.args[1]
+ constbox = op.getarg(1)
assert isinstance(constbox, Const)
self.optimize_guard(op, constbox, emit_operation)
@@ -198,8 +200,8 @@
self.optimize_guard(op, CONST_0)
def optimize_GUARD_CLASS(self, op):
- value = self.getvalue(op.args[0])
- expectedclassbox = op.args[1]
+ value = self.getvalue(op.getarg(0))
+ expectedclassbox = op.getarg(1)
assert isinstance(expectedclassbox, Const)
realclassbox = value.get_constant_class(self.optimizer.cpu)
if realclassbox is not None:
@@ -213,15 +215,16 @@
# there already has been a guard_nonnull or guard_class or
# guard_nonnull_class on this value.
old_guard_op = self.optimizer.newoperations[value.last_guard_index]
- if old_guard_op.opnum == rop.GUARD_NONNULL:
+ if old_guard_op.getopnum() == rop.GUARD_NONNULL:
# it was a guard_nonnull, which we replace with a
# guard_nonnull_class.
- old_guard_op.opnum = rop.GUARD_NONNULL_CLASS
- old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+ new_guard_op = old_guard_op.copy_and_change(rop.GUARD_NONNULL_CLASS,
+ args = [old_guard_op.getarg(0), op.getarg(1)])
+ self.optimizer.newoperations[value.last_guard_index] = new_guard_op
# hack hack hack. Change the guard_opnum on
- # old_guard_op.descr so that when resuming,
+ # new_guard_op.getdescr() so that when resuming,
# the operation is not skipped by pyjitpl.py.
- descr = old_guard_op.descr
+ descr = new_guard_op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
descr.guard_opnum = rop.GUARD_NONNULL_CLASS
emit_operation = False
@@ -239,18 +242,18 @@
self.optimizer.exception_might_have_happened = False
def optimize_CALL_LOOPINVARIANT(self, op):
- funcvalue = self.getvalue(op.args[0])
+ funcvalue = self.getvalue(op.getarg(0))
if not funcvalue.is_constant():
self.emit_operation(op)
return
- key = make_hashable_int(op.args[0].getint())
+ key = make_hashable_int(op.getarg(0).getint())
resvalue = self.optimizer.loop_invariant_results.get(key, None)
if resvalue is not None:
self.make_equal_to(op.result, resvalue)
return
# change the op to be a normal call, from the backend's point of view
# there is no reason to have a separate operation for this
- op.opnum = rop.CALL
+ op = op.copy_and_change(rop.CALL)
self.emit_operation(op)
resvalue = self.getvalue(op.result)
self.optimizer.loop_invariant_results[key] = resvalue
@@ -265,17 +268,17 @@
self.emit_operation(op)
def optimize_INT_IS_TRUE(self, op):
- if self.getvalue(op.args[0]) in self.optimizer.bool_boxes:
- self.make_equal_to(op.result, self.getvalue(op.args[0]))
+ if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes:
+ self.make_equal_to(op.result, self.getvalue(op.getarg(0)))
return
- self._optimize_nullness(op, op.args[0], True)
+ self._optimize_nullness(op, op.getarg(0), True)
def optimize_INT_IS_ZERO(self, op):
- self._optimize_nullness(op, op.args[0], False)
+ self._optimize_nullness(op, op.getarg(0), False)
def _optimize_oois_ooisnot(self, op, expect_isnot):
- value0 = self.getvalue(op.args[0])
- value1 = self.getvalue(op.args[1])
+ value0 = self.getvalue(op.getarg(0))
+ value1 = self.getvalue(op.getarg(1))
if value0.is_virtual():
if value1.is_virtual():
intres = (value0 is value1) ^ expect_isnot
@@ -285,9 +288,9 @@
elif value1.is_virtual():
self.make_constant_int(op.result, expect_isnot)
elif value1.is_null():
- self._optimize_nullness(op, op.args[0], expect_isnot)
+ self._optimize_nullness(op, op.getarg(0), expect_isnot)
elif value0.is_null():
- self._optimize_nullness(op, op.args[1], expect_isnot)
+ self._optimize_nullness(op, op.getarg(1), expect_isnot)
elif value0 is value1:
self.make_constant_int(op.result, not expect_isnot)
else:
@@ -308,10 +311,10 @@
self._optimize_oois_ooisnot(op, False)
def optimize_INSTANCEOF(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
realclassbox = value.get_constant_class(self.optimizer.cpu)
if realclassbox is not None:
- checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr)
+ checkclassbox = self.optimizer.cpu.typedescr2classbox(op.getdescr())
result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu,
realclassbox,
checkclassbox)
Modified: pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/optimizeopt/virtualize.py Thu Sep 23 16:53:32 2010
@@ -6,7 +6,7 @@
from pypy.jit.metainterp.resoperation import rop, ResOperation
from pypy.jit.metainterp.optimizeutil import _findall
from pypy.rlib.objectmodel import we_are_translated
-from optimizer import *
+from pypy.jit.metainterp.optimizeopt.optimizer import *
class AbstractVirtualValue(OptValue):
@@ -263,7 +263,7 @@
def setup(self, virtuals):
if not virtuals:
return
-
+
inputargs = self.optimizer.loop.inputargs
specnodes = self.optimizer.loop.token.specnodes
assert len(inputargs) == len(specnodes)
@@ -290,18 +290,18 @@
def optimize_JUMP(self, op):
orgop = self.optimizer.loop.operations[-1]
exitargs = []
- target_loop_token = orgop.descr
+ target_loop_token = orgop.getdescr()
assert isinstance(target_loop_token, LoopToken)
specnodes = target_loop_token.specnodes
- assert len(op.args) == len(specnodes)
+ assert op.numargs() == len(specnodes)
for i in range(len(specnodes)):
- value = self.getvalue(op.args[i])
+ value = self.getvalue(op.getarg(i))
specnodes[i].teardown_virtual_node(self, value, exitargs)
- op.args = exitargs[:]
+ op = op.copy_and_change(op.getopnum(), args=exitargs[:])
self.emit_operation(op)
def optimize_VIRTUAL_REF(self, op):
- indexbox = op.args[1]
+ indexbox = op.getarg(1)
#
# get some constants
vrefinfo = self.optimizer.metainterp_sd.virtualref_info
@@ -327,17 +327,17 @@
# typically a PyPy PyFrame, and now is the end of its execution, so
# forcing it now does not have catastrophic effects.
vrefinfo = self.optimizer.metainterp_sd.virtualref_info
- # op.args[1] should really never point to null here
+ # op.getarg(1) should really never point to null here
# - set 'forced' to point to the real object
- op1 = ResOperation(rop.SETFIELD_GC, op.args, None,
+ op1 = ResOperation(rop.SETFIELD_GC, op.getarglist(), None,
descr = vrefinfo.descr_forced)
self.optimize_SETFIELD_GC(op1)
# - set 'virtual_token' to TOKEN_NONE
- args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)]
+ args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)]
op1 = ResOperation(rop.SETFIELD_GC, args, None,
descr = vrefinfo.descr_virtual_token)
self.optimize_SETFIELD_GC(op1)
- # Note that in some cases the virtual in op.args[1] has been forced
+ # Note that in some cases the virtual in op.getarg(1) has been forced
# already. This is fine. In that case, and *if* a residual
# CALL_MAY_FORCE suddenly turns out to access it, then it will
# trigger a ResumeGuardForcedDescr.handle_async_forcing() which
@@ -345,11 +345,11 @@
# was already forced).
def optimize_GETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
# optimizefindnode should ensure that fieldvalue is found
assert isinstance(value, AbstractVirtualValue)
- fieldvalue = value.getfield(op.descr, None)
+ fieldvalue = value.getfield(op.getdescr(), None)
assert fieldvalue is not None
self.make_equal_to(op.result, fieldvalue)
else:
@@ -362,36 +362,36 @@
optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC
def optimize_SETFIELD_GC(self, op):
- value = self.getvalue(op.args[0])
- fieldvalue = self.getvalue(op.args[1])
+ value = self.getvalue(op.getarg(0))
+ fieldvalue = self.getvalue(op.getarg(1))
if value.is_virtual():
- value.setfield(op.descr, fieldvalue)
+ value.setfield(op.getdescr(), fieldvalue)
else:
value.ensure_nonnull()
###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue)
self.emit_operation(op)
def optimize_NEW_WITH_VTABLE(self, op):
- self.make_virtual(op.args[0], op.result, op)
+ self.make_virtual(op.getarg(0), op.result, op)
def optimize_NEW(self, op):
- self.make_vstruct(op.descr, op.result, op)
+ self.make_vstruct(op.getdescr(), op.result, op)
def optimize_NEW_ARRAY(self, op):
- sizebox = self.get_constant_box(op.args[0])
+ sizebox = self.get_constant_box(op.getarg(0))
if sizebox is not None:
# if the original 'op' did not have a ConstInt as argument,
# build a new one with the ConstInt argument
- if not isinstance(op.args[0], ConstInt):
+ if not isinstance(op.getarg(0), ConstInt):
op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result,
- descr=op.descr)
- self.make_varray(op.descr, sizebox.getint(), op.result, op)
+ descr=op.getdescr())
+ self.make_varray(op.getdescr(), sizebox.getint(), op.result, op)
else:
###self.optimize_default(op)
self.emit_operation(op)
def optimize_ARRAYLEN_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
self.make_constant_int(op.result, value.getlength())
else:
@@ -400,9 +400,9 @@
self.emit_operation(op)
def optimize_GETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
+ indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
itemvalue = value.getitem(indexbox.getint())
self.make_equal_to(op.result, itemvalue)
@@ -416,22 +416,22 @@
optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC
def optimize_SETARRAYITEM_GC(self, op):
- value = self.getvalue(op.args[0])
+ value = self.getvalue(op.getarg(0))
if value.is_virtual():
- indexbox = self.get_constant_box(op.args[1])
+ indexbox = self.get_constant_box(op.getarg(1))
if indexbox is not None:
- value.setitem(indexbox.getint(), self.getvalue(op.args[2]))
+ value.setitem(indexbox.getint(), self.getvalue(op.getarg(2)))
return
value.ensure_nonnull()
###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
self.emit_operation(op)
def optimize_ARRAYCOPY(self, op):
- source_value = self.getvalue(op.args[2])
- dest_value = self.getvalue(op.args[3])
- source_start_box = self.get_constant_box(op.args[4])
- dest_start_box = self.get_constant_box(op.args[5])
- length = self.get_constant_box(op.args[6])
+ source_value = self.getvalue(op.getarg(2))
+ dest_value = self.getvalue(op.getarg(3))
+ source_start_box = self.get_constant_box(op.getarg(4))
+ dest_start_box = self.get_constant_box(op.getarg(5))
+ length = self.get_constant_box(op.getarg(6))
if (source_value.is_virtual() and source_start_box and dest_start_box
and length and dest_value.is_virtual()):
# XXX optimize the case where dest value is not virtual,
@@ -444,13 +444,14 @@
return
if length and length.getint() == 0:
return # 0-length arraycopy
- descr = op.args[0]
+ descr = op.getarg(0)
assert isinstance(descr, AbstractDescr)
- self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
+ args = op.getarglist()[1:]
+ self.emit_operation(ResOperation(rop.CALL, args, op.result,
descr))
def propagate_forward(self, op):
- opnum = op.opnum
+ opnum = op.getopnum()
for value, func in optimize_ops:
if opnum == value:
func(self, op)
Modified: pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/pyjitpl.py Thu Sep 23 16:53:32 2010
@@ -159,7 +159,7 @@
if got_type == history.INT:
self.registers_i[target_index] = resultbox
elif got_type == history.REF:
- #debug_print(' ->',
+ #debug_print(' ->',
# llmemory.cast_ptr_to_adr(resultbox.getref_base()))
self.registers_r[target_index] = resultbox
elif got_type == history.FLOAT:
@@ -446,7 +446,7 @@
def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
sizebox):
sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
- self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
+ self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
sbox, sizebox)
abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
sizebox)
@@ -1004,7 +1004,7 @@
resumedescr = compile.ResumeGuardDescr(metainterp_sd,
original_greenkey)
guard_op = metainterp.history.record(opnum, moreargs, None,
- descr=resumedescr)
+ descr=resumedescr)
virtualizable_boxes = None
if metainterp.jitdriver_sd.virtualizable_info is not None:
virtualizable_boxes = metainterp.virtualizable_boxes
@@ -1463,7 +1463,7 @@
resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes)
return resbox
- def _record_helper_pure(self, opnum, resbox, descr, *argboxes):
+ def _record_helper_pure(self, opnum, resbox, descr, *argboxes):
canfold = self._all_constants(*argboxes)
if canfold:
resbox = resbox.constbox() # ensure it is a Const
@@ -1472,7 +1472,7 @@
resbox = resbox.nonconstbox() # ensure it is a Box
return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
- def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
+ def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
canfold = self._all_constants_varargs(argboxes)
if canfold:
resbox = resbox.constbox() # ensure it is a Const
@@ -1485,7 +1485,7 @@
assert resbox is None or isinstance(resbox, Box)
# record the operation
profiler = self.staticdata.profiler
- profiler.count_ops(opnum, RECORDED_OPS)
+ profiler.count_ops(opnum, RECORDED_OPS)
op = self.history.record(opnum, argboxes, resbox, descr)
self.attach_debug_info(op)
return resbox
@@ -1667,7 +1667,7 @@
# Search in current_merge_points for original_boxes with compatible
# green keys, representing the beginning of the same loop as the one
- # we end now.
+ # we end now.
num_green_args = self.jitdriver_sd.num_green_args
for j in range(len(self.current_merge_points)-1, -1, -1):
@@ -1922,7 +1922,7 @@
vrefbox = self.virtualref_boxes[i+1]
# record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE
call_may_force_op = self.history.operations.pop()
- assert call_may_force_op.opnum == rop.CALL_MAY_FORCE
+ assert call_may_force_op.getopnum() == rop.CALL_MAY_FORCE
self.history.record(rop.VIRTUAL_REF_FINISH,
[vrefbox, virtualbox], None)
self.history.operations.append(call_may_force_op)
@@ -2088,10 +2088,10 @@
""" Patch a CALL into a CALL_PURE.
"""
op = self.history.operations[-1]
- assert op.opnum == rop.CALL
+ assert op.getopnum() == rop.CALL
resbox_as_const = resbox.constbox()
- for arg in op.args:
- if not isinstance(arg, Const):
+ for i in range(op.numargs()):
+ if not isinstance(op.getarg(i), Const):
break
else:
# all-constants: remove the CALL operation now and propagate a
@@ -2100,8 +2100,8 @@
return resbox_as_const
# not all constants (so far): turn CALL into CALL_PURE, which might
# be either removed later by optimizeopt or turned back into CALL.
- op.opnum = rop.CALL_PURE
- op.args = [resbox_as_const] + op.args
+ newop = op.copy_and_change(rop.CALL_PURE, args=[resbox_as_const]+op.getarglist())
+ self.history.operations[-1] = newop
return resbox
def direct_assembler_call(self, targetjitdriver_sd):
@@ -2109,10 +2109,11 @@
patching the CALL_MAY_FORCE that occurred just now.
"""
op = self.history.operations.pop()
- assert op.opnum == rop.CALL_MAY_FORCE
+ assert op.getopnum() == rop.CALL_MAY_FORCE
num_green_args = targetjitdriver_sd.num_green_args
- greenargs = op.args[1:num_green_args+1]
- args = op.args[num_green_args+1:]
+ arglist = op.getarglist()
+ greenargs = arglist[1:num_green_args+1]
+ args = arglist[num_green_args+1:]
assert len(args) == targetjitdriver_sd.num_red_args
vinfo = targetjitdriver_sd.virtualizable_info
if vinfo is not None:
@@ -2122,9 +2123,7 @@
# ^^^ and not "+=", which makes 'args' a resizable list
warmrunnerstate = targetjitdriver_sd.warmstate
token = warmrunnerstate.get_assembler_token(greenargs, args)
- op.opnum = rop.CALL_ASSEMBLER
- op.args = args
- op.descr = token
+ op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token)
self.history.operations.append(op)
# ____________________________________________________________
Modified: pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/resoperation.py Thu Sep 23 16:53:32 2010
@@ -1,42 +1,90 @@
from pypy.rlib.objectmodel import we_are_translated
from pypy.rlib.debug import make_sure_not_resized
-class ResOperation(object):
- """The central ResOperation class, representing one operation."""
+def ResOperation(opnum, args, result, descr=None):
+ cls = opclasses[opnum]
+ op = cls(result)
+ op.initarglist(args)
+ if descr is not None:
+ assert isinstance(op, ResOpWithDescr)
+ op.setdescr(descr)
+ return op
+
- # for 'guard_*'
- fail_args = None
+class AbstractResOp(object):
+ """The central ResOperation class, representing one operation."""
# debug
name = ""
pc = 0
- def __init__(self, opnum, args, result, descr=None):
- make_sure_not_resized(args)
- assert isinstance(opnum, int)
- self.opnum = opnum
- self.args = list(args)
- make_sure_not_resized(self.args)
- assert not isinstance(result, list)
+ def __init__(self, result):
self.result = result
- self.setdescr(descr)
+
+ # methods implemented by each concrete class
+ # ------------------------------------------
+
+ def getopnum(self):
+ raise NotImplementedError
+
+ # methods implemented by the arity mixins
+ # ---------------------------------------
+
+ def initarglist(self, args):
+ "This is supposed to be called only just after the ResOp has been created"
+ raise NotImplementedError
+
+ def getarglist(self):
+ raise NotImplementedError
+
+ def getarg(self, i):
+ raise NotImplementedError
+
+ def setarg(self, i, box):
+ raise NotImplementedError
+
+ def numargs(self):
+ raise NotImplementedError
+
+
+ # methods implemented by GuardResOp
+ # ---------------------------------
+
+ def getfailargs(self):
+ return None
+
+ def setfailargs(self, fail_args):
+ raise NotImplementedError
+
+ # methods implemented by ResOpWithDescr
+ # -------------------------------------
+
+ def getdescr(self):
+ return None
def setdescr(self, descr):
- # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt
- # instance provided by the backend holding details about the type
- # of the operation. It must inherit from AbstractDescr. The
- # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(),
- # cpu.calldescrof(), and cpu.typedescrof().
- from pypy.jit.metainterp.history import check_descr
- check_descr(descr)
- self.descr = descr
+ raise NotImplementedError
+
+ # common methods
+ # --------------
+
+ def copy_and_change(self, opnum, args=None, result=None, descr=None):
+ "shallow copy: the returned operation is meant to be used in place of self"
+ if args is None:
+ args = self.getarglist()
+ if result is None:
+ result = self.result
+ if descr is None:
+ descr = self.getdescr()
+ newop = ResOperation(opnum, args, result, descr)
+ return newop
def clone(self):
- descr = self.descr
+ args = self.getarglist()
+ descr = self.getdescr()
if descr is not None:
descr = descr.clone_if_mutable()
- op = ResOperation(self.opnum, self.args, self.result, descr)
- op.fail_args = self.fail_args
+ op = ResOperation(self.getopnum(), args, self.result, descr)
if not we_are_translated():
op.name = self.name
op.pc = self.pc
@@ -55,82 +103,271 @@
prefix = "%s:%s " % (self.name, self.pc)
else:
prefix = ""
- if self.descr is None or we_are_translated():
+ args = self.getarglist()
+ descr = self.getdescr()
+ if descr is None or we_are_translated():
return '%s%s%s(%s)' % (prefix, sres, self.getopname(),
- ', '.join([str(a) for a in self.args]))
+ ', '.join([str(a) for a in args]))
else:
return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(),
- ', '.join([str(a) for a in self.args]), self.descr)
+ ', '.join([str(a) for a in args]), descr)
def getopname(self):
try:
- return opname[self.opnum].lower()
+ return opname[self.getopnum()].lower()
except KeyError:
- return '<%d>' % self.opnum
+ return '<%d>' % self.getopnum()
def is_guard(self):
- return rop._GUARD_FIRST <= self.opnum <= rop._GUARD_LAST
+ return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST
def is_foldable_guard(self):
- return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST
+ return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST
def is_guard_exception(self):
- return (self.opnum == rop.GUARD_EXCEPTION or
- self.opnum == rop.GUARD_NO_EXCEPTION)
+ return (self.getopnum() == rop.GUARD_EXCEPTION or
+ self.getopnum() == rop.GUARD_NO_EXCEPTION)
def is_guard_overflow(self):
- return (self.opnum == rop.GUARD_OVERFLOW or
- self.opnum == rop.GUARD_NO_OVERFLOW)
+ return (self.getopnum() == rop.GUARD_OVERFLOW or
+ self.getopnum() == rop.GUARD_NO_OVERFLOW)
def is_always_pure(self):
- return rop._ALWAYS_PURE_FIRST <= self.opnum <= rop._ALWAYS_PURE_LAST
+ return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST
def has_no_side_effect(self):
- return rop._NOSIDEEFFECT_FIRST <= self.opnum <= rop._NOSIDEEFFECT_LAST
+ return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST
def can_raise(self):
- return rop._CANRAISE_FIRST <= self.opnum <= rop._CANRAISE_LAST
+ return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST
def is_ovf(self):
- return rop._OVF_FIRST <= self.opnum <= rop._OVF_LAST
+ return rop._OVF_FIRST <= self.getopnum() <= rop._OVF_LAST
def is_comparison(self):
return self.is_always_pure() and self.returns_bool_result()
def is_final(self):
- return rop._FINAL_FIRST <= self.opnum <= rop._FINAL_LAST
+ return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST
def returns_bool_result(self):
- opnum = self.opnum
+ opnum = self.getopnum()
if we_are_translated():
assert opnum >= 0
elif opnum < 0:
return False # for tests
return opboolresult[opnum]
+
+# ===================
+# Top of the hierarchy
+# ===================
+
+class PlainResOp(AbstractResOp):
+ pass
+
+class ResOpWithDescr(AbstractResOp):
+
+ _descr = None
+
+ def getdescr(self):
+ return self._descr
+
+ def setdescr(self, descr):
+ # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt
+ # instance provided by the backend holding details about the type
+ # of the operation. It must inherit from AbstractDescr. The
+ # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(),
+ # cpu.calldescrof(), and cpu.typedescrof().
+ from pypy.jit.metainterp.history import check_descr
+ check_descr(descr)
+ self._descr = descr
+
+class GuardResOp(ResOpWithDescr):
+
+ _fail_args = None
+
+ def getfailargs(self):
+ return self._fail_args
+
+ def setfailargs(self, fail_args):
+ self._fail_args = fail_args
+
+ def copy_and_change(self, opnum, args=None, result=None, descr=None):
+ newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr)
+ newop.setfailargs(self.getfailargs())
+ return newop
+
+ def clone(self):
+ newop = AbstractResOp.clone(self)
+ newop.setfailargs(self.getfailargs())
+ return newop
+
+
+# ============
+# arity mixins
+# ============
+
+class NullaryOp(object):
+ _mixin_ = True
+
+ def initarglist(self, args):
+ assert len(args) == 0
+
+ def getarglist(self):
+ return []
+
+ def numargs(self):
+ return 0
+
+ def getarg(self, i):
+ raise IndexError
+
+ def setarg(self, i, box):
+ raise IndexError
+
+
+class UnaryOp(object):
+ _mixin_ = True
+ _arg0 = None
+
+ def initarglist(self, args):
+ assert len(args) == 1
+ self._arg0, = args
+
+ def getarglist(self):
+ return [self._arg0]
+
+ def numargs(self):
+ return 1
+
+ def getarg(self, i):
+ if i == 0:
+ return self._arg0
+ else:
+ raise IndexError
+
+ def setarg(self, i, box):
+ if i == 0:
+ self._arg0 = box
+ else:
+ raise IndexError
+
+
+class BinaryOp(object):
+ _mixin_ = True
+ _arg0 = None
+ _arg1 = None
+
+ def initarglist(self, args):
+ assert len(args) == 2
+ self._arg0, self._arg1 = args
+
+ def getarglist(self):
+ return [self._arg0, self._arg1]
+
+ def numargs(self):
+ return 2
+
+ def getarg(self, i):
+ if i == 0:
+ return self._arg0
+ elif i == 1:
+ return self._arg1
+ else:
+ raise IndexError
+
+ def setarg(self, i, box):
+ if i == 0:
+ self._arg0 = box
+ elif i == 1:
+ self._arg1 = box
+ else:
+ raise IndexError
+
+ def getarglist(self):
+ return [self._arg0, self._arg1]
+
+
+class TernaryOp(object):
+ _mixin_ = True
+ _arg0 = None
+ _arg1 = None
+ _arg2 = None
+
+ def initarglist(self, args):
+ assert len(args) == 3
+ self._arg0, self._arg1, self._arg2 = args
+
+ def getarglist(self):
+ return [self._arg0, self._arg1, self._arg2]
+
+ def numargs(self):
+ return 3
+
+ def getarg(self, i):
+ if i == 0:
+ return self._arg0
+ elif i == 1:
+ return self._arg1
+ elif i == 2:
+ return self._arg2
+ else:
+ raise IndexError
+
+ def setarg(self, i, box):
+ if i == 0:
+ self._arg0 = box
+ elif i == 1:
+ self._arg1 = box
+ elif i == 2:
+ self._arg2 = box
+ else:
+ raise IndexError
+
+class N_aryOp(object):
+ _mixin_ = True
+ _args = None
+
+ def initarglist(self, args):
+ self._args = args
+
+ def getarglist(self):
+ return self._args
+
+ def numargs(self):
+ return len(self._args)
+
+ def getarg(self, i):
+ return self._args[i]
+
+ def setarg(self, i, box):
+ self._args[i] = box
+
+
# ____________________________________________________________
_oplist = [
'_FINAL_FIRST',
- 'JUMP',
- 'FINISH',
+ 'JUMP/*d',
+ 'FINISH/*d',
'_FINAL_LAST',
'_GUARD_FIRST',
'_GUARD_FOLDABLE_FIRST',
- 'GUARD_TRUE',
- 'GUARD_FALSE',
- 'GUARD_VALUE',
- 'GUARD_CLASS',
- 'GUARD_NONNULL',
- 'GUARD_ISNULL',
- 'GUARD_NONNULL_CLASS',
+ 'GUARD_TRUE/1d',
+ 'GUARD_FALSE/1d',
+ 'GUARD_VALUE/2d',
+ 'GUARD_CLASS/2d',
+ 'GUARD_NONNULL/1d',
+ 'GUARD_ISNULL/1d',
+ 'GUARD_NONNULL_CLASS/2d',
'_GUARD_FOLDABLE_LAST',
- 'GUARD_NO_EXCEPTION',
- 'GUARD_EXCEPTION',
- 'GUARD_NO_OVERFLOW',
- 'GUARD_OVERFLOW',
- 'GUARD_NOT_FORCED',
+ 'GUARD_NO_EXCEPTION/0d',
+ 'GUARD_EXCEPTION/1d',
+ 'GUARD_NO_OVERFLOW/0d',
+ 'GUARD_OVERFLOW/0d',
+ 'GUARD_NOT_FORCED/0d',
'_GUARD_LAST', # ----- end of guard operations -----
'_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations -----
@@ -218,20 +455,20 @@
'STRSETITEM/3',
'UNICODESETITEM/3',
'NEWUNICODE/1',
- #'RUNTIMENEW/1', # ootype operation
- 'COND_CALL_GC_WB', # [objptr, newvalue] (for the write barrier)
+ #'RUNTIMENEW/1', # ootype operation
+ 'COND_CALL_GC_WB/2d', # [objptr, newvalue] (for the write barrier)
'DEBUG_MERGE_POINT/1', # debugging only
'VIRTUAL_REF_FINISH/2', # removed before it's passed to the backend
- 'CALL_C', # call directly C code from here (a function addres comes first)
+ 'CALL_C/*', # call directly C code from here (a function address comes first)
'_CANRAISE_FIRST', # ----- start of can_raise operations -----
- 'CALL',
- 'CALL_ASSEMBLER', # call already compiled assembler
- 'CALL_MAY_FORCE',
- 'CALL_LOOPINVARIANT',
+ 'CALL/*d',
+ 'CALL_ASSEMBLER/*d', # call already compiled assembler
+ 'CALL_MAY_FORCE/*d',
+ 'CALL_LOOPINVARIANT/*d',
#'OOSEND', # ootype operation
#'OOSEND_PURE', # ootype operation
- 'CALL_PURE', # removed before it's passed to the backend
+ 'CALL_PURE/*d', # removed before it's passed to the backend
# CALL_PURE(result, func, arg_1,..,arg_n)
'_CANRAISE_LAST', # ----- end of can_raise operations -----
@@ -248,6 +485,7 @@
class rop(object):
pass
+opclasses = [] # mapping numbers to the concrete ResOp class
opname = {} # mapping numbers to the original names, for debugging
oparity = [] # mapping numbers to the arity of the operation or -1
opwithdescr = [] # mapping numbers to a flag "takes a descr"
@@ -262,16 +500,62 @@
name, arity = name.split('/')
withdescr = 'd' in arity
boolresult = 'b' in arity
- arity = int(arity.rstrip('db'))
+ arity = arity.rstrip('db')
+ if arity == '*':
+ arity = -1
+ else:
+ arity = int(arity)
else:
arity, withdescr, boolresult = -1, True, False # default
setattr(rop, name, i)
if not name.startswith('_'):
opname[i] = name
+ cls = create_class_for_op(name, i, arity, withdescr)
+ else:
+ cls = None
+ opclasses.append(cls)
oparity.append(arity)
opwithdescr.append(withdescr)
opboolresult.append(boolresult)
- assert len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist)
+ assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist)
+
+def get_base_class(mixin, base):
+ try:
+ return get_base_class.cache[(mixin, base)]
+ except KeyError:
+ arity_name = mixin.__name__[:-2] # remove the trailing "Op"
+ name = arity_name + base.__name__ # something like BinaryPlainResOp
+ bases = (mixin, base)
+ cls = type(name, bases, {})
+ get_base_class.cache[(mixin, base)] = cls
+ return cls
+get_base_class.cache = {}
+
+def create_class_for_op(name, opnum, arity, withdescr):
+ arity2mixin = {
+ 0: NullaryOp,
+ 1: UnaryOp,
+ 2: BinaryOp,
+ 3: TernaryOp
+ }
+
+ is_guard = name.startswith('GUARD')
+ if is_guard:
+ assert withdescr
+ baseclass = GuardResOp
+ elif withdescr:
+ baseclass = ResOpWithDescr
+ else:
+ baseclass = PlainResOp
+ mixin = arity2mixin.get(arity, N_aryOp)
+
+ def getopnum(self):
+ return opnum
+
+ cls_name = '%s_OP' % name
+ bases = (get_base_class(mixin, baseclass),)
+ dic = {'getopnum': getopnum}
+ return type(cls_name, bases, dic)
setup(__name__ == '__main__') # print out the table when run directly
del _oplist
Modified: pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/simple_optimize.py Thu Sep 23 16:53:32 2010
@@ -11,15 +11,17 @@
from pypy.jit.metainterp.history import AbstractDescr
# change ARRAYCOPY to call, so we don't have to pass around
# unnecessary information to the backend. Do the same with VIRTUAL_REF_*.
- if op.opnum == rop.ARRAYCOPY:
- descr = op.args[0]
+ if op.getopnum() == rop.ARRAYCOPY:
+ descr = op.getarg(0)
assert isinstance(descr, AbstractDescr)
- op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr)
- elif op.opnum == rop.CALL_PURE:
- op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr)
- elif op.opnum == rop.VIRTUAL_REF:
- op = ResOperation(rop.SAME_AS, [op.args[0]], op.result)
- elif op.opnum == rop.VIRTUAL_REF_FINISH:
+ args = op.getarglist()[1:]
+ op = ResOperation(rop.CALL, args, op.result, descr=descr)
+ elif op.getopnum() == rop.CALL_PURE:
+ args = op.getarglist()[1:]
+ op = ResOperation(rop.CALL, args, op.result, op.getdescr())
+ elif op.getopnum() == rop.VIRTUAL_REF:
+ op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result)
+ elif op.getopnum() == rop.VIRTUAL_REF_FINISH:
return []
return [op]
@@ -36,7 +38,7 @@
newoperations = []
for op in loop.operations:
if op.is_guard():
- descr = op.descr
+ descr = op.getdescr()
assert isinstance(descr, compile.ResumeGuardDescr)
modifier = resume.ResumeDataVirtualAdder(descr, memo)
newboxes = modifier.finish(EMPTY_VALUES)
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/oparser.py Thu Sep 23 16:53:32 2010
@@ -6,7 +6,7 @@
from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\
ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\
LoopToken
-from pypy.jit.metainterp.resoperation import rop, ResOperation
+from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp
from pypy.jit.metainterp.typesystem import llhelper
from pypy.jit.codewriter.heaptracker import adr2int
from pypy.rpython.lltypesystem import lltype, llmemory
@@ -16,17 +16,29 @@
class ParseError(Exception):
pass
-
class Boxes(object):
pass
+class ESCAPE_OP(N_aryOp, ResOpWithDescr):
+
+ OPNUM = -123
+
+ def __init__(self, opnum, args, result, descr=None):
+ assert opnum == self.OPNUM
+ self.result = result
+ self.initarglist(args)
+ self.setdescr(descr)
+
+ def getopnum(self):
+ return self.OPNUM
+
class ExtendedTreeLoop(TreeLoop):
def getboxes(self):
def opboxes(operations):
for op in operations:
yield op.result
- for box in op.args:
+ for box in op.getarglist():
yield box
def allboxes():
for box in self.inputargs:
@@ -171,7 +183,7 @@
opnum = getattr(rop, opname.upper())
except AttributeError:
if opname == 'escape':
- opnum = -123
+ opnum = ESCAPE_OP.OPNUM
else:
raise ParseError("unknown op: %s" % opname)
endnum = line.rfind(')')
@@ -228,6 +240,12 @@
descr = self.looptoken
return opnum, args, descr, fail_args
+ def create_op(self, opnum, args, result, descr):
+ if opnum == ESCAPE_OP.OPNUM:
+ return ESCAPE_OP(opnum, args, result, descr)
+ else:
+ return ResOperation(opnum, args, result, descr)
+
def parse_result_op(self, line):
res, op = line.split("=", 1)
res = res.strip()
@@ -237,14 +255,16 @@
raise ParseError("Double assign to var %s in line: %s" % (res, line))
rvar = self.box_for_var(res)
self.vars[res] = rvar
- res = ResOperation(opnum, args, rvar, descr)
- res.fail_args = fail_args
+ res = self.create_op(opnum, args, rvar, descr)
+ if fail_args is not None:
+ res.setfailargs(fail_args)
return res
def parse_op_no_result(self, line):
opnum, args, descr, fail_args = self.parse_op(line)
- res = ResOperation(opnum, args, None, descr)
- res.fail_args = fail_args
+ res = self.create_op(opnum, args, None, descr)
+ if fail_args is not None:
+ res.setfailargs(fail_args)
return res
def parse_next_op(self, line):
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_basic.py Thu Sep 23 16:53:32 2010
@@ -296,7 +296,7 @@
found = 0
for op in get_stats().loops[0]._all_operations():
if op.getopname() == 'guard_true':
- liveboxes = op.fail_args
+ liveboxes = op.getfailargs()
assert len(liveboxes) == 3
for box in liveboxes:
assert isinstance(box, history.BoxInt)
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_logger.py Thu Sep 23 16:53:32 2010
@@ -100,8 +100,8 @@
debug_merge_point("info")
'''
loop, oloop = self.reparse(inp, check_equal=False)
- assert loop.operations[0].args[0]._get_str() == 'info'
- assert oloop.operations[0].args[0]._get_str() == 'info'
+ assert loop.operations[0].getarg(0)._get_str() == 'info'
+ assert oloop.operations[0].getarg(0)._get_str() == 'info'
def test_floats(self):
inp = '''
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_loop.py Thu Sep 23 16:53:32 2010
@@ -178,7 +178,7 @@
found = 0
for op in get_stats().loops[0]._all_operations():
if op.getopname() == 'guard_true':
- liveboxes = op.fail_args
+ liveboxes = op.getfailargs()
assert len(liveboxes) == 2 # x, y (in some order)
assert isinstance(liveboxes[0], history.BoxInt)
assert isinstance(liveboxes[1], history.BoxInt)
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_oparser.py Thu Sep 23 16:53:32 2010
@@ -16,10 +16,10 @@
"""
loop = parse(x)
assert len(loop.operations) == 3
- assert [op.opnum for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB,
+ assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB,
rop.FINISH]
assert len(loop.inputargs) == 2
- assert loop.operations[-1].descr
+ assert loop.operations[-1].getdescr()
def test_const_ptr_subops():
x = """
@@ -30,8 +30,8 @@
vtable = lltype.nullptr(S)
loop = parse(x, None, locals())
assert len(loop.operations) == 1
- assert loop.operations[0].descr
- assert loop.operations[0].fail_args == []
+ assert loop.operations[0].getdescr()
+ assert loop.operations[0].getfailargs() == []
def test_descr():
class Xyz(AbstractDescr):
@@ -43,7 +43,7 @@
"""
stuff = Xyz()
loop = parse(x, None, locals())
- assert loop.operations[0].descr is stuff
+ assert loop.operations[0].getdescr() is stuff
def test_after_fail():
x = """
@@ -64,7 +64,7 @@
"""
stuff = Xyz()
loop = parse(x, None, locals())
- assert loop.operations[0].descr is stuff
+ assert loop.operations[0].getdescr() is stuff
def test_boxname():
x = """
@@ -111,7 +111,7 @@
TP = lltype.GcArray(lltype.Signed)
NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP))
loop = parse(x, None, {'func_ptr' : NULL})
- assert loop.operations[0].args[0].value == NULL
+ assert loop.operations[0].getarg(0).value == NULL
def test_jump_target():
x = '''
@@ -119,7 +119,7 @@
jump()
'''
loop = parse(x)
- assert loop.operations[0].descr is loop.token
+ assert loop.operations[0].getdescr() is loop.token
def test_jump_target_other():
looptoken = LoopToken()
@@ -128,7 +128,7 @@
jump(descr=looptoken)
'''
loop = parse(x, namespace=locals())
- assert loop.operations[0].descr is looptoken
+ assert loop.operations[0].getdescr() is looptoken
def test_floats():
x = '''
@@ -136,7 +136,7 @@
f1 = float_add(f0, 3.5)
'''
loop = parse(x)
- assert isinstance(loop.operations[0].args[0], BoxFloat)
+ assert isinstance(loop.operations[0].getarg(0), BoxFloat)
def test_debug_merge_point():
x = '''
@@ -147,10 +147,10 @@
debug_merge_point('(stuff) #1')
'''
loop = parse(x)
- assert loop.operations[0].args[0]._get_str() == 'info'
- assert loop.operations[1].args[0]._get_str() == 'info'
- assert loop.operations[2].args[0]._get_str() == "<some ('other,')> info"
- assert loop.operations[3].args[0]._get_str() == "(stuff) #1"
+ assert loop.operations[0].getarg(0)._get_str() == 'info'
+ assert loop.operations[1].getarg(0)._get_str() == 'info'
+ assert loop.operations[2].getarg(0)._get_str() == "<some ('other,')> info"
+ assert loop.operations[3].getarg(0)._get_str() == "(stuff) #1"
def test_descr_with_obj_print():
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_optimizeopt.py Thu Sep 23 16:53:32 2010
@@ -33,7 +33,7 @@
self.profiler = EmptyProfiler()
self.options = Fake()
self.globaldata = Fake()
-
+
def test_store_final_boxes_in_guard():
from pypy.jit.metainterp.compile import ResumeGuardDescr
from pypy.jit.metainterp.resume import tag, TAGBOX
@@ -42,7 +42,7 @@
opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu),
None)
fdescr = ResumeGuardDescr(None, None)
- op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr)
+ op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr)
# setup rd data
fi0 = resume.FrameInfo(None, "code0", 11)
fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33)
@@ -50,11 +50,11 @@
fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1])
#
opt.store_final_boxes_in_guard(op)
- if op.fail_args == [b0, b1]:
+ if op.getfailargs() == [b0, b1]:
assert fdescr.rd_numb.nums == [tag(1, TAGBOX)]
assert fdescr.rd_numb.prev.nums == [tag(0, TAGBOX)]
else:
- assert op.fail_args == [b1, b0]
+ assert op.getfailargs() == [b1, b0]
assert fdescr.rd_numb.nums == [tag(0, TAGBOX)]
assert fdescr.rd_numb.prev.nums == [tag(1, TAGBOX)]
assert fdescr.rd_virtuals is None
@@ -75,7 +75,7 @@
assert lst3 == [LLtypeMixin.valuedescr]
lst4 = virt1._get_field_descr_list()
assert lst3 is lst4
-
+
virt2 = virtualize.AbstractVirtualStructValue(opt, None)
lst5 = virt2._get_field_descr_list()
assert lst5 is lst1
@@ -140,24 +140,26 @@
print '%-39s| %s' % (txt1[:39], txt2[:39])
txt1 = txt1[39:]
txt2 = txt2[39:]
- assert op1.opnum == op2.opnum
- assert len(op1.args) == len(op2.args)
- for x, y in zip(op1.args, op2.args):
+ assert op1.getopnum() == op2.getopnum()
+ assert op1.numargs() == op2.numargs()
+ for i in range(op1.numargs()):
+ x = op1.getarg(i)
+ y = op2.getarg(i)
assert x == remap.get(y, y)
if op2.result in remap:
assert op1.result == remap[op2.result]
else:
remap[op2.result] = op1.result
- if op1.opnum != rop.JUMP: # xxx obscure
- assert op1.descr == op2.descr
- if op1.fail_args or op2.fail_args:
- assert len(op1.fail_args) == len(op2.fail_args)
+ if op1.getopnum() != rop.JUMP: # xxx obscure
+ assert op1.getdescr() == op2.getdescr()
+ if op1.getfailargs() or op2.getfailargs():
+ assert len(op1.getfailargs()) == len(op2.getfailargs())
if strict_fail_args:
- for x, y in zip(op1.fail_args, op2.fail_args):
+ for x, y in zip(op1.getfailargs(), op2.getfailargs()):
assert x == remap.get(y, y)
else:
- fail_args1 = set(op1.fail_args)
- fail_args2 = set([remap.get(y, y) for y in op2.fail_args])
+ fail_args1 = set(op1.getfailargs())
+ fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()])
assert fail_args1 == fail_args2
assert len(oplist1) == len(oplist2)
print '-'*57
@@ -209,7 +211,7 @@
self.metainterp_sd = metainterp_sd
self.original_greenkey = original_greenkey
def store_final_boxes(self, op, boxes):
- op.fail_args = boxes
+ op.setfailargs(boxes)
def __eq__(self, other):
return type(self) is type(other) # xxx obscure
@@ -489,7 +491,7 @@
jump()
"""
self.optimize_loop(ops, 'Constant(myptr)', expected)
-
+
def test_ooisnull_oononnull_1(self):
ops = """
[p0]
@@ -842,7 +844,7 @@
jump(f, f1)
"""
self.optimize_loop(ops, 'Not, Virtual(node_vtable, floatdescr=Not)',
- expected, checkspecnodes=False)
+ expected, checkspecnodes=False)
def test_virtual_2(self):
ops = """
@@ -2171,7 +2173,7 @@
jump(i1, i0)
"""
self.optimize_loop(ops, 'Not, Not', expected)
-
+
def test_fold_partially_constant_ops(self):
ops = """
[i0]
@@ -2183,7 +2185,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
ops = """
[i0]
i1 = int_add(i0, 0)
@@ -2194,7 +2196,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
ops = """
[i0]
i1 = int_add(0, i0)
@@ -2205,7 +2207,44 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
+ def test_fold_partially_constant_ops_ovf(self):
+ ops = """
+ [i0]
+ i1 = int_sub_ovf(i0, 0)
+ guard_no_overflow() []
+ jump(i1)
+ """
+ expected = """
+ [i0]
+ jump(i0)
+ """
+ self.optimize_loop(ops, 'Not', expected)
+
+ ops = """
+ [i0]
+ i1 = int_add_ovf(i0, 0)
+ guard_no_overflow() []
+ jump(i1)
+ """
+ expected = """
+ [i0]
+ jump(i0)
+ """
+ self.optimize_loop(ops, 'Not', expected)
+
+ ops = """
+ [i0]
+ i1 = int_add_ovf(0, i0)
+ guard_no_overflow() []
+ jump(i1)
+ """
+ expected = """
+ [i0]
+ jump(i0)
+ """
+ self.optimize_loop(ops, 'Not', expected)
+
# ----------
def make_fail_descr(self):
@@ -2324,8 +2363,8 @@
from pypy.jit.metainterp.test.test_resume import ResumeDataFakeReader
from pypy.jit.metainterp.test.test_resume import MyMetaInterp
guard_op, = [op for op in self.loop.operations if op.is_guard()]
- fail_args = guard_op.fail_args
- fdescr = guard_op.descr
+ fail_args = guard_op.getfailargs()
+ fdescr = guard_op.getdescr()
assert fdescr.guard_opnum == guard_opnum
reader = ResumeDataFakeReader(fdescr, fail_args,
MyMetaInterp(self.cpu))
@@ -3119,7 +3158,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_noguard(self):
ops = """
[i0]
@@ -3134,7 +3173,7 @@
jump(i2)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_noopt(self):
ops = """
[i0]
@@ -3153,7 +3192,7 @@
jump(4)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_rev(self):
ops = """
[i0]
@@ -3170,7 +3209,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_tripple(self):
ops = """
[i0]
@@ -3189,7 +3228,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add(self):
ops = """
[i0]
@@ -3204,11 +3243,11 @@
[i0]
i1 = int_lt(i0, 4)
guard_true(i1) []
- i2 = int_add(i0, 10)
+ i2 = int_add(i0, 10)
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add_before(self):
ops = """
[i0]
@@ -3227,7 +3266,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add_ovf(self):
ops = """
[i0]
@@ -3243,11 +3282,11 @@
[i0]
i1 = int_lt(i0, 4)
guard_true(i1) []
- i2 = int_add(i0, 10)
+ i2 = int_add(i0, 10)
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_add_ovf_before(self):
ops = """
[i0]
@@ -3268,7 +3307,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_sub(self):
ops = """
[i0]
@@ -3283,11 +3322,11 @@
[i0]
i1 = int_lt(i0, 4)
guard_true(i1) []
- i2 = int_sub(i0, 10)
+ i2 = int_sub(i0, 10)
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_lt_sub_before(self):
ops = """
[i0]
@@ -3306,7 +3345,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_ltle(self):
ops = """
[i0]
@@ -3357,7 +3396,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_gtge(self):
ops = """
[i0]
@@ -3374,7 +3413,7 @@
jump(i0)
"""
self.optimize_loop(ops, 'Not', expected)
-
+
def test_bound_gegt(self):
ops = """
[i0]
@@ -3414,6 +3453,42 @@
"""
self.optimize_loop(ops, 'Not', expected)
+ def test_bound_arraylen(self):
+ ops = """
+ [i0, p0]
+ p1 = new_array(i0, descr=arraydescr)
+ i1 = arraylen_gc(p1)
+ i2 = int_gt(i1, -1)
+ guard_true(i2) []
+ setarrayitem_gc(p0, 0, p1)
+ jump(i0, p0)
+ """
+ # The dead arraylen_gc will be eliminated by the backend.
+ expected = """
+ [i0, p0]
+ p1 = new_array(i0, descr=arraydescr)
+ i1 = arraylen_gc(p1)
+ setarrayitem_gc(p0, 0, p1)
+ jump(i0, p0)
+ """
+ self.optimize_loop(ops, 'Not, Not', expected)
+
+ def test_bound_strlen(self):
+ ops = """
+ [p0]
+ i0 = strlen(p0)
+ i1 = int_ge(i0, 0)
+ guard_true(i1) []
+ jump(p0)
+ """
+ # The dead strlen will be eliminated by the backend.
+ expected = """
+ [p0]
+ i0 = strlen(p0)
+ jump(p0)
+ """
+ self.optimize_loop(ops, 'Not', expected)
+
def test_addsub_const(self):
ops = """
[i0]
@@ -3558,7 +3633,7 @@
i14 = int_gt(i1, 10)
guard_true(i14) []
i15 = int_ge(i1, 20)
- guard_true(i15) []
+ guard_true(i15) []
jump(i1)
"""
expected = """
@@ -3571,7 +3646,7 @@
i14 = int_gt(i1, 10)
guard_true(i14) []
i15 = int_ge(i1, 20)
- guard_true(i15) []
+ guard_true(i15) []
jump(i1)
"""
self.optimize_loop(ops, 'Not', expected)
@@ -3838,6 +3913,7 @@
self.optimize_loop(ops, 'Not, Not, Not', expected)
+
##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin):
## def test_instanceof(self):
@@ -3852,7 +3928,7 @@
## jump(1)
## """
## self.optimize_loop(ops, 'Not', expected)
-
+
## def test_instanceof_guard_class(self):
## ops = """
## [i0, p0]
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_recursive.py Thu Sep 23 16:53:32 2010
@@ -319,8 +319,8 @@
for loop in get_stats().loops:
assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode
for op in loop.operations:
- if op.is_guard() and hasattr(op.descr, '_debug_suboperations'):
- assert len(op.descr._debug_suboperations) <= length + 5
+ if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'):
+ assert len(op.getdescr()._debug_suboperations) <= length + 5
def test_inline_trace_limit(self):
myjitdriver = JitDriver(greens=[], reds=['n'])
Modified: pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py (original)
+++ pypy/branch/jitffi/pypy/jit/metainterp/test/test_virtualref.py Thu Sep 23 16:53:32 2010
@@ -71,11 +71,11 @@
#
ops = self.metainterp.staticdata.stats.loops[0].operations
[guard_op] = [op for op in ops
- if op.opnum == rop.GUARD_NOT_FORCED]
- bxs1 = [box for box in guard_op.fail_args
+ if op.getopnum() == rop.GUARD_NOT_FORCED]
+ bxs1 = [box for box in guard_op.getfailargs()
if str(box._getrepr_()).endswith('.X')]
assert len(bxs1) == 1
- bxs2 = [box for box in guard_op.fail_args
+ bxs2 = [box for box in guard_op.getfailargs()
if str(box._getrepr_()).endswith('JitVirtualRef')]
assert len(bxs2) == 1
JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF
@@ -84,11 +84,11 @@
# try reloading from blackhole.py's point of view
from pypy.jit.metainterp.resume import ResumeDataDirectReader
cpu = self.metainterp.cpu
- cpu.get_latest_value_count = lambda : len(guard_op.fail_args)
- cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint()
- cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base()
+ cpu.get_latest_value_count = lambda : len(guard_op.getfailargs())
+ cpu.get_latest_value_int = lambda i:guard_op.getfailargs()[i].getint()
+ cpu.get_latest_value_ref = lambda i:guard_op.getfailargs()[i].getref_base()
cpu.clear_latest_values = lambda count: None
- resumereader = ResumeDataDirectReader(cpu, guard_op.descr)
+ resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr())
vrefinfo = self.metainterp.staticdata.virtualref_info
lst = []
vrefinfo.continue_tracing = lambda vref, virtual: \
@@ -100,7 +100,7 @@
lst[0][0]) # assert correct type
#
# try reloading from pyjitpl's point of view
- self.metainterp.rebuild_state_after_failure(guard_op.descr)
+ self.metainterp.rebuild_state_after_failure(guard_op.getdescr())
assert len(self.metainterp.framestack) == 1
assert len(self.metainterp.virtualref_boxes) == 2
assert self.metainterp.virtualref_boxes[0].value == bxs1[0].value
Modified: pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py (original)
+++ pypy/branch/jitffi/pypy/jit/tl/pypyjit_demo.py Thu Sep 23 16:53:32 2010
@@ -39,16 +39,24 @@
try:
from array import array
+
+ def coords(w,h):
+ y = 0
+ while y < h:
+ x = 0
+ while x < w:
+ yield x,y
+ x += 1
+ y += 1
+
def f(img):
- i=0
sa=0
- while i < img.__len__():
- sa+=img[i]
- i+=1
+ for x, y in coords(4,4):
+ sa += x * y
return sa
- img=array('h',(1,2,3,4))
- print f(img)
+ #img=array('h',(1,2,3,4))
+ print f(3)
except Exception, e:
print "Exception: ", type(e)
print e
Modified: pypy/branch/jitffi/pypy/jit/tool/showstats.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/tool/showstats.py (original)
+++ pypy/branch/jitffi/pypy/jit/tool/showstats.py Thu Sep 23 16:53:32 2010
@@ -17,7 +17,7 @@
num_dmp = 0
num_guards = 0
for op in loop.operations:
- if op.opnum == rop.DEBUG_MERGE_POINT:
+ if op.getopnum() == rop.DEBUG_MERGE_POINT:
num_dmp += 1
else:
num_ops += 1
Modified: pypy/branch/jitffi/pypy/jit/tool/traceviewer.py
==============================================================================
--- pypy/branch/jitffi/pypy/jit/tool/traceviewer.py (original)
+++ pypy/branch/jitffi/pypy/jit/tool/traceviewer.py Thu Sep 23 16:53:32 2010
@@ -253,9 +253,10 @@
def main(loopfile, use_threshold, view=True):
countname = py.path.local(loopfile + '.count')
if countname.check():
- counts = [re.split(r' +', line, 1) for line in countname.readlines()]
- counts = Counts([(k.strip("\n"), int(v.strip('\n')))
- for v, k in counts])
+ counts = [re.split('(<code)|(<loop)', line, maxsplit=1)
+ for line in countname.readlines()]
+ counts = Counts([('<code' + k.strip("\n"), int(v.strip('\n').strip()))
+ for v, _, _, k in counts])
l = list(sorted(counts.values()))
if len(l) > 20 and use_threshold:
counts.threshold = l[-20]
Modified: pypy/branch/jitffi/pypy/module/__builtin__/functional.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/__builtin__/functional.py (original)
+++ pypy/branch/jitffi/pypy/module/__builtin__/functional.py Thu Sep 23 16:53:32 2010
@@ -13,6 +13,7 @@
from pypy.rlib.objectmodel import specialize
from pypy.module.__builtin__.app_functional import range as app_range
from inspect import getsource, getfile
+from pypy.rlib.jit import unroll_safe
"""
Implementation of the common integer case of range. Instead of handling
@@ -96,12 +97,32 @@
return W_RangeListObject(start, step, howmany)
+ at unroll_safe
@specialize.arg(2)
def min_max(space, args, implementation_of):
if implementation_of == "max":
compare = space.gt
else:
compare = space.lt
+
+ args_w = args.arguments_w
+ if len(args_w) == 2 and not args.keywords:
+ # Unrollable case
+ w_max_item = None
+ for w_item in args_w:
+ if w_max_item is None or \
+ space.is_true(compare(w_item, w_max_item)):
+ w_max_item = w_item
+ return w_max_item
+ else:
+ return min_max_loop(space, args, implementation_of)
+
+ at specialize.arg(2)
+def min_max_loop(space, args, implementation_of):
+ if implementation_of == "max":
+ compare = space.gt
+ else:
+ compare = space.lt
args_w = args.arguments_w
if len(args_w) > 1:
w_sequence = space.newtuple(args_w)
Modified: pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py (original)
+++ pypy/branch/jitffi/pypy/module/__builtin__/test/test_minmax.py Thu Sep 23 16:53:32 2010
@@ -51,3 +51,37 @@
def test_max_empty(self):
raises(ValueError, max, [])
+
+class AppTestMaxTuple:
+
+ def test_max_usual(self):
+ assert max((1, 2, 3)) == 3
+
+ def test_max_floats(self):
+ assert max((0.1, 2.7, 14.7)) == 14.7
+
+ def test_max_chars(self):
+ assert max(('a', 'b', 'c')) == 'c'
+
+ def test_max_strings(self):
+ assert max(('aaa', 'bbb', 'c')) == 'c'
+
+ def test_max_mixed(self):
+ assert max(('1', 2, 3, 'aa')) == 'aa'
+
+class AppTestMinList:
+
+ def test_min_usual(self):
+ assert min([1, 2, 3]) == 1
+
+ def test_min_floats(self):
+ assert min([0.1, 2.7, 14.7]) == 0.1
+
+ def test_min_chars(self):
+ assert min(['a', 'b', 'c']) == 'a'
+
+ def test_min_strings(self):
+ assert min(['aaa', 'bbb', 'c']) == 'aaa'
+
+ def test_min_mixed(self):
+ assert min(['1', 2, 3, 'aa']) == 2
Modified: pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py (original)
+++ pypy/branch/jitffi/pypy/module/_ssl/test/test_ssl.py Thu Sep 23 16:53:32 2010
@@ -60,8 +60,8 @@
cls.space = space
def setup_method(self, method):
- # https://connect.sigen-ca.si/index-en.html
- ADDR = "connect.sigen-ca.si", 443
+ # https://codespeak.net/
+ ADDR = "codespeak.net", 443
self.w_s = self.space.appexec([self.space.wrap(ADDR)], """(ADDR):
import socket
Modified: pypy/branch/jitffi/pypy/module/array/interp_array.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/array/interp_array.py (original)
+++ pypy/branch/jitffi/pypy/module/array/interp_array.py Thu Sep 23 16:53:32 2010
@@ -528,12 +528,15 @@
def array_tostring__Array(space, self):
cbuf = self.charbuf()
- s = ''
- i = 0
- while i < self.len * mytype.bytes:
- s += cbuf[i]
- i += 1
+ s = ''.join([cbuf[i] for i in xrange(self.len * mytype.bytes)])
return self.space.wrap(s)
+##
+## s = ''
+## i = 0
+## while i < self.len * mytype.bytes:
+## s += cbuf[i]
+## i += 1
+## return self.space.wrap(s)
def array_fromfile__Array_ANY_ANY(space, self, w_f, w_n):
if not isinstance(w_f, W_File):
Modified: pypy/branch/jitffi/pypy/module/gc/__init__.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/gc/__init__.py (original)
+++ pypy/branch/jitffi/pypy/module/gc/__init__.py Thu Sep 23 16:53:32 2010
@@ -10,13 +10,25 @@
'collect': 'interp_gc.collect',
'enable_finalizers': 'interp_gc.enable_finalizers',
'disable_finalizers': 'interp_gc.disable_finalizers',
- 'estimate_heap_size': 'interp_gc.estimate_heap_size',
'garbage' : 'space.newlist([])',
#'dump_heap_stats': 'interp_gc.dump_heap_stats',
}
def __init__(self, space, w_name):
- ts = space.config.translation.type_system
- if ts == 'ootype':
- del self.interpleveldefs['dump_heap_stats']
+ if (not space.config.translating or
+ space.config.translation.gctransformer == "framework"):
+ self.appleveldefs.update({
+ 'dump_rpy_heap': 'app_referents.dump_rpy_heap',
+ })
+ self.interpleveldefs.update({
+ 'get_rpy_roots': 'referents.get_rpy_roots',
+ 'get_rpy_referents': 'referents.get_rpy_referents',
+ 'get_rpy_memory_usage': 'referents.get_rpy_memory_usage',
+ 'get_rpy_type_index': 'referents.get_rpy_type_index',
+ 'get_objects': 'referents.get_objects',
+ 'get_referents': 'referents.get_referents',
+ 'get_referrers': 'referents.get_referrers',
+ '_dump_rpy_heap': 'referents._dump_rpy_heap',
+ 'GcRef': 'referents.W_GcRef',
+ })
MixedModule.__init__(self, space, w_name)
Modified: pypy/branch/jitffi/pypy/module/gc/interp_gc.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/gc/interp_gc.py (original)
+++ pypy/branch/jitffi/pypy/module/gc/interp_gc.py Thu Sep 23 16:53:32 2010
@@ -24,36 +24,6 @@
# ____________________________________________________________
-import sys
-platform = sys.platform
-
-def estimate_heap_size(space):
- # XXX should be done with the help of the GCs
- if platform == "linux2":
- import os
- pid = os.getpid()
- try:
- fd = os.open("/proc/" + str(pid) + "/status", os.O_RDONLY, 0777)
- except OSError:
- pass
- else:
- try:
- content = os.read(fd, 1000000)
- finally:
- os.close(fd)
- lines = content.split("\n")
- for line in lines:
- if line.startswith("VmSize:"):
- start = line.find(" ") # try to ignore tabs
- assert start > 0
- stop = len(line) - 3
- assert stop > 0
- result = int(line[start:stop].strip(" ")) * 1024
- return space.wrap(result)
- raise OperationError(space.w_RuntimeError,
- space.wrap("can't estimate the heap size"))
-estimate_heap_size.unwrap_spec = [ObjSpace]
-
def dump_heap_stats(space, filename):
tb = rgc._heap_stats()
if not tb:
Modified: pypy/branch/jitffi/pypy/module/gc/test/test_gc.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/gc/test/test_gc.py (original)
+++ pypy/branch/jitffi/pypy/module/gc/test/test_gc.py Thu Sep 23 16:53:32 2010
@@ -59,13 +59,6 @@
raises(ValueError, gc.enable_finalizers)
runtest(True)
- def test_estimate_heap_size(self):
- import sys, gc
- if sys.platform == "linux2":
- assert gc.estimate_heap_size() > 1024
- else:
- raises(RuntimeError, gc.estimate_heap_size)
-
def test_enable(self):
import gc
assert gc.isenabled()
Modified: pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py (original)
+++ pypy/branch/jitffi/pypy/module/pypyjit/test/test_pypy_c.py Thu Sep 23 16:53:32 2010
@@ -762,6 +762,8 @@
else:
n = 215
+ print
+ print 'Test:', e1, e2, n, res
self.run_source('''
class tst:
pass
@@ -779,6 +781,25 @@
return sa
'''%(e1, e2), n, ([], res))
+ def test_boolrewrite_ptr_single(self):
+ self.run_source('''
+ class tst:
+ pass
+ def main():
+ a = tst()
+ b = tst()
+ c = tst()
+ sa = 0
+ for i in range(1000):
+ if a == b: sa += 1
+ else: sa += 2
+ if a != b: sa += 10000
+ else: sa += 20000
+ if i > 750: a = b
+ return sa
+ ''', 215, ([], 12481752))
+ assert False
+
def test_array_sum(self):
for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)):
res = 19352859
@@ -1059,7 +1080,38 @@
''', 170, ([], 1239690.0))
-
+ def test_min_max(self):
+ self.run_source('''
+ def main():
+ i=0
+ sa=0
+ while i < 2000:
+ sa+=min(max(i, 3000), 4000)
+ i+=1
+ return sa
+ ''', 51, ([], 2000*3000))
+
+ def test_silly_max(self):
+ self.run_source('''
+ def main():
+ i=2
+ sa=0
+ while i < 2000:
+ sa+=max(*range(i))
+ i+=1
+ return sa
+ ''', 125, ([], 1997001))
+
+ def test_iter_max(self):
+ self.run_source('''
+ def main():
+ i=2
+ sa=0
+ while i < 2000:
+ sa+=max(range(i))
+ i+=1
+ return sa
+ ''', 88, ([], 1997001))
# test_circular
Modified: pypy/branch/jitffi/pypy/module/select/interp_select.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/select/interp_select.py (original)
+++ pypy/branch/jitffi/pypy/module/select/interp_select.py Thu Sep 23 16:53:32 2010
@@ -54,14 +54,11 @@
if space.is_w(w_timeout, space.w_None):
timeout = -1
else:
- # rationale for computing directly integer, instead
- # of float + math.cell is that
- # we have for free overflow check and noone really
- # cares (since CPython does not try too hard to have
- # a ceiling of value)
+ # we want to be compatible with CPython and also accept anything
+ # that can be cast to an integer
try:
# compute the integer
- timeout = space.int_w(w_timeout)
+ timeout = space.int_w(space.int(w_timeout))
except (OverflowError, ValueError):
raise OperationError(space.w_ValueError,
space.wrap("math range error"))
Modified: pypy/branch/jitffi/pypy/module/select/test/test_select.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/select/test/test_select.py (original)
+++ pypy/branch/jitffi/pypy/module/select/test/test_select.py Thu Sep 23 16:53:32 2010
@@ -210,6 +210,14 @@
assert len(res[2]) == 0
assert res[0][0] == res[1][0]
+ def test_poll(self):
+ import select
+ class A(object):
+ def __int__(self):
+ return 3
+
+ select.poll().poll(A()) # assert did not crash
+
class AppTestSelectWithPipes(_AppTestSelect):
"Use a pipe to get pairs of file descriptors"
def setup_class(cls):
@@ -275,4 +283,3 @@
s1, addr2 = cls.sock.accept()
return s1, s2
-
Modified: pypy/branch/jitffi/pypy/module/sys/version.py
==============================================================================
--- pypy/branch/jitffi/pypy/module/sys/version.py (original)
+++ pypy/branch/jitffi/pypy/module/sys/version.py Thu Sep 23 16:53:32 2010
@@ -4,10 +4,11 @@
import os
-CPYTHON_VERSION = (2, 5, 2, "beta", 42)
-CPYTHON_API_VERSION = 1012
+#XXX # the release serial 42 is not in range(16)
+CPYTHON_VERSION = (2, 5, 2, "beta", 42) #XXX # sync patchlevel.h
+CPYTHON_API_VERSION = 1012 #XXX # sync with include/modsupport.h
-PYPY_VERSION = (1, 3, 0, "beta", '?')
+PYPY_VERSION = (1, 3, 0, "beta", '?') #XXX # sync patchlevel.h
# the last item is replaced by the svn revision ^^^
TRIM_URL_UP_TO = 'svn/pypy/'
Modified: pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py
==============================================================================
--- pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py (original)
+++ pypy/branch/jitffi/pypy/rlib/_rsocket_rffi.py Thu Sep 23 16:53:32 2010
@@ -32,11 +32,13 @@
'arpa/inet.h',
'stdint.h',
'errno.h',
- 'netpacket/packet.h',
- 'sys/ioctl.h',
- 'net/if.h',
)
- cond_includes = [('AF_NETLINK', 'linux/netlink.h')]
+
+ cond_includes = [('AF_NETLINK', 'linux/netlink.h'),
+ ('AF_PACKET', 'netpacket/packet.h'),
+ ('AF_PACKET', 'sys/ioctl.h'),
+ ('AF_PACKET', 'net/if.h')]
+
libraries = ()
calling_conv = 'c'
HEADER = ''.join(['#include <%s>\n' % filename for filename in includes])
Modified: pypy/branch/jitffi/pypy/rlib/rarithmetic.py
==============================================================================
--- pypy/branch/jitffi/pypy/rlib/rarithmetic.py (original)
+++ pypy/branch/jitffi/pypy/rlib/rarithmetic.py Thu Sep 23 16:53:32 2010
@@ -50,6 +50,11 @@
LONG_MASK = _Ltest*2-1
LONG_TEST = _Ltest
+LONG_BIT_SHIFT = 0
+while (1 << LONG_BIT_SHIFT) != LONG_BIT:
+ LONG_BIT_SHIFT += 1
+ assert LONG_BIT_SHIFT < 99, "LONG_BIT_SHIFT value not found?"
+
INFINITY = 1e200 * 1e200
NAN = INFINITY / INFINITY
Modified: pypy/branch/jitffi/pypy/rlib/rgc.py
==============================================================================
--- pypy/branch/jitffi/pypy/rlib/rgc.py (original)
+++ pypy/branch/jitffi/pypy/rlib/rgc.py Thu Sep 23 16:53:32 2010
@@ -1,6 +1,7 @@
-import gc
+import gc, types
from pypy.rpython.extregistry import ExtRegistryEntry
from pypy.rlib.objectmodel import we_are_translated
+from pypy.rpython.lltypesystem import lltype, llmemory
# ____________________________________________________________
# General GC features
@@ -93,7 +94,7 @@
def specialize_call(self, hop):
from pypy.rpython.error import TyperError
- from pypy.rpython.lltypesystem import lltype, llmemory, rtuple
+ from pypy.rpython.lltypesystem import rtuple
from pypy.annotation import model as annmodel
from pypy.rpython.memory.gc.marksweep import X_CLONE, X_CLONE_PTR
@@ -150,7 +151,6 @@
return annmodel.s_None
def specialize_call(self, hop):
- from pypy.rpython.lltypesystem import lltype
hop.exception_cannot_occur()
args_v = []
if len(hop.args_s) == 1:
@@ -165,7 +165,6 @@
return annmodel.s_None
def specialize_call(self, hop):
- from pypy.rpython.lltypesystem import lltype
[v_nbytes] = hop.inputargs(lltype.Signed)
hop.exception_cannot_occur()
return hop.genop('gc_set_max_heap_size', [v_nbytes],
@@ -182,7 +181,6 @@
return annmodel.SomeBool()
def specialize_call(self, hop):
- from pypy.rpython.lltypesystem import lltype
hop.exception_cannot_occur()
return hop.genop('gc_can_move', hop.args_v, resulttype=hop.r_result)
@@ -195,11 +193,9 @@
def compute_result_annotation(self):
from pypy.annotation import model as annmodel
from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP
- from pypy.rpython.lltypesystem import lltype
return annmodel.SomePtr(lltype.Ptr(ARRAY_TYPEID_MAP))
def specialize_call(self, hop):
- from pypy.rpython.lltypesystem import lltype
from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP
hop.exception_is_here()
return hop.genop('gc_heap_stats', [], resulttype=hop.r_result)
@@ -209,7 +205,6 @@
When running directly, will pretend that gc is always
moving (might be configurable in a future)
"""
- from pypy.rpython.lltypesystem import lltype
return lltype.nullptr(TP)
class MallocNonMovingEntry(ExtRegistryEntry):
@@ -221,7 +216,6 @@
return malloc(s_TP, s_n, s_zero=s_zero)
def specialize_call(self, hop, i_zero=None):
- from pypy.rpython.lltypesystem import lltype
# XXX assume flavor and zero to be None by now
assert hop.args_s[0].is_constant()
vlist = [hop.inputarg(lltype.Void, arg=0)]
@@ -243,7 +237,6 @@
def ll_arraycopy(source, dest, source_start, dest_start, length):
from pypy.rpython.lltypesystem.lloperation import llop
- from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rlib.objectmodel import keepalive_until_here
# supports non-overlapping copies only
@@ -279,7 +272,6 @@
def ll_shrink_array(p, smallerlength):
from pypy.rpython.lltypesystem.lloperation import llop
- from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rlib.objectmodel import keepalive_until_here
if llop.shrink_array(lltype.Bool, p, smallerlength):
@@ -313,3 +305,221 @@
func._dont_inline_ = True
func._gc_no_collect_ = True
return func
+
+# ____________________________________________________________
+
+def get_rpy_roots():
+ "NOT_RPYTHON"
+ # Return the 'roots' from the GC.
+ # This stub is not usable on top of CPython.
+ # The gc typically returns a list that ends with a few NULL_GCREFs.
+ raise NotImplementedError
+
+def get_rpy_referents(gcref):
+ "NOT_RPYTHON"
+ x = gcref._x
+ if isinstance(x, list):
+ d = x
+ elif isinstance(x, dict):
+ d = x.keys() + x.values()
+ else:
+ d = []
+ if hasattr(x, '__dict__'):
+ d = x.__dict__.values()
+ if hasattr(type(x), '__slots__'):
+ for slot in type(x).__slots__:
+ try:
+ d.append(getattr(x, slot))
+ except AttributeError:
+ pass
+ # discard objects that are too random or that have _freeze_=True
+ return [_GcRef(x) for x in d if _keep_object(x)]
+
+def _keep_object(x):
+ if isinstance(x, type) or type(x) is types.ClassType:
+ return False # don't keep any type
+ if isinstance(x, (list, dict, str)):
+ return True # keep lists and dicts and strings
+ try:
+ return not x._freeze_() # don't keep any frozen object
+ except AttributeError:
+ return type(x).__module__ != '__builtin__' # keep non-builtins
+ except Exception:
+ return False # don't keep objects whose _freeze_() method explodes
+
+def get_rpy_memory_usage(gcref):
+ "NOT_RPYTHON"
+ # approximate implementation using CPython's type info
+ Class = type(gcref._x)
+ size = Class.__basicsize__
+ if Class.__itemsize__ > 0:
+ size += Class.__itemsize__ * len(gcref._x)
+ return size
+
+def get_rpy_type_index(gcref):
+ "NOT_RPYTHON"
+ from pypy.rlib.rarithmetic import intmask
+ Class = gcref._x.__class__
+ return intmask(id(Class))
+
+def cast_gcref_to_int(gcref):
+ if we_are_translated():
+ return lltype.cast_ptr_to_int(gcref)
+ else:
+ return id(gcref._x)
+
+def dump_rpy_heap(fd):
+ "NOT_RPYTHON"
+ raise NotImplementedError
+
+NULL_GCREF = lltype.nullptr(llmemory.GCREF.TO)
+
+class _GcRef(object):
+ # implementation-specific: there should not be any after translation
+ __slots__ = ['_x']
+ def __init__(self, x):
+ self._x = x
+ def __hash__(self):
+ return object.__hash__(self._x)
+ def __eq__(self, other):
+ if isinstance(other, lltype._ptr):
+ assert other == NULL_GCREF, (
+ "comparing a _GcRef with a non-NULL lltype ptr")
+ return False
+ assert isinstance(other, _GcRef)
+ return self._x is other._x
+ def __ne__(self, other):
+ return not self.__eq__(other)
+ def __repr__(self):
+ return "_GcRef(%r)" % (self._x, )
+ def _freeze_(self):
+ raise Exception("instances of rlib.rgc._GcRef cannot be translated")
+
+def cast_instance_to_gcref(x):
+ # Before translation, casts an RPython instance into a _GcRef.
+ # After translation, it is a variant of cast_object_to_ptr(GCREF).
+ if we_are_translated():
+ from pypy.rpython import annlowlevel
+ x = annlowlevel.cast_instance_to_base_ptr(x)
+ return lltype.cast_opaque_ptr(llmemory.GCREF, x)
+ else:
+ return _GcRef(x)
+cast_instance_to_gcref._annspecialcase_ = 'specialize:argtype(0)'
+
+def try_cast_gcref_to_instance(Class, gcref):
+ # Before translation, unwraps the RPython instance contained in a _GcRef.
+ # After translation, it is a type-check performed by the GC.
+ if we_are_translated():
+ from pypy.rpython.annlowlevel import base_ptr_lltype
+ from pypy.rpython.annlowlevel import cast_base_ptr_to_instance
+ from pypy.rpython.lltypesystem import rclass
+ if _is_rpy_instance(gcref):
+ objptr = lltype.cast_opaque_ptr(base_ptr_lltype(), gcref)
+ if objptr.typeptr: # may be NULL, e.g. in rdict's dummykeyobj
+ clsptr = _get_llcls_from_cls(Class)
+ if rclass.ll_isinstance(objptr, clsptr):
+ return cast_base_ptr_to_instance(Class, objptr)
+ return None
+ else:
+ if isinstance(gcref._x, Class):
+ return gcref._x
+ return None
+try_cast_gcref_to_instance._annspecialcase_ = 'specialize:arg(0)'
+
+# ------------------- implementation -------------------
+
+_cache_s_list_of_gcrefs = None
+
+def s_list_of_gcrefs():
+ global _cache_s_list_of_gcrefs
+ if _cache_s_list_of_gcrefs is None:
+ from pypy.annotation import model as annmodel
+ from pypy.annotation.listdef import ListDef
+ s_gcref = annmodel.SomePtr(llmemory.GCREF)
+ _cache_s_list_of_gcrefs = annmodel.SomeList(
+ ListDef(None, s_gcref, mutated=True, resized=False))
+ return _cache_s_list_of_gcrefs
+
+class Entry(ExtRegistryEntry):
+ _about_ = get_rpy_roots
+ def compute_result_annotation(self):
+ return s_list_of_gcrefs()
+ def specialize_call(self, hop):
+ return hop.genop('gc_get_rpy_roots', [], resulttype = hop.r_result)
+
+class Entry(ExtRegistryEntry):
+ _about_ = get_rpy_referents
+ def compute_result_annotation(self, s_gcref):
+ from pypy.annotation import model as annmodel
+ assert annmodel.SomePtr(llmemory.GCREF).contains(s_gcref)
+ return s_list_of_gcrefs()
+ def specialize_call(self, hop):
+ vlist = hop.inputargs(hop.args_r[0])
+ return hop.genop('gc_get_rpy_referents', vlist,
+ resulttype = hop.r_result)
+
+class Entry(ExtRegistryEntry):
+ _about_ = get_rpy_memory_usage
+ def compute_result_annotation(self, s_gcref):
+ from pypy.annotation import model as annmodel
+ return annmodel.SomeInteger()
+ def specialize_call(self, hop):
+ vlist = hop.inputargs(hop.args_r[0])
+ return hop.genop('gc_get_rpy_memory_usage', vlist,
+ resulttype = hop.r_result)
+
+class Entry(ExtRegistryEntry):
+ _about_ = get_rpy_type_index
+ def compute_result_annotation(self, s_gcref):
+ from pypy.annotation import model as annmodel
+ return annmodel.SomeInteger()
+ def specialize_call(self, hop):
+ vlist = hop.inputargs(hop.args_r[0])
+ return hop.genop('gc_get_rpy_type_index', vlist,
+ resulttype = hop.r_result)
+
+def _is_rpy_instance(gcref):
+ "NOT_RPYTHON"
+ raise NotImplementedError
+
+def _get_llcls_from_cls(Class):
+ "NOT_RPYTHON"
+ raise NotImplementedError
+
+class Entry(ExtRegistryEntry):
+ _about_ = _is_rpy_instance
+ def compute_result_annotation(self, s_gcref):
+ from pypy.annotation import model as annmodel
+ return annmodel.SomeBool()
+ def specialize_call(self, hop):
+ vlist = hop.inputargs(hop.args_r[0])
+ return hop.genop('gc_is_rpy_instance', vlist,
+ resulttype = hop.r_result)
+
+class Entry(ExtRegistryEntry):
+ _about_ = _get_llcls_from_cls
+ def compute_result_annotation(self, s_Class):
+ from pypy.annotation import model as annmodel
+ from pypy.rpython.lltypesystem import rclass
+ assert s_Class.is_constant()
+ return annmodel.SomePtr(rclass.CLASSTYPE)
+ def specialize_call(self, hop):
+ from pypy.rpython.rclass import getclassrepr
+ from pypy.objspace.flow.model import Constant
+ from pypy.rpython.lltypesystem import rclass
+ Class = hop.args_s[0].const
+ classdef = hop.rtyper.annotator.bookkeeper.getuniqueclassdef(Class)
+ classrepr = getclassrepr(hop.rtyper, classdef)
+ vtable = classrepr.getvtable()
+ assert lltype.typeOf(vtable) == rclass.CLASSTYPE
+ return Constant(vtable, concretetype=rclass.CLASSTYPE)
+
+class Entry(ExtRegistryEntry):
+ _about_ = dump_rpy_heap
+ def compute_result_annotation(self, s_fd):
+ from pypy.annotation.model import s_Bool
+ return s_Bool
+ def specialize_call(self, hop):
+ vlist = hop.inputargs(lltype.Signed)
+ hop.exception_is_here()
+ return hop.genop('gc_dump_rpy_heap', vlist, resulttype = hop.r_result)
Modified: pypy/branch/jitffi/pypy/rlib/rstring.py
==============================================================================
--- pypy/branch/jitffi/pypy/rlib/rstring.py (original)
+++ pypy/branch/jitffi/pypy/rlib/rstring.py Thu Sep 23 16:53:32 2010
@@ -46,7 +46,9 @@
# -------------- public API ---------------------------------
-INIT_SIZE = 100 # XXX tweak
+# the following number is the maximum size of an RPython unicode
+# string that goes into the nursery of the minimark GC.
+INIT_SIZE = 56
class AbstractStringBuilder(object):
def __init__(self, init_size=INIT_SIZE):
Modified: pypy/branch/jitffi/pypy/rlib/rwin32.py
==============================================================================
--- pypy/branch/jitffi/pypy/rlib/rwin32.py (original)
+++ pypy/branch/jitffi/pypy/rlib/rwin32.py Thu Sep 23 16:53:32 2010
@@ -82,6 +82,8 @@
if WIN32:
HANDLE = rffi.COpaquePtr(typedef='HANDLE')
+ assert rffi.cast(HANDLE, -1) == rffi.cast(HANDLE, -1)
+
LPHANDLE = rffi.CArrayPtr(HANDLE)
HMODULE = HANDLE
NULL_HANDLE = rffi.cast(HANDLE, 0)
Modified: pypy/branch/jitffi/pypy/rlib/test/test_rgc.py
==============================================================================
--- pypy/branch/jitffi/pypy/rlib/test/test_rgc.py (original)
+++ pypy/branch/jitffi/pypy/rlib/test/test_rgc.py Thu Sep 23 16:53:32 2010
@@ -16,7 +16,7 @@
assert len(op.args) == 0
res = interpret(f, [])
-
+
assert res is None
def test_collect_0():
@@ -31,13 +31,13 @@
assert len(ops) == 1
op = ops[0][1]
assert op.opname == 'gc__collect'
- assert len(op.args) == 1
+ assert len(op.args) == 1
assert op.args[0].value == 0
res = interpret(f, [])
-
- assert res is None
-
+
+ assert res is None
+
def test_can_move():
T0 = lltype.GcStruct('T')
T1 = lltype.GcArray(lltype.Float)
@@ -53,9 +53,9 @@
assert len(res) == 2
res = interpret(f, [1])
-
+
assert res == True
-
+
def test_ll_arraycopy_1():
TYPE = lltype.GcArray(lltype.Signed)
a1 = lltype.malloc(TYPE, 10)
@@ -153,3 +153,21 @@
assert len(s2.vars) == 3
for i in range(3):
assert s2.vars[i] == 50 + i
+
+def test_get_referents():
+ class X(object):
+ __slots__ = ['stuff']
+ x1 = X()
+ x1.stuff = X()
+ x2 = X()
+ lst = rgc.get_rpy_referents(rgc.cast_instance_to_gcref(x1))
+ lst2 = [rgc.try_cast_gcref_to_instance(X, x) for x in lst]
+ assert x1.stuff in lst2
+ assert x2 not in lst2
+
+def test_get_memory_usage():
+ class X(object):
+ pass
+ x1 = X()
+ n = rgc.get_rpy_memory_usage(rgc.cast_instance_to_gcref(x1))
+ assert n >= 8 and n <= 64
Modified: pypy/branch/jitffi/pypy/rpython/llinterp.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/llinterp.py (original)
+++ pypy/branch/jitffi/pypy/rpython/llinterp.py Thu Sep 23 16:53:32 2010
@@ -650,7 +650,7 @@
offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1]
inneraddr, FIELD = self.getinneraddr(obj, *offsets)
if FIELD is not lltype.Void:
- self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue)
+ self.heap.setinterior(obj, inneraddr, FIELD, fieldvalue, offsets)
def op_bare_setinteriorfield(self, obj, *fieldnamesval):
offsets, fieldvalue = fieldnamesval[:-1], fieldnamesval[-1]
@@ -916,6 +916,24 @@
def op_gc_get_type_info_group(self):
raise NotImplementedError("gc_get_type_info_group")
+ def op_gc_get_rpy_memory_usage(self):
+ raise NotImplementedError("gc_get_rpy_memory_usage")
+
+ def op_gc_get_rpy_roots(self):
+ raise NotImplementedError("gc_get_rpy_roots")
+
+ def op_gc_get_rpy_referents(self):
+ raise NotImplementedError("gc_get_rpy_referents")
+
+ def op_gc_is_rpy_instance(self):
+ raise NotImplementedError("gc_is_rpy_instance")
+
+ def op_gc_get_rpy_type_index(self):
+ raise NotImplementedError("gc_get_rpy_type_index")
+
+ def op_gc_dump_rpy_heap(self):
+ raise NotImplementedError("gc_dump_rpy_heap")
+
def op_do_malloc_fixedsize_clear(self):
raise NotImplementedError("do_malloc_fixedsize_clear")
@@ -925,6 +943,9 @@
def op_get_write_barrier_failing_case(self):
raise NotImplementedError("get_write_barrier_failing_case")
+ def op_get_write_barrier_from_array_failing_case(self):
+ raise NotImplementedError("get_write_barrier_from_array_failing_case")
+
def op_yield_current_frame_to_caller(self):
raise NotImplementedError("yield_current_frame_to_caller")
Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py (original)
+++ pypy/branch/jitffi/pypy/rpython/lltypesystem/ll2ctypes.py Thu Sep 23 16:53:32 2010
@@ -26,9 +26,6 @@
from pypy.translator.platform import platform
from array import array
-def uaddressof(obj):
- return fixid(ctypes.addressof(obj))
-
_ctypes_cache = {}
_eci_cache = {}
@@ -251,7 +248,7 @@
else:
n = None
cstruct = cls._malloc(n)
- add_storage(container, _struct_mixin, cstruct)
+ add_storage(container, _struct_mixin, ctypes.pointer(cstruct))
for field_name in STRUCT._names:
FIELDTYPE = getattr(STRUCT, field_name)
field_value = getattr(container, field_name)
@@ -264,8 +261,6 @@
if isinstance(FIELDTYPE, lltype.Struct):
csubstruct = getattr(cstruct, field_name)
convert_struct(field_value, csubstruct)
- subcontainer = getattr(container, field_name)
- substorage = subcontainer._storage
elif field_name == STRUCT._arrayfld: # inlined var-sized part
csubarray = getattr(cstruct, field_name)
convert_array(field_value, csubarray)
@@ -292,7 +287,7 @@
# regular case: allocate a new ctypes array of the proper type
cls = get_ctypes_type(ARRAY)
carray = cls._malloc(container.getlength())
- add_storage(container, _array_mixin, carray)
+ add_storage(container, _array_mixin, ctypes.pointer(carray))
if not isinstance(ARRAY.OF, lltype.ContainerType):
# fish that we have enough space
ctypes_array = ctypes.cast(carray.items,
@@ -321,13 +316,15 @@
if isinstance(FIELDTYPE, lltype.ContainerType):
if isinstance(FIELDTYPE, lltype.Struct):
struct_container = getattr(container, field_name)
- struct_storage = getattr(ctypes_storage, field_name)
+ struct_storage = ctypes.pointer(
+ getattr(ctypes_storage.contents, field_name))
struct_use_ctypes_storage(struct_container, struct_storage)
struct_container._setparentstructure(container, field_name)
elif isinstance(FIELDTYPE, lltype.Array):
assert FIELDTYPE._hints.get('nolength', False) == False
arraycontainer = _array_of_known_length(FIELDTYPE)
- arraycontainer._storage = getattr(ctypes_storage, field_name)
+ arraycontainer._storage = ctypes.pointer(
+ getattr(ctypes_storage.contents, field_name))
arraycontainer._setparentstructure(container, field_name)
object.__setattr__(container, field_name, arraycontainer)
else:
@@ -352,6 +349,8 @@
def add_storage(instance, mixin_cls, ctypes_storage):
"""Put ctypes_storage on the instance, changing its __class__ so that it
sees the methods of the given mixin class."""
+ # _storage is a ctypes pointer to a structure
+ # except for Opaque objects which use a c_void_p.
assert not isinstance(instance, _parentable_mixin) # not yet
subcls = get_common_subclass(mixin_cls, instance.__class__)
instance.__class__ = subcls
@@ -365,17 +364,23 @@
__slots__ = ()
def _ctypes_storage_was_allocated(self):
- addr = ctypes.addressof(self._storage)
+ addr = ctypes.cast(self._storage, ctypes.c_void_p).value
if addr in ALLOCATED:
raise Exception("internal ll2ctypes error - "
"double conversion from lltype to ctypes?")
# XXX don't store here immortal structures
ALLOCATED[addr] = self
+ def _addressof_storage(self):
+ "Returns the storage address as an int"
+ if self._storage is None or self._storage is True:
+ raise ValueError("Not a ctypes allocated structure")
+ return ctypes.cast(self._storage, ctypes.c_void_p).value
+
def _free(self):
self._check() # no double-frees
# allow the ctypes object to go away now
- addr = ctypes.addressof(self._storage)
+ addr = ctypes.cast(self._storage, ctypes.c_void_p).value
try:
del ALLOCATED[addr]
except KeyError:
@@ -393,16 +398,16 @@
raise RuntimeError("pointer comparison with a freed structure")
if other._storage is True:
return False # the other container is not ctypes-based
- addressof_other = ctypes.addressof(other._storage)
- # both containers are ctypes-based, compare by address
- return (ctypes.addressof(self._storage) == addressof_other)
+ addressof_other = other._addressof_storage()
+ # both containers are ctypes-based, compare the addresses
+ return self._addressof_storage() == addressof_other
def __ne__(self, other):
return not (self == other)
def __hash__(self):
if self._storage is not None:
- return ctypes.addressof(self._storage)
+ return self._addressof_storage()
else:
return object.__hash__(self)
@@ -411,7 +416,7 @@
return '<freed C object %s>' % (self._TYPE,)
else:
return '<C object %s at 0x%x>' % (self._TYPE,
- uaddressof(self._storage),)
+ fixid(self._addressof_storage()))
def __str__(self):
return repr(self)
@@ -422,7 +427,7 @@
def __getattr__(self, field_name):
T = getattr(self._TYPE, field_name)
- cobj = getattr(self._storage, field_name)
+ cobj = getattr(self._storage.contents, field_name)
return ctypes2lltype(T, cobj)
def __setattr__(self, field_name, value):
@@ -430,17 +435,17 @@
object.__setattr__(self, field_name, value) # '_xxx' attributes
else:
cobj = lltype2ctypes(value)
- setattr(self._storage, field_name, cobj)
+ setattr(self._storage.contents, field_name, cobj)
class _array_mixin(_parentable_mixin):
"""Mixin added to _array containers when they become ctypes-based."""
__slots__ = ()
def getitem(self, index, uninitialized_ok=False):
- return self._storage._getitem(index)
+ return self._storage.contents._getitem(index)
def setitem(self, index, value):
- self._storage._setitem(index, value)
+ self._storage.contents._setitem(index, value)
class _array_of_unknown_length(_parentable_mixin, lltype._parentable):
_kind = "array"
@@ -451,10 +456,10 @@
return 0, sys.maxint
def getitem(self, index, uninitialized_ok=False):
- return self._storage._getitem(index, boundscheck=False)
+ return self._storage.contents._getitem(index, boundscheck=False)
def setitem(self, index, value):
- self._storage._setitem(index, value, boundscheck=False)
+ self._storage.contents._setitem(index, value, boundscheck=False)
def getitems(self):
if self._TYPE.OF != lltype.Char:
@@ -476,7 +481,7 @@
__slots__ = ()
def getlength(self):
- return self._storage.length
+ return self._storage.contents.length
def getbounds(self):
return 0, self.getlength()
@@ -653,17 +658,18 @@
container._ctypes_storage_was_allocated()
if isinstance(T.TO, lltype.OpaqueType):
- return container._storage
+ return container._storage.value
storage = container._storage
- p = ctypes.pointer(storage)
+ p = storage
if index:
p = ctypes.cast(p, ctypes.c_void_p)
p = ctypes.c_void_p(p.value + index)
c_tp = get_ctypes_type(T.TO)
- storage._normalized_ctype = c_tp
- if normalize and hasattr(storage, '_normalized_ctype'):
- p = ctypes.cast(p, ctypes.POINTER(storage._normalized_ctype))
+ storage.contents._normalized_ctype = c_tp
+ if normalize and hasattr(storage.contents, '_normalized_ctype'):
+ normalized_ctype = storage.contents._normalized_ctype
+ p = ctypes.cast(p, ctypes.POINTER(normalized_ctype))
if lltype.typeOf(llobj) == llmemory.GCREF:
p = ctypes.cast(p, ctypes.c_void_p)
return p
@@ -707,13 +713,13 @@
cobjheader = ctypes.cast(cobj,
get_ctypes_type(lltype.Ptr(OBJECT)))
struct_use_ctypes_storage(containerheader,
- cobjheader.contents)
+ cobjheader)
REAL_TYPE = get_rtyper().get_type_for_typeptr(
containerheader.typeptr)
REAL_T = lltype.Ptr(REAL_TYPE)
cobj = ctypes.cast(cobj, get_ctypes_type(REAL_T))
container = lltype._struct(REAL_TYPE)
- struct_use_ctypes_storage(container, cobj.contents)
+ struct_use_ctypes_storage(container, cobj)
if REAL_TYPE != T.TO:
p = container._as_ptr()
container = lltype.cast_pointer(T, p)._as_obj()
@@ -728,10 +734,10 @@
elif isinstance(T.TO, lltype.Array):
if T.TO._hints.get('nolength', False):
container = _array_of_unknown_length(T.TO)
- container._storage = cobj.contents
+ container._storage = type(cobj)(cobj.contents)
else:
container = _array_of_known_length(T.TO)
- container._storage = cobj.contents
+ container._storage = type(cobj)(cobj.contents)
elif isinstance(T.TO, lltype.FuncType):
cobjkey = intmask(ctypes.cast(cobj, ctypes.c_void_p).value)
if cobjkey in _int2obj:
@@ -745,7 +751,8 @@
container = _llgcopaque(cobj)
else:
container = lltype._opaque(T.TO)
- container._storage = ctypes.cast(cobj, ctypes.c_void_p)
+ cbuf = ctypes.cast(cobj, ctypes.c_void_p)
+ add_storage(container, _parentable_mixin, cbuf)
else:
raise NotImplementedError(T)
llobj = lltype._ptr(T, container, solid=True)
Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py (original)
+++ pypy/branch/jitffi/pypy/rpython/lltypesystem/llarena.py Thu Sep 23 16:53:32 2010
@@ -16,8 +16,11 @@
class Arena(object):
object_arena_location = {} # {container: (arena, offset)}
old_object_arena_location = weakref.WeakKeyDictionary()
+ _count_arenas = 0
def __init__(self, nbytes, zero):
+ Arena._count_arenas += 1
+ self._arena_index = Arena._count_arenas
self.nbytes = nbytes
self.usagemap = array.array('c')
self.objectptrs = {} # {offset: ptr-to-container}
@@ -25,6 +28,9 @@
self.freed = False
self.reset(zero)
+ def __repr__(self):
+ return '<Arena #%d [%d bytes]>' % (self._arena_index, self.nbytes)
+
def reset(self, zero, start=0, size=None):
self.check()
if size is None:
@@ -40,7 +46,7 @@
assert offset >= stop, "object overlaps cleared area"
else:
obj = ptr._obj
- del Arena.object_arena_location[obj]
+ _dictdel(Arena.object_arena_location, obj)
del self.objectptrs[offset]
del self.objectsizes[offset]
obj._free()
@@ -63,7 +69,7 @@
raise ArenaError("Address offset is outside the arena")
return fakearenaaddress(self, offset)
- def allocate_object(self, offset, size):
+ def allocate_object(self, offset, size, letter='x'):
self.check()
bytes = llmemory.raw_malloc_usage(size)
if offset + bytes > self.nbytes:
@@ -78,7 +84,7 @@
raise ArenaError("new object overlaps a previous object")
assert offset not in self.objectptrs
addr2 = size._raw_malloc([], zero=zero)
- pattern = 'X' + 'x'*(bytes-1)
+ pattern = letter.upper() + letter*(bytes-1)
self.usagemap[offset:offset+bytes] = array.array('c', pattern)
self.setobject(addr2, offset, bytes)
# common case: 'size' starts with a GCHeaderOffset. In this case
@@ -252,6 +258,16 @@
raise RuntimeError(msg % (obj,))
return arena.getaddr(offset)
+def _dictdel(d, key):
+ # hack
+ try:
+ del d[key]
+ except KeyError:
+ items = d.items()
+ d.clear()
+ d.update(items)
+ del d[key]
+
class RoundedUpForAllocation(llmemory.AddressOffset):
"""A size that is rounded up in order to preserve alignment of objects
following it. For arenas containing heterogenous objects.
@@ -297,6 +313,7 @@
assert isinstance(arena_addr, fakearenaaddress)
assert arena_addr.offset == 0
arena_addr.arena.reset(False)
+ assert not arena_addr.arena.objectptrs
arena_addr.arena.freed = True
def arena_reset(arena_addr, size, zero):
@@ -317,10 +334,13 @@
this is used to know what type of lltype object to allocate."""
from pypy.rpython.memory.lltypelayout import memory_alignment
addr = getfakearenaaddress(addr)
- if check_alignment and (addr.offset & (memory_alignment-1)) != 0:
+ letter = 'x'
+ if llmemory.raw_malloc_usage(size) == 1:
+ letter = 'b' # for Byte-aligned allocations
+ elif check_alignment and (addr.offset & (memory_alignment-1)) != 0:
raise ArenaError("object at offset %d would not be correctly aligned"
% (addr.offset,))
- addr.arena.allocate_object(addr.offset, size)
+ addr.arena.allocate_object(addr.offset, size, letter)
def arena_shrink_obj(addr, newsize):
""" Mark object as shorter than it was
@@ -357,6 +377,11 @@
# This only works with linux's madvise(), which is really not a memory
# usage hint but a real command. It guarantees that after MADV_DONTNEED
# the pages are cleared again.
+
+ # Note that the trick of the general 'posix' section below, i.e.
+ # reading /dev/zero, does not seem to have the correct effect of
+ # lazily-allocating pages on all Linux systems.
+
from pypy.rpython.tool import rffi_platform
from pypy.translator.tool.cbuild import ExternalCompilationInfo
_eci = ExternalCompilationInfo(includes=['sys/mman.h'])
@@ -459,6 +484,7 @@
sandboxsafe=True)
def llimpl_arena_free(arena_addr):
+ # NB. minimark.py assumes that arena_free() is actually just a raw_free().
llmemory.raw_free(arena_addr)
register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free',
llimpl=llimpl_arena_free,
Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py (original)
+++ pypy/branch/jitffi/pypy/rpython/lltypesystem/llheap.py Thu Sep 23 16:53:32 2010
@@ -8,7 +8,8 @@
from pypy.rlib.rgc import collect
from pypy.rlib.rgc import can_move
-def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue):
+def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue,
+ offsets=None):
assert typeOf(newvalue) == INNERTYPE
# xxx access the address object's ref() directly for performance
inneraddr.ref()[0] = newvalue
Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py (original)
+++ pypy/branch/jitffi/pypy/rpython/lltypesystem/llmemory.py Thu Sep 23 16:53:32 2010
@@ -409,6 +409,9 @@
if self.ptr is None:
s = 'NULL'
else:
+ #try:
+ # s = hex(self.ptr._cast_to_int())
+ #except:
s = str(self.ptr)
return '<fakeaddr %s>' % (s,)
Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py (original)
+++ pypy/branch/jitffi/pypy/rpython/lltypesystem/lloperation.py Thu Sep 23 16:53:32 2010
@@ -436,6 +436,7 @@
'do_malloc_fixedsize_clear':LLOp(canraise=(MemoryError,),canunwindgc=True),
'do_malloc_varsize_clear': LLOp(canraise=(MemoryError,),canunwindgc=True),
'get_write_barrier_failing_case': LLOp(sideeffects=False),
+ 'get_write_barrier_from_array_failing_case': LLOp(sideeffects=False),
'gc_get_type_info_group': LLOp(sideeffects=False),
# __________ GC operations __________
@@ -467,6 +468,13 @@
'gc_writebarrier_before_copy': LLOp(canrun=True),
'gc_heap_stats' : LLOp(canunwindgc=True),
+ 'gc_get_rpy_roots' : LLOp(),
+ 'gc_get_rpy_referents': LLOp(),
+ 'gc_get_rpy_memory_usage': LLOp(),
+ 'gc_get_rpy_type_index': LLOp(),
+ 'gc_is_rpy_instance' : LLOp(),
+ 'gc_dump_rpy_heap' : LLOp(),
+
# ------- JIT & GC interaction, only for some GCs ----------
'gc_adr_of_nursery_free' : LLOp(),
Modified: pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py (original)
+++ pypy/branch/jitffi/pypy/rpython/lltypesystem/test/test_ll2ctypes.py Thu Sep 23 16:53:32 2010
@@ -353,6 +353,8 @@
assert tmppath.check(file=1)
assert not ALLOCATED # detects memory leaks in the test
+ assert rffi.cast(FILEP, -1) == rffi.cast(FILEP, -1)
+
def test_simple_cast(self):
assert rffi.cast(rffi.SIGNEDCHAR, 0x123456) == 0x56
assert rffi.cast(rffi.SIGNEDCHAR, 0x123481) == -127
@@ -1250,6 +1252,32 @@
assert i == llmemory.cast_adr_to_int(a, "forced")
lltype.free(p, flavor='raw')
+ def test_freelist(self):
+ S = lltype.Struct('S', ('x', lltype.Signed), ('y', lltype.Signed))
+ SP = lltype.Ptr(S)
+ chunk = lltype.malloc(rffi.CArrayPtr(S).TO, 10, flavor='raw')
+ assert lltype.typeOf(chunk) == rffi.CArrayPtr(S)
+ free_list = lltype.nullptr(rffi.VOIDP.TO)
+ # build list
+ current = chunk
+ for i in range(10):
+ rffi.cast(rffi.VOIDPP, current)[0] = free_list
+ free_list = rffi.cast(rffi.VOIDP, current)
+ current = rffi.ptradd(current, 1)
+ # get one
+ p = free_list
+ free_list = rffi.cast(rffi.VOIDPP, p)[0]
+ rffi.cast(SP, p).x = 0
+ # get two
+ p = free_list
+ free_list = rffi.cast(rffi.VOIDPP, p)[0]
+ rffi.cast(SP, p).x = 0
+ # get three
+ p = free_list
+ free_list = rffi.cast(rffi.VOIDPP, p)[0]
+ rffi.cast(SP, p).x = 0
+ lltype.free(chunk, flavor='raw')
+
class TestPlatform(object):
def test_lib_on_libpaths(self):
from pypy.translator.platform import platform
Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/base.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/gc/base.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/gc/base.py Thu Sep 23 16:53:32 2010
@@ -5,6 +5,7 @@
from pypy.rpython.memory.support import get_address_stack, get_address_deque
from pypy.rpython.memory.support import AddressDict
from pypy.rpython.lltypesystem.llmemory import NULL, raw_malloc_usage
+from pypy.rlib.rarithmetic import r_uint
TYPEID_MAP = lltype.GcStruct('TYPEID_MAP', ('count', lltype.Signed),
('size', lltype.Signed),
@@ -53,7 +54,8 @@
varsize_offset_to_length,
varsize_offsets_to_gcpointers_in_var_part,
weakpointer_offset,
- member_index):
+ member_index,
+ is_rpython_class):
self.getfinalizer = getfinalizer
self.is_varsize = is_varsize
self.has_gcptr_in_varsize = has_gcptr_in_varsize
@@ -66,6 +68,7 @@
self.varsize_offsets_to_gcpointers_in_var_part = varsize_offsets_to_gcpointers_in_var_part
self.weakpointer_offset = weakpointer_offset
self.member_index = member_index
+ self.is_rpython_class = is_rpython_class
def get_member_index(self, type_id):
return self.member_index(type_id)
@@ -101,6 +104,9 @@
def get_size(self, obj):
return self._get_size_for_typeid(obj, self.get_type_id(obj))
+ def get_size_incl_hash(self, obj):
+ return self.get_size(obj)
+
def malloc(self, typeid, length=0, zero=False):
"""For testing. The interface used by the gctransformer is
the four malloc_[fixed,var]size[_clear]() functions.
@@ -146,7 +152,7 @@
return False
def set_max_heap_size(self, size):
- pass
+ raise NotImplementedError
def x_swap_pool(self, newpool):
return newpool
@@ -194,6 +200,39 @@
length -= 1
trace._annspecialcase_ = 'specialize:arg(2)'
+ def trace_partial(self, obj, start, stop, callback, arg):
+ """Like trace(), but only walk the array part, for indices in
+ range(start, stop). Must only be called if has_gcptr_in_varsize().
+ """
+ length = stop - start
+ typeid = self.get_type_id(obj)
+ if self.is_gcarrayofgcptr(typeid):
+ # a performance shortcut for GcArray(gcptr)
+ item = obj + llmemory.gcarrayofptr_itemsoffset
+ item += llmemory.gcarrayofptr_singleitemoffset * start
+ while length > 0:
+ if self.points_to_valid_gc_object(item):
+ callback(item, arg)
+ item += llmemory.gcarrayofptr_singleitemoffset
+ length -= 1
+ return
+ ll_assert(self.has_gcptr_in_varsize(typeid),
+ "trace_partial() on object without has_gcptr_in_varsize()")
+ item = obj + self.varsize_offset_to_variable_part(typeid)
+ offsets = self.varsize_offsets_to_gcpointers_in_var_part(typeid)
+ itemlength = self.varsize_item_sizes(typeid)
+ item += itemlength * start
+ while length > 0:
+ j = 0
+ while j < len(offsets):
+ itemobj = item + offsets[j]
+ if self.points_to_valid_gc_object(itemobj):
+ callback(itemobj, arg)
+ j += 1
+ item += itemlength
+ length -= 1
+ trace_partial._annspecialcase_ = 'specialize:arg(4)'
+
def points_to_valid_gc_object(self, addr):
return self.is_valid_gc_object(addr.address[0])
@@ -340,6 +379,7 @@
"generation": "generation.GenerationGC",
"hybrid": "hybrid.HybridGC",
"markcompact" : "markcompact.MarkCompactGC",
+ "minimark" : "minimark.MiniMarkGC",
}
try:
modulename, classname = classes[config.translation.gc].split('.')
@@ -351,10 +391,12 @@
GCClass = getattr(module, classname)
return GCClass, GCClass.TRANSLATION_PARAMS
-def read_from_env(varname):
+def _read_float_and_factor_from_env(varname):
import os
value = os.environ.get(varname)
if value:
+ if len(value) > 1 and value[-1] in 'bB':
+ value = value[:-1]
realvalue = value[:-1]
if value[-1] in 'kK':
factor = 1024
@@ -366,7 +408,21 @@
factor = 1
realvalue = value
try:
- return int(float(realvalue) * factor)
+ return (float(realvalue), factor)
except ValueError:
pass
- return -1
+ return (0.0, 0)
+
+def read_from_env(varname):
+ value, factor = _read_float_and_factor_from_env(varname)
+ return int(value * factor)
+
+def read_uint_from_env(varname):
+ value, factor = _read_float_and_factor_from_env(varname)
+ return r_uint(value * factor)
+
+def read_float_from_env(varname):
+ value, factor = _read_float_and_factor_from_env(varname)
+ if factor != 1:
+ return 0.0
+ return value
Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/gc/generation.py Thu Sep 23 16:53:32 2010
@@ -449,7 +449,7 @@
# for the JIT: a minimal description of the write_barrier() method
# (the JIT assumes it is of the shape
- # "if newvalue.int0 & JIT_WB_IF_FLAG: remember_young_pointer()")
+ # "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()")
JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS
def write_barrier(self, newvalue, addr_struct):
Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/gc/markcompact.py Thu Sep 23 16:53:32 2010
@@ -674,6 +674,13 @@
return llmemory.cast_adr_to_int(obj) # not in an arena...
return adr - self.space
+ def get_size_incl_hash(self, obj):
+ size = self.get_size(obj)
+ hdr = self.header(obj)
+ if hdr.tid & GCFLAG_HASHFIELD:
+ size += llmemory.sizeof(lltype.Signed)
+ return size
+
# ____________________________________________________________
class CannotAllocateGCArena(Exception):
Modified: pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/gc/test/test_direct.py Thu Sep 23 16:53:32 2010
@@ -95,7 +95,10 @@
if self.gc.needs_write_barrier:
newaddr = llmemory.cast_ptr_to_adr(newvalue)
addr_struct = llmemory.cast_ptr_to_adr(p)
- self.gc.write_barrier(newaddr, addr_struct)
+ if hasattr(self.gc, 'write_barrier_from_array'):
+ self.gc.write_barrier_from_array(newaddr, addr_struct, index)
+ else:
+ self.gc.write_barrier(newaddr, addr_struct)
p[index] = newvalue
def malloc(self, TYPE, n=None):
@@ -326,6 +329,27 @@
self.gc.collect()
assert hash == self.gc.identityhash(self.stackroots[-1])
self.stackroots.pop()
+ # (6) ask for the hash of varsized objects, larger and larger
+ for i in range(10):
+ self.gc.collect()
+ p = self.malloc(VAR, i)
+ self.stackroots.append(p)
+ hash = self.gc.identityhash(p)
+ self.gc.collect()
+ assert hash == self.gc.identityhash(self.stackroots[-1])
+ self.stackroots.pop()
+
+ def test_memory_alignment(self):
+ A1 = lltype.GcArray(lltype.Char)
+ for i in range(50):
+ p1 = self.malloc(A1, i)
+ if i:
+ p1[i-1] = chr(i)
+ self.stackroots.append(p1)
+ self.gc.collect()
+ for i in range(1, 50):
+ p = self.stackroots[-50+i]
+ assert p[i-1] == chr(i)
class TestSemiSpaceGC(DirectGCTest):
from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass
@@ -456,3 +480,35 @@
def test_varsized_from_prebuilt_gc(self):
DirectGCTest.test_varsized_from_prebuilt_gc(self)
test_varsized_from_prebuilt_gc.GC_PARAMS = {'space_size': 3 * 1024 * WORD}
+
+
+class TestMiniMarkGCSimple(DirectGCTest):
+ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass
+ from pypy.rpython.memory.gc.minimark import SimpleArenaCollection
+ # test the GC itself, providing a simple class for ArenaCollection
+ GC_PARAMS = {'ArenaCollectionClass': SimpleArenaCollection}
+
+ def test_card_marker(self):
+ for arraylength in (range(4, 17)
+ + [69] # 3 bytes
+ + [300]): # 10 bytes
+ print 'array length:', arraylength
+ nums = {}
+ a = self.malloc(VAR, arraylength)
+ self.stackroots.append(a)
+ for i in range(50):
+ p = self.malloc(S)
+ p.x = -i
+ a = self.stackroots[-1]
+ index = (i*i) % arraylength
+ self.writearray(a, index, p)
+ nums[index] = p.x
+ #
+ for index, expected_x in nums.items():
+ assert a[index].x == expected_x
+ self.stackroots.pop()
+ test_card_marker.GC_PARAMS = {"card_page_indices": 4,
+ "card_page_indices_min": 7}
+
+class TestMiniMarkGCFull(DirectGCTest):
+ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass
Modified: pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/gctransform/framework.py Thu Sep 23 16:53:32 2010
@@ -7,7 +7,7 @@
from pypy.rpython.memory.gc import marksweep
from pypy.rpython.memory.gcheader import GCHeaderBuilder
from pypy.rlib.rarithmetic import ovfcheck
-from pypy.rlib import rstack
+from pypy.rlib import rstack, rgc
from pypy.rlib.debug import ll_assert
from pypy.translator.backendopt import graphanalyze
from pypy.translator.backendopt.support import var_needsgc
@@ -139,6 +139,8 @@
def __init__(self, translator):
from pypy.rpython.memory.gc.base import choose_gc_from_config
from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP
+ from pypy.rpython.memory.gc import inspect
+
super(FrameworkGCTransformer, self).__init__(translator, inline=True)
if hasattr(self, 'GC_PARAMS'):
# for tests: the GC choice can be specified as class attributes
@@ -180,6 +182,7 @@
gcdata.gc.set_root_walker(root_walker)
self.num_pushs = 0
self.write_barrier_calls = 0
+ self.write_barrier_from_array_calls = 0
def frameworkgc_setup():
# run-time initialization code
@@ -388,11 +391,38 @@
else:
self.id_ptr = None
+ self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots,
+ [s_gc],
+ rgc.s_list_of_gcrefs(),
+ minimal_transform=False)
+ self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents,
+ [s_gc, s_gcref],
+ rgc.s_list_of_gcrefs(),
+ minimal_transform=False)
+ self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage,
+ [s_gc, s_gcref],
+ annmodel.SomeInteger(),
+ minimal_transform=False)
+ self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index,
+ [s_gc, s_gcref],
+ annmodel.SomeInteger(),
+ minimal_transform=False)
+ self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance,
+ [s_gc, s_gcref],
+ annmodel.SomeBool(),
+ minimal_transform=False)
+ self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap,
+ [s_gc, annmodel.SomeInteger()],
+ annmodel.s_Bool,
+ minimal_transform=False)
+
self.set_max_heap_size_ptr = getfn(GCClass.set_max_heap_size.im_func,
[s_gc,
annmodel.SomeInteger(nonneg=True)],
annmodel.s_None)
+ self.write_barrier_ptr = None
+ self.write_barrier_from_array_ptr = None
if GCClass.needs_write_barrier:
self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func,
[s_gc,
@@ -408,8 +438,26 @@
[annmodel.SomeAddress(),
annmodel.SomeAddress()],
annmodel.s_None)
- else:
- self.write_barrier_ptr = None
+ func = getattr(GCClass, 'write_barrier_from_array', None)
+ if func is not None:
+ self.write_barrier_from_array_ptr = getfn(func.im_func,
+ [s_gc,
+ annmodel.SomeAddress(),
+ annmodel.SomeAddress(),
+ annmodel.SomeInteger()],
+ annmodel.s_None,
+ inline=True)
+ func = getattr(gcdata.gc, 'remember_young_pointer_from_array',
+ None)
+ if func is not None:
+ # func should not be a bound method, but a real function
+ assert isinstance(func, types.FunctionType)
+ self.write_barrier_from_array_failing_case_ptr = \
+ getfn(func,
+ [annmodel.SomeAddress(),
+ annmodel.SomeInteger(),
+ annmodel.SomeAddress()],
+ annmodel.s_None)
self.statistics_ptr = getfn(GCClass.statistics.im_func,
[s_gc, annmodel.SomeInteger()],
annmodel.SomeInteger())
@@ -496,6 +544,9 @@
if self.write_barrier_ptr:
log.info("inserted %s write barrier calls" % (
self.write_barrier_calls, ))
+ if self.write_barrier_from_array_ptr:
+ log.info("inserted %s write_barrier_from_array calls" % (
+ self.write_barrier_from_array_calls, ))
# XXX because we call inputconst already in replace_malloc, we can't
# modify the instance, we have to modify the 'rtyped instance'
@@ -766,6 +817,12 @@
[self.write_barrier_failing_case_ptr],
resultvar=op.result)
+ def gct_get_write_barrier_from_array_failing_case(self, hop):
+ op = hop.spaceop
+ hop.genop("same_as",
+ [self.write_barrier_from_array_failing_case_ptr],
+ resultvar=op.result)
+
def gct_zero_gc_pointers_inside(self, hop):
if not self.malloc_zero_filled:
v_ob = hop.spaceop.args[0]
@@ -883,6 +940,53 @@
def gct_gc_get_type_info_group(self, hop):
return hop.cast_result(self.c_type_info_group)
+ def gct_gc_get_rpy_roots(self, hop):
+ livevars = self.push_roots(hop)
+ hop.genop("direct_call",
+ [self.get_rpy_roots_ptr, self.c_const_gc],
+ resultvar=hop.spaceop.result)
+ self.pop_roots(hop, livevars)
+
+ def gct_gc_get_rpy_referents(self, hop):
+ livevars = self.push_roots(hop)
+ [v_ptr] = hop.spaceop.args
+ hop.genop("direct_call",
+ [self.get_rpy_referents_ptr, self.c_const_gc, v_ptr],
+ resultvar=hop.spaceop.result)
+ self.pop_roots(hop, livevars)
+
+ def gct_gc_get_rpy_memory_usage(self, hop):
+ livevars = self.push_roots(hop)
+ [v_ptr] = hop.spaceop.args
+ hop.genop("direct_call",
+ [self.get_rpy_memory_usage_ptr, self.c_const_gc, v_ptr],
+ resultvar=hop.spaceop.result)
+ self.pop_roots(hop, livevars)
+
+ def gct_gc_get_rpy_type_index(self, hop):
+ livevars = self.push_roots(hop)
+ [v_ptr] = hop.spaceop.args
+ hop.genop("direct_call",
+ [self.get_rpy_type_index_ptr, self.c_const_gc, v_ptr],
+ resultvar=hop.spaceop.result)
+ self.pop_roots(hop, livevars)
+
+ def gct_gc_is_rpy_instance(self, hop):
+ livevars = self.push_roots(hop)
+ [v_ptr] = hop.spaceop.args
+ hop.genop("direct_call",
+ [self.is_rpy_instance_ptr, self.c_const_gc, v_ptr],
+ resultvar=hop.spaceop.result)
+ self.pop_roots(hop, livevars)
+
+ def gct_gc_dump_rpy_heap(self, hop):
+ livevars = self.push_roots(hop)
+ [v_fd] = hop.spaceop.args
+ hop.genop("direct_call",
+ [self.dump_rpy_heap_ptr, self.c_const_gc, v_fd],
+ resultvar=hop.spaceop.result)
+ self.pop_roots(hop, livevars)
+
def gct_malloc_nonmovable_varsize(self, hop):
TYPE = hop.spaceop.result.concretetype
if self.gcdata.gc.can_malloc_nonmovable():
@@ -897,6 +1001,15 @@
c = rmodel.inputconst(TYPE, lltype.nullptr(TYPE.TO))
return hop.cast_result(c)
+ def _set_into_gc_array_part(self, op):
+ if op.opname == 'setarrayitem':
+ return op.args[1]
+ if op.opname == 'setinteriorfield':
+ for v in op.args[1:-1]:
+ if v.concretetype is not lltype.Void:
+ return v
+ return None
+
def transform_generic_set(self, hop):
from pypy.objspace.flow.model import Constant
opname = hop.spaceop.opname
@@ -910,15 +1023,26 @@
and not isinstance(v_newvalue, Constant)
and v_struct.concretetype.TO._gckind == "gc"
and hop.spaceop not in self.clean_sets):
- self.write_barrier_calls += 1
v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue],
resulttype = llmemory.Address)
v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct],
resulttype = llmemory.Address)
- hop.genop("direct_call", [self.write_barrier_ptr,
- self.c_const_gc,
- v_newvalue,
- v_structaddr])
+ if (self.write_barrier_from_array_ptr is not None and
+ self._set_into_gc_array_part(hop.spaceop) is not None):
+ self.write_barrier_from_array_calls += 1
+ v_index = self._set_into_gc_array_part(hop.spaceop)
+ assert v_index.concretetype == lltype.Signed
+ hop.genop("direct_call", [self.write_barrier_from_array_ptr,
+ self.c_const_gc,
+ v_newvalue,
+ v_structaddr,
+ v_index])
+ else:
+ self.write_barrier_calls += 1
+ hop.genop("direct_call", [self.write_barrier_ptr,
+ self.c_const_gc,
+ v_newvalue,
+ v_structaddr])
hop.rename('bare_' + opname)
def transform_getfield_typeptr(self, hop):
Modified: pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/gctypelayout.py Thu Sep 23 16:53:32 2010
@@ -101,6 +101,10 @@
infobits = self.get(typeid).infobits
return infobits & T_MEMBER_INDEX
+ def q_is_rpython_class(self, typeid):
+ infobits = self.get(typeid).infobits
+ return infobits & T_IS_RPYTHON_INSTANCE != 0
+
def set_query_functions(self, gc):
gc.set_query_functions(
self.q_is_varsize,
@@ -114,7 +118,8 @@
self.q_varsize_offset_to_length,
self.q_varsize_offsets_to_gcpointers_in_var_part,
self.q_weakpointer_offset,
- self.q_member_index)
+ self.q_member_index,
+ self.q_is_rpython_class)
# the lowest 16bits are used to store group member index
@@ -123,6 +128,7 @@
T_HAS_GCPTR_IN_VARSIZE = 0x20000
T_IS_GCARRAY_OF_GCPTR = 0x40000
T_IS_WEAKREF = 0x80000
+T_IS_RPYTHON_INSTANCE = 0x100000 # the type is a subclass of OBJECT
T_KEY_MASK = intmask(0xFF000000)
T_KEY_VALUE = intmask(0x7A000000) # bug detection only
@@ -181,6 +187,8 @@
varinfo.varitemsize = llmemory.sizeof(ARRAY.OF)
if builder.is_weakref_type(TYPE):
infobits |= T_IS_WEAKREF
+ if is_subclass_of_object(TYPE):
+ infobits |= T_IS_RPYTHON_INSTANCE
info.infobits = infobits | T_KEY_VALUE
# ____________________________________________________________
@@ -259,9 +267,7 @@
else:
# no vtable from lltype2vtable -- double-check to be sure
# that it's not a subclass of OBJECT.
- while isinstance(TYPE, lltype.GcStruct):
- assert TYPE is not rclass.OBJECT
- _, TYPE = TYPE._first_struct()
+ assert not is_subclass_of_object(TYPE)
def get_info(self, type_id):
res = llop.get_group_member(GCData.TYPE_INFO_PTR,
@@ -437,6 +443,13 @@
for i in range(p._obj.getlength()):
zero_gc_pointers_inside(p[i], ITEM)
+def is_subclass_of_object(TYPE):
+ while isinstance(TYPE, lltype.GcStruct):
+ if TYPE is rclass.OBJECT:
+ return True
+ _, TYPE = TYPE._first_struct()
+ return False
+
########## weakrefs ##########
# framework: weakref objects are small structures containing only an address
Modified: pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/gcwrapper.py Thu Sep 23 16:53:32 2010
@@ -15,6 +15,8 @@
self.llinterp = llinterp
self.prepare_graphs(flowgraphs)
self.gc.setup()
+ self.has_write_barrier_from_array = hasattr(self.gc,
+ 'write_barrier_from_array')
def prepare_graphs(self, flowgraphs):
lltype2vtable = self.llinterp.typer.lltype2vtable
@@ -78,13 +80,30 @@
ARRAY = lltype.typeOf(array).TO
addr = llmemory.cast_ptr_to_adr(array)
addr += llmemory.itemoffsetof(ARRAY, index)
- self.setinterior(array, addr, ARRAY.OF, newitem)
+ self.setinterior(array, addr, ARRAY.OF, newitem, (index,))
- def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue):
+ def setinterior(self, toplevelcontainer, inneraddr, INNERTYPE, newvalue,
+ offsets=()):
if (lltype.typeOf(toplevelcontainer).TO._gckind == 'gc' and
isinstance(INNERTYPE, lltype.Ptr) and INNERTYPE.TO._gckind == 'gc'):
- self.gc.write_barrier(llmemory.cast_ptr_to_adr(newvalue),
- llmemory.cast_ptr_to_adr(toplevelcontainer))
+ #
+ wb = True
+ if self.has_write_barrier_from_array:
+ for index in offsets:
+ if type(index) is not str:
+ assert (type(index) is int # <- fast path
+ or lltype.typeOf(index) == lltype.Signed)
+ self.gc.write_barrier_from_array(
+ llmemory.cast_ptr_to_adr(newvalue),
+ llmemory.cast_ptr_to_adr(toplevelcontainer),
+ index)
+ wb = False
+ break
+ #
+ if wb:
+ self.gc.write_barrier(
+ llmemory.cast_ptr_to_adr(newvalue),
+ llmemory.cast_ptr_to_adr(toplevelcontainer))
llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
def collect(self, *gen):
Modified: pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/lltypelayout.py Thu Sep 23 16:53:32 2010
@@ -7,7 +7,7 @@
primitive_to_fmt = {lltype.Signed: "l",
lltype.Unsigned: "L",
lltype.Char: "c",
- lltype.UniChar: "H", # maybe
+ lltype.UniChar: "i", # 4 bytes
lltype.Bool: "B",
lltype.Float: "d",
llmemory.Address: "P",
Modified: pypy/branch/jitffi/pypy/rpython/memory/support.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/support.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/support.py Thu Sep 23 16:53:32 2010
@@ -216,6 +216,24 @@
self.index_in_oldest = index + 1
return result
+ def foreach(self, callback, arg):
+ """Invoke 'callback(address, arg)' for all addresses in the deque.
+ Typically, 'callback' is a bound method and 'arg' can be None.
+ """
+ chunk = self.oldest_chunk
+ index = self.index_in_oldest
+ while chunk is not self.newest_chunk:
+ while index < chunk_size:
+ callback(chunk.items[index], arg)
+ index += 1
+ chunk = chunk.next
+ index = 0
+ limit = self.index_in_newest
+ while index < limit:
+ callback(chunk.items[index], arg)
+ index += 1
+ foreach._annspecialcase_ = 'specialize:arg(1)'
+
def delete(self):
cur = self.oldest_chunk
while cur:
Modified: pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/test/test_gc.py Thu Sep 23 16:53:32 2010
@@ -26,8 +26,9 @@
class GCTest(object):
GC_PARAMS = {}
GC_CAN_MOVE = False
- GC_CANNOT_MALLOC_NONMOVABLE = False
+ GC_CAN_MALLOC_NONMOVABLE = True
GC_CAN_SHRINK_ARRAY = False
+ GC_CAN_SHRINK_BIG_ARRAY = False
def setup_class(cls):
cls._saved_logstate = py.log._getstate()
@@ -451,10 +452,10 @@
a = rgc.malloc_nonmovable(TP, 3)
if a:
assert not rgc.can_move(a)
- return 0
- return 1
+ return 1
+ return 0
- assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE)
+ assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE)
def test_malloc_nonmovable_fixsize(self):
S = lltype.GcStruct('S', ('x', lltype.Float))
@@ -465,37 +466,36 @@
rgc.collect()
if a:
assert not rgc.can_move(a)
- return 0
- return 1
+ return 1
+ return 0
except Exception, e:
return 2
- assert self.interpret(func, []) == int(self.GC_CANNOT_MALLOC_NONMOVABLE)
+ assert self.interpret(func, []) == int(self.GC_CAN_MALLOC_NONMOVABLE)
def test_shrink_array(self):
from pypy.rpython.lltypesystem.rstr import STR
- GC_CAN_SHRINK_ARRAY = self.GC_CAN_SHRINK_ARRAY
- def f(n, m):
+ def f(n, m, gc_can_shrink_array):
ptr = lltype.malloc(STR, n)
ptr.hash = 0x62
ptr.chars[0] = 'A'
ptr.chars[1] = 'B'
ptr.chars[2] = 'C'
ptr2 = rgc.ll_shrink_array(ptr, 2)
- assert (ptr == ptr2) == GC_CAN_SHRINK_ARRAY
+ assert (ptr == ptr2) == gc_can_shrink_array
rgc.collect()
return ( ord(ptr2.chars[0]) +
(ord(ptr2.chars[1]) << 8) +
(len(ptr2.chars) << 16) +
(ptr2.hash << 24))
- assert self.interpret(f, [3, 0]) == 0x62024241
- # don't test with larger numbers of top of the Hybrid GC, because
- # the default settings make it a too-large varsized object that
- # gets allocated outside the semispace
- if not isinstance(self, TestHybridGC):
- assert self.interpret(f, [12, 0]) == 0x62024241
+ flag = self.GC_CAN_SHRINK_ARRAY
+ assert self.interpret(f, [3, 0, flag]) == 0x62024241
+ # with larger numbers, it gets allocated outside the semispace
+ # with some GCs.
+ flag = self.GC_CAN_SHRINK_BIG_ARRAY
+ assert self.interpret(f, [12, 0, flag]) == 0x62024241
def test_tagged_simple(self):
from pypy.rlib.objectmodel import UnboxedValue
@@ -568,7 +568,7 @@
assert res == 111
def test_writebarrier_before_copy(self):
- S = lltype.GcStruct('S')
+ S = lltype.GcStruct('S', ('x', lltype.Char))
TP = lltype.GcArray(lltype.Ptr(S))
def fn():
l = lltype.malloc(TP, 100)
@@ -628,8 +628,9 @@
class TestSemiSpaceGC(GCTest, snippet.SemiSpaceGCTests):
from pypy.rpython.memory.gc.semispace import SemiSpaceGC as GCClass
GC_CAN_MOVE = True
- GC_CANNOT_MALLOC_NONMOVABLE = True
+ GC_CAN_MALLOC_NONMOVABLE = False
GC_CAN_SHRINK_ARRAY = True
+ GC_CAN_SHRINK_BIG_ARRAY = True
class TestGrowingSemiSpaceGC(TestSemiSpaceGC):
GC_PARAMS = {'space_size': 16*WORD}
@@ -641,16 +642,15 @@
from pypy.rpython.memory.gc.markcompact import MarkCompactGC as GCClass
GC_PARAMS = {'space_size': 65536+16384}
GC_CAN_SHRINK_ARRAY = False
+ GC_CAN_SHRINK_BIG_ARRAY = False
def test_finalizer_order(self):
py.test.skip("Not implemented yet")
- def test_writebarrier_before_copy(self):
- py.test.skip("Not relevant, and crashes because llarena does not "
- "support empty GcStructs")
class TestHybridGC(TestGenerationalGC):
from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass
- GC_CANNOT_MALLOC_NONMOVABLE = False
+ GC_CAN_MALLOC_NONMOVABLE = True
+ GC_CAN_SHRINK_BIG_ARRAY = False
def test_ref_from_rawmalloced_to_regular(self):
import gc
@@ -720,7 +720,7 @@
from pypy.rpython.memory.gc.hybrid import HybridGC as GCClass
GC_CAN_MOVE = False # with this size of heap, stuff gets allocated
# in 3rd gen.
- GC_CANNOT_MALLOC_NONMOVABLE = False
+ GC_CAN_MALLOC_NONMOVABLE = True
GC_PARAMS = {'space_size': 48*WORD,
'min_nursery_size': 12*WORD,
'nursery_size': 12*WORD,
@@ -764,3 +764,13 @@
def test_malloc_nonmovable_fixsize(self):
py.test.skip("Not supported")
+
+
+class TestMiniMarkGC(TestSemiSpaceGC):
+ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass
+ GC_CAN_SHRINK_BIG_ARRAY = False
+ GC_CAN_MALLOC_NONMOVABLE = True
+
+class TestMiniMarkGCCardMarking(TestMiniMarkGC):
+ GC_PARAMS = {'card_page_indices': 4,
+ 'card_page_indices_min': 10}
Modified: pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/test/test_support.py Thu Sep 23 16:53:32 2010
@@ -113,6 +113,27 @@
deque.append(x)
expected.append(x)
+ def test_foreach(self):
+ AddressDeque = get_address_deque(10)
+ ll = AddressDeque()
+ for num_entries in range(30, -1, -1):
+ addrs = [raw_malloc(llmemory.sizeof(lltype.Signed))
+ for i in range(num_entries)]
+ for a in addrs:
+ ll.append(a)
+
+ seen = []
+ def callback(addr, fortytwo):
+ assert fortytwo == 42
+ seen.append(addr)
+
+ ll.foreach(callback, 42)
+ assert seen == addrs
+ for a in addrs:
+ b = ll.popleft()
+ assert a == b
+ assert not ll.non_empty()
+
def test_stack_annotate():
AddressStack = get_address_stack(60)
Modified: pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py (original)
+++ pypy/branch/jitffi/pypy/rpython/memory/test/test_transformed_gc.py Thu Sep 23 16:53:32 2010
@@ -47,7 +47,7 @@
gcpolicy = None
stacklessgc = False
GC_CAN_MOVE = False
- GC_CANNOT_MALLOC_NONMOVABLE = False
+ GC_CAN_MALLOC_NONMOVABLE = True
taggedpointers = False
def setup_class(cls):
@@ -242,6 +242,26 @@
heap_size = self.heap_usage(statistics)
assert heap_size < 16000 * WORD / 4 # xxx
+ def define_llinterp_dict(self):
+ class A(object):
+ pass
+ def malloc_a_lot():
+ i = 0
+ while i < 10:
+ i += 1
+ a = (1, 2, i)
+ b = {a: A()}
+ j = 0
+ while j < 20:
+ j += 1
+ b[1, j, i] = A()
+ return 0
+ return malloc_a_lot
+
+ def test_llinterp_dict(self):
+ run = self.runner("llinterp_dict")
+ run([])
+
def skipdefine_global_list(cls):
gl = []
class Box:
@@ -602,8 +622,8 @@
rgc.collect()
if a:
assert not rgc.can_move(a)
- return 0
- return 1
+ return 1
+ return 0
#except Exception, e:
# return 2
@@ -611,7 +631,7 @@
def test_malloc_nonmovable(self):
run = self.runner("malloc_nonmovable")
- assert int(self.GC_CANNOT_MALLOC_NONMOVABLE) == run([])
+ assert int(self.GC_CAN_MALLOC_NONMOVABLE) == run([])
def define_malloc_nonmovable_fixsize(cls):
S = lltype.GcStruct('S', ('x', lltype.Float))
@@ -622,8 +642,8 @@
rgc.collect()
if a:
assert not rgc.can_move(a)
- return 0
- return 1
+ return 1
+ return 0
except Exception, e:
return 2
@@ -631,7 +651,7 @@
def test_malloc_nonmovable_fixsize(self):
run = self.runner("malloc_nonmovable_fixsize")
- assert run([]) == int(self.GC_CANNOT_MALLOC_NONMOVABLE)
+ assert run([]) == int(self.GC_CAN_MALLOC_NONMOVABLE)
def define_shrink_array(cls):
from pypy.rpython.lltypesystem.rstr import STR
@@ -680,7 +700,8 @@
class GenericMovingGCTests(GenericGCTests):
GC_CAN_MOVE = True
- GC_CANNOT_MALLOC_NONMOVABLE = True
+ GC_CAN_MALLOC_NONMOVABLE = False
+ GC_CAN_TEST_ID = False
def define_many_ids(cls):
class A(object):
@@ -710,7 +731,8 @@
return f
def test_many_ids(self):
- py.test.skip("fails for bad reasons in lltype.py :-(")
+ if not self.GC_CAN_TEST_ID:
+ py.test.skip("fails for bad reasons in lltype.py :-(")
run = self.runner("many_ids")
run([])
@@ -856,7 +878,7 @@
# (and give fixedsize)
def define_writebarrier_before_copy(cls):
- S = lltype.GcStruct('S')
+ S = lltype.GcStruct('S', ('x', lltype.Char))
TP = lltype.GcArray(lltype.Ptr(S))
def fn():
l = lltype.malloc(TP, 100)
@@ -1144,10 +1166,6 @@
GC_PARAMS = {'space_size': 4096*WORD}
root_stack_depth = 200
- def test_writebarrier_before_copy(self):
- py.test.skip("Not relevant, and crashes because llarena does not "
- "support empty GcStructs")
-
class TestGenerationGC(GenericMovingGCTests):
gcname = "generation"
GC_CAN_SHRINK_ARRAY = True
@@ -1379,7 +1397,7 @@
class TestHybridGC(TestGenerationGC):
gcname = "hybrid"
- GC_CANNOT_MALLOC_NONMOVABLE = False
+ GC_CAN_MALLOC_NONMOVABLE = True
class gcpolicy(gc.FrameworkGcPolicy):
class transformerclass(framework.FrameworkGCTransformer):
@@ -1444,6 +1462,23 @@
def test_malloc_nonmovable_fixsize(self):
py.test.skip("not supported")
+
+class TestMiniMarkGC(TestHybridGC):
+ gcname = "minimark"
+ GC_CAN_TEST_ID = True
+
+ class gcpolicy(gc.FrameworkGcPolicy):
+ class transformerclass(framework.FrameworkGCTransformer):
+ from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass
+ GC_PARAMS = {'nursery_size': 32*WORD,
+ 'page_size': 16*WORD,
+ 'arena_size': 64*WORD,
+ 'small_request_threshold': 5*WORD,
+ 'card_page_indices': 4,
+ 'card_page_indices_min': 10,
+ }
+ root_stack_depth = 200
+
# ________________________________________________________________
# tagged pointers
Modified: pypy/branch/jitffi/pypy/rpython/rptr.py
==============================================================================
--- pypy/branch/jitffi/pypy/rpython/rptr.py (original)
+++ pypy/branch/jitffi/pypy/rpython/rptr.py Thu Sep 23 16:53:32 2010
@@ -35,6 +35,9 @@
id = lltype.cast_ptr_to_int(p)
return ll_str.ll_int2hex(r_uint(id), True)
+ def get_ll_eq_function(self):
+ return None
+
def rtype_getattr(self, hop):
attr = hop.args_s[1].const
if isinstance(hop.s_result, annmodel.SomeLLADTMeth):
Modified: pypy/branch/jitffi/pypy/translator/c/funcgen.py
==============================================================================
--- pypy/branch/jitffi/pypy/translator/c/funcgen.py (original)
+++ pypy/branch/jitffi/pypy/translator/c/funcgen.py Thu Sep 23 16:53:32 2010
@@ -733,6 +733,8 @@
continue
elif T == Signed:
format.append('%ld')
+ elif T == Unsigned:
+ format.append('%lu')
elif T == Float:
format.append('%f')
elif isinstance(T, Ptr) or T == Address:
Modified: pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py
==============================================================================
--- pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py (original)
+++ pypy/branch/jitffi/pypy/translator/c/gcc/trackgcroot.py Thu Sep 23 16:53:32 2010
@@ -856,6 +856,7 @@
visit_and = FunctionGcRootTracker._visit_and
visit_xchgl = FunctionGcRootTracker._visit_xchg
+ visit_xchgq = FunctionGcRootTracker._visit_xchg
# used in "xor reg, reg" to create a NULL GC ptr
visit_xorl = FunctionGcRootTracker.binary_insn
Modified: pypy/branch/jitffi/pypy/translator/c/genc.py
==============================================================================
--- pypy/branch/jitffi/pypy/translator/c/genc.py (original)
+++ pypy/branch/jitffi/pypy/translator/c/genc.py Thu Sep 23 16:53:32 2010
@@ -592,7 +592,7 @@
if sys.platform == 'win32':
python = sys.executable.replace('\\', '/') + ' '
else:
- python = ''
+ python = sys.executable + ' '
if self.translator.platform.name == 'msvc':
lblofiles = []
Modified: pypy/branch/jitffi/pypy/translator/c/src/mem.h
==============================================================================
--- pypy/branch/jitffi/pypy/translator/c/src/mem.h (original)
+++ pypy/branch/jitffi/pypy/translator/c/src/mem.h Thu Sep 23 16:53:32 2010
@@ -224,3 +224,13 @@
#define OP_CAST_PTR_TO_WEAKREFPTR(x, r) r = x
#define OP_CAST_WEAKREFPTR_TO_PTR(x, r) r = x
+
+/************************************************************/
+/* dummy version of these operations, e.g. with Boehm */
+
+#define OP_GC_GET_RPY_ROOTS(r) r = 0
+#define OP_GC_GET_RPY_REFERENTS(x, r) r = 0
+#define OP_GC_GET_RPY_MEMORY_USAGE(x, r) r = -1
+#define OP_GC_GET_RPY_TYPE_INDEX(x, r) r = -1
+#define OP_GC_IS_RPY_INSTANCE(x, r) r = 0
+#define OP_GC_DUMP_RPY_HEAP(r) r = 0
Modified: pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py
==============================================================================
--- pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py (original)
+++ pypy/branch/jitffi/pypy/translator/c/test/test_newgc.py Thu Sep 23 16:53:32 2010
@@ -2,7 +2,7 @@
import sys, os, inspect
from pypy.objspace.flow.model import summary
-from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.lltypesystem import lltype, llmemory
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython.memory.test import snippet
from pypy.rlib import rgc
@@ -19,10 +19,11 @@
removetypeptr = False
taggedpointers = False
GC_CAN_MOVE = False
- GC_CANNOT_MALLOC_NONMOVABLE = False
+ GC_CAN_MALLOC_NONMOVABLE = True
GC_CAN_SHRINK_ARRAY = False
_isolated_func = None
+ c_allfuncs = None
@classmethod
def _makefunc_str_int(cls, f):
@@ -111,6 +112,7 @@
def teardown_class(cls):
if hasattr(cls.c_allfuncs, 'close_isolate'):
cls.c_allfuncs.close_isolate()
+ cls.c_allfuncs = None
def run(self, name, *args):
if not args:
@@ -690,8 +692,8 @@
rgc.collect()
if a:
assert not rgc.can_move(a)
- return 0
- return 1
+ return 1
+ return 0
except Exception, e:
return 2
@@ -699,7 +701,7 @@
def test_malloc_nonmovable(self):
res = self.run('malloc_nonmovable')
- assert res == self.GC_CANNOT_MALLOC_NONMOVABLE
+ assert res == self.GC_CAN_MALLOC_NONMOVABLE
def define_resizable_buffer(cls):
from pypy.rpython.lltypesystem.rstr import STR
@@ -891,12 +893,208 @@
def test_arraycopy_writebarrier_ptr(self):
self.run("arraycopy_writebarrier_ptr")
+ def define_get_rpy_roots(self):
+ U = lltype.GcStruct('U', ('x', lltype.Signed))
+ S = lltype.GcStruct('S', ('u', lltype.Ptr(U)))
+
+ def g(s):
+ lst = rgc.get_rpy_roots()
+ found = False
+ for x in lst:
+ if x == lltype.cast_opaque_ptr(llmemory.GCREF, s):
+ found = True
+ if x == lltype.cast_opaque_ptr(llmemory.GCREF, s.u):
+ os.write(2, "s.u should not be found!\n")
+ assert False
+ return found == 1
+
+ def fn():
+ s = lltype.malloc(S)
+ s.u = lltype.malloc(U)
+ found = g(s)
+ if not found:
+ os.write(2, "not found!\n")
+ assert False
+ s.u.x = 42
+ return 0
+
+ return fn
+
+ def test_get_rpy_roots(self):
+ self.run("get_rpy_roots")
+
+ def define_get_rpy_referents(self):
+ U = lltype.GcStruct('U', ('x', lltype.Signed))
+ S = lltype.GcStruct('S', ('u', lltype.Ptr(U)))
+
+ def fn():
+ s = lltype.malloc(S)
+ s.u = lltype.malloc(U)
+ gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s)
+ gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u)
+ lst = rgc.get_rpy_referents(gcref1)
+ assert gcref2 in lst
+ assert gcref1 not in lst
+ s.u.x = 42
+ return 0
+
+ return fn
+
+ def test_get_rpy_referents(self):
+ self.run("get_rpy_referents")
+
+ def define_is_rpy_instance(self):
+ class Foo:
+ pass
+ S = lltype.GcStruct('S', ('x', lltype.Signed))
+
+ def check(gcref, expected):
+ result = rgc._is_rpy_instance(gcref)
+ assert result == expected
+
+ def fn():
+ s = lltype.malloc(S)
+ gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s)
+ check(gcref1, False)
+
+ f = Foo()
+ gcref3 = rgc.cast_instance_to_gcref(f)
+ check(gcref3, True)
+
+ return 0
+
+ return fn
+
+ def test_is_rpy_instance(self):
+ self.run("is_rpy_instance")
+
+ def define_try_cast_gcref_to_instance(self):
+ class Foo:
+ pass
+ class FooBar(Foo):
+ pass
+ class Biz(object):
+ pass
+ S = lltype.GcStruct('S', ('x', lltype.Signed))
+
+ def fn():
+ foo = Foo()
+ gcref1 = rgc.cast_instance_to_gcref(foo)
+ assert rgc.try_cast_gcref_to_instance(Foo, gcref1) is foo
+ assert rgc.try_cast_gcref_to_instance(FooBar, gcref1) is None
+ assert rgc.try_cast_gcref_to_instance(Biz, gcref1) is None
+
+ foobar = FooBar()
+ gcref2 = rgc.cast_instance_to_gcref(foobar)
+ assert rgc.try_cast_gcref_to_instance(Foo, gcref2) is foobar
+ assert rgc.try_cast_gcref_to_instance(FooBar, gcref2) is foobar
+ assert rgc.try_cast_gcref_to_instance(Biz, gcref2) is None
+
+ s = lltype.malloc(S)
+ gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, s)
+ assert rgc.try_cast_gcref_to_instance(Foo, gcref3) is None
+ assert rgc.try_cast_gcref_to_instance(FooBar, gcref3) is None
+ assert rgc.try_cast_gcref_to_instance(Biz, gcref3) is None
+
+ return 0
+
+ return fn
+
+ def test_try_cast_gcref_to_instance(self):
+ self.run("try_cast_gcref_to_instance")
+
+ def define_get_rpy_memory_usage(self):
+ U = lltype.GcStruct('U', ('x1', lltype.Signed),
+ ('x2', lltype.Signed),
+ ('x3', lltype.Signed),
+ ('x4', lltype.Signed),
+ ('x5', lltype.Signed),
+ ('x6', lltype.Signed),
+ ('x7', lltype.Signed),
+ ('x8', lltype.Signed))
+ S = lltype.GcStruct('S', ('u', lltype.Ptr(U)))
+ A = lltype.GcArray(lltype.Ptr(S))
+
+ def fn():
+ s = lltype.malloc(S)
+ s.u = lltype.malloc(U)
+ a = lltype.malloc(A, 1000)
+ gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s)
+ int1 = rgc.get_rpy_memory_usage(gcref1)
+ assert 8 <= int1 <= 32
+ gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u)
+ int2 = rgc.get_rpy_memory_usage(gcref2)
+ assert 4*9 <= int2 <= 8*12
+ gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a)
+ int3 = rgc.get_rpy_memory_usage(gcref3)
+ assert 4*1001 <= int3 <= 8*1010
+ return 0
+
+ return fn
+
+ def test_get_rpy_memory_usage(self):
+ self.run("get_rpy_memory_usage")
+
+ def define_get_rpy_type_index(self):
+ U = lltype.GcStruct('U', ('x', lltype.Signed))
+ S = lltype.GcStruct('S', ('u', lltype.Ptr(U)))
+ A = lltype.GcArray(lltype.Ptr(S))
+
+ def fn():
+ s = lltype.malloc(S)
+ s.u = lltype.malloc(U)
+ a = lltype.malloc(A, 1000)
+ s2 = lltype.malloc(S)
+ gcref1 = lltype.cast_opaque_ptr(llmemory.GCREF, s)
+ int1 = rgc.get_rpy_type_index(gcref1)
+ gcref2 = lltype.cast_opaque_ptr(llmemory.GCREF, s.u)
+ int2 = rgc.get_rpy_type_index(gcref2)
+ gcref3 = lltype.cast_opaque_ptr(llmemory.GCREF, a)
+ int3 = rgc.get_rpy_type_index(gcref3)
+ gcref4 = lltype.cast_opaque_ptr(llmemory.GCREF, s2)
+ int4 = rgc.get_rpy_type_index(gcref4)
+ assert int1 != int2
+ assert int1 != int3
+ assert int2 != int3
+ assert int1 == int4
+ return 0
+
+ return fn
+
+ def test_get_rpy_type_index(self):
+ self.run("get_rpy_type_index")
+
+ filename_dump = str(udir.join('test_dump_rpy_heap'))
+ def define_dump_rpy_heap(self):
+ U = lltype.GcStruct('U', ('x', lltype.Signed))
+ S = lltype.GcStruct('S', ('u', lltype.Ptr(U)))
+ A = lltype.GcArray(lltype.Ptr(S))
+ filename = self.filename_dump
+
+ def fn():
+ s = lltype.malloc(S)
+ s.u = lltype.malloc(U)
+ a = lltype.malloc(A, 1000)
+ s2 = lltype.malloc(S)
+ #
+ fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0666)
+ rgc.dump_rpy_heap(fd)
+ os.close(fd)
+ return 0
+
+ return fn
+
+ def test_dump_rpy_heap(self):
+ self.run("dump_rpy_heap")
+ assert os.path.exists(self.filename_dump)
+ assert os.path.getsize(self.filename_dump) > 0 # minimal test
+
class TestSemiSpaceGC(TestUsingFramework, snippet.SemiSpaceGCTestDefines):
gcpolicy = "semispace"
should_be_moving = True
GC_CAN_MOVE = True
- GC_CANNOT_MALLOC_NONMOVABLE = True
+ GC_CAN_MALLOC_NONMOVABLE = False
GC_CAN_SHRINK_ARRAY = True
# for snippets
@@ -1055,7 +1253,7 @@
class TestHybridGC(TestGenerationalGC):
gcpolicy = "hybrid"
should_be_moving = True
- GC_CANNOT_MALLOC_NONMOVABLE = False
+ GC_CAN_MALLOC_NONMOVABLE = True
def test_gc_set_max_heap_size(self):
py.test.skip("not implemented")
@@ -1126,6 +1324,15 @@
res = self.run("adding_a_hash")
assert res == 0
+class TestMiniMarkGC(TestSemiSpaceGC):
+ gcpolicy = "minimark"
+ should_be_moving = True
+ GC_CAN_MALLOC_NONMOVABLE = True
+ GC_CAN_SHRINK_ARRAY = True
+
+ def test_gc_heap_stats(self):
+ py.test.skip("not implemented")
+
# ____________________________________________________________________
class TaggedPointersTest(object):
@@ -1180,3 +1387,6 @@
class TestMarkCompactGCMostCompact(TaggedPointersTest, TestMarkCompactGC):
removetypeptr = True
+
+class TestMiniMarkGCMostCompact(TaggedPointersTest, TestMiniMarkGC):
+ removetypeptr = True
Modified: pypy/branch/jitffi/pypy/translator/exceptiontransform.py
==============================================================================
--- pypy/branch/jitffi/pypy/translator/exceptiontransform.py (original)
+++ pypy/branch/jitffi/pypy/translator/exceptiontransform.py Thu Sep 23 16:53:32 2010
@@ -277,7 +277,9 @@
block.exits[0].target is graph.returnblock and
len(block.operations) and
(block.exits[0].args[0].concretetype is lltype.Void or
- block.exits[0].args[0] is block.operations[-1].result)):
+ block.exits[0].args[0] is block.operations[-1].result) and
+ block.operations[-1].opname not in ('malloc', # special cases
+ 'malloc_nonmovable')):
last_operation -= 1
lastblock = block
for i in range(last_operation, -1, -1):
@@ -466,6 +468,9 @@
c_flags = spaceop.args[1]
c_flags.value = c_flags.value.copy()
spaceop.args[1].value['zero'] = True
+ # NB. when inserting more special-cases here, keep in mind that
+ # you also need to list the opnames in transform_block()
+ # (see "special cases")
if insert_zeroing_op:
if normalafterblock is None:
More information about the Pypy-commit
mailing list