[pypy-svn] r69687 - in pypy/branch/virtual-forcing/pypy: jit/backend/x86 rpython rpython/lltypesystem
arigo at codespeak.net
Fri Nov 27 13:31:43 CET 2009
Author: arigo
Date: Fri Nov 27 13:31:43 2009
New Revision: 69687
Modified:
pypy/branch/virtual-forcing/pypy/jit/backend/x86/assembler.py
pypy/branch/virtual-forcing/pypy/jit/backend/x86/regalloc.py
pypy/branch/virtual-forcing/pypy/jit/backend/x86/runner.py
pypy/branch/virtual-forcing/pypy/rpython/llinterp.py
pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/lloperation.py
pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/opimpl.py
Log:
(pedronis, fijal, arigo)
Implement cpu.force() by calling the existing guard recovery code.
Modified: pypy/branch/virtual-forcing/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/virtual-forcing/pypy/jit/backend/x86/assembler.py (original)
+++ pypy/branch/virtual-forcing/pypy/jit/backend/x86/assembler.py Fri Nov 27 13:31:43 2009
@@ -879,6 +879,69 @@
arglocs.append(loc)
return arglocs[:]
+ def grab_frame_values(self, bytecode, frame_addr,
+ registers=lltype.nullptr(rffi.LONGP.TO)):
+ num = 0
+ value_hi = 0
+ while 1:
+ # decode the next instruction from the bytecode
+ code = rffi.cast(lltype.Signed, bytecode[0])
+ bytecode = rffi.ptradd(bytecode, 1)
+ if code >= 4*self.DESCR_FROMSTACK:
+ if code > 0x7F:
+ shift = 7
+ code &= 0x7F
+ while True:
+ nextcode = rffi.cast(lltype.Signed, bytecode[0])
+ bytecode = rffi.ptradd(bytecode, 1)
+ code |= (nextcode & 0x7F) << shift
+ shift += 7
+ if nextcode <= 0x7F:
+ break
+ # load the value from the stack
+ kind = code & 3
+ code = (code >> 2) - self.DESCR_FROMSTACK
+ stackloc = frame_addr + get_ebp_ofs(code)
+ value = rffi.cast(rffi.LONGP, stackloc)[0]
+ if kind == self.DESCR_FLOAT:
+ value_hi = value
+ value = rffi.cast(rffi.LONGP, stackloc - 4)[0]
+ else:
+ # 'code' identifies a register: load its value
+ kind = code & 3
+ if kind == self.DESCR_SPECIAL:
+ if code == self.DESCR_HOLE:
+ num += 1
+ continue
+ assert code == self.DESCR_STOP
+ break
+ assert registers # it's NULL when called from cpu.force()
+ code >>= 2
+ if kind == self.DESCR_FLOAT:
+ xmmregisters = rffi.ptradd(registers, -16)
+ value = xmmregisters[2*code]
+ value_hi = xmmregisters[2*code + 1]
+ else:
+ value = registers[code]
+
+ # store the loaded value into fail_boxes_<type>
+ if kind == self.DESCR_INT:
+ tgt = self.fail_boxes_int.get_addr_for_num(num)
+ elif kind == self.DESCR_REF:
+ tgt = self.fail_boxes_ptr.get_addr_for_num(num)
+ elif kind == self.DESCR_FLOAT:
+ tgt = self.fail_boxes_float.get_addr_for_num(num)
+ rffi.cast(rffi.LONGP, tgt)[1] = value_hi
+ else:
+ assert 0, "bogus kind"
+ rffi.cast(rffi.LONGP, tgt)[0] = value
+ num += 1
+ #
+ if not we_are_translated():
+ assert bytecode[4] == 0xCC
+ fail_index = rffi.cast(rffi.LONGP, bytecode)[0]
+ return fail_index
+
def setup_failure_recovery(self):
def failure_recovery_func(registers):
@@ -889,65 +952,7 @@
# recovery bytecode. See _build_failure_recovery() for details.
stack_at_ebp = registers[ebp.op]
bytecode = rffi.cast(rffi.UCHARP, registers[8])
- num = 0
- value_hi = 0
- while 1:
- # decode the next instruction from the bytecode
- code = rffi.cast(lltype.Signed, bytecode[0])
- bytecode = rffi.ptradd(bytecode, 1)
- if code >= 4*self.DESCR_FROMSTACK:
- if code > 0x7F:
- shift = 7
- code &= 0x7F
- while True:
- nextcode = rffi.cast(lltype.Signed, bytecode[0])
- bytecode = rffi.ptradd(bytecode, 1)
- code |= (nextcode & 0x7F) << shift
- shift += 7
- if nextcode <= 0x7F:
- break
- # load the value from the stack
- kind = code & 3
- code = (code >> 2) - self.DESCR_FROMSTACK
- stackloc = stack_at_ebp + get_ebp_ofs(code)
- value = rffi.cast(rffi.LONGP, stackloc)[0]
- if kind == self.DESCR_FLOAT:
- value_hi = value
- value = rffi.cast(rffi.LONGP, stackloc - 4)[0]
- else:
- # 'code' identifies a register: load its value
- kind = code & 3
- if kind == self.DESCR_SPECIAL:
- if code == self.DESCR_HOLE:
- num += 1
- continue
- assert code == self.DESCR_STOP
- break
- code >>= 2
- if kind == self.DESCR_FLOAT:
- xmmregisters = rffi.ptradd(registers, -16)
- value = xmmregisters[2*code]
- value_hi = xmmregisters[2*code + 1]
- else:
- value = registers[code]
-
- # store the loaded value into fail_boxes_<type>
- if kind == self.DESCR_INT:
- tgt = self.fail_boxes_int.get_addr_for_num(num)
- elif kind == self.DESCR_REF:
- tgt = self.fail_boxes_ptr.get_addr_for_num(num)
- elif kind == self.DESCR_FLOAT:
- tgt = self.fail_boxes_float.get_addr_for_num(num)
- rffi.cast(rffi.LONGP, tgt)[1] = value_hi
- else:
- assert 0, "bogus kind"
- rffi.cast(rffi.LONGP, tgt)[0] = value
- num += 1
- #
- if not we_are_translated():
- assert bytecode[4] == 0xCC
- fail_index = rffi.cast(rffi.LONGP, bytecode)[0]
- return fail_index
+ return self.grab_frame_values(bytecode, stack_at_ebp, registers)
self.failure_recovery_func = failure_recovery_func
self.failure_recovery_code = [0, 0, 0, 0]
@@ -1110,7 +1115,12 @@
def genop_guard_call_may_force(self, op, guard_op, addr,
arglocs, result_loc):
- xxx #...
+ faildescr = guard_op.descr
+ fail_index = self.cpu.get_fail_descr_number(faildescr)
+ self.mc.MOV(mem(ebp, FORCE_INDEX_OFS), imm(fail_index))
+ self.genop_call(op, arglocs, result_loc)
+ self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0))
+ return self.implement_guard(addr, self.mc.JL)
def genop_discard_cond_call_gc_wb(self, op, arglocs):
# use 'mc._mc' directly instead of 'mc', to avoid
@@ -1142,7 +1152,7 @@
mc.overwrite(jz_location-1, [chr(offset)])
def genop_force_token(self, op, arglocs, resloc):
- xxx #self.mc.LEA(resloc, ...)
+ self.mc.LEA(resloc, mem(ebp, FORCE_INDEX_OFS))
def not_implemented_op_discard(self, op, arglocs):
msg = "not implemented operation: %s" % op.getopname()
Modified: pypy/branch/virtual-forcing/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/branch/virtual-forcing/pypy/jit/backend/x86/regalloc.py (original)
+++ pypy/branch/virtual-forcing/pypy/jit/backend/x86/regalloc.py Fri Nov 27 13:31:43 2009
@@ -289,7 +289,8 @@
self.assembler.regalloc_perform_with_guard(op, guard_op, faillocs,
arglocs, result_loc,
self.sm.stack_depth)
- self.rm.possibly_free_var(op.result)
+ if op.result is not None:
+ self.rm.possibly_free_var(op.result)
self.possibly_free_vars(guard_op.fail_args)
def perform_guard(self, guard_op, arglocs, result_loc):
Modified: pypy/branch/virtual-forcing/pypy/jit/backend/x86/runner.py
==============================================================================
--- pypy/branch/virtual-forcing/pypy/jit/backend/x86/runner.py (original)
+++ pypy/branch/virtual-forcing/pypy/jit/backend/x86/runner.py Fri Nov 27 13:31:43 2009
@@ -6,6 +6,7 @@
from pypy.rlib.objectmodel import we_are_translated
from pypy.jit.metainterp import history
from pypy.jit.backend.x86.assembler import Assembler386
+from pypy.jit.backend.x86.regalloc import FORCE_INDEX_OFS
from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU
class CPU386(AbstractLLCPU):
@@ -86,27 +87,24 @@
adr = llmemory.cast_ptr_to_adr(x)
return CPU386.cast_adr_to_int(adr)
- def force(self, stack_base):
- # args parameter is there only for types
- XXX # rewrite, kill
+ def force(self, addr_of_force_index):
TP = rffi.CArrayPtr(lltype.Signed)
- rffi.cast(TP, stack_base + self.virtualizable_ofs)[0] = 1
- # move things to latest values
- arglocs = self.assembler.rebuild_faillocs_from_descr(
- descr._x86_failure_recovery_bytecode)
- assert len(arglocs) == len(args)
- for i in range(len(arglocs)):
- arg = args[i]
- argloc = arglocs[i]
- if arg.type == history.FLOAT:
- xxx
- elif arg.type == history.REF:
- xxx
- elif arg.type == history.INT:
- pos = stack_base + argloc.ofs_relative_to_ebp()
- self.assembler.fail_boxes_int.setitem(i, rffi.cast(TP, pos)[0])
- else:
- raise NotImplementedError
+ fail_index = rffi.cast(TP, addr_of_force_index)[0]
+ if fail_index < 0:
+ xxx # write a test and kill this line
+ return # already forced
+ faildescr = self.get_fail_descr_from_number(fail_index)
+ rffi.cast(TP, addr_of_force_index)[0] = -1
+ bytecode = rffi.cast(rffi.UCHARP,
+ faildescr._x86_failure_recovery_bytecode)
+ # start of "no gc operation!" block
+ fail_index_2 = self.assembler.grab_frame_values(
+ bytecode,
+ addr_of_force_index - FORCE_INDEX_OFS)
+ self.assembler.leave_jitted_hook()
+ # end of "no gc operation!" block
+ assert fail_index == fail_index_2
+
class CPU386_NO_SSE2(CPU386):
supports_floats = False
Modified: pypy/branch/virtual-forcing/pypy/rpython/llinterp.py
==============================================================================
--- pypy/branch/virtual-forcing/pypy/rpython/llinterp.py (original)
+++ pypy/branch/virtual-forcing/pypy/rpython/llinterp.py Fri Nov 27 13:31:43 2009
@@ -807,9 +807,6 @@
def op_gc__collect(self, *gen):
self.heap.collect(*gen)
- def op_gc_assume_young_pointers(self, addr):
- raise NotImplementedError
-
def op_gc_heap_stats(self):
raise NotImplementedError
Modified: pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/lloperation.py
==============================================================================
--- pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/lloperation.py (original)
+++ pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/lloperation.py Fri Nov 27 13:31:43 2009
@@ -460,7 +460,7 @@
# allocating non-GC structures only
'gc_thread_run' : LLOp(),
'gc_thread_die' : LLOp(),
- 'gc_assume_young_pointers': LLOp(),
+ 'gc_assume_young_pointers': LLOp(canrun=True),
'gc_heap_stats' : LLOp(canunwindgc=True),
# ------- JIT & GC interaction, only for some GCs ----------
Modified: pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/opimpl.py
==============================================================================
--- pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/opimpl.py (original)
+++ pypy/branch/virtual-forcing/pypy/rpython/lltypesystem/opimpl.py Fri Nov 27 13:31:43 2009
@@ -486,6 +486,9 @@
def op_get_member_index(memberoffset):
raise NotImplementedError
+def op_gc_assume_young_pointers(addr):
+ pass
+
# ____________________________________________________________
def get_op_impl(opname):
More information about the Pypy-commit
mailing list