[pypy-svn] r74753 - in pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86: . test
jcreigh at codespeak.net
Tue May 25 22:57:00 CEST 2010
Author: jcreigh
Date: Tue May 25 22:56:58 2010
New Revision: 74753
Modified:
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_gc_integration.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_jump.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_regalloc.py
Log:
fix some more test failures
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py Tue May 25 22:56:58 2010
@@ -202,16 +202,16 @@
self.malloc_fixedsize_slowpath1 = mc.tell()
if self.cpu.supports_floats: # save the XMM registers in
for i in range(8): # the *caller* frame, from esp+8
- mc.MOVSD(mem64(esp, 8+8*i), xmm_registers[i])
+ mc.MOVSD_sr(8+8*i, i)
mc.SUB(edx, eax) # compute the size we want
- mc.MOV(mem(esp, 4), edx) # save it as the new argument
+ mc.MOV_sr(4, edx.value) # save it as the new argument
addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr()
mc.JMP_l(addr) # tail call to the real malloc
# ---------- second helper for the slow path of malloc ----------
self.malloc_fixedsize_slowpath2 = mc.tell()
if self.cpu.supports_floats: # restore the XMM registers
for i in range(8): # from where they were saved
- mc.MOVSD(xmm_registers[i], mem64(esp, 8+8*i))
+ mc.MOVSD_rs(i, 8+8*i)
nursery_free_adr = self.cpu.gc_ll_descr.get_nursery_free_addr()
mc.MOV(edx, heap(nursery_free_adr)) # load this in EDX
mc.RET()
@@ -467,19 +467,19 @@
self.mc.MOVSD_sr(0, loc.value)
elif isinstance(loc, StackLoc) and loc.width == 8:
# XXX evil trick
- self.mc.PUSH(mem(ebp, get_ebp_ofs(loc.position)))
- self.mc.PUSH(mem(ebp, get_ebp_ofs(loc.position + 1)))
+ self.mc.PUSH_b(get_ebp_ofs(loc.position))
+ self.mc.PUSH_b(get_ebp_ofs(loc.position + 1))
else:
self.mc.PUSH(loc)
def regalloc_pop(self, loc):
- if isinstance(loc, XMMREG):
- self.mc.MOVSD(loc, mem64(esp, 0))
+ if isinstance(loc, RegLoc) and loc.is_xmm:
+ self.mc.MOVSD_rs(loc.value, 0)
self.mc.ADD(esp, imm(2*WORD))
- elif isinstance(loc, MODRM64):
+ elif isinstance(loc, StackLoc) and loc.width == 8:
# XXX evil trick
- self.mc.POP(mem(ebp, get_ebp_ofs(loc.position + 1)))
- self.mc.POP(mem(ebp, get_ebp_ofs(loc.position)))
+ self.mc.POP_b(get_ebp_ofs(loc.position + 1))
+ self.mc.POP_b(get_ebp_ofs(loc.position))
else:
self.mc.POP(loc)
@@ -527,8 +527,6 @@
def _cmpop(cond, rev_cond):
def genop_cmp(self, op, arglocs, result_loc):
- # Clear high bits
- self.mc.MOV_ri(result_loc.value, 0)
rl = result_loc.lowest8bits()
if isinstance(op.args[0], Const):
self.mc.CMP(arglocs[1], arglocs[0])
@@ -536,6 +534,7 @@
else:
self.mc.CMP(arglocs[0], arglocs[1])
self.mc.SET_ir(rx86.Conditions[cond], rl.value)
+ self.mc.MOVZX8_rr(result_loc.value, rl.value)
return genop_cmp
def _cmpop_float(cond, is_ne=False):
@@ -740,8 +739,8 @@
def genop_int_is_true(self, op, arglocs, resloc):
self.mc.CMP(arglocs[0], imm(0))
rl = resloc.lowest8bits()
- self.mc.SETNE(rl)
- self.mc.MOVZX(resloc, rl)
+ self.mc.SET_ir(rx86.Conditions['NE'], rl.value)
+ self.mc.MOVZX8(resloc, rl)
def genop_guard_bool_not(self, op, guard_op, addr, arglocs, resloc):
guard_opnum = guard_op.opnum
@@ -1415,16 +1414,16 @@
arglocs, result_loc):
faildescr = guard_op.descr
fail_index = self.cpu.get_fail_descr_number(faildescr)
- self.mc.MOV(mem(ebp, FORCE_INDEX_OFS), imm(fail_index))
+ self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
self.genop_call(op, arglocs, result_loc)
- self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0))
+ self.mc.CMP_bi(FORCE_INDEX_OFS, 0)
return self.implement_guard(addr, 'L')
def genop_guard_call_assembler(self, op, guard_op, addr,
arglocs, result_loc):
faildescr = guard_op.descr
fail_index = self.cpu.get_fail_descr_number(faildescr)
- self.mc.MOV(mem(ebp, FORCE_INDEX_OFS), imm(fail_index))
+ self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
descr = op.descr
assert isinstance(descr, LoopToken)
assert len(arglocs) - 2 == len(descr._x86_arglocs[0])
@@ -1432,11 +1431,11 @@
tmp=eax)
mc = self._start_block()
mc.CMP(eax, imm(self.cpu.done_with_this_frame_int_v))
- mc.JE(rel8_patched_later)
+ mc.J_il8(rx86.Conditions['E'], 0) # patched later
je_location = mc.get_relative_pos()
self._emit_call(rel32(self.assembler_helper_adr), [eax, arglocs[1]], 0,
tmp=ecx, force_mc=True, mc=mc)
- mc.JMP(rel8_patched_later)
+ mc.JMP_l8(0) # patched later
jmp_location = mc.get_relative_pos()
offset = jmp_location - je_location
assert 0 < offset <= 127
@@ -1446,11 +1445,11 @@
assert 0 < offset <= 127
mc.overwrite(jmp_location - 1, [chr(offset)])
self._stop_block()
- if isinstance(result_loc, MODRM64):
+ if isinstance(result_loc, StackLoc) and result_loc.width == 8:
self.mc.FSTP(result_loc)
else:
assert result_loc is eax or result_loc is None
- self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0))
+ self.mc.CMP_bi(FORCE_INDEX_OFS, 0)
return self.implement_guard(addr, 'L')
def genop_discard_cond_call_gc_wb(self, op, arglocs):
@@ -1464,7 +1463,7 @@
mc = self._start_block()
mc.TEST(mem8(loc_base, descr.jit_wb_if_flag_byteofs),
imm8(descr.jit_wb_if_flag_singlebyte))
- mc.JZ(rel8_patched_later)
+ mc.J_il8(rx86.Conditions['Z'], 0) # patched later
jz_location = mc.get_relative_pos()
# the following is supposed to be the slow path, so whenever possible
# we choose the most compact encoding over the most efficient one.
@@ -1524,7 +1523,7 @@
mc.MOV(eax, heap(nursery_free_adr))
mc.LEA(edx, addr_add(eax, imm(size)))
mc.CMP(edx, heap(nursery_top_adr))
- mc.JNA(rel8_patched_later)
+ mc.J_il8(rx86.Conditions['NA'], 0) # patched later
jmp_adr = mc.get_relative_pos()
# See comments in _build_malloc_fixedsize_slowpath for the
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py Tue May 25 22:56:58 2010
@@ -968,7 +968,7 @@
shape = gcrootmap.get_basic_shape()
for v, val in self.fm.frame_bindings.items():
if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)):
- assert isinstance(val, MODRM)
+ assert isinstance(val, StackLoc)
gcrootmap.add_ebp_offset(shape, get_ebp_ofs(val.position))
for v, reg in self.rm.reg_bindings.items():
if (isinstance(v, BoxPtr) and self.rm.stays_alive(v)):
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py Tue May 25 22:56:58 2010
@@ -430,7 +430,7 @@
MOV8_mr = insn(rex_w, '\x88', byte_register(2, 8), mem_reg_plus_const(1))
- MOVZX8_rr = insn(rex_w, '\x0F\xB6', register(1,8), byte_register(2))
+ MOVZX8_rr = insn(rex_w, '\x0F\xB6', register(1,8), byte_register(2), '\xC0')
MOVZX8_rm = insn(rex_w, '\x0F\xB6', register(1,8), mem_reg_plus_const(2))
MOVZX8_ra = insn(rex_w, '\x0F\xB6', register(1,8), mem_reg_plus_scaled_reg_plus_const(2))
@@ -454,6 +454,8 @@
CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2))
CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32)
+ CMP_rj = insn(rex_w, '\x3B', register(1, 8), '\x05', immediate(2))
+
AND8_rr = insn(rex_w, '\x20', byte_register(1), byte_register(2,8), '\xC0')
OR8_rr = insn(rex_w, '\x08', byte_register(1), byte_register(2,8), '\xC0')
@@ -483,11 +485,15 @@
RET = insn('\xC3')
PUSH_r = insn(rex_nw, register(1), '\x50')
+ PUSH_b = insn(rex_nw, '\xFF', orbyte(6<<3), stack_bp(1))
+
POP_r = insn(rex_nw, register(1), '\x58')
+ POP_b = insn(rex_nw, '\x8F', orbyte(0<<3), stack_bp(1))
LEA_rb = insn(rex_w, '\x8D', register(1,8), stack_bp(2))
LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True))
LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2))
+ LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2))
CALL_l = insn('\xE8', relative(1))
CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3)))
@@ -497,9 +503,10 @@
XCHG_rj = insn(rex_w, '\x87', register(1,8), '\x05', immediate(2))
JMP_l = insn('\xE9', relative(1))
- # FIXME: J_il8 assume the caller will do the appropriate calculation
- # to find the displacement, but J_il does it for the caller.
+ # FIXME: J_il8 and JMP_l8 assume the caller will do the appropriate
+ # calculation to find the displacement, but J_il does it for the caller.
# We need to be consistent.
+ JMP_l8 = insn('\xEB', immediate(1, 'b'))
J_il8 = insn(immediate(1, 'o'), '\x70', immediate(2, 'b'))
J_il = insn('\x0F', immediate(1,'o'), '\x80', relative(2))
@@ -631,6 +638,8 @@
py.test.skip("XCGH_rj unsupported")
def CMP_ji(self, addr, immed):
py.test.skip("CMP_ji unsupported")
+ def CMP_rj(self, reg, immed):
+ py.test.skip("CMP_rj unsupported")
def MOVSD_rj(self, xmm_reg, mem_immed):
py.test.skip("MOVSD_rj unsupported")
def MOVSD_jr(self, xmm_reg, mem_immed):
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_gc_integration.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_gc_integration.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_gc_integration.py Tue May 25 22:56:58 2010
@@ -14,7 +14,6 @@
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
from pypy.rpython.annlowlevel import llhelper
from pypy.rpython.lltypesystem import rclass, rstr
-from pypy.jit.backend.x86.ri386 import *
from pypy.jit.backend.llsupport.gc import GcLLDescr_framework, GcRefList, GcPtrFieldDescr
from pypy.jit.backend.x86.test.test_regalloc import MockAssembler
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_jump.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_jump.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_jump.py Tue May 25 22:56:58 2010
@@ -1,4 +1,4 @@
-from pypy.jit.backend.x86.ri386 import *
+from pypy.jit.backend.x86.regloc import *
from pypy.jit.backend.x86.regalloc import X86FrameManager
from pypy.jit.backend.x86.jump import remap_frame_layout
@@ -25,7 +25,7 @@
continue
assert len(op1) == len(op2)
for x, y in zip(op1, op2):
- if isinstance(x, MODRM) and isinstance(y, MODRM):
+ if isinstance(x, StackLoc) and isinstance(y, MODRM):
assert x.byte == y.byte
assert x.extradata == y.extradata
else:
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_regalloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_regalloc.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_regalloc.py Tue May 25 22:56:58 2010
@@ -14,7 +14,7 @@
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
from pypy.rpython.annlowlevel import llhelper
from pypy.rpython.lltypesystem import rclass, rstr
-from pypy.jit.backend.x86.ri386 import *
+from pypy.jit.backend.x86.rx86 import *
class MockGcDescr(GcCache):
def get_funcptr_for_new(self):
More information about the Pypy-commit
mailing list