[pypy-svn] r74723 - in pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86: . test
jcreigh at codespeak.net
Tue May 25 00:50:50 CEST 2010
Author: jcreigh
Date: Tue May 25 00:50:48 2010
New Revision: 74723
Modified:
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
Log:
more progress. some tests actually pass
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py Tue May 25 00:50:48 2010
@@ -1048,7 +1048,8 @@
addr, locs, ign_2):
mc = self._start_block()
mc.CMP(locs[0], imm8(1))
- mc.JB(rel8_patched_later)
+ # Patched below
+ mc.J_il8(rx86.Conditions['B'], 0)
jb_location = mc.get_relative_pos()
self._cmp_guard_class(mc, locs)
# patch the JB above
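For context, J_il8 here emits the conditional jump with a dummy 8-bit displacement, and the displacement byte is overwritten once the size of the skipped code is known. A minimal sketch of the pattern (the overwrite() helper on the machine-code builder is assumed, not shown in this diff):

    mc.J_il8(rx86.Conditions['B'], 0)      # dummy rel8, patched below
    jb_location = mc.get_relative_pos()    # position just past the jump
    # ... emit the code being jumped over ...
    offset = mc.get_relative_pos() - jb_location
    assert 0 < offset <= 127               # must still fit in a signed byte
    mc.overwrite(jb_location - 1, chr(offset))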
@@ -1592,15 +1593,23 @@
num = getattr(rop, opname.upper())
genop_list[num] = value
+def round_up_to_4(size):
+ if size < 4:
+ return 4
+ return size
+
+# XXX: ri386 migration shims:
+
+def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0):
+ return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset)
+
def addr_add_const(reg_or_imm1, offset):
- # XXX: ri386 migration shim
return AddressLoc(reg_or_imm1, ImmedLoc(0), 0, offset)
def mem(loc, offset):
- # XXX: ri386 migration shim
- return AddressLoc(loc, ImmedLoc(0), (0), offset)
+ return AddressLoc(loc, ImmedLoc(0), 0, offset)
-def round_up_to_4(size):
- if size < 4:
- return 4
- return size
+def heap(addr):
+ return AddressLoc(ImmedLoc(addr), ImmedLoc(0), 0, 0)
+
+imm8 = imm
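These shims let old ri386-style call sites keep working on top of AddressLoc. A quick usage sketch (register names are the usual ones from regloc; the bracketed operands are my reading of the constructor arguments, not part of the commit):

    mem(ebp, 8)                  # AddressLoc(ebp, ImmedLoc(0), 0, 8) -> [ebp+8]
    heap(0x1234)                 # absolute operand [0x1234]
    addr_add(esi, edi, 4, 2)     # [esi + edi*4 + 4] (scale is a shift count)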
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py Tue May 25 00:50:48 2010
@@ -76,6 +76,9 @@
def getint(self):
return self.value
+ def __repr__(self):
+ return "ImmedLoc(%d)" % self.value
+
class AddressLoc(AssemblerLocation):
_immutable_ = True
@@ -90,8 +93,8 @@
self._location_code = 'j'
self.value = base_loc.value + (scaled_loc.value << scale) + static_offset
else:
- # FIXME
- raise AssertionError("Don't know how to handle this case yet")
+ self._location_code = 'a'
+ self.value = (None, scaled_loc.value, scale, static_offset)
else:
if isinstance(scaled_loc, ImmedLoc):
# FIXME: What if base_loc is ebp or esp?
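So the 'a' location code carries a (base, index, scaleshift, offset) tuple, with None meaning "no base register"; the matching encoder special case is added to rx86.py below. An illustrative construction (the concrete values are mine):

    # AddressLoc(ImmedLoc(0), edi, 2, 0xCD) describes [edi*4 + 0xCD]:
    # _location_code == 'a', value == (None, 7, 2, 0xCD),
    # and a MOV from it dispatches to mc.MOV_ra(reg, (None, 7, 2, 0xCD))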
@@ -132,6 +135,7 @@
ADD = _binaryop('ADD')
OR = _binaryop('OR')
XOR = _binaryop('XOR')
+ TEST = _binaryop('TEST')
AND = _binaryop('AND')
SUB = _binaryop('SUB')
@@ -141,8 +145,10 @@
CMP = _binaryop('CMP')
MOV = _binaryop('MOV')
MOV8 = _binaryop('MOV8')
- MOVZX8 = _binaryop("MOVZX8")
- MOVZX16 = _binaryop("MOVZX16")
+ MOVZX8 = _binaryop('MOVZX8')
+ MOVZX16 = _binaryop('MOVZX16')
+
+ LEA = _binaryop('LEA')
MOVSD = _binaryop('MOVSD')
ADDSD = _binaryop('ADDSD')
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py Tue May 25 00:50:48 2010
@@ -189,8 +189,20 @@
# emit "reg1 + (reg2 << scaleshift) + offset"
assert reg1 != R.ebp and reg2 != R.esp
assert 0 <= scaleshift < 4
- reg1 = reg_number_3bits(mc, reg1)
reg2 = reg_number_3bits(mc, reg2)
+
+ # Special case for no base register
+ if reg1 is None:
+ # modrm
+ mc.writechar(chr(0x04 | orbyte))
+ # SIB
+ mc.writechar(chr((scaleshift<<6) | (reg2<<3) | 5))
+ # We're forced to output a disp32, even if offset == 0
+ mc.writeimm32(offset)
+ return 0
+
+ reg1 = reg_number_3bits(mc, reg1)
+
SIB = chr((scaleshift<<6) | (reg2<<3) | reg1)
#
no_offset = offset == 0
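To make the byte layout concrete, here is a self-contained sketch of the no-base case (plain Python, independent of the rx86 helpers; the function name is made up):

    import struct

    def encode_index_only(opcode, reg, index, scaleshift, offset):
        # ModRM: mod=00, reg=target, r/m=100 -> a SIB byte follows
        modrm = chr(0x04 | (reg << 3))
        # SIB with base=101 under mod=00 means "no base, disp32 follows"
        sib = chr((scaleshift << 6) | (index << 3) | 5)
        return opcode + modrm + sib + struct.pack('<i', offset)

    # MOV edx, [edi*4 + 0xCD]  (edx=2, edi=7); matches the new
    # test_mov_ra_no_base below:
    assert (encode_index_only('\x8B', 2, 7, 2, 0xCD) ==
            '\x8B\x14\xBD\xCD\x00\x00\x00')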
@@ -348,6 +360,7 @@
MOV_ri = insn(rex_w, register(1), '\xB8', immediate(2, 'q'))
MOV_rr = insn(rex_w, '\x89', register(2,8), register(1), '\xC0')
+ MOV_bi = insn(rex_w, '\xC7', stack_bp(1), immediate(2))
MOV_br = insn(rex_w, '\x89', register(2,8), stack_bp(1))
MOV_rb = insn(rex_w, '\x8B', register(1,8), stack_bp(2))
MOV_sr = insn(rex_w, '\x89', register(2,8), stack_sp(1))
@@ -387,13 +400,13 @@
XOR_ri, XOR_rr, XOR_rb, _, _ = common_modes(6)
CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br = common_modes(7)
- _CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b'))
- _CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2))
- CMP_mi = select_8_or_32_bit_immed(_CMP_mi8, _CMP_mi32)
-
- _CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b'))
- _CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2))
- CMP_ji = select_8_or_32_bit_immed(_CMP_ji8, _CMP_ji32)
+ CMP_mi8 = insn(rex_w, '\x83', orbyte(7<<3), mem_reg_plus_const(1), immediate(2, 'b'))
+ CMP_mi32 = insn(rex_w, '\x81', orbyte(7<<3), mem_reg_plus_const(1), immediate(2))
+ CMP_mi = select_8_or_32_bit_immed(CMP_mi8, CMP_mi32)
+
+ CMP_ji8 = insn(rex_w, '\x83', '\x3D', immediate(1), immediate(2, 'b'))
+ CMP_ji32 = insn(rex_w, '\x81', '\x3D', immediate(1), immediate(2))
+ CMP_ji = select_8_or_32_bit_immed(CMP_ji8, CMP_ji32)
NEG_r = insn(rex_w, '\xF7', register(1), '\xD8')
@@ -403,9 +416,9 @@
IMUL_rr = insn(rex_w, '\x0F\xAF', register(1, 8), register(2), '\xC0')
IMUL_rb = insn(rex_w, '\x0F\xAF', register(1, 8), stack_bp(2))
- _IMUL_rri8 = insn(rex_w, '\x6B', register(1, 8), register(2), '\xC0', immediate(3, 'b'))
- _IMUL_rri32 = insn(rex_w, '\x69', register(1, 8), register(2), '\xC0', immediate(3))
- IMUL_rri = select_8_or_32_bit_immed(_IMUL_rri8, _IMUL_rri32)
+ IMUL_rri8 = insn(rex_w, '\x6B', register(1, 8), register(2), '\xC0', immediate(3, 'b'))
+ IMUL_rri32 = insn(rex_w, '\x69', register(1, 8), register(2), '\xC0', immediate(3))
+ IMUL_rri = select_8_or_32_bit_immed(IMUL_rri8, IMUL_rri32)
def IMUL_ri(self, reg, immed):
self.IMUL_rri(reg, reg, immed)
@@ -420,6 +433,7 @@
LEA_rb = insn(rex_w, '\x8D', register(1,8), stack_bp(2))
LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True))
+ LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2))
CALL_l = insn('\xE8', relative(1))
CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3)))
@@ -429,12 +443,18 @@
XCHG_rj = insn(rex_w, '\x87', register(1,8), '\x05', immediate(2))
JMP_l = insn('\xE9', relative(1))
+ # FIXME: J_il8 assumes the caller will do the appropriate calculation
+ # to find the displacement, but J_il does it for the caller.
+ # We need to be consistent.
+ J_il8 = insn(immediate(1, 'o'), '\x70', immediate(2, 'b'))
J_il = insn('\x0F', immediate(1,'o'), '\x80', relative(2))
SET_ir = insn('\x0F', immediate(1,'o'),'\x90', register(2), '\xC0')
# The 64-bit version of this, CQO, is defined in X86_64_CodeBuilder
CDQ = insn(rex_nw, '\x99')
+ TEST_rr = insn(rex_w, '\x85', register(2,8), register(1), '\xC0')
+
# ------------------------------ SSE2 ------------------------------
MOVSD_rr = xmminsn('\xF2', rex_nw, '\x0F\x10', register(1,8), register(2), '\xC0')
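To spell out the FIXME above: with J_il8 the caller supplies the final rel8 byte itself (here always 0, back-patched later), while J_il takes a target and relative() derives the rel32 displacement at emit time. Contrasting pseudo-calls (illustrative only):

    mc.J_il8(rx86.Conditions['B'], 0)           # raw displacement byte
    mc.J_il(rx86.Conditions['B'], target_addr)  # displacement computed for you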
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py Tue May 25 00:50:48 2010
@@ -63,6 +63,15 @@
'\x8B\x54\xBE\x80' +
'\x8B\x94\xBE\x80\x00\x00\x00')
+def test_mov_ra_no_base():
+ s = CodeBuilder32()
+ s.MOV_ra(edx, (None, edi, 2, 0))
+ assert s.getvalue() == '\x8B\x14\xBD\x00\x00\x00\x00'
+
+ s = CodeBuilder32()
+ s.MOV_ra(edx, (None, edi, 2, 0xCD))
+ assert s.getvalue() == '\x8B\x14\xBD\xCD\x00\x00\x00'
+
def test_mov_ar():
s = CodeBuilder32()
s.MOV_ar((esi, edi, 2, 0), edx)
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86_32_auto_encoding.py Tue May 25 00:50:48 2010
@@ -230,13 +230,14 @@
return X86_CodeBuilder
def complete_test(self, methname):
- if methname.split('_')[0][-1].isdigit():
- print "artificial instruction: %r" % (methname,)
- return
if '_' in methname:
instrname, argmodes = methname.split('_')
else:
instrname, argmodes = methname, ''
+ if instrname[-1].isdigit() or (argmodes != '' and argmodes[-1].isdigit()):
+ print "artificial instruction: %r" % (methname,)
+ return
+
print "Testing %s with argmodes=%r" % (instrname, argmodes)
self.methname = methname
self.is_xmm_insn = getattr(getattr(rx86.AbstractX86CodeBuilder,