[pypy-svn] r75592 - pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86
jcreigh at codespeak.net
Fri Jun 25 16:04:40 CEST 2010
Author: jcreigh
Date: Fri Jun 25 16:04:39 2010
New Revision: 75592
Modified:
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
Log:
clean up regloc a bit, and stop saying CALL_j when we mean CALL_i
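
As context for the rename: instruction methods in this backend are named
MNEMONIC_ plus one operand-code letter per operand. The sketch below is a
hypothetical illustration of that convention; the code-letter meanings are
inferred from this backend's usage, not quoted from it:

    # Hypothetical illustration of the operand-code suffix convention:
    #   'r' = GPR register, 'b' = ebp-relative stack slot, 'i' = immediate,
    #   'j' = absolute address used as a memory operand, 'm' = (reg, offset),
    #   'x' = xmm register
    def method_name(mnemonic, *codes):
        return mnemonic + "_" + "".join(codes)

    assert method_name("CALL", "i") == "CALL_i"   # call a constant target
    assert method_name("MOV", "r", "i") == "MOV_ri"

A CALL to a fixed address takes an immediate operand, hence CALL_i; a CALL_j
would imply first dereferencing an absolute memory location.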
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py Fri Jun 25 16:04:39 2010
@@ -21,7 +21,7 @@
X86_64_SCRATCH_REG,
X86_64_XMM_SCRATCH_REG,
RegLoc, StackLoc,
- ImmedLoc, AddressLoc, imm, rel32)
+ ImmedLoc, AddressLoc, imm)
from pypy.rlib.objectmodel import we_are_translated, specialize
from pypy.jit.backend.x86 import rx86, regloc, codebuf
@@ -217,7 +217,7 @@
mc.MOV_rr(edi.value, edx.value)
addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr()
- mc.JMP_l(addr) # tail call to the real malloc
+ mc.JMP(imm(addr)) # tail call to the real malloc
# ---------- second helper for the slow path of malloc ----------
self.malloc_fixedsize_slowpath2 = mc.tell()
if self.cpu.supports_floats: # restore the XMM registers
@@ -784,7 +784,7 @@
self.mark_gc_roots()
def call(self, addr, args, res):
- self._emit_call(rel32(addr), args)
+ self._emit_call(imm(addr), args)
assert res is eax
genop_int_neg = _unaryop("NEG")
@@ -893,7 +893,7 @@
return self.implement_guard(guard_token, 'Z')
def genop_int_is_zero(self, op, arglocs, resloc):
- self.mc.CMP(arglocs[0], imm8(0))
+ self.mc.CMP(arglocs[0], imm(0))
rl = resloc.lowest8bits()
self.mc.SET_ir(rx86.Conditions['E'], rl.value)
self.mc.MOVZX8(resloc, rl)
@@ -976,11 +976,11 @@
assert isinstance(ofs, ImmedLoc)
assert isinstance(scale, ImmedLoc)
if op.result.type == FLOAT:
- self.mc.MOVSD(resloc, addr64_add(base_loc, ofs_loc, ofs.value,
+ self.mc.MOVSD(resloc, addr_add(base_loc, ofs_loc, ofs.value,
scale.value))
else:
if scale.value == 0:
- self.mc.MOVZX8(resloc, addr8_add(base_loc, ofs_loc, ofs.value,
+ self.mc.MOVZX8(resloc, addr_add(base_loc, ofs_loc, ofs.value,
scale.value))
elif (1 << scale.value) == WORD:
self.mc.MOV(resloc, addr_add(base_loc, ofs_loc, ofs.value,
@@ -1172,7 +1172,7 @@
def genop_guard_guard_nonnull_class(self, ign_1, guard_op,
guard_token, locs, ign_2):
mc = self._start_block()
- mc.CMP(locs[0], imm8(1))
+ mc.CMP(locs[0], imm(1))
# Patched below
mc.J_il8(rx86.Conditions['B'], 0)
jb_location = mc.get_relative_pos()
@@ -1515,7 +1515,7 @@
size = sizeloc.value
if isinstance(op.args[0], Const):
- x = rel32(op.args[0].getint())
+ x = imm(op.args[0].getint())
else:
x = arglocs[1]
if x is eax:
@@ -1551,7 +1551,7 @@
assert len(arglocs) - 2 == len(descr._x86_arglocs[0])
#
# Write a call to the direct_bootstrap_code of the target assembler
- self._emit_call(rel32(descr._x86_direct_bootstrap_code), arglocs, 2,
+ self._emit_call(imm(descr._x86_direct_bootstrap_code), arglocs, 2,
tmp=eax)
mc = self._start_block()
if op.result is None:
@@ -1575,7 +1575,7 @@
je_location = mc.get_relative_pos()
#
# Path A: use assembler_helper_adr
- self._emit_call(rel32(self.assembler_helper_adr), [eax, arglocs[1]], 0,
+ self._emit_call(imm(self.assembler_helper_adr), [eax, arglocs[1]], 0,
tmp=ecx, force_mc=True, mc=mc)
if IS_X86_32 and isinstance(result_loc, StackLoc) and result_loc.type == FLOAT:
mc.FSTP_b(result_loc.value)
@@ -1655,7 +1655,7 @@
# misaligned stack in the call, but it's ok because the write barrier
# is not going to call anything more. Also, this assumes that the
# write barrier does not touch the xmm registers.
- mc.CALL(heap(descr.get_write_barrier_fn(self.cpu)))
+ mc.CALL(imm(descr.get_write_barrier_fn(self.cpu)))
for i in range(len(arglocs)):
loc = arglocs[i]
assert isinstance(loc, RegLoc)
@@ -1759,11 +1759,8 @@
return size
# XXX: ri386 migration shims:
-
def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0):
return AddressLoc(reg_or_imm1, reg_or_imm2, scale, offset)
-addr64_add = addr_add
-addr8_add = addr_add
def addr_add_const(reg_or_imm1, offset):
return AddressLoc(reg_or_imm1, ImmedLoc(0), 0, offset)
@@ -1773,5 +1770,3 @@
def heap(addr):
return AddressLoc(ImmedLoc(addr), ImmedLoc(0), 0, 0)
-
-imm8 = imm
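
For reference, a hedged sketch of what the surviving addr_add() shim encodes,
using a stand-in class rather than the real AddressLoc (only the constructor
argument order above is taken from the patch; everything else is
illustration):

    class _AddressLocModel(object):
        # invented stand-in for AddressLoc(base, index, scale, offset)
        def __init__(self, base, index, scale, offset):
            self.base, self.index, self.scale, self.offset = \
                base, index, scale, offset
        def __repr__(self):
            return "[%s + %s*%d + %d]" % (self.base, self.index,
                                          1 << self.scale, self.offset)

    def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0):
        return _AddressLocModel(reg_or_imm1, reg_or_imm2, scale, offset)

    print(addr_add("rdi", "rsi", 16, 3))   # -> [rdi + rsi*8 + 16]

One generic addr_add suffices because the operand width comes from the
instruction (MOVSD vs. MOVZX8 vs. MOV), not from the address itself, which is
presumably why the addr64_add/addr8_add aliases could go.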
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py Fri Jun 25 16:04:39 2010
@@ -3,6 +3,7 @@
from pypy.rlib.unroll import unrolling_iterable
from pypy.jit.backend.x86.arch import WORD
from pypy.tool.sourcetools import func_with_new_name
+from pypy.rlib.objectmodel import specialize
#
# This module adds support for "locations", which can be either in a Const,
@@ -155,6 +156,13 @@
unrolling_location_codes = unrolling_iterable(list("rbsmajix"))
+@specialize.arg(1)
+def _rx86_getattr(obj, methname):
+ if hasattr(rx86.AbstractX86CodeBuilder, methname):
+ return getattr(obj, methname)
+ else:
+ raise AssertionError(methname + " undefined")
+
class LocationCodeBuilder(object):
_mixin_ = True
@@ -170,47 +178,34 @@
if code1 == possible_code1:
for possible_code2 in unrolling_location_codes:
if code2 == possible_code2:
- # FIXME: Not RPython anymore!
- # Fake out certain operations for x86_64
val1 = getattr(loc1, "value_" + possible_code1)()
val2 = getattr(loc2, "value_" + possible_code2)()
- # XXX: Could use RIP+disp32 in some cases
+ # Fake out certain operations for x86_64
if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2):
if possible_code1 == 'j':
- # This is the worst case: MOV_ji, and both operands are 64-bit
+ # This is the worst case: INSN_ji, and both operands are 64-bit
# Hopefully this doesn't happen too often
self.PUSH_r(eax.value)
self.MOV_ri(eax.value, val1)
self.MOV_ri(X86_64_SCRATCH_REG.value, val2)
- self.MOV_mr((eax.value, 0), X86_64_SCRATCH_REG.value)
+ methname = name + "_mr"
+ _rx86_getattr(self, methname)((eax.value, 0), X86_64_SCRATCH_REG.value)
self.POP_r(eax.value)
else:
self.MOV_ri(X86_64_SCRATCH_REG.value, val2)
methname = name + "_" + possible_code1 + "r"
- if hasattr(rx86.AbstractX86CodeBuilder, methname):
- getattr(self, methname)(val1, X86_64_SCRATCH_REG.value)
- else:
- assert False, "a"
+ _rx86_getattr(self, methname)(val1, X86_64_SCRATCH_REG.value)
elif self.WORD == 8 and possible_code1 == 'j':
reg_offset = self._addr_as_reg_offset(val1)
methname = name + "_" + "m" + possible_code2
- if hasattr(rx86.AbstractX86CodeBuilder, methname):
- getattr(self, methname)(reg_offset, val2)
- else:
- assert False, "b"
+ _rx86_getattr(self, methname)(reg_offset, val2)
elif self.WORD == 8 and possible_code2 == 'j':
reg_offset = self._addr_as_reg_offset(val2)
methname = name + "_" + possible_code1 + "m"
- if hasattr(rx86.AbstractX86CodeBuilder, methname):
- getattr(self, methname)(val1, reg_offset)
- else:
- assert False, "c"
+ _rx86_getattr(self, methname)(val1, reg_offset)
else:
methname = name + "_" + possible_code1 + possible_code2
- if hasattr(rx86.AbstractX86CodeBuilder, methname):
- getattr(self, methname)(val1, val2)
- else:
- assert False, "d"
+ _rx86_getattr(self, methname)(val1, val2)
return func_with_new_name(INSN, "INSN_" + name)
@@ -219,13 +214,13 @@
code = loc.location_code()
for possible_code in unrolling_location_codes:
if code == possible_code:
- methname = name + "_" + possible_code
- if hasattr(rx86.AbstractX86CodeBuilder, methname):
- val = getattr(loc, "value_" + possible_code)()
- getattr(self, methname)(val)
- return
+ val = getattr(loc, "value_" + possible_code)()
+ if self.WORD == 8 and possible_code == 'i' and not rx86.fits_in_32bits(val):
+ self.MOV_ri(X86_64_SCRATCH_REG.value, val)
+ _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value)
else:
- raise AssertionError("Instruction not defined: " + methname)
+ methname = name + "_" + possible_code
+ _rx86_getattr(self, methname)(val)
return func_with_new_name(INSN, "INSN_" + name)
@@ -312,6 +307,7 @@
XORPD = _binaryop('XORPD')
CALL = _unaryop('CALL')
+ JMP = _unaryop('JMP')
def imm(x):
# XXX: ri386 migration shim
@@ -320,10 +316,6 @@
else:
return ImmedLoc(x)
-def rel32(x):
- # XXX: ri386 migration shim
- return AddressLoc(ImmedLoc(x), ImmedLoc(0))
-
all_extra_instructions = [name for name in LocationCodeBuilder.__dict__
if name[0].isupper()]
all_extra_instructions.sort()
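
A hedged model of why the dispatch above stays RPython-friendly: an
unrolling_iterable is expanded at translation time, so in each expanded
branch the method name is effectively a compile-time constant and
_rx86_getattr's hasattr check runs against a known name. A plain-Python
approximation follows; FakeBuilder and the example operands are invented for
illustration:

    class FakeBuilder(object):
        # invented stand-in for rx86.AbstractX86CodeBuilder
        def MOV_ri(self, reg, imm):
            print("MOV_ri %d, %d" % (reg, imm))

    def _rx86_getattr_model(obj, methname):
        # mirrors the helper added above, minus the @specialize.arg(1) hint
        if hasattr(FakeBuilder, methname):
            return getattr(obj, methname)
        raise AssertionError(methname + " undefined")

    _rx86_getattr_model(FakeBuilder(), "MOV_ri")(0, 42)   # emits MOV_ri 0, 42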
Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py (original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py Fri Jun 25 16:04:39 2010
@@ -489,6 +489,8 @@
XCHG_rr = insn(rex_w, '\x87', register(1), register(2,8), '\xC0')
JMP_l = insn('\xE9', relative(1))
+ # FIXME
+ JMP_i = JMP_l
JMP_r = insn(rex_nw, '\xFF', orbyte(4<<3), register(1), '\xC0')
# FIXME: J_il8 and JMP_l8 assume the caller will do the appropriate
# calculation to find the displacement, but J_il does it for the caller.
@@ -518,7 +520,7 @@
CVTTSD2SI_rb = xmminsn('\xF2', rex_w, '\x0F\x2C', register(1, 8), stack_bp(2))
# XXX: hack
- def CALL_j(self):
+ def CALL_i(self):
assert False
# ------------------------------------------------------------
@@ -555,7 +557,7 @@
# XXX: Bit of kludge, but works in 32-bit because the relative 32-bit
# displacement is always enough to encode any address
- CALL_j = AbstractX86CodeBuilder.CALL_l
+ CALL_i = AbstractX86CodeBuilder.CALL_l
class X86_64_CodeBuilder(AbstractX86CodeBuilder):
@@ -595,7 +597,7 @@
AbstractX86CodeBuilder.CALL_r(self, R.eax)
# XXX
- CALL_j = CALL_l
+ CALL_i = CALL_l
def define_modrm_modes(insnname_template, before_modrm, after_modrm=[], regtype='GPR'):
def add_insn(code, *modrm):
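
On the 32-bit kludge above (CALL_i = CALL_l): a relative 32-bit displacement
wraps modulo 2**32, so on x86-32 any absolute target is reachable from any
call site with the rel32 encoding. A hedged arithmetic sketch, with made-up
addresses:

    def call_rel32_displacement(insn_addr, target):
        # CALL rel32 (opcode E8) is 5 bytes; the displacement is relative
        # to the address of the *next* instruction, modulo 2**32
        return (target - (insn_addr + 5)) % (2 ** 32)

    assert call_rel32_displacement(0x08048000, 0x08048005) == 0     # falls through
    assert call_rel32_displacement(0xFFFFFFF0, 0x00000010) == 0x1B  # wraps around

On x86-64 this no longer holds, which is consistent with the 64-bit builder's
CALL_l loading the target into a register and issuing CALL_r, as the hunk
above shows.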