[pypy-svn] r74637 - in pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86: . test tool

jcreigh at codespeak.net jcreigh at codespeak.net
Fri May 21 17:43:22 CEST 2010


Author: jcreigh
Date: Fri May 21 17:43:20 2010
New Revision: 74637

Added:
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py
      - copied, changed from r74549, pypy/branch/remove-ri386-multimethod-2/pypy/jit/backend/x86/regloc.py
Modified:
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/codebuf.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/jump.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_assembler.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py
   pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh
Log:
Committing what I have before attempting a minor refactor. (A few tests even pass!)

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/assembler.py	Fri May 21 17:43:20 2010
@@ -10,9 +10,15 @@
 from pypy.jit.backend.x86.regalloc import RegAlloc, WORD,\
      X86RegisterManager, X86XMMRegisterManager, get_ebp_ofs, FRAME_FIXED_SIZE,\
      FORCE_INDEX_OFS
+
+from pypy.jit.backend.x86.regloc import (eax, ecx, edx, ebx,
+                                         esp, ebp, esi, edi,
+                                         xmm0, xmm1, xmm2, xmm3,
+                                         xmm4, xmm5, xmm6, xmm7,
+                                         RegLoc, StackLoc, ImmedLoc, imm)
+
 from pypy.rlib.objectmodel import we_are_translated, specialize
-from pypy.jit.backend.x86 import codebuf
-from pypy.jit.backend.x86.ri386 import *
+from pypy.jit.backend.x86 import rx86, regloc, codebuf
 from pypy.jit.metainterp.resoperation import rop
 from pypy.jit.backend.x86.support import values_array
 from pypy.rlib.debug import debug_print
@@ -62,7 +68,7 @@
     def make_new_mc(self):
         new_mc = self._instantiate_mc()
         debug_print('[new machine code block at', new_mc.tell(), ']')
-        self._mc.JMP(rel32(new_mc.tell()))
+        self._mc.JMP_l(new_mc.tell())
 
         if self.function_name is not None:
             self.end_function(done=False)
@@ -89,6 +95,9 @@
     method.func_name = name
     return method
 
+for _name in rx86.all_instructions + regloc.all_extra_instructions:
+    setattr(MachineCodeBlockWrapper, _name, _new_method(_name))
+
 for name in dir(codebuf.MachineCodeBlock):
     if name.upper() == name or name == "writechr":
         setattr(MachineCodeBlockWrapper, name, _new_method(name))
@@ -183,8 +192,8 @@
         addr[5] = 2147483647       # / for abs
         addr[6] = 0                #
         addr[7] = 0                #
-        self.loc_float_const_neg = heap64(float_constants)
-        self.loc_float_const_abs = heap64(float_constants + 16)
+        self.loc_float_const_neg = float_constants
+        self.loc_float_const_abs = float_constants + 16
 
     def _build_malloc_fixedsize_slowpath(self):
         mc = self.mc2._mc
@@ -196,7 +205,7 @@
         mc.SUB(edx, eax)                      # compute the size we want
         mc.MOV(mem(esp, 4), edx)              # save it as the new argument
         addr = self.cpu.gc_ll_descr.get_malloc_fixedsize_slowpath_addr()
-        mc.JMP(rel32(addr))                   # tail call to the real malloc
+        mc.JMP_l(addr)                        # tail call to the real malloc
         # ---------- second helper for the slow path of malloc ----------
         self.malloc_fixedsize_slowpath2 = mc.tell()
         if self.cpu.supports_floats:          # restore the XMM registers
@@ -295,7 +304,7 @@
     def patch_jump_for_descr(self, faildescr, adr_new_target):
         adr_jump_offset = faildescr._x86_adr_jump_offset
         mc = codebuf.InMemoryCodeBuilder(adr_jump_offset, adr_jump_offset + 4)
-        mc.write(packimm32(adr_new_target - adr_jump_offset - 4))
+        mc.writeimm32(adr_new_target - adr_jump_offset - 4)
         mc.valgrind_invalidated()
         mc.done()
 
@@ -318,7 +327,7 @@
 
     def _patchable_stackadjust(self):
         # stack adjustment LEA
-        self.mc.LEA(esp, fixedsize_ebp_ofs(0))
+        self.mc.LEA32_rb(esp.value, 0)
         return self.mc.tell() - 4
 
     def _patch_stackadjust(self, adr_lea, reserved_depth):
@@ -330,12 +339,12 @@
         words = (FRAME_FIXED_SIZE - 1) + reserved_depth
         # align, e.g. for Mac OS X        
         aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP
-        mc.write(packimm32(-WORD * aligned_words))
+        mc.writeimm32(-WORD * aligned_words)
         mc.done()
 
     def _call_header(self):
         self.mc.PUSH(ebp)
-        self.mc.MOV(ebp, esp)
+        self.mc.MOV_rr(ebp.value, esp.value)
         self.mc.PUSH(ebx)
         self.mc.PUSH(esi)
         self.mc.PUSH(edi)
@@ -354,23 +363,26 @@
         self._patch_stackadjust(adr_stackadjust, stackdepth)
         for i in range(len(nonfloatlocs)):
             loc = nonfloatlocs[i]
-            if isinstance(loc, REG):
-                self.mc.MOV(loc, mem(ebp, (2 + i) * WORD))
+            if isinstance(loc, RegLoc):
+                assert not loc.is_xmm
+                self.mc.MOV_rb(loc.value, (2 + i) * WORD)
             loc = floatlocs[i]
-            if isinstance(loc, XMMREG):
-                self.mc.MOVSD(loc, mem64(ebp, (1 + i) * 2 * WORD))
+            if isinstance(loc, RegLoc):
+                assert loc.is_xmm
+                self.mc.MOVSD_rb(loc.value, (1 + i) * 2 * WORD)
         tmp = eax
         xmmtmp = xmm0
         for i in range(len(nonfloatlocs)):
             loc = nonfloatlocs[i]
-            if loc is not None and not isinstance(loc, REG):
-                self.mc.MOV(tmp, mem(ebp, (2 + i) * WORD))
+            if loc is not None and not isinstance(loc, RegLoc):
+                self.mc.MOV_rb(tmp.value, (2 + i) * WORD)
                 self.mc.MOV(loc, tmp)
             loc = floatlocs[i]
-            if loc is not None and not isinstance(loc, XMMREG):
-                self.mc.MOVSD(xmmtmp, mem64(ebp, (1 + i) * 2 * WORD))
-                self.mc.MOVSD(loc, xmmtmp)
-        self.mc.JMP(rel32(jmpadr))
+            if loc is not None and not isinstance(loc, RegLoc):
+                self.mc.MOVSD_rb(xmmtmp.value, (1 + i) * 2 * WORD)
+                assert isinstance(loc, StackLoc)
+                self.mc.MOVSD_br(loc.value, xmmtmp.value)
+        self.mc.JMP_l(jmpadr)
         return adr_stackadjust
 
     def _assemble_bootstrap_code(self, inputargs, arglocs):
@@ -382,7 +394,7 @@
             loc = nonfloatlocs[i]
             if loc is None:
                 continue
-            if isinstance(loc, REG):
+            if isinstance(loc, RegLoc):
                 target = loc
             else:
                 target = tmp
@@ -391,22 +403,24 @@
                 # reading them
                 self.mc.XOR(target, target)
                 adr = self.fail_boxes_ptr.get_addr_for_num(i)
-                self.mc.XCHG(target, heap(adr))
+                self.mc.XCHG_rj(target.value, adr)
             else:
                 adr = self.fail_boxes_int.get_addr_for_num(i)
-                self.mc.MOV(target, heap(adr))
+                self.mc.MOV_rj(target.value, adr)
             if target is not loc:
-                self.mc.MOV(loc, target)
+                assert isinstance(loc, StackLoc)
+                self.mc.MOV_br(loc.value, target.value)
         for i in range(len(floatlocs)):
             loc = floatlocs[i]
             if loc is None:
                 continue
             adr = self.fail_boxes_float.get_addr_for_num(i)
-            if isinstance(loc, REG):
-                self.mc.MOVSD(loc, heap64(adr))
+            if isinstance(loc, RegLoc):
+                self.mc.MOVSD_rj(loc.value, adr)
             else:
-                self.mc.MOVSD(xmmtmp, heap64(adr))
-                self.mc.MOVSD(loc, xmmtmp)
+                self.mc.MOVSD_rj(xmmtmp.value, adr)
+                assert isinstance(loc, StackLoc)
+                self.mc.MOVSD_br(loc.value, xmmtmp.value)
         return adr_stackadjust
 
     def dump(self, text):
@@ -439,7 +453,7 @@
     # ------------------------------------------------------------
 
     def mov(self, from_loc, to_loc):
-        if isinstance(from_loc, XMMREG) or isinstance(to_loc, XMMREG):
+        if (isinstance(from_loc, RegLoc) and from_loc.is_xmm) or (isinstance(to_loc, RegLoc) and to_loc.is_xmm):
             self.mc.MOVSD(to_loc, from_loc)
         else:
             self.mc.MOV(to_loc, from_loc)
@@ -447,10 +461,10 @@
     regalloc_mov = mov # legacy interface
 
     def regalloc_push(self, loc):
-        if isinstance(loc, XMMREG):
-            self.mc.SUB(esp, imm(2*WORD))
-            self.mc.MOVSD(mem64(esp, 0), loc)
-        elif isinstance(loc, MODRM64):
+        if isinstance(loc, RegLoc) and loc.is_xmm:
+            self.mc.SUB_ri(esp.value, 2*WORD)
+            self.mc.MOVSD_sr(0, loc.value)
+        elif isinstance(loc, StackLoc) and loc.width == 8:
             # XXX evil trick
             self.mc.PUSH(mem(ebp, get_ebp_ofs(loc.position)))
             self.mc.PUSH(mem(ebp, get_ebp_ofs(loc.position + 1)))
@@ -543,19 +557,15 @@
             if isinstance(op.args[0], Const):
                 self.mc.CMP(arglocs[1], arglocs[0])
                 if guard_opnum == rop.GUARD_FALSE:
-                    name = 'J' + rev_cond
-                    return self.implement_guard(addr, getattr(self.mc, name))
+                    return self.implement_guard(addr, rev_cond)
                 else:
-                    name = 'J' + false_rev_cond
-                    return self.implement_guard(addr, getattr(self.mc, name))
+                    return self.implement_guard(addr, false_rev_cond)
             else:
                 self.mc.CMP(arglocs[0], arglocs[1])
                 if guard_opnum == rop.GUARD_FALSE:
-                    name = 'J' + cond
-                    return self.implement_guard(addr, getattr(self.mc, name))
+                    return self.implement_guard(addr, cond)
                 else:
-                    name = 'J' + false_cond
-                    return self.implement_guard(addr, getattr(self.mc, name))
+                    return self.implement_guard(addr, false_cond)
         return genop_cmp_guard
 
     def _cmpop_guard_float(cond, false_cond, need_jp):
@@ -575,9 +585,8 @@
                     mc = self.mc._mc
                     mc.JP(rel8(2))
                     getattr(mc, 'J' + cond)(rel8(5))
-                    return self.implement_guard(addr, mc.JMP)
-                name = 'J' + false_cond
-                return self.implement_guard(addr, getattr(self.mc, name))
+                    return self.implement_guard(addr)
+                return self.implement_guard(addr, false_cond)
         return genop_cmp_guard_float
 
     @specialize.arg(5)
@@ -589,29 +598,30 @@
         n = len(arglocs)
         for i in range(start, n):
             loc = arglocs[i]
-            if isinstance(loc, REG):
-                if isinstance(loc, XMMREG):
-                    mc.MOVSD(mem64(esp, p), loc)
+            if isinstance(loc, RegLoc):
+                if loc.is_xmm:
+                    mc.MOVSD_mr((esp.value, p), loc.value)
                 else:
-                    mc.MOV(mem(esp, p), loc)
+                    mc.MOV_mr((esp.value, p), loc.value)
             p += round_up_to_4(loc.width)
         p = 0
         for i in range(start, n):
             loc = arglocs[i]
-            if not isinstance(loc, REG):
-                if isinstance(loc, MODRM64):
+            if not isinstance(loc, RegLoc):
+                # if isinstance(loc, MODRM64):
+                if False:
                     mc.MOVSD(xmm0, loc)
                     mc.MOVSD(mem64(esp, p), xmm0)
                 else:
                     mc.MOV(tmp, loc)
-                    mc.MOV(mem(esp, p), tmp)
+                    mc.MOV_sr(p, tmp.value)
             p += round_up_to_4(loc.width)
         self._regalloc.reserve_param(p//WORD)
-        mc.CALL(x)
+        mc.CALL_l(x)
         self.mark_gc_roots()
         
     def call(self, addr, args, res):
-        self._emit_call(rel32(addr), args)
+        self._emit_call(addr, args)
         assert res is eax
 
     genop_int_neg = _unaryop("NEG")
@@ -679,7 +689,7 @@
         else:
             mc.JP(rel8(2))
             mc.JE(rel8(5))
-            return self.implement_guard(addr, mc.JMP)
+            return self.implement_guard(addr)
 
     def genop_float_neg(self, op, arglocs, resloc):
         # Following what gcc does: res = x ^ 0x8000000000000000
@@ -702,7 +712,7 @@
         else:
             mc.JP(rel8(2))
             mc.JZ(rel8(5))
-            return self.implement_guard(addr, mc.JMP)
+            return self.implement_guard(addr)
 
     def genop_float_is_true(self, op, arglocs, resloc):
         loc0, loc1 = arglocs
@@ -741,28 +751,28 @@
 
     def genop_guard_int_is_true(self, op, guard_op, addr, arglocs, resloc):
         guard_opnum = guard_op.opnum
-        self.mc.CMP(arglocs[0], imm8(0))
+        self.mc.CMP(arglocs[0], imm(0))
         if guard_opnum == rop.GUARD_TRUE:
-            return self.implement_guard(addr, self.mc.JZ)
+            return self.implement_guard(addr, 'Z')
         else:
-            return self.implement_guard(addr, self.mc.JNZ)
+            return self.implement_guard(addr, 'NZ')
 
     def genop_int_is_true(self, op, arglocs, resloc):
-        self.mc.CMP(arglocs[0], imm8(0))
+        self.mc.CMP(arglocs[0], imm(0))
         rl = resloc.lowest8bits()
         self.mc.SETNE(rl)
         self.mc.MOVZX(resloc, rl)
 
     def genop_guard_bool_not(self, op, guard_op, addr, arglocs, resloc):
         guard_opnum = guard_op.opnum
-        self.mc.CMP(arglocs[0], imm8(0))
+        self.mc.CMP(arglocs[0], imm(0))
         if guard_opnum == rop.GUARD_TRUE:
-            return self.implement_guard(addr, self.mc.JNZ)
+            return self.implement_guard(addr, 'NZ')
         else:
-            return self.implement_guard(addr, self.mc.JZ)
+            return self.implement_guard(addr, 'Z')
 
     def genop_bool_not(self, op, arglocs, resloc):
-        self.mc.XOR(arglocs[0], imm8(1))
+        self.mc.XOR(arglocs[0], imm(1))
 
     def genop_same_as(self, op, arglocs, resloc):
         self.mov(arglocs[0], resloc)
@@ -771,18 +781,18 @@
 
     def genop_int_mod(self, op, arglocs, resloc):
         self.mc.CDQ()
-        self.mc.IDIV(ecx)
+        self.mc.IDIV_r(ecx.value)
 
     genop_int_floordiv = genop_int_mod
 
     def genop_uint_floordiv(self, op, arglocs, resloc):
         self.mc.XOR(edx, edx)
-        self.mc.DIV(ecx)
+        self.mc.DIV_r(ecx.value)
 
     def genop_new_with_vtable(self, op, arglocs, result_loc):
         assert result_loc is eax
         loc_vtable = arglocs[-1]
-        assert isinstance(loc_vtable, IMM32)
+        assert isinstance(loc_vtable, ImmedLoc)
         arglocs = arglocs[:-1]
         self.call(self.malloc_func_addr, arglocs, eax)
         # xxx ignore NULL returns for now
@@ -790,7 +800,9 @@
 
     def set_vtable(self, loc, loc_vtable):
         if self.cpu.vtable_offset is not None:
-            self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable)
+            assert isinstance(loc, RegLoc)
+            assert isinstance(loc_vtable, ImmedLoc)
+            self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value)
 
     # XXX genop_new is abused for all varsized mallocs with Boehm, for now
     # (instead of genop_new_array, genop_newstr, genop_newunicode)
@@ -812,18 +824,25 @@
 
     def genop_getfield_gc(self, op, arglocs, resloc):
         base_loc, ofs_loc, size_loc = arglocs
-        assert isinstance(size_loc, IMM32)
+        assert isinstance(size_loc, ImmedLoc)
+        assert isinstance(resloc, RegLoc)
         size = size_loc.value
+
+        # FIXME: Should be done in one instruction
+        self.mc.PUSH(eax)
+        self.mc.MOV(eax, base_loc)
+        self.mc.ADD(eax, ofs_loc)
         if size == 1:
-            self.mc.MOVZX(resloc, addr8_add(base_loc, ofs_loc))
+            self.mc.MOVZX8_rm(resloc.value, (eax.value, 0))
         elif size == 2:
-            self.mc.MOVZX(resloc, addr_add(base_loc, ofs_loc))
+            self.mc.MOVZX16_rm(resloc.value, (eax.value, 0))
         elif size == WORD:
-            self.mc.MOV(resloc, addr_add(base_loc, ofs_loc))
+            self.mc.MOV_rm(resloc.value, (eax.value, 0))
         elif size == 8:
-            self.mc.MOVSD(resloc, addr64_add(base_loc, ofs_loc))
+            self.mc.MOVSD_rm(resloc.value, (eax.value, 0))
         else:
             raise NotImplementedError("getfield size = %d" % size)
+        self.mc.POP(eax)
 
     genop_getfield_raw = genop_getfield_gc
     genop_getfield_raw_pure = genop_getfield_gc
@@ -831,8 +850,8 @@
 
     def genop_getarrayitem_gc(self, op, arglocs, resloc):
         base_loc, ofs_loc, scale, ofs = arglocs
-        assert isinstance(ofs, IMM32)
-        assert isinstance(scale, IMM32)
+        assert isinstance(ofs, ImmedLoc)
+        assert isinstance(scale, ImmedLoc)
         if op.result.type == FLOAT:
             self.mc.MOVSD(resloc, addr64_add(base_loc, ofs_loc, ofs.value,
                                              scale.value))
@@ -851,24 +870,34 @@
 
     def genop_discard_setfield_gc(self, op, arglocs):
         base_loc, ofs_loc, size_loc, value_loc = arglocs
-        assert isinstance(size_loc, IMM32)
+        assert isinstance(size_loc, ImmedLoc)
         size = size_loc.value
+        # FIXME: Should be done in one instruction
+        self.mc.PUSH(eax)
+        self.mc.PUSH(ebx)
+        self.mc.MOV(eax, base_loc)
+        self.mc.ADD(eax, ofs_loc)
+        self.mc.MOV(ebx, value_loc)
         if size == WORD * 2:
-            self.mc.MOVSD(addr64_add(base_loc, ofs_loc), value_loc)
+            self.mc.MOVSD_mr((eax.value, 0), ebx.value)
         elif size == WORD:
-            self.mc.MOV(addr_add(base_loc, ofs_loc), value_loc)
+            self.mc.MOV_mr((eax.value, 0), ebx.value)
         elif size == 2:
-            self.mc.MOV16(addr_add(base_loc, ofs_loc), value_loc)
+            # XXX: Select 16-bit operand mode in a non-ugly way
+            self.mc.writechar('\x66')
+            self.mc.MOV_mr((eax.value, 0), ebx.value)
         elif size == 1:
-            self.mc.MOV(addr8_add(base_loc, ofs_loc), value_loc.lowest8bits())
+            self.mc.MOV8_mr((eax.value, 0), ebx.lowest8bits())
         else:
             print "[asmgen]setfield addr size %d" % size
             raise NotImplementedError("Addr size %d" % size)
+        self.mc.POP(ebx)
+        self.mc.POP(eax)
 
     def genop_discard_setarrayitem_gc(self, op, arglocs):
         base_loc, ofs_loc, value_loc, scale_loc, baseofs = arglocs
-        assert isinstance(baseofs, IMM32)
-        assert isinstance(scale_loc, IMM32)
+        assert isinstance(baseofs, ImmedLoc)
+        assert isinstance(scale_loc, ImmedLoc)
         if op.args[2].type == FLOAT:
             self.mc.MOVSD(addr64_add(base_loc, ofs_loc, baseofs.value,
                                      scale_loc.value), value_loc)
@@ -918,7 +947,7 @@
 
     def genop_arraylen_gc(self, op, arglocs, resloc):
         base_loc, ofs_loc = arglocs
-        assert isinstance(ofs_loc, IMM32)
+        assert isinstance(ofs_loc, ImmedLoc)
         self.mc.MOV(resloc, addr_add_const(base_loc, ofs_loc.value))
 
     def genop_strgetitem(self, op, arglocs, resloc):
@@ -942,13 +971,13 @@
     def genop_guard_guard_true(self, ign_1, guard_op, addr, locs, ign_2):
         loc = locs[0]
         self.mc.TEST(loc, loc)
-        return self.implement_guard(addr, self.mc.JZ)
+        return self.implement_guard(addr, 'Z')
     genop_guard_guard_nonnull = genop_guard_guard_true
 
     def genop_guard_guard_no_exception(self, ign_1, guard_op, addr,
                                        locs, ign_2):
         self.mc.CMP(heap(self.cpu.pos_exception()), imm(0))
-        return self.implement_guard(addr, self.mc.JNZ)
+        return self.implement_guard(addr, 'NZ')
 
     def genop_guard_guard_exception(self, ign_1, guard_op, addr,
                                     locs, resloc):
@@ -956,7 +985,7 @@
         loc1 = locs[1]
         self.mc.MOV(loc1, heap(self.cpu.pos_exception()))
         self.mc.CMP(loc1, loc)
-        addr = self.implement_guard(addr, self.mc.JNE)
+        addr = self.implement_guard(addr, 'NE')
         if resloc is not None:
             self.mc.MOV(resloc, heap(self.cpu.pos_exc_value()))
         self.mc.MOV(heap(self.cpu.pos_exception()), imm(0))
@@ -966,9 +995,9 @@
     def _gen_guard_overflow(self, guard_op, addr):
         guard_opnum = guard_op.opnum
         if guard_opnum == rop.GUARD_NO_OVERFLOW:
-            return self.implement_guard(addr, self.mc.JO)
+            return self.implement_guard(addr, 'O')
         elif guard_opnum == rop.GUARD_OVERFLOW:
-            return self.implement_guard(addr, self.mc.JNO)
+            return self.implement_guard(addr, 'NO')
         else:
             print "int_xxx_ovf followed by", guard_op.getopname()
             raise AssertionError
@@ -988,7 +1017,7 @@
     def genop_guard_guard_false(self, ign_1, guard_op, addr, locs, ign_2):
         loc = locs[0]
         self.mc.TEST(loc, loc)
-        return self.implement_guard(addr, self.mc.JNZ)
+        return self.implement_guard(addr, 'NZ')
     genop_guard_guard_isnull = genop_guard_guard_false
 
     def genop_guard_guard_value(self, ign_1, guard_op, addr, locs, ign_2):
@@ -997,7 +1026,7 @@
             self.mc.UCOMISD(locs[0], locs[1])
         else:
             self.mc.CMP(locs[0], locs[1])
-        return self.implement_guard(addr, self.mc.JNE)
+        return self.implement_guard(addr, 'NE')
 
     def _cmp_guard_class(self, mc, locs):
         offset = self.cpu.vtable_offset
@@ -1011,7 +1040,7 @@
             #   - multiply by 4 and use it as an offset in type_info_group
             #   - add 16 bytes, to go past the TYPE_INFO structure
             loc = locs[1]
-            assert isinstance(loc, IMM32)
+            assert isinstance(loc, ImmedLoc)
             classptr = loc.value
             # here, we have to go back from 'classptr' to the value expected
             # from reading the 16 bits in the object header
@@ -1026,7 +1055,7 @@
         mc = self._start_block()
         self._cmp_guard_class(mc, locs)
         self._stop_block()
-        return self.implement_guard(addr, self.mc.JNE)
+        return self.implement_guard(addr, 'NE')
 
     def genop_guard_guard_nonnull_class(self, ign_1, guard_op,
                                         addr, locs, ign_2):
@@ -1041,7 +1070,7 @@
         mc.overwrite(jb_location-1, [chr(offset)])
         self._stop_block()
         #
-        return self.implement_guard(addr, self.mc.JNE)
+        return self.implement_guard(addr, 'NE')
 
     def implement_guard_recovery(self, guard_opnum, faildescr, failargs,
                                                                fail_locs):
@@ -1068,12 +1097,12 @@
             if box is not None and box.type == FLOAT:
                 withfloats = True
                 break
-        mc.CALL(rel32(self.failure_recovery_code[exc + 2 * withfloats]))
+        mc.CALL_l(self.failure_recovery_code[exc + 2 * withfloats])
         # write tight data that describes the failure recovery
         faildescr._x86_failure_recovery_bytecode = mc.tell()
         self.write_failure_recovery_description(mc, failargs, fail_locs)
         # write the fail_index too
-        mc.write(packimm32(fail_index))
+        mc.writeimm32(fail_index)
         # for testing the decoding, write a final byte 0xCC
         if not we_are_translated():
             mc.writechr(0xCC)
@@ -1102,11 +1131,11 @@
                 else:
                     raise AssertionError("bogus kind")
                 loc = locs[i]
-                if isinstance(loc, MODRM):
+                if isinstance(loc, StackLoc):
                     n = self.CODE_FROMSTACK//4 + loc.position
                 else:
-                    assert isinstance(loc, REG)
-                    n = loc.op
+                    assert isinstance(loc, RegLoc)
+                    n = loc.value
                 n = kind + 4*n
                 while n > 0x7F:
                     mc.writechr((n & 0x7F) | 0x80)
@@ -1159,9 +1188,9 @@
                 kind = code & 3
                 code >>= 2
                 if kind == self.DESCR_FLOAT:
-                    loc = xmm_registers[code]
+                    loc = regloc.XMMREGLOCS[code]
                 else:
-                    loc = registers[code]
+                    loc = regloc.REGLOCS[code]
             arglocs.append(loc)
         return arglocs[:]
 
@@ -1201,7 +1230,7 @@
     @rgc.no_collect
     def grab_frame_values(self, bytecode, frame_addr, allregisters):
         # no malloc allowed here!!
-        self.fail_ebp = allregisters[16 + ebp.op]
+        self.fail_ebp = allregisters[16 + ebp.value]
         num = 0
         value_hi = 0
         while 1:
@@ -1269,7 +1298,7 @@
             # original value of the registers, optionally the original
             # value of XMM registers, and finally a reference to the
             # recovery bytecode.  See _build_failure_recovery() for details.
-            stack_at_ebp = registers[ebp.op]
+            stack_at_ebp = registers[ebp.value]
             bytecode = rffi.cast(rffi.UCHARP, registers[8])
             allregisters = rffi.ptradd(registers, -16)
             return self.grab_frame_values(bytecode, stack_at_ebp, allregisters)
@@ -1297,11 +1326,11 @@
         mc.PUSH(edx)
         mc.PUSH(ecx)
         mc.PUSH(eax)
-        mc.MOV(esi, esp)
+        mc.MOV_rr(esi.value, esp.value)
         if withfloats:
-            mc.SUB(esp, imm(8*8))
+            mc.SUB_ri(esp.value, 8*8)
             for i in range(8):
-                mc.MOVSD(mem64(esp, 8*i), xmm_registers[i])
+                mc.MOVSD_sr(8*i, i)
 
         # we call a provided function that will
         # - call our on_leave_jitted_hook which will mark
@@ -1309,7 +1338,7 @@
         #   avoid unwarranted freeing
         # - optionally save exception depending on the flag
         addr = self.cpu.get_on_leave_jitted_int(save_exception=exc)
-        mc.CALL(rel32(addr))
+        mc.CALL_l(addr)
 
         # the following call saves all values from the stack and from
         # registers to the right 'fail_boxes_<type>' location.
@@ -1320,13 +1349,13 @@
         # generate_quick_failure().  XXX misaligned stack in the call, but
         # it's ok because failure_recovery_func is not calling anything more
         mc.PUSH(esi)
-        mc.CALL(rel32(failure_recovery_func))
+        mc.CALL_l(failure_recovery_func)
         # returns in eax the fail_index
 
         # now we return from the complete frame, which starts from
         # _assemble_bootstrap_code().  The LEA below throws away most
         # of the frame, including all the PUSHes that we did just above.
-        mc.LEA(esp, addr_add(ebp, imm(-3 * WORD)))
+        mc.LEA_rb(esp.value, -3 * WORD)
         mc.POP(edi)    # [ebp-12]
         mc.POP(esi)    # [ebp-8]
         mc.POP(ebx)    # [ebp-4]
@@ -1339,30 +1368,31 @@
         mc = self.mc
         for i in range(len(locs)):
             loc = locs[i]
-            if isinstance(loc, REG):
+            if isinstance(loc, RegLoc):
                 if loc.width == 8:
                     adr = self.fail_boxes_float.get_addr_for_num(i)
-                    mc.MOVSD(heap64(adr), loc)
+                    mc.MOVSD_jr(adr, loc.value)
                 else:
                     if locs_are_ref[i]:
                         adr = self.fail_boxes_ptr.get_addr_for_num(i)
                     else:
                         adr = self.fail_boxes_int.get_addr_for_num(i)
-                    mc.MOV(heap(adr), loc)
+                    mc.MOV_jr(adr, loc.value)
         for i in range(len(locs)):
             loc = locs[i]
-            if not isinstance(loc, REG):
+            if not isinstance(loc, RegLoc):
                 if loc.width == 8:
-                    mc.MOVSD(xmm0, loc)
+                    assert isinstance(loc, StackLoc)
+                    mc.MOVSD_rb(xmm0.value, loc.value)
                     adr = self.fail_boxes_float.get_addr_for_num(i)
-                    mc.MOVSD(heap64(adr), xmm0)
+                    mc.MOVSD_jr(adr, xmm0.value)
                 else:
                     if locs_are_ref[i]:
                         adr = self.fail_boxes_ptr.get_addr_for_num(i)
                     else:
                         adr = self.fail_boxes_int.get_addr_for_num(i)
                     mc.MOV(eax, loc)
-                    mc.MOV(heap(adr), eax)
+                    mc.MOV_jr(adr, eax.value)
 
         # we call a provided function that will
         # - call our on_leave_jitted_hook which will mark
@@ -1370,9 +1400,9 @@
         #   avoid unwarranted freeing
         # - optionally save exception depending on the flag
         addr = self.cpu.get_on_leave_jitted_int(save_exception=exc)
-        mc.CALL(rel32(addr))
+        mc.CALL_l(addr)
 
-        mc.LEA(esp, addr_add(ebp, imm(-3 * WORD)))
+        mc.LEA_rb(esp.value, -3 * WORD)
         mc.MOV(eax, imm(fail_index))
         mc.POP(edi)    # [ebp-12]
         mc.POP(esi)    # [ebp-8]
@@ -1380,14 +1410,19 @@
         mc.POP(ebp)    # [ebp]
         mc.RET()
 
+    # FIXME: I changed the third argument to this method, but I don't know
+    # what to do with @specialize
     @specialize.arg(2)
-    def implement_guard(self, addr, emit_jump):
-        emit_jump(rel32(addr))
+    def implement_guard(self, addr, condition=None):
+        if condition:
+            self.mc.J_il(rx86.Conditions[condition], addr)
+        else:
+            self.mc.JMP_l(addr)
         return self.mc.tell() - 4
 
     def genop_call(self, op, arglocs, resloc):
         sizeloc = arglocs[0]
-        assert isinstance(sizeloc, IMM32)
+        assert isinstance(sizeloc, ImmedLoc)
         size = sizeloc.value
 
         if isinstance(op.args[0], Const):
@@ -1417,7 +1452,7 @@
         self.mc.MOV(mem(ebp, FORCE_INDEX_OFS), imm(fail_index))
         self.genop_call(op, arglocs, result_loc)
         self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0))
-        return self.implement_guard(addr, self.mc.JL)
+        return self.implement_guard(addr, 'L')
 
     def genop_guard_call_assembler(self, op, guard_op, addr,
                                    arglocs, result_loc):
@@ -1450,7 +1485,7 @@
         else:
             assert result_loc is eax or result_loc is None
         self.mc.CMP(mem(ebp, FORCE_INDEX_OFS), imm(0))
-        return self.implement_guard(addr, self.mc.JL)
+        return self.implement_guard(addr, 'L')
 
     def genop_discard_cond_call_gc_wb(self, op, arglocs):
         # use 'mc._mc' directly instead of 'mc', to avoid
@@ -1472,7 +1507,7 @@
         # misaligned stack in the call, but it's ok because the write barrier
         # is not going to call anything more.  Also, this assumes that the
         # write barrier does not touch the xmm registers.
-        mc.CALL(rel32(descr.get_write_barrier_fn(self.cpu)))
+        mc.CALL_l(descr.get_write_barrier_fn(self.cpu))
         for i in range(len(arglocs)):
             loc = arglocs[i]
             assert isinstance(loc, REG)
@@ -1484,7 +1519,9 @@
         self._stop_block()
 
     def genop_force_token(self, op, arglocs, resloc):
-        self.mc.LEA(resloc, mem(ebp, FORCE_INDEX_OFS))
+        # RegAlloc.consider_force_token ensures this:
+        assert isinstance(resloc, RegLoc)
+        self.mc.LEA_rb(resloc.value, FORCE_INDEX_OFS)
 
     def not_implemented_op_discard(self, op, arglocs):
         msg = "not implemented operation: %s" % op.getopname()
@@ -1512,7 +1549,7 @@
         return loop_token._x86_arglocs
 
     def closing_jump(self, loop_token):
-        self.mc.JMP(rel32(loop_token._x86_loop_code))
+        self.mc.JMP_l(loop_token._x86_loop_code)
 
     def malloc_cond_fixedsize(self, nursery_free_adr, nursery_top_adr,
                               size, tid):
@@ -1538,10 +1575,10 @@
         # reserve room for the argument to the real malloc and the
         # 8 saved XMM regs
         self._regalloc.reserve_param(1+16)
-        mc.CALL(rel32(slowpath_addr1))
+        mc.CALL_l(slowpath_addr1)
         self.mark_gc_roots()
         slowpath_addr2 = self.malloc_fixedsize_slowpath2
-        mc.CALL(rel32(slowpath_addr2))
+        mc.CALL_l(slowpath_addr2)
 
         offset = mc.get_relative_pos() - jmp_adr
         assert 0 < offset <= 127
@@ -1568,31 +1605,6 @@
         num = getattr(rop, opname.upper())
         genop_list[num] = value
 
-def new_addr_add(heap, mem, memsib):
-    def addr_add(reg_or_imm1, reg_or_imm2, offset=0, scale=0):
-        if isinstance(reg_or_imm1, IMM32):
-            if isinstance(reg_or_imm2, IMM32):
-                return heap(reg_or_imm1.value + offset +
-                            (reg_or_imm2.value << scale))
-            else:
-                return memsib(None, reg_or_imm2, scale, reg_or_imm1.value + offset)
-        else:
-            if isinstance(reg_or_imm2, IMM32):
-                return mem(reg_or_imm1, offset + (reg_or_imm2.value << scale))
-            else:
-                return memsib(reg_or_imm1, reg_or_imm2, scale, offset)
-    return addr_add
-
-addr8_add = new_addr_add(heap8, mem8, memSIB8)
-addr_add = new_addr_add(heap, mem, memSIB)
-addr64_add = new_addr_add(heap64, mem64, memSIB64)
-
-def addr_add_const(reg_or_imm1, offset):
-    if isinstance(reg_or_imm1, IMM32):
-        return heap(reg_or_imm1.value + offset)
-    else:
-        return mem(reg_or_imm1, offset)
-
 def round_up_to_4(size):
     if size < 4:
         return 4

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/codebuf.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/codebuf.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/codebuf.py	Fri May 21 17:43:20 2010
@@ -2,12 +2,13 @@
 import os, sys
 from pypy.rpython.lltypesystem import lltype, rffi
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
-from pypy.jit.backend.x86.ri386 import I386CodeBuilder
+from pypy.jit.backend.x86.rx86 import X86_32_CodeBuilder
+from pypy.jit.backend.x86.regloc import LocationCodeBuilder
 from pypy.rlib.rmmap import PTR, alloc, free
 from pypy.rlib.debug import make_sure_not_resized
 
 
-class InMemoryCodeBuilder(I386CodeBuilder):
+class InMemoryCodeBuilder(X86_32_CodeBuilder, LocationCodeBuilder):
     _last_dump_start = 0
 
     def __init__(self, start, end):
@@ -31,13 +32,15 @@
     def write(self, listofchars):
         self._pos = self.overwrite(self._pos, listofchars)
 
-    def writechr(self, n):
-        # purely for performance: don't make the one-element list [chr(n)]
+    def writechar(self, char):
         pos = self._pos
         assert pos + 1 <= self._size
-        self._data[pos] = chr(n)
+        self._data[pos] = char
         self._pos = pos + 1
 
+    def writechr(self, n):
+        self.writechar(chr(n))
+
     def get_relative_pos(self):
         return self._pos
 

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/jump.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/jump.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/jump.py	Fri May 21 17:43:20 2010
@@ -1,23 +1,6 @@
 import sys
 from pypy.tool.pairtype import extendabletype
-from pypy.jit.backend.x86.ri386 import *
-
-class __extend__(OPERAND):
-    __metaclass__ = extendabletype
-    def _getregkey(self):
-        raise AssertionError("should only happen to registers and frame "
-                             "positions")
-
-class __extend__(REG):
-    __metaclass__ = extendabletype
-    def _getregkey(self):
-        return ~self.op
-
-class __extend__(MODRM):
-    __metaclass__ = extendabletype
-    def _getregkey(self):
-        return self.position
-
+from pypy.jit.backend.x86.regloc import ImmedLoc, StackLoc
 
 def remap_frame_layout(assembler, src_locations, dst_locations, tmpreg):
     pending_dests = len(dst_locations)
@@ -27,7 +10,7 @@
         srccount[dst._getregkey()] = 0
     for i in range(len(dst_locations)):
         src = src_locations[i]
-        if isinstance(src, IMM32):
+        if isinstance(src, ImmedLoc):
             continue
         key = src._getregkey()
         if key in srccount:
@@ -46,7 +29,7 @@
                 srccount[key] = -1       # means "it's done"
                 pending_dests -= 1
                 src = src_locations[i]
-                if not isinstance(src, IMM32):
+                if not isinstance(src, ImmedLoc):
                     key = src._getregkey()
                     if key in srccount:
                         srccount[key] -= 1
@@ -80,7 +63,7 @@
             assert pending_dests == 0
 
 def _move(assembler, src, dst, tmpreg):
-    if isinstance(dst, MODRM) and isinstance(src, MODRM):
+    if isinstance(dst, StackLoc) and isinstance(src, StackLoc):
         assembler.regalloc_mov(src, tmpreg)
         src = tmpreg
     assembler.regalloc_mov(src, dst)

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regalloc.py	Fri May 21 17:43:20 2010
@@ -5,7 +5,7 @@
 from pypy.jit.metainterp.history import (Box, Const, ConstInt, ConstPtr,
                                          ResOperation, ConstAddr, BoxPtr,
                                          LoopToken, INT, REF, FLOAT)
-from pypy.jit.backend.x86.ri386 import *
+from pypy.jit.backend.x86.regloc import *
 from pypy.rpython.lltypesystem import lltype, ll2ctypes, rffi, rstr
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib import rgc
@@ -108,14 +108,12 @@
     @staticmethod
     def frame_pos(i, size):
         if size == 1:
-            res = mem(ebp, get_ebp_ofs(i))
+            return StackLoc(i, get_ebp_ofs(i), size)
         elif size == 2:
-            res = mem64(ebp, get_ebp_ofs(i + 1))
+            return StackLoc(i, get_ebp_ofs(i+1), size)
         else:
             print "Unimplemented size %d" % i
             raise NotImplementedError("unimplemented size %d" % i)
-        res.position = i
-        return res
 
 class RegAlloc(object):
     exc = False
@@ -253,13 +251,13 @@
             arg = inputargs[i]
             i += 1
             if arg.type == FLOAT:
-                if isinstance(loc, REG):
+                if isinstance(loc, RegLoc):
                     self.xrm.reg_bindings[arg] = loc
                     used[loc] = None
                 else:
                     self.fm.frame_bindings[arg] = loc
             else:
-                if isinstance(loc, REG):
+                if isinstance(loc, RegLoc):
                     self.rm.reg_bindings[arg] = loc
                     used[loc] = None
                 else:
@@ -819,7 +817,7 @@
 
     def consider_setfield_gc(self, op):
         ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr)
-        assert isinstance(size_loc, IMM32)
+        assert isinstance(size_loc, ImmedLoc)
         if size_loc.value == 1:
             need_lower_byte = True
         else:

Copied: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py (from r74549, pypy/branch/remove-ri386-multimethod-2/pypy/jit/backend/x86/regloc.py)
==============================================================================
--- pypy/branch/remove-ri386-multimethod-2/pypy/jit/backend/x86/regloc.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/regloc.py	Fri May 21 17:43:20 2010
@@ -1,4 +1,4 @@
-from pypy.jit.metainterp.history import AbstractValue
+from pypy.jit.metainterp.history import AbstractValue, ConstInt
 from pypy.jit.backend.x86 import rx86
 
 #
@@ -14,27 +14,98 @@
     def _getregkey(self):
         return self.value
 
+
 class StackLoc(AssemblerLocation):
     _immutable_ = True
-    def __init__(self, position, ebp_offset):
+    def __init__(self, position, ebp_offset, num_words):
         assert ebp_offset < 0   # so no confusion with RegLoc.value
         self.position = position
         self.value = ebp_offset
+        # XXX: Word size hardcoded
+        self.width = num_words * 4
     def __repr__(self):
         return '%d(%%ebp)' % (self.value,)
 
+    def location_code(self):
+        return 'b'
+
+    # FIXME: Is this definition of assembler sufficient?
+    def assembler(self):
+        return repr(self)
+
 class RegLoc(AssemblerLocation):
     _immutable_ = True
     def __init__(self, regnum, is_xmm):
         assert regnum >= 0
         self.value = regnum
         self.is_xmm = is_xmm
+        # XXX: Word size
+        if self.is_xmm:
+            self.width = 8
+        else:
+            self.width = 4
     def __repr__(self):
         if self.is_xmm:
             return rx86.R.xmmnames[self.value]
         else:
             return rx86.R.names[self.value]
 
+    def lowest8bits(self):
+        # XXX: Only handling i386 al, cl, dl, bl for now
+        assert self.value < 4
+        return self.value
+
+    def location_code(self):
+        return 'r'
+
+    # FIXME: Is this definition of assembler sufficient?
+    def assembler(self):
+        return '%' + repr(self)
+
+class ImmedLoc(AssemblerLocation):
+    _immutable_ = True
+    # XXX: word size hardcoded. And does this even make sense for an immediate?
+    width = 4
+    def __init__(self, value):
+        self.value = value
+
+    def location_code(self):
+        return 'i'
+
+    def getint(self):
+        return self.value
+
+class AddressLoc(AssemblerLocation):
+    _immutable_ = True
+
+    # The address is base_loc + (scaled_loc << scale) + static_offset
+    def __init__(self, base_loc, scaled_loc, scale, static_offset):
+        assert 0 <= scale < 4
+        assert isinstance(base_loc, ImmedLoc) or isinstance(base_loc, RegLoc)
+        assert isinstance(scaled_loc, ImmedLoc) or isinstance(scaled_loc, RegLoc)
+
+        if isinstance(base_loc, ImmedLoc):
+            if isinstance(scaled_loc, ImmedLoc):
+                self.location_code = 'j'
+                self.value = base_loc.value + (scaled_loc.value << scale) + static_offset
+            else:
+                # FIXME
+                raise AssertionError("Don't know how to handle this case yet")
+        else:
+            if isinstance(scaled_loc, ImmedLoc):
+                # FIXME: What if base_loc is ebp or esp?
+                self.location_code = 'm'
+                self.value = (base_loc.value, (scaled_loc.value << scale) + static_offset)
+            else:
+                self.location_code = 'a'
+                self.value = (base_loc.value, scaled_loc.value, scale, static_offset)
+
+    def location_code(self):
+        return self.location_code
+
+    def value(self):
+        return self.value
+
 REGLOCS = [RegLoc(i, is_xmm=False) for i in range(8)]
 XMMREGLOCS = [RegLoc(i, is_xmm=True) for i in range(8)]
 eax, ecx, edx, ebx, esp, ebp, esi, edi = REGLOCS
@@ -46,14 +117,10 @@
 
     def _binaryop(name):
         def INSN(self, loc1, loc2):
-            assert isinstance(loc1, RegLoc)
-            val1 = loc1.value
-            if isinstance(loc2, RegLoc):
-                getattr(self, name + '_rr')(val1, loc2.value)
-            elif isinstance(loc2, StackLoc):
-                getattr(self, name + '_rb')(val1, loc2.value)
-            else:
-                getattr(self, name + '_ri')(val1, loc2.getint())
+            code1 = loc1.location_code()
+            code2 = loc2.location_code()
+            # XXX: We just hope that the right method exists
+            getattr(self, name + '_' + code1 + code2)(loc1.value, loc2.value)
         return INSN
 
     ADD = _binaryop('ADD')
@@ -61,6 +128,9 @@
     AND = _binaryop('AND')
     SUB = _binaryop('SUB')
     XOR = _binaryop('XOR')
+    MOV = _binaryop('MOV')
+    MOVSD = _binaryop('MOVSD')
+    IMUL = _binaryop('IMUL')
 
     def PUSH(self, loc):
         assert isinstance(loc, RegLoc)
@@ -96,6 +166,12 @@
             assert isinstance(loc0, StackLoc)
             self.CMP_bi(loc0.value, loc1.getint())
 
+def imm(x):
+    # XXX: ri386 migration shim
+    if isinstance(x, ConstInt):
+        return ImmedLoc(x.getint())
+    else:
+        return ImmedLoc(x)
 
 all_extra_instructions = [name for name in LocationCodeBuilder.__dict__
                           if name[0].isupper()]

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/rx86.py	Fri May 21 17:43:20 2010
@@ -339,6 +339,8 @@
     MOV_rr = insn(rex_w, '\x89', register(2,8), register(1), '\xC0')
     MOV_br = insn(rex_w, '\x89', register(2,8), stack_bp(1))
     MOV_rb = insn(rex_w, '\x8B', register(1,8), stack_bp(2))
+    MOV_sr = insn(rex_w, '\x89', register(2,8), stack_sp(1))
+    MOV_rs = insn(rex_w, '\x8B', register(1,8), stack_sp(2))
 
     # "MOV reg1, [reg2+offset]" and the opposite direction
     MOV_rm = insn(rex_w, '\x8B', register(1,8), mem_reg_plus_const(2))
@@ -356,6 +358,11 @@
     MOV_rj = insn(rex_w, '\x8B', register(1,8), '\x05', immediate(2))
     MOV_jr = insn(rex_w, '\x89', register(2,8), '\x05', immediate(1))
 
+    MOV8_mr = insn(rex_w, '\x88', register(2, 8), mem_reg_plus_const(1))
+
+    MOVZX8_rm = insn(rex_w, '\x0F\xB6', register(1,8), mem_reg_plus_const(2))
+    MOVZX16_rm = insn(rex_w, '\x0F\xB7', register(1,8), mem_reg_plus_const(2))
+
     # ------------------------------ Arithmetic ------------------------------
 
     ADD_ri, ADD_rr, ADD_rb, _, _ = common_modes(0)
@@ -365,6 +372,17 @@
     XOR_ri, XOR_rr, XOR_rb, _, _ = common_modes(6)
     CMP_ri, CMP_rr, CMP_rb, CMP_bi, CMP_br = common_modes(7)
 
+    DIV_r = insn(rex_w, '\xF7', register(1), '\xF0')
+    IDIV_r = insn(rex_w, '\xF7', register(1), '\xF8')
+
+    IMUL_rr = insn(rex_w, '\x0F\xAF', register(1, 8), register(2), '\xC0')
+    IMUL_rb = insn(rex_w, '\x0F\xAF', register(1, 8), stack_bp(2))
+    # XXX: There are more efficient encodings of small immediates
+    IMUL_rri = insn(rex_w, '\x69', register(1, 8), register(2), '\xC0', immediate(3))
+
+    def IMUL_ri(self, reg, immed):
+        return self.IMUL_rri(reg, reg, immed)
+
     # ------------------------------ Misc stuff ------------------------------
 
     NOP = insn('\x90')
@@ -381,11 +399,14 @@
     CALL_b = insn('\xFF', orbyte(2<<3), stack_bp(1))
 
     XCHG_rm = insn(rex_w, '\x87', register(1,8), mem_reg_plus_const(2))
+    XCHG_rj = insn(rex_w, '\x87', register(1,8), '\x05', immediate(2))
 
     JMP_l = insn('\xE9', relative(1))
     J_il = insn('\x0F', immediate(1,'o'), '\x80', relative(2))
     SET_ir = insn('\x0F', immediate(1,'o'),'\x90', register(2), '\xC0')
 
+    CDQ = insn(rex_w, '\x99')
+
     # ------------------------------ SSE2 ------------------------------
 
     MOVSD_rr = xmminsn('\xF2', rex_nw, '\x0F\x10', register(1,8), register(2),
@@ -399,6 +420,9 @@
     MOVSD_mr = xmminsn('\xF2', rex_nw, '\x0F\x11', register(2,8),
                                                      mem_reg_plus_const(1))
 
+    MOVSD_rj = xmminsn('\xF2', rex_nw, '\x0F\x10', register(1, 8), '\x05', immediate(2))
+    MOVSD_jr = xmminsn('\xF2', rex_nw, '\x0F\x11', register(2, 8), '\x05', immediate(1))
+
     # ------------------------------------------------------------
 
 Conditions = {
@@ -463,6 +487,12 @@
         py.test.skip("MOV_rj unsupported")
     def MOV_jr(self, mem_immed, reg):
         py.test.skip("MOV_jr unsupported")
+    def XCHG_rj(self, reg, mem_immed):
+        py.test.skip("XCHG_rj unsupported")
+    def MOVSD_rj(self, xmm_reg, mem_immed):
+        py.test.skip("MOVSD_rj unsupported")
+    def MOVSD_jr(self, xmm_reg, mem_immed):
+        py.test.skip("MOVSD_jr unsupported")
 
 # ____________________________________________________________
 

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_assembler.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_assembler.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_assembler.py	Fri May 21 17:43:20 2010
@@ -1,4 +1,4 @@
-from pypy.jit.backend.x86.ri386 import *
+from pypy.jit.backend.x86.regloc import *
 from pypy.jit.backend.x86.assembler import Assembler386, MachineCodeBlockWrapper
 from pypy.jit.backend.x86.regalloc import X86FrameManager, get_ebp_ofs
 from pypy.jit.metainterp.history import BoxInt, BoxPtr, BoxFloat
@@ -21,7 +21,7 @@
         return self.base_address + len(self.content)
     def get_relative_pos(self):
         return len(self.content)
-    def JMP(self, *args):
+    def JMP_l(self, *args):
         self.content.append(("JMP", args))
     def done(self):
         pass
@@ -54,9 +54,9 @@
             Assembler386.DESCR_FLOAT + 4*(8+110),
             Assembler386.CODE_HOLE,
             Assembler386.CODE_HOLE,
-            Assembler386.DESCR_INT   + 4*ebx.op,
-            Assembler386.DESCR_REF   + 4*esi.op,
-            Assembler386.DESCR_FLOAT + 4*xmm2.op]
+            Assembler386.DESCR_INT   + 4*ebx.value,
+            Assembler386.DESCR_REF   + 4*esi.value,
+            Assembler386.DESCR_FLOAT + 4*xmm2.value]
     double_byte_nums = []
     for num in nums[3:6]:
         double_byte_nums.append((num & 0x7F) | 0x80)
@@ -181,9 +181,9 @@
                 value, lo, hi = get_random_float()
                 expected_floats[i] = value
                 kind = Assembler386.DESCR_FLOAT
-                if isinstance(loc, REG):
-                    xmmregisters[2*loc.op] = lo
-                    xmmregisters[2*loc.op+1] = hi
+                if isinstance(loc, RegLoc):
+                    xmmregisters[2*loc.value] = lo
+                    xmmregisters[2*loc.value+1] = hi
                 else:
                     write_in_stack(loc, hi)
                     write_in_stack(loc+1, lo)
@@ -199,13 +199,13 @@
                     value = rffi.cast(rffi.LONG, value)
                 else:
                     assert 0, kind
-                if isinstance(loc, REG):
-                    registers[loc.op] = value
+                if isinstance(loc, RegLoc):
+                    registers[loc.value] = value
                 else:
                     write_in_stack(loc, value)
 
-            if isinstance(loc, REG):
-                num = kind + 4*loc.op
+            if isinstance(loc, RegLoc):
+                num = kind + 4*loc.value
             else:
                 num = kind + 4*(8+loc)
             while num >= 0x80:
@@ -225,7 +225,7 @@
         assert 0 <= descr_bytecode[i] <= 255
         descr_bytes[i] = rffi.cast(rffi.UCHAR, descr_bytecode[i])
     registers[8] = rffi.cast(rffi.LONG, descr_bytes)
-    registers[ebp.op] = rffi.cast(rffi.LONG, stack) + 4*stacklen
+    registers[ebp.value] = rffi.cast(rffi.LONG, stack) + 4*stacklen
 
     # run!
     assembler = Assembler386(FakeCPU())

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/test/test_rx86.py	Fri May 21 17:43:20 2010
@@ -111,6 +111,36 @@
     s.SET_ir(5, 2)
     assert s.getvalue() == '\x0F\x95\xC2'
 
+def test_xchg_rj():
+    s = CodeBuilder32()
+    s.XCHG_rj(edx, 0x01234567)
+    assert s.getvalue() == '\x87\x15\x67\x45\x23\x01'
+
+def test_movsd_rj():
+    s = CodeBuilder32()
+    s.MOVSD_rj(xmm2, 0x01234567)
+    assert s.getvalue() == '\xF2\x0F\x10\x15\x67\x45\x23\x01'
+
+def test_movzx8_rm():
+    s = CodeBuilder32()
+    s.MOVZX8_rm(ecx, (eax, 16))
+    assert s.getvalue() == '\x0F\xB6\x48\x10'
+
+def test_movzx16_rm():
+    s = CodeBuilder32()
+    s.MOVZX16_rm(ecx, (eax, 16))
+    assert s.getvalue() == '\x0F\xB7\x48\x10'
+
+def test_div():
+    s = CodeBuilder32()
+    s.DIV_r(ecx)
+    assert s.getvalue() == '\xF7\xF1'
+
+def test_imul_rri():
+    s = CodeBuilder32()
+    # Multiply ecx by 0x01234567 and store the result in ebx
+    s.IMUL_rri(ebx, ecx, 0x01234567)
+    assert s.getvalue() == '\x69\xD9\x67\x45\x23\x01'
 
 class CodeBuilder64(CodeBuilderMixin, X86_64_CodeBuilder):
     pass

Modified: pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh
==============================================================================
--- pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh	(original)
+++ pypy/branch/x86-64-jit-backend/pypy/jit/backend/x86/tool/instruction_encoding.sh	Fri May 21 17:43:20 2010
@@ -3,10 +3,16 @@
 # Tool to quickly see how the GNU assembler encodes an instruction
 # (AT&T syntax only for now)
 
+# Provide readline if available
+if which rlwrap > /dev/null && [ "$INSIDE_RLWRAP" = "" ]; then
+    export INSIDE_RLWRAP=1
+    exec rlwrap "$0"
+fi
+
 while :; do
     echo -n '? '
     read instruction
     echo "$instruction" | as
-    objdump --disassemble ./a.out | tail -n +8
+    objdump --disassemble ./a.out | grep '^ *[0-9a-f]\+:'
     rm -f ./a.out
 done



More information about the Pypy-commit mailing list