[pypy-commit] pypy ppc-jit-backend: forgot to add files -.-
hager
noreply at buildbot.pypy.org
Mon Jul 18 17:37:44 CEST 2011
Author: hager <sven.hager at uni-duesseldorf.de>
Branch: ppc-jit-backend
Changeset: r45713:7c61615e731c
Date: 2011-07-12 17:41 +0200
http://bitbucket.org/pypy/pypy/changeset/7c61615e731c/
Log: forgot to add files -.-
diff --git a/pypy/jit/backend/ppc/__init__.py b/pypy/jit/backend/ppc/__init__.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/__init__.py
@@ -0,0 +1,1 @@
+#
diff --git a/pypy/jit/backend/ppc/_flush_icache.c b/pypy/jit/backend/ppc/_flush_icache.c
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/_flush_icache.c
@@ -0,0 +1,26 @@
+#include <Python.h>
+#include "../../../translator/c/src/asm_ppc.h"
+
+static PyObject*
+_flush_icache(PyObject *self, PyObject *args)
+{
+ long base, size;
+
+ if (!PyArg_ParseTuple(args, "ii:_flush_icache", &base, &size))
+ return NULL;
+
+ LL_flush_icache(base, size);
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+PyMethodDef _flush_icache_methods[] = {
+ {"_flush_icache", _flush_icache, METH_VARARGS, ""},
+ {0, 0}
+};
+
+PyMODINIT_FUNC
+init_flush_icache(void)
+{
+ Py_InitModule("_flush_icache", _flush_icache_methods);
+}
diff --git a/pypy/jit/backend/ppc/codebuf.py b/pypy/jit/backend/ppc/codebuf.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/codebuf.py
@@ -0,0 +1,5 @@
+from pypy.jit.backend.llsupport.asmmemmgr import BlockBuilderMixin
+
+class MachineCodeBlockWrapper(BlockBuilderMixin):
+ def __init__(self):
+ self.init_block_builder()
diff --git a/pypy/jit/backend/ppc/instruction.py b/pypy/jit/backend/ppc/instruction.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/instruction.py
@@ -0,0 +1,842 @@
+r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, \
+ r13, r14, r15, r16, r17, r18, r19, r20, r21, r22, \
+ r23, r24, r25, r26, r27, r28, r29, r30, r31 = range(32)
+rSCRATCH = r0
+rSP = r1
+rFP = r2 # the ABI doesn't specify a frame pointer. however, we want one
+
+class AllocationSlot(object):
+ offset = 0
+ number = 0
+ def __init__(self):
+ # The field alloc points to a singleton used by the register
+ # allocator to detect conflicts. No two AllocationSlot
+ # instances with the same value in self.alloc can be used at
+ # once.
+ self.alloc = self
+
+ def make_loc(self):
+ """ When we assign a variable to one of these registers, we
+ call make_loc() to get the actual location instance; that
+ instance will have its alloc field set to self. For
+ everything but condition registers, this is self."""
+ return self
+
+class _StackSlot(AllocationSlot):
+ is_register = False
+ def __init__(self, offset):
+ AllocationSlot.__init__(self)
+ self.offset = offset
+ def __repr__(self):
+ return "stack@%s"%(self.offset,)
+
+_stack_slot_cache = {}
+def stack_slot(offset):
+ # because stack slots are put into dictionaries which compare by
+ # identity, it is important that there's a unique _StackSlot
+ # object for each offset, at least per function generated or
+ # something. doing the caching here is easier, though.
+ if offset in _stack_slot_cache:
+ return _stack_slot_cache[offset]
+ _stack_slot_cache[offset] = res = _StackSlot(offset)
+ return res
+
+NO_REGISTER = -1
+GP_REGISTER = 0
+FP_REGISTER = 1
+CR_FIELD = 2
+CT_REGISTER = 3
+
+class Register(AllocationSlot):
+ is_register = True
+ def __init__(self):
+ AllocationSlot.__init__(self)
+
+class GPR(Register):
+ regclass = GP_REGISTER
+ def __init__(self, number):
+ Register.__init__(self)
+ self.number = number
+ def __repr__(self):
+ return 'r' + str(self.number)
+gprs = map(GPR, range(32))
+
+class FPR(Register):
+ regclass = FP_REGISTER
+ def __init__(self, number):
+ Register.__init__(self)
+ self.number = number
+
+fprs = map(FPR, range(32))
+
+class BaseCRF(Register):
+ """ These represent condition registers; however, we never actually
+ use these as the location of something in the register allocator.
+ Instead, we place it in an instance of CRF which indicates which
+ bits are required to extract the value. Note that CRF().alloc will
+ always be an instance of this. """
+ regclass = CR_FIELD
+ def __init__(self, number):
+ self.number = number
+ self.alloc = self
+ def make_loc(self):
+ return CRF(self)
+
+crfs = map(BaseCRF, range(8))
+
+class CRF(Register):
+ regclass = CR_FIELD
+ def __init__(self, crf):
+ Register.__init__(self)
+ self.alloc = crf
+ self.number = crf.number
+ self.info = (-1,-1) # (bit, negated)
+ def set_info(self, info):
+ assert len(info) == 2
+ self.info = info
+ def make_loc(self):
+ # should never call this on a CRF, only a BaseCRF
+ raise NotImplementedError
+ def move_to_gpr(self, gpr):
+ bit, negated = self.info
+ return _CRF2GPR(gpr, self.alloc.number*4 + bit, negated)
+ def move_from_gpr(self, gpr):
+ # cmp2info['ne']
+ self.set_info((2, 1))
+ return _GPR2CRF(self, gpr)
+ def __repr__(self):
+ return 'crf' + str(self.number) + str(self.info)
+
+class CTR(Register):
+ regclass = CT_REGISTER
+ def move_from_gpr(self, gpr):
+ return _GPR2CTR(gpr)
+
+ctr = CTR()
+
+_insn_index = [0]
+
+class Insn(object):
+ '''
+ result is the Var instance that holds the result, or None
+ result_regclass is the class of the register the result goes into
+
+ reg_args is the vars that need to have registers allocated for them
+ reg_arg_regclasses is the type of register that needs to be allocated
+ '''
+ def __init__(self):
+ self._magic_index = _insn_index[0]
+ _insn_index[0] += 1
+ def __repr__(self):
+ return "<%s %d>" % (self.__class__.__name__, self._magic_index)
+ def emit(self, asm):
+ pass
+
+class Insn_GPR__GPR_GPR(Insn):
+ def __init__(self, methptr, result, args):
+ Insn.__init__(self)
+ self.methptr = methptr
+
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ self.reg_args = args
+ self.reg_arg_regclasses = [GP_REGISTER, GP_REGISTER]
+
+ self.result_reg = None
+ self.arg_reg1 = None
+ self.arg_reg2 = None
+
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ self.arg_reg1 = allocator.loc_of(self.reg_args[0])
+ self.arg_reg2 = allocator.loc_of(self.reg_args[1])
+
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ if self.arg_reg1:
+ a1 = "%s@%s"%(self.reg_args[0], self.arg_reg1)
+ else:
+ a1 = str(self.reg_args[0])
+ if self.arg_reg2:
+ a2 = "%s@%s"%(self.reg_args[1], self.arg_reg2)
+ else:
+ a2 = str(self.reg_args[1])
+ return "<%s-%s %s %s, %s, %s>" % (self.__class__.__name__, self._magic_index,
+ self.methptr.im_func.func_name,
+ r, a1, a2)
+
+ def emit(self, asm):
+ self.methptr(asm,
+ self.result_reg.number,
+ self.arg_reg1.number,
+ self.arg_reg2.number)
+
+class Insn_GPR__GPR_IMM(Insn):
+ def __init__(self, methptr, result, args):
+ Insn.__init__(self)
+ self.methptr = methptr
+ self.imm = args[1]
+
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ self.reg_args = [args[0]]
+ self.reg_arg_regclasses = [GP_REGISTER]
+ self.result_reg = None
+ self.arg_reg = None
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ self.arg_reg = allocator.loc_of(self.reg_args[0])
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ if self.arg_reg:
+ a = "%s@%s"%(self.reg_args[0], self.arg_reg)
+ else:
+ a = str(self.reg_args[0])
+ return "<%s-%d %s %s, %s, (%s)>" % (self.__class__.__name__, self._magic_index,
+ self.methptr.im_func.func_name,
+ r, a, self.imm.value)
+
+ def emit(self, asm):
+ self.methptr(asm,
+ self.result_reg.number,
+ self.arg_reg.number,
+ self.imm.value)
+
+class Insn_GPR__GPR(Insn):
+ def __init__(self, methptr, result, arg):
+ Insn.__init__(self)
+ self.methptr = methptr
+
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ self.reg_args = [arg]
+ self.reg_arg_regclasses = [GP_REGISTER]
+
+ self.result_reg = None
+ self.arg_reg = None
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ self.arg_reg = allocator.loc_of(self.reg_args[0])
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ if self.arg_reg:
+ a = "%s@%s"%(self.reg_args[0], self.arg_reg)
+ else:
+ a = str(self.reg_args[0])
+ return "<%s-%d %s %s, %s>" % (self.__class__.__name__, self._magic_index,
+ self.methptr.im_func.func_name, r, a)
+ def emit(self, asm):
+ self.methptr(asm,
+ self.result_reg.number,
+ self.arg_reg.number)
+
+
+class Insn_GPR(Insn):
+ def __init__(self, methptr, result):
+ Insn.__init__(self)
+ self.methptr = methptr
+
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result_reg = None
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ return "<%s-%d %s %s>" % (self.__class__.__name__, self._magic_index,
+ self.methptr.im_func.func_name, r)
+ def emit(self, asm):
+ self.methptr(asm,
+ self.result_reg.number)
+
+class Insn_GPR__IMM(Insn):
+ def __init__(self, methptr, result, args):
+ Insn.__init__(self)
+ self.methptr = methptr
+ self.imm = args[0]
+
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result_reg = None
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ return "<%s-%d %s %s, (%s)>" % (self.__class__.__name__, self._magic_index,
+ self.methptr.im_func.func_name, r,
+ self.imm.value)
+ def emit(self, asm):
+ self.methptr(asm,
+ self.result_reg.number,
+ self.imm.value)
+
+class MoveCRB2GPR(Insn):
+ def __init__(self, result, gv_condition):
+ Insn.__init__(self)
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ self.reg_args = [gv_condition]
+ self.reg_arg_regclasses = [CR_FIELD]
+ def allocate(self, allocator):
+ self.targetreg = allocator.loc_of(self.result)
+ self.crf = allocator.loc_of(self.reg_args[0])
+ def emit(self, asm):
+ assert isinstance(self.crf, CRF)
+ bit, negated = self.crf.info
+ asm.mfcr(self.targetreg.number)
+ asm.extrwi(self.targetreg.number, self.targetreg.number, 1, self.crf.number*4+bit)
+ if negated:
+ asm.xori(self.targetreg.number, self.targetreg.number, 1)
+
+class Insn_None__GPR_GPR_IMM(Insn):
+ def __init__(self, methptr, args):
+ Insn.__init__(self)
+ self.methptr = methptr
+ self.imm = args[2]
+
+ self.result = None
+ self.result_regclass = NO_REGISTER
+ self.reg_args = args[:2]
+ self.reg_arg_regclasses = [GP_REGISTER, GP_REGISTER]
+ def allocate(self, allocator):
+ self.reg1 = allocator.loc_of(self.reg_args[0])
+ self.reg2 = allocator.loc_of(self.reg_args[1])
+ def __repr__(self):
+ return "<%s %s %d>" % (self.__class__.__name__, self.methptr.im_func.func_name, self._magic_index)
+
+ def emit(self, asm):
+ self.methptr(asm,
+ self.reg1.number,
+ self.reg2.number,
+ self.imm.value)
+
+class Insn_None__GPR_GPR_GPR(Insn):
+ def __init__(self, methptr, args):
+ Insn.__init__(self)
+ self.methptr = methptr
+
+ self.result = None
+ self.result_regclass = NO_REGISTER
+ self.reg_args = args
+ self.reg_arg_regclasses = [GP_REGISTER, GP_REGISTER, GP_REGISTER]
+ def allocate(self, allocator):
+ self.reg1 = allocator.loc_of(self.reg_args[0])
+ self.reg2 = allocator.loc_of(self.reg_args[1])
+ self.reg3 = allocator.loc_of(self.reg_args[2])
+ def __repr__(self):
+ return "<%s %s %d>" % (self.__class__.__name__, self.methptr.im_func.func_name, self._magic_index)
+
+ def emit(self, asm):
+ self.methptr(asm,
+ self.reg1.number,
+ self.reg2.number,
+ self.reg3.number)
+
+class Extrwi(Insn):
+ def __init__(self, result, source, size, bit):
+ Insn.__init__(self)
+
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ self.reg_args = [source]
+ self.reg_arg_regclasses = [GP_REGISTER]
+ self.result_reg = None
+ self.arg_reg = None
+
+ self.size = size
+ self.bit = bit
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ self.arg_reg = allocator.loc_of(self.reg_args[0])
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ if self.arg_reg:
+ a = "%s@%s"%(self.reg_args[0], self.arg_reg)
+ else:
+ a = str(self.reg_args[0])
+ return "<%s-%d extrwi %s, %s, %s, %s>" % (self.__class__.__name__, self._magic_index,
+ r, a, self.size, self.bit)
+
+ def emit(self, asm):
+ asm.extrwi(self.result_reg.number,
+ self.arg_reg.number,
+ self.size, self.bit)
+
+
+class CMPInsn(Insn):
+ def __init__(self, info, result):
+ Insn.__init__(self)
+ self.info = info
+ self.result = result
+ self.result_reg = None
+
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ assert isinstance(self.result_reg, CRF)
+ self.result_reg.set_info(self.info)
+
+class CMPW(CMPInsn):
+ def __init__(self, info, result, args):
+ CMPInsn.__init__(self, info, result)
+ self.result_regclass = CR_FIELD
+ self.reg_args = args
+ self.reg_arg_regclasses = [GP_REGISTER, GP_REGISTER]
+ self.arg_reg1 = None
+ self.arg_reg2 = None
+
+ def allocate(self, allocator):
+ CMPInsn.allocate(self, allocator)
+ self.arg_reg1 = allocator.loc_of(self.reg_args[0])
+ self.arg_reg2 = allocator.loc_of(self.reg_args[1])
+
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ if self.arg_reg1:
+ a1 = "%s@%s"%(self.reg_args[0], self.arg_reg1)
+ else:
+ a1 = str(self.reg_args[0])
+ if self.arg_reg2:
+ a2 = "%s@%s"%(self.reg_args[1], self.arg_reg2)
+ else:
+ a2 = str(self.reg_args[1])
+ return "<%s-%d %s %s, %s, %s>"%(self.__class__.__name__, self._magic_index,
+ self.__class__.__name__.lower(),
+ r, a1, a2)
+
+ def emit(self, asm):
+ asm.cmpw(self.result_reg.number, self.arg_reg1.number, self.arg_reg2.number)
+
+class CMPWL(CMPW):
+ def emit(self, asm):
+ asm.cmplw(self.result_reg.number, self.arg_reg1.number, self.arg_reg2.number)
+
+class CMPWI(CMPInsn):
+ def __init__(self, info, result, args):
+ CMPInsn.__init__(self, info, result)
+ self.imm = args[1]
+ self.result_regclass = CR_FIELD
+ self.reg_args = [args[0]]
+ self.reg_arg_regclasses = [GP_REGISTER]
+ self.arg_reg = None
+
+ def allocate(self, allocator):
+ CMPInsn.allocate(self, allocator)
+ self.arg_reg = allocator.loc_of(self.reg_args[0])
+
+ def __repr__(self):
+ if self.result_reg:
+ r = "%s@%s"%(self.result, self.result_reg)
+ else:
+ r = str(self.result)
+ if self.arg_reg:
+ a = "%s@%s"%(self.reg_args[0], self.arg_reg)
+ else:
+ a = str(self.reg_args[0])
+ return "<%s-%d %s %s, %s, (%s)>"%(self.__class__.__name__, self._magic_index,
+ self.__class__.__name__.lower(),
+ r, a, self.imm.value)
+ def emit(self, asm):
+ #print "CMPWI", asm.mc.tell()
+ asm.cmpwi(self.result_reg.number, self.arg_reg.number, self.imm.value)
+
+class CMPWLI(CMPWI):
+ def emit(self, asm):
+ asm.cmplwi(self.result_reg.number, self.arg_reg.number, self.imm.value)
+
+
+## class MTCTR(Insn):
+## def __init__(self, result, args):
+## Insn.__init__(self)
+## self.result = result
+## self.result_regclass = CT_REGISTER
+
+## self.reg_args = args
+## self.reg_arg_regclasses = [GP_REGISTER]
+
+## def allocate(self, allocator):
+## self.arg_reg = allocator.loc_of(self.reg_args[0])
+
+## def emit(self, asm):
+## asm.mtctr(self.arg_reg.number)
+
+class Jump(Insn):
+ def __init__(self, gv_cond, targetbuilder, jump_if_true, jump_args_gv):
+ Insn.__init__(self)
+ self.gv_cond = gv_cond
+ self.jump_if_true = jump_if_true
+
+ self.result = None
+ self.result_regclass = NO_REGISTER
+ self.reg_args = [gv_cond]
+ self.reg_arg_regclasses = [CR_FIELD]
+ self.crf = None
+
+ self.jump_args_gv = jump_args_gv
+ self.targetbuilder = targetbuilder
+ def allocate(self, allocator):
+ self.crf = allocator.loc_of(self.reg_args[0])
+ assert self.crf.info[0] != -1
+
+ assert self.targetbuilder.initial_var2loc is None
+ self.targetbuilder.initial_var2loc = {}
+ from pypy.jit.codegen.ppc.rgenop import Var
+ for gv_arg in self.jump_args_gv:
+ if isinstance(gv_arg, Var):
+ self.targetbuilder.initial_var2loc[gv_arg] = allocator.var2loc[gv_arg]
+ allocator.builders_to_tell_spill_offset_to.append(self.targetbuilder)
+ def __repr__(self):
+ if self.jump_if_true:
+ op = 'if_true'
+ else:
+ op = 'if_false'
+ if self.crf:
+ a = '%s@%s'%(self.reg_args[0], self.crf)
+ else:
+ a = self.reg_args[0]
+ return '<%s-%d %s %s>'%(self.__class__.__name__, self._magic_index,
+ op, a)
+ def emit(self, asm):
+ if self.targetbuilder.start:
+ asm.load_word(rSCRATCH, self.targetbuilder.start)
+ else:
+ self.targetbuilder.patch_start_here = asm.mc.tell()
+ asm.load_word(rSCRATCH, 0)
+ asm.mtctr(rSCRATCH)
+ bit, negated = self.crf.info
+ assert bit != -1
+ if negated ^ self.jump_if_true:
+ BO = 12 # jump if relevant bit is set in the CR
+ else:
+ BO = 4 # jump if relevant bit is NOT set in the CR
+ asm.bcctr(BO, self.crf.number*4 + bit)
+
+class Label(Insn):
+ def __init__(self, label):
+ Insn.__init__(self)
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result_regclass = NO_REGISTER
+ self.result = None
+ self.label = label
+ def allocate(self, allocator):
+ for gv in self.label.args_gv:
+ loc = allocator.loc_of(gv)
+ if isinstance(loc, CRF):
+ allocator.forget(gv, loc)
+ allocator.lru.remove(gv)
+ allocator.freeregs[loc.regclass].append(loc.alloc)
+ new_loc = allocator._allocate_reg(GP_REGISTER, gv)
+ allocator.lru.append(gv)
+ allocator.insns.append(loc.move_to_gpr(new_loc.number))
+ loc = new_loc
+ self.label.arg_locations = []
+ for gv in self.label.args_gv:
+ loc = allocator.loc_of(gv)
+ self.label.arg_locations.append(loc)
+ allocator.labels_to_tell_spill_offset_to.append(self.label)
+ def __repr__(self):
+ if hasattr(self.label, 'arg_locations'):
+ arg_locations = '[' + ', '.join(
+ ['%s@%s'%(gv, loc) for gv, loc in
+ zip(self.label.args_gv, self.label.arg_locations)]) + ']'
+ else:
+ arg_locations = str(self.label.args_gv)
+ return '<Label-%s %s>'%(self._magic_index,
+ arg_locations)
+ def emit(self, asm):
+ self.label.startaddr = asm.mc.tell()
+
+class LoadFramePointer(Insn):
+ def __init__(self, result):
+ Insn.__init__(self)
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ def emit(self, asm):
+ asm.mr(self.result_reg.number, rFP)
+
+class CopyIntoStack(Insn):
+ def __init__(self, place, v):
+ Insn.__init__(self)
+ self.reg_args = [v]
+ self.reg_arg_regclasses = [GP_REGISTER]
+ self.result = None
+ self.result_regclass = NO_REGISTER
+ self.place = place
+ def allocate(self, allocator):
+ self.arg_reg = allocator.loc_of(self.reg_args[0])
+ self.target_slot = allocator.spill_slot()
+ self.place.offset = self.target_slot.offset
+ def emit(self, asm):
+ asm.stw(self.arg_reg.number, rFP, self.target_slot.offset)
+
+class CopyOffStack(Insn):
+ def __init__(self, v, place):
+ Insn.__init__(self)
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result = v
+ self.result_regclass = GP_REGISTER
+ self.place = place
+ def allocate(self, allocator):
+ self.result_reg = allocator.loc_of(self.result)
+ allocator.free_stack_slots.append(stack_slot(self.place.offset))
+ def emit(self, asm):
+ asm.lwz(self.result_reg.number, rFP, self.place.offset)
+
+class SpillCalleeSaves(Insn):
+ def __init__(self):
+ Insn.__init__(self)
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result = None
+ self.result_regclass = NO_REGISTER
+ def allocate(self, allocator):
+ # cough cough cough
+ callersave = gprs[3:13]
+ for v in allocator.var2loc:
+ loc = allocator.loc_of(v)
+ if loc in callersave:
+ allocator.spill(loc, v)
+ allocator.freeregs[GP_REGISTER].append(loc)
+ def emit(self, asm):
+ pass
+
+class LoadArg(Insn):
+ def __init__(self, argnumber, arg):
+ Insn.__init__(self)
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result = None
+ self.result_regclass = NO_REGISTER
+
+ self.argnumber = argnumber
+ self.arg = arg
+ def allocate(self, allocator):
+ from pypy.jit.codegen.ppc.rgenop import Var
+ if isinstance(self.arg, Var):
+ self.loc = allocator.loc_of(self.arg)
+ else:
+ self.loc = None
+ def emit(self, asm):
+ if self.argnumber < 8: # magic numbers 'r' us
+ targetreg = 3+self.argnumber
+ if self.loc is None:
+ self.arg.load_now(asm, gprs[targetreg])
+ elif self.loc.is_register:
+ asm.mr(targetreg, self.loc.number)
+ else:
+ asm.lwz(targetreg, rFP, self.loc.offset)
+ else:
+ targetoffset = 24+self.argnumber*4
+ if self.loc is None:
+ self.arg.load_now(asm, gprs[0])
+ asm.stw(r0, r1, targetoffset)
+ elif self.loc.is_register:
+ asm.stw(self.loc.number, r1, targetoffset)
+ else:
+ asm.lwz(r0, rFP, self.loc.offset)
+ asm.stw(r0, r1, targetoffset)
+
+class CALL(Insn):
+ def __init__(self, result, target):
+ Insn.__init__(self)
+ from pypy.jit.codegen.ppc.rgenop import Var
+ if isinstance(target, Var):
+ self.reg_args = [target]
+ self.reg_arg_regclasses = [CT_REGISTER]
+ else:
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.target = target
+ self.result = result
+ self.result_regclass = GP_REGISTER
+ def allocate(self, allocator):
+ if self.reg_args:
+ assert allocator.loc_of(self.reg_args[0]) is ctr
+ self.resultreg = allocator.loc_of(self.result)
+ def emit(self, asm):
+ if not self.reg_args:
+ self.target.load_now(asm, gprs[0])
+ asm.mtctr(0)
+ asm.bctrl()
+ asm.lwz(rFP, rSP, 0)
+ if self.resultreg != gprs[3]:
+ asm.mr(self.resultreg.number, 3)
+
+
+class AllocTimeInsn(Insn):
+ def __init__(self):
+ Insn.__init__(self)
+ self.reg_args = []
+ self.reg_arg_regclasses = []
+ self.result_regclass = NO_REGISTER
+ self.result = None
+
+class Move(AllocTimeInsn):
+ def __init__(self, dest, src):
+ AllocTimeInsn.__init__(self)
+ self.dest = dest
+ self.src = src
+ def emit(self, asm):
+ asm.mr(self.dest.number, self.src.number)
+
+class Load(AllocTimeInsn):
+ def __init__(self, dest, const):
+ AllocTimeInsn.__init__(self)
+ self.dest = dest
+ self.const = const
+ def __repr__(self):
+ return "<Load-%d %s, (%s)>"%(self._magic_index, self.dest, self.const)
+ def emit(self, asm):
+ self.const.load_now(asm, self.dest)
+
+class Unspill(AllocTimeInsn):
+ """ A special instruction inserted by our register "allocator." It
+ indicates that we need to load a value from the stack into a register
+ because we spilled a particular value. """
+ def __init__(self, var, reg, stack):
+ """
+ var --- the var we spilled (a Var)
+ reg --- the reg we spilled it from (an integer)
+ stack --- the stack slot we spilled it to (a _StackSlot)
+ """
+ AllocTimeInsn.__init__(self)
+ self.var = var
+ self.reg = reg
+ self.stack = stack
+ if not isinstance(self.reg, GPR):
+ assert isinstance(self.reg, CRF) or isinstance(self.reg, CTR)
+ self.moveinsn = self.reg.move_from_gpr(0)
+ else:
+ self.moveinsn = None
+ def __repr__(self):
+ return '<Unspill-%d %s: %s, %s>'%(self._magic_index, self.var, self.reg, self.stack)
+ def emit(self, asm):
+ if isinstance(self.reg, GPR):
+ r = self.reg.number
+ else:
+ r = 0
+ asm.lwz(r, rFP, self.stack.offset)
+ if self.moveinsn:
+ self.moveinsn.emit(asm)
+
+class Spill(AllocTimeInsn):
+ """ A special instruction inserted by our register "allocator."
+ It indicates that we need to store a value from the register into
+ the stack because we spilled a particular value."""
+ def __init__(self, var, reg, stack):
+ """
+ var --- the var we are spilling (a Var)
+ reg --- the reg we are spilling it from (an integer)
+ stack --- the stack slot we are spilling it to (a _StackSlot)
+ """
+ AllocTimeInsn.__init__(self)
+ self.var = var
+ self.reg = reg
+ self.stack = stack
+ def __repr__(self):
+ return '<Spill-%d %s: %s, %s>'%(self._magic_index, self.var, self.stack, self.reg)
+ def emit(self, asm):
+ if isinstance(self.reg, GPR):
+ r = self.reg.number
+ else:
+ assert isinstance(self.reg, CRF)
+ self.reg.move_to_gpr(0).emit(asm)
+ r = 0
+ #print 'spilling to', self.stack.offset
+ asm.stw(r, rFP, self.stack.offset)
+
+class _CRF2GPR(AllocTimeInsn):
+ def __init__(self, targetreg, bit, negated):
+ AllocTimeInsn.__init__(self)
+ self.targetreg = targetreg
+ self.bit = bit
+ self.negated = negated
+ def __repr__(self):
+ number = self.bit // 4
+ bit = self.bit % 4
+ return '<CRF2GPR-%d r%s, crf%s(%s, %s)>' % (
+ self._magic_index, self.targetreg, number, bit, self.negated)
+ def emit(self, asm):
+ asm.mfcr(self.targetreg)
+ asm.extrwi(self.targetreg, self.targetreg, 1, self.bit)
+ if self.negated:
+ asm.xori(self.targetreg, self.targetreg, 1)
+
+class _GPR2CRF(AllocTimeInsn):
+ def __init__(self, targetreg, fromreg):
+ AllocTimeInsn.__init__(self)
+ self.targetreg = targetreg
+ self.fromreg = fromreg
+ def __repr__(self):
+ return '<GPR2CRF-%d %s, r%s>' % (
+ self._magic_index, self.targetreg, self.fromreg)
+ def emit(self, asm):
+ asm.cmpwi(self.targetreg.number, self.fromreg, 0)
+
+class _GPR2CTR(AllocTimeInsn):
+ def __init__(self, fromreg):
+ AllocTimeInsn.__init__(self)
+ self.fromreg = fromreg
+ def emit(self, asm):
+ asm.mtctr(self.fromreg)
+
+class Return(Insn):
+ """ Ensures the return value is in r3 """
+ def __init__(self, var):
+ Insn.__init__(self)
+ self.var = var
+ self.reg_args = [self.var]
+ self.reg_arg_regclasses = [GP_REGISTER]
+ self.result = None
+ self.result_regclass = NO_REGISTER
+ self.reg = None
+ def allocate(self, allocator):
+ self.reg = allocator.loc_of(self.reg_args[0])
+ def emit(self, asm):
+ if self.reg.number != 3:
+ asm.mr(r3, self.reg.number)
+
+class FakeUse(Insn):
+ """ A fake use of a var to get it into a register. And reserving
+ a condition register field."""
+ def __init__(self, rvar, var):
+ Insn.__init__(self)
+ self.var = var
+ self.reg_args = [self.var]
+ self.reg_arg_regclasses = [GP_REGISTER]
+ self.result = rvar
+ self.result_regclass = CR_FIELD
+ def allocate(self, allocator):
+ pass
+ def emit(self, asm):
+ pass
diff --git a/pypy/jit/backend/ppc/ppcgen/__init__.py b/pypy/jit/backend/ppc/ppcgen/__init__.py
new file mode 100644
diff --git a/pypy/jit/backend/ppc/ppcgen/_ppcgen.c b/pypy/jit/backend/ppc/ppcgen/_ppcgen.c
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/_ppcgen.c
@@ -0,0 +1,154 @@
+#include <Python.h>
+#include <sys/mman.h>
+
+#define __dcbf(base, index) \
+ __asm__ ("dcbf %0, %1" : /*no result*/ : "b%" (index), "r" (base) : "memory")
+
+
+static PyTypeObject* mmap_type;
+
+#if defined(__APPLE__)
+
+#include <mach-o/dyld.h>
+
+static PyObject*
+_ppy_NSLookupAndBindSymbol(PyObject* self, PyObject* args)
+{
+ char *s;
+ NSSymbol sym;
+
+ if (!PyArg_ParseTuple(args, "s", &s))
+ return NULL;
+
+ if (!NSIsSymbolNameDefined(s)) {
+ return PyErr_Format(PyExc_ValueError,
+ "symbol '%s' not found", s);
+ }
+
+ sym = NSLookupAndBindSymbol(s);
+
+ return PyInt_FromLong((long)NSAddressOfSymbol(sym));
+}
+
+
+#elif defined(linux)
+
+#include <dlfcn.h>
+
+static PyObject*
+_ppy_dlsym(PyObject* self, PyObject* args)
+{
+ char *s;
+ void *handle;
+ void *sym;
+
+ if (!PyArg_ParseTuple(args, "s", &s))
+ return NULL;
+
+ handle = dlopen(RTLD_DEFAULT, RTLD_LAZY);
+ sym = dlsym(handle, s);
+ if (sym == NULL) {
+ return PyErr_Format(PyExc_ValueError,
+ "symbol '%s' not found", s);
+ }
+ return PyInt_FromLong((long)sym);
+}
+
+#else
+
+#error "OS not supported"
+
+#endif
+
+
+static PyObject*
+_ppy_mmap_exec(PyObject* self, PyObject* args)
+{
+ PyObject* code_args;
+ PyObject* r;
+ PyObject* mmap_obj;
+ char* code;
+ size_t size;
+
+ if (!PyArg_ParseTuple(args, "O!O!:mmap_exec",
+ mmap_type, &mmap_obj,
+ &PyTuple_Type, &code_args))
+ return NULL;
+
+ code = *((char**)mmap_obj + 2);
+ size = *((size_t*)mmap_obj + 3);
+
+ r = ((PyCFunction)code)(NULL, code_args);
+
+ Py_DECREF(args);
+
+ return r;
+}
+
+static PyObject*
+_ppy_mmap_flush(PyObject* self, PyObject* arg)
+{
+ char* code;
+ size_t size;
+ int i = 0;
+
+ if (!PyObject_TypeCheck(arg, mmap_type)) {
+ PyErr_SetString(PyExc_TypeError,
+ "mmap_flush: single argument must be mmap object");
+ }
+
+ code = *((char**)arg + 2);
+ size = *((size_t*)arg + 3);
+
+ for (; i < size; i += 32){
+ __dcbf(code, i);
+ }
+
+ Py_INCREF(Py_None);
+ return Py_None;
+}
+
+
+PyMethodDef _ppy_methods[] = {
+#if defined(__APPLE__)
+ {"NSLookupAndBindSymbol", _ppy_NSLookupAndBindSymbol,
+ METH_VARARGS, ""},
+#elif defined(linux)
+ {"dlsym", _ppy_dlsym, METH_VARARGS, ""},
+#endif
+ {"mmap_exec", _ppy_mmap_exec, METH_VARARGS, ""},
+ {"mmap_flush", _ppy_mmap_flush, METH_O, ""},
+ {0, 0}
+};
+
+#if !defined(MAP_ANON) && defined(__APPLE__)
+#define MAP_ANON 0x1000
+#endif
+
+PyMODINIT_FUNC
+init_ppcgen(void)
+{
+ PyObject* m;
+ PyObject* mmap_module;
+ PyObject* mmap_func;
+ PyObject* mmap_obj;
+
+ m = Py_InitModule("_ppcgen", _ppy_methods);
+
+ /* argh */
+ /* time to campaign for a C API for the mmap module! */
+ mmap_module = PyImport_ImportModule("mmap");
+ if (!mmap_module)
+ return;
+ mmap_func = PyObject_GetAttrString(mmap_module, "mmap");
+ if (!mmap_func)
+ return;
+ mmap_obj = PyEval_CallFunction(mmap_func, "iii", -1, 0, MAP_ANON);
+ if (!mmap_obj)
+ return;
+ mmap_type = mmap_obj->ob_type;
+ Py_INCREF(mmap_type);
+ Py_DECREF(mmap_obj);
+ Py_DECREF(mmap_func);
+ Py_DECREF(mmap_module);
+}
diff --git a/pypy/jit/backend/ppc/ppcgen/asmfunc.py b/pypy/jit/backend/ppc/ppcgen/asmfunc.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/asmfunc.py
@@ -0,0 +1,27 @@
+import py
+import mmap, struct
+from pypy.jit.backend.ppc.codebuf import MachineCodeBlockWrapper
+from pypy.jit.backend.llsupport.asmmemmgr import AsmMemoryManager
+from pypy.rpython.lltypesystem import lltype, rffi
+
+_ppcgen = None
+
+def get_ppcgen():
+ global _ppcgen
+ if _ppcgen is None:
+ _ppcgen = py.magic.autopath().dirpath().join('_ppcgen.c')._getpymodule()
+ return _ppcgen
+
+class AsmCode(object):
+ def __init__(self, size):
+ self.code = MachineCodeBlockWrapper()
+
+ def emit(self, insn):
+ bytes = struct.pack("i", insn)
+ for byte in bytes:
+ self.code.writechar(byte)
+
+ def get_function(self):
+ i = self.code.materialize(AsmMemoryManager(), [])
+ t = lltype.FuncType([], lltype.Signed)
+ return rffi.cast(lltype.Ptr(t), i)
diff --git a/pypy/jit/backend/ppc/ppcgen/assembler.py b/pypy/jit/backend/ppc/ppcgen/assembler.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/assembler.py
@@ -0,0 +1,75 @@
+import os
+from pypy.jit.backend.ppc.ppcgen import form
+
+# don't be fooled by the fact that there's some separation between a
+# generic assembler class and a PPC assembler class... there's
+# certainly a RISC dependency in here, and quite possibly a PPC
+# dependency or two too. I personally don't care :)
+
+class AssemblerException(Exception):
+ pass
+
+class Assembler(object):
+ def __init__(self):
+ self.insts = []
+ self.labels = {}
+ self.rlabels = {}
+
+ def label(self, name):
+ if name in self.labels:
+ raise AssemblerException, "duplicate label '%s'"%(name,)
+ self.labels[name] = len(self.insts)*4
+ self.rlabels.setdefault(len(self.insts)*4, []).append(name)
+
+ def labelname(self, base="L"):
+ i = 0
+ while 1:
+ ln = base + str(i)
+ if ln not in self.labels:
+ return ln
+ i += 1
+
+ def assemble0(self, dump=os.environ.has_key('PPY_DEBUG')):
+ for i, inst in enumerate(self.insts):
+ for f in inst.lfields:
+ l = self.labels[inst.fields[f]] - 4*i
+ inst.fields[f] = l
+ buf = []
+ for inst in self.insts:
+ buf.append(inst.assemble())
+ if dump:
+ for i in range(len(buf)):
+ inst = self.disassemble(buf[i], self.rlabels, i*4)
+ for lab in self.rlabels.get(4*i, []):
+ print "%s:"%(lab,)
+ print "\t%4d %s"%(4*i, inst)
+ return buf
+
+ def assemble(self, dump=os.environ.has_key('PPY_DEBUG')):
+ insns = self.assemble0(dump)
+ from pypy.jit.backend.ppc.ppcgen import asmfunc
+ c = asmfunc.AsmCode(len(insns)*4)
+ for i in insns:
+ c.emit(i)
+ return c.get_function()
+
+ def get_idescs(cls):
+ r = []
+ for name in dir(cls):
+ a = getattr(cls, name)
+ if isinstance(a, form.IDesc):
+ r.append((name, a))
+ return r
+ get_idescs = classmethod(get_idescs)
+
+ def disassemble(cls, inst, labels={}, pc=0):
+ matches = []
+ idescs = cls.get_idescs()
+ for name, idesc in idescs:
+ m = idesc.match(inst)
+ if m > 0:
+ matches.append((m, idesc, name))
+ if matches:
+ score, idesc, name = max(matches)
+ return idesc.disassemble(name, inst, labels, pc)
+ disassemble = classmethod(disassemble)
diff --git a/pypy/jit/backend/ppc/ppcgen/autopath.py b/pypy/jit/backend/ppc/ppcgen/autopath.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/autopath.py
@@ -0,0 +1,114 @@
+"""
+self cloning, automatic path configuration
+
+copy this into any subdirectory of pypy from which scripts need
+to be run, typically all of the test subdirs.
+The idea is that any such script simply issues
+
+ import autopath
+
+and this will make sure that the parent directory containing "pypy"
+is in sys.path.
+
+If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
+you can directly run it which will copy itself on all autopath.py files
+it finds under the pypy root directory.
+
+This module always provides these attributes:
+
+ pypydir pypy root directory path
+ this_dir directory where this autopath.py resides
+
+"""
+
+
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""

    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # __file__ is not defined when this module is exec'd as a script
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until a path component named *part* (e.g. 'pypy') is found
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # make sure the directory *containing* the pypy package wins lookups
    sys.path.insert(0, head)

    # Re-register already-imported modules under their fully dotted names so
    # that e.g. a module imported via a relative path and via the package
    # path are the same object.
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        # NOTE(review): newname is a bare basename, so startswith(part + '.')
        # can only match oddly-named files -- confirm this filter is intended.
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
+
+def __clone():
+ """ clone master version of autopath.py into all subdirs """
+ from os.path import join, walk
+ if not this_dir.endswith(join('pypy','tool')):
+ raise EnvironmentError("can only clone master version "
+ "'%s'" % join(pypydir, 'tool',_myname))
+
+
+ def sync_walker(arg, dirname, fnames):
+ if _myname in fnames:
+ fn = join(dirname, _myname)
+ f = open(fn, 'rwb+')
+ try:
+ if f.read() == arg:
+ print "checkok", fn
+ else:
+ print "syncing", fn
+ f = open(fn, 'w')
+ f.write(arg)
+ finally:
+ f.close()
+ s = open(join(pypydir, 'tool', _myname), 'rb').read()
+ walk(pypydir, sync_walker, s)
+
_myname = 'autopath.py'

# set guaranteed attributes

# pypydir: the pypy package root; this_dir: directory containing this file.
# Importing autopath thus has the side effect of fixing up sys.path.
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    __clone()
diff --git a/pypy/jit/backend/ppc/ppcgen/field.py b/pypy/jit/backend/ppc/ppcgen/field.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/field.py
@@ -0,0 +1,61 @@
+# only a small file, but there's some hairy stuff in here!
+"""
+>>> f = Field('test', 16, 31)
+>>> f
+<Field 'test'>
+>>> f.encode(65535)
+65535
+>>> f.encode(65536)
+Traceback (most recent call last):
+ File \"<stdin>\", line 1, in ?
+ File \"field.py\", line 25, in encode
+ raise ValueError(\"field '%s' can't accept value %s\"
+ValueError: field 'test' can't accept value 65536
+>>>
+
+"""
+
+
class Field(object):
    """One bit-field of a 32-bit instruction word.  *left*/*right* are
    inclusive bit margins, with bit 0 the most-significant bit (big-endian
    bit numbering, as in the PowerPC manuals)."""
    def __init__(self, name, left, right, signedness=False, valclass=int):
        self.name = name
        self.left = left
        self.right = right
        width = self.right - self.left + 1
        # mask applies before shift!
        self.mask = 2**width - 1
        # signedness is either the string 'signed' or a false-y default
        self.signed = signedness == 'signed'
        self.valclass = valclass
    def __repr__(self):
        return '<Field %r>'%(self.name,)
    def encode(self, value):
        """Encode *value* into this field's bit position; raises ValueError
        on a type mismatch or when the value does not fit the field."""
        # NOTE(review): this check looks inverted -- issubclass(valclass,
        # type(value)) accepts values whose type is a *base* of valclass.
        # Confirm it is intended rather than isinstance(value, valclass).
        if not issubclass(self.valclass, type(value)):
            raise ValueError("field '%s' takes '%s's, not '%s's"
                             %(self.name, self.valclass.__name__, type(value).__name__))
        if not self.signed and value < 0:
            raise ValueError("field '%s' is unsigned and can't accept value %d"
                             %(self.name, value))
        # that this does the right thing is /not/ obvious (but true!)
        # (self.signed is a bool used as a 0/1 shift: signed fields get one
        # extra bit of headroom for the sign)
        if ((value >> 31) ^ value) & ~(self.mask >> self.signed):
            raise ValueError("field '%s' can't accept value %s"
                             %(self.name, value))
        value &= self.mask
        value = long(value)
        value <<= (32 - self.right - 1)
        if value & 0x80000000L:
            # yuck: fold the result back into the platform-int range so the
            # encoded word ORs together like a 32-bit quantity
            return ~int((~value)&0xFFFFFFFFL)
        else:
            return int(value)
    def decode(self, inst):
        """Extract this field's value from the word *inst*, sign-extending
        when the field is signed."""
        mask = self.mask
        v = (inst >> 32 - self.right - 1) & mask
        if self.signed and (~mask >> 1) & mask & v:
            v = ~(~v&mask)
        return self.valclass(v)
    def r(self, v, labels, pc):
        # disassembly hook: how this field renders; overridden e.g. for
        # branch-displacement fields (see IField.r in ppc_field.py)
        return self.decode(v)
+
if __name__=='__main__':
    # run the doctest from the module docstring above
    import doctest
    doctest.testmod()
diff --git a/pypy/jit/backend/ppc/ppcgen/form.py b/pypy/jit/backend/ppc/ppcgen/form.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/form.py
@@ -0,0 +1,191 @@
+
+# XXX there is much grot here.
+
+# some of this comes from trying to present a reasonably intuitive and
+# useful interface, which implies a certain amount of DWIMmery.
+# things surely still could be more transparent.
+
+class FormException(Exception):
+ pass
+
+
class Instruction(object):
    # One not-yet-encoded machine instruction: a mapping from Field objects
    # to values.  A *string* value names a label; such fields are recorded
    # in self.lfields and resolved later (see Assembler.assemble0).
    def __init__(self, fields):
        self.fields = fields
        # fields whose value is a label string, to be resolved at assembly time
        self.lfields = [k for (k,v) in fields.iteritems()
                        if isinstance(v, str)]
        if not self.lfields:
            self.assemble() # for error checking only
    def assemble(self):
        """OR together the encodings of all fields into one 32-bit word."""
        r = 0
        for field in self.fields:
            r |= field.encode(self.fields[field])
        return r
+
+
class IBoundDesc(object):
    """An instruction descriptor bound to a particular assembler: calling
    it computes the full field assignment and appends the resulting
    Instruction to the assembler."""
    def __init__(self, desc, fieldmap, assembler):
        self.fieldmap = fieldmap
        self.desc = desc
        self.assembler = assembler
    def calc_fields(self, args, kw):
        """Map positional/keyword arguments onto the descriptor's free
        fields, applying defaults; returns (fieldvalues, leftover_fields)."""
        fieldsleft = list(self.desc.fields)
        fieldvalues = {}
        # NOTE(review): this loop replaces the caller's keyword *values*
        # with Field objects while leaving the string keys in place, and
        # the loop below then tries to remove those string keys from a list
        # of Field objects.  The keyword path looks broken (probably meant
        # to re-key kw by Field) -- confirm before relying on it.
        for fname in kw:
            kw[fname] = self.fieldmap[fname]
        for d in (self.desc.specializations, kw):
            for field in d:
                fieldsleft.remove(field)
                fieldvalues[field] = d[field]
        # apply as many defaults as are not claimed by positional arguments
        for i in range(min(len(self.desc.defaults), len(fieldsleft) - len(args))):
            f, v = self.desc.defaults[i]
            fieldvalues[f] = v
            fieldsleft.remove(f)
        for a in args:
            field = fieldsleft.pop(0)
            fieldvalues[field] = a
        return fieldvalues, fieldsleft
    def __call__(self, *args, **kw):
        fieldvalues, sparefields = self.calc_fields(args, kw)
        if sparefields:
            raise FormException, 'fields %s left'%sparefields
        self.assembler.insts.append(Instruction(fieldvalues))
+
+
class IBoundDupDesc(IBoundDesc):
    """IBoundDesc for forms with dup-fields: after the normal field
    computation, each dupfield is copied from its source field's value."""
    def calc_fields(self, args, kw):
        s = super(IBoundDupDesc, self)
        fieldvalues, sparefields = s.calc_fields(args, kw)
        for k, v in self.desc.dupfields.iteritems():
            fieldvalues[k] = fieldvalues[v]
        return fieldvalues, sparefields
+
+
class IDesc(object):
    """Descriptor for one instruction form: an ordered set of Fields, some
    of them pre-specialized to fixed values.  Accessed on an Assembler
    instance (via the descriptor protocol) it yields a bound, callable
    emitter of boundtype."""
    boundtype = IBoundDesc
    def __init__(self, fieldmap, fields, specializations, boundtype=None):
        self.fieldmap = fieldmap
        self.fields = fields
        self.specializations = specializations
        self.defaults = ()
        if boundtype is not None:
            self.boundtype = boundtype
        for field in specializations:
            if field not in fields:
                raise FormException(field)

    def __get__(self, ob, cls=None):
        # descriptor protocol: bound emitter on instances, self on the class
        if ob is None: return self
        return self.boundtype(self, self.fieldmap, ob)

    def default(self, **defs):
        """Attach a default value for exactly one field; returns self so
        .default() calls can be chained."""
        assert len(defs) == 1
        f, v = defs.items()[0]
        self.defaults = self.defaults + ((self.fieldmap[f], v),)
        return self

    def __call__(self, **more_specializations):
        """Return a further-specialized copy of this descriptor.  A string
        value names another field to duplicate (yields an IDupDesc)."""
        s = self.specializations.copy()
        ms = {}
        ds = {}
        for fname, v in more_specializations.iteritems():
            field = self.fieldmap[fname]
            if field not in self.fields:
                # BUG FIX: this raise formatted with the undefined name 'k',
                # so the error path itself died with a NameError.
                raise FormException("don't know about '%s' here" % fname)
            if isinstance(v, str):
                ds[field] = self.fieldmap[v]
            else:
                ms[field] = v
        s.update(ms)
        if len(s) != len(self.specializations) + len(ms):
            raise FormException("respecialization not currently allowed")
        if ds:
            fields = list(self.fields)
            for field in ds:
                fields.remove(field)
            return IDupDesc(self.fieldmap, tuple(fields), s, ds)
        else:
            r = IDesc(self.fieldmap, self.fields, s, self.boundtype)
            # defaults for now-specialized fields no longer apply
            r.defaults = tuple([(f, d) for (f, d) in self.defaults if f not in s])
            return r

    def match(self, inst):
        """Score how well the word *inst* matches this form: 0 when any
        specialized field disagrees, otherwise the number of free fields."""
        c = 0
        for field in self.fields:
            if field in self.specializations:
                if field.decode(inst) != self.specializations[field]:
                    return 0
            else:
                c += 1
        return c

    def __repr__(self):
        l = []
        for field in self.fields:
            if field in self.specializations:
                l.append('%s=%r'%(field.name, self.specializations[field]))
            else:
                l.append(field.name)
        r = '%s(%s)'%(self.__class__.__name__, ', '.join(l))
        if self.boundtype is not self.__class__.boundtype:
            r += ' => ' + self.boundtype.__name__
        return r

    def disassemble(self, name, inst, labels, pc):
        """Render *inst* as 'name field=value, ...', omitting fields whose
        decoded value equals their registered default."""
        kws = []
        for field in self.fields:
            if field not in self.specializations:
                v = field.decode(inst)
                for f, d in self.defaults:
                    if f is field:
                        if d == v:
                            break
                else:
                    kws.append('%s=%s'%(field.name, field.r(inst, labels, pc)))
        return "%-5s %s"%(name, ', '.join(kws))
+
+
class IDupDesc(IDesc):
    """IDesc variant where some fields ('dupfields') mirror the value of
    another field rather than taking their own argument."""
    boundtype = IBoundDupDesc
    def __init__(self, fieldmap, fields, specializations, dupfields):
        super(IDupDesc, self).__init__(fieldmap, fields, specializations)
        self.dupfields = dupfields

    def match(self, inst):
        # a dup form only matches when every dupfield decodes equal to its
        # source field (the for/else always falls through to super)
        for field in self.dupfields:
            df = self.dupfields[field]
            if field.decode(inst) != df.decode(inst):
                return 0
        else:
            return super(IDupDesc, self).match(inst)
+
+
class Form(object):
    """A machine-instruction format: an ordered collection of Fields whose
    bit ranges must not overlap.  Calling a Form produces an IDesc."""
    fieldmap = None   # name -> Field mapping, supplied by subclasses
    def __init__(self, *fnames):
        self.fields = []
        bits = {}
        for fname in fnames:
            if isinstance(fname, str):
                field = self.fieldmap[fname]
            else:
                field = fname
            # verify no two fields claim the same bit position
            for b in range(field.left, field.right+1):
                if b in bits:
                    raise FormException("'%s' and '%s' clash at bit '%s'"%(
                        bits[b], fname, b))
                else:
                    bits[b] = fname
            self.fields.append(field)

    def __call__(self, **specializations):
        """Specialize some of this form's fields to fixed values and return
        the resulting instruction descriptor."""
        s = {}
        for fname in specializations:
            field = self.fieldmap[fname]
            if field not in self.fields:
                # BUG FIX: this raise formatted with the undefined name 'k',
                # so the error path itself died with a NameError.
                raise FormException("no nothin bout '%s'"%fname)
            s[field] = specializations[fname]
        return IDesc(self.fieldmap, self.fields, s)

    def __repr__(self):
        return '%s(%r)'%(self.__class__.__name__, [f.name for f in self.fields])
diff --git a/pypy/jit/backend/ppc/ppcgen/func_builder.py b/pypy/jit/backend/ppc/ppcgen/func_builder.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/func_builder.py
@@ -0,0 +1,160 @@
+from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import MyPPCAssembler
+from pypy.jit.codegen.ppc.ppcgen.symbol_lookup import lookup
+from pypy.jit.codegen.ppc.ppcgen.regname import *
+
def load_arg(code, argi, typecode):
    # NOTE(review): this module imports from pypy.jit.codegen.ppc.ppcgen
    # while its sibling files use pypy.jit.backend.ppc.ppcgen -- the import
    # paths at the top of this file look stale; verify.
    """Emit code that fetches Python-level argument *argi* from the argument
    tuple (whose pointer is in r4) into register r3+argi, unboxing it
    according to *typecode*: 'i' int, 'f' float, 'O' raw object."""
    rD = r3+argi
    code.lwz(rD, r4, 12 + 4*argi)     # presumably PyTupleObject ob_item[argi]
    if typecode == 'i':
        code.load_word(r0, lookup("PyInt_Type"))
        code.lwz(r31, rD, 4) # XXX ick!
        code.cmpw(r0, r31)            # exact type check, branch to argserror
        code.bne("argserror")
        code.lwz(rD, rD, 8)           # unbox the integer value
    elif typecode == 'f':
        code.load_word(r0, lookup("PyFloat_Type"))
        code.lwz(r31, rD, 4)
        code.cmpw(r0, r31)
        code.bne("argserror")
        code.lfd(rD-2, rD, 8)         # unbox the double into an FP register
    elif typecode != "O":
        raise Exception, "erk"
+
+FAST_ENTRY_LABEL = "FAST-ENTRY-LABEL"
+
def make_func(code, retcode, signature, localwords=0):
    """code shouldn't contain prologue/epilogue (or touch r31)

    Wrap *code* in a CPython calling-convention entry point: check the
    argument count, unbox arguments per *signature* (see load_arg), call
    the payload at FAST_ENTRY_LABEL, and box the result per *retcode*."""

    stacksize = 80 + 4*localwords

    argcount = len(signature)

    ourcode = MyPPCAssembler()
    # standard prologue: save LR and r31, then open a stack frame
    ourcode.mflr(r0)
    ourcode.stmw(r31, r1, -4)
    ourcode.stw(r0, r1, 8)
    ourcode.stwu(r1, r1, -stacksize)

    ourcode.lwz(r3, r4, 8)            # argument-tuple size
    ourcode.cmpwi(r3, argcount)
    ourcode.bne("argserror")

    assert argcount < 9

    # argument 1 is loaded last because its destination register is r4,
    # which until then still holds the argument-tuple pointer
    if argcount > 0:
        load_arg(ourcode, 0, signature[0])
    for i in range(2, argcount):
        load_arg(ourcode, i, signature[i])
    if argcount > 1:
        load_arg(ourcode, 1, signature[1])

    ourcode.bl(FAST_ENTRY_LABEL)

    # box the raw return value back into a Python object
    if retcode == 'i':
        s = lookup("PyInt_FromLong")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()
    elif retcode == 'f':
        s = lookup("PyFloat_FromDouble")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()

    ourcode.label("epilogue")
    ourcode.lwz(r0, r1, stacksize + 8)   # saved LR, 8 bytes above the old sp
    ourcode.addi(r1, r1, stacksize)
    ourcode.mtlr(r0)
    ourcode.lmw(r31, r1, -4)
    ourcode.blr()

    err_set = lookup("PyErr_SetObject")
    exc = lookup("PyExc_TypeError")

    # error path: PyErr_SetObject(PyExc_TypeError, PyExc_TypeError)
    ourcode.label("argserror")
    ourcode.load_word(r5, err_set)
    ourcode.mtctr(r5)
    ourcode.load_from(r3, exc)
    ourcode.mr(r4, r3)
    ourcode.bctrl()

    ourcode.li(r3, 0)                 # return NULL to signal the exception
    ourcode.b("epilogue")

    ourcode.label(FAST_ENTRY_LABEL)
    # err, should be an Assembler method:
    # splice the payload in after our prologue, shifting its labels by the
    # number of bytes already emitted.
    # NOTE(review): r starts as a *copy* of code.rlabels, so the unshifted
    # offsets are kept alongside the shifted ones -- confirm intended.
    l = {}
    for k in code.labels:
        l[k] = code.labels[k] + 4*len(ourcode.insts)
    r = code.rlabels.copy()
    for k in code.rlabels:
        r[k + 4*len(ourcode.insts)] = code.rlabels[k]
    ourcode.insts.extend(code.insts)
    ourcode.labels.update(l)
    ourcode.rlabels.update(r)

    r = ourcode.assemble()
    r.FAST_ENTRY_LABEL = ourcode.labels[FAST_ENTRY_LABEL]
    return r
+
def wrap(funcname, retcode, signature):
    """Build a callable that checks/unboxes its arguments per *signature*,
    calls the C symbol *funcname* directly, and boxes the result per
    *retcode*.  NOTE(review): this is a near-duplicate of make_func with a
    fixed 80-byte frame and a direct call instead of FAST_ENTRY_LABEL --
    a candidate for sharing code."""

    argcount = len(signature)

    ourcode = MyPPCAssembler()
    # prologue: save LR and r31, open an 80-byte frame
    ourcode.mflr(r0)
    ourcode.stmw(r31, r1, -4)
    ourcode.stw(r0, r1, 8)
    ourcode.stwu(r1, r1, -80)

    ourcode.lwz(r3, r4, 8)            # argument-tuple size
    ourcode.cmpwi(r3, argcount)
    ourcode.bne("argserror")

    assert argcount < 9

    # argument 1 last: its destination r4 still holds the tuple pointer
    if argcount > 0:
        load_arg(ourcode, 0, signature[0])
    for i in range(2, argcount):
        load_arg(ourcode, i, signature[i])
    if argcount > 1:
        load_arg(ourcode, 1, signature[1])


    # indirect call to the looked-up C function
    ourcode.load_word(r0, lookup(funcname))
    ourcode.mtctr(r0)
    ourcode.bctrl()

    # box the raw return value back into a Python object
    if retcode == 'i':
        s = lookup("PyInt_FromLong")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()
    elif retcode == 'f':
        s = lookup("PyFloat_FromDouble")
        ourcode.load_word(r0, s)
        ourcode.mtctr(r0)
        ourcode.bctrl()

    ourcode.label("epilogue")
    ourcode.lwz(r0, r1, 88)           # saved LR: 80 (frame) + 8
    ourcode.addi(r1, r1, 80)
    ourcode.mtlr(r0)
    ourcode.lmw(r31, r1, -4)
    ourcode.blr()

    err_set = lookup("PyErr_SetObject")
    exc = lookup("PyExc_TypeError")

    # error path: raise TypeError and return NULL
    ourcode.label("argserror")
    ourcode.load_word(r5, err_set)
    ourcode.mtctr(r5)
    ourcode.load_from(r3, exc)
    ourcode.mr(r4, r3)
    ourcode.bctrl()

    ourcode.li(r3, 0)
    ourcode.b("epilogue")

    return ourcode.assemble()
+
diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/ppc_assembler.py
@@ -0,0 +1,845 @@
+from pypy.jit.backend.ppc.ppcgen.ppc_form import PPCForm as Form
+from pypy.jit.backend.ppc.ppcgen.ppc_field import ppc_fields
+from pypy.jit.backend.ppc.ppcgen.assembler import Assembler
+from pypy.jit.backend.ppc.ppcgen.symbol_lookup import lookup
+
+A = Form("frD", "frA", "frB", "XO3", "Rc")
+A1 = Form("frD", "frB", "XO3", "Rc")
+A2 = Form("frD", "frA", "frC", "XO3", "Rc")
+A3 = Form("frD", "frA", "frC", "frB", "XO3", "Rc")
+
+I = Form("LI", "AA", "LK")
+
+B = Form("BO", "BI", "BD", "AA", "LK")
+
+SC = Form("AA") # fudge
+
+DD = Form("rD", "rA", "SIMM")
+DS = Form("rA", "rS", "UIMM")
+
+X = Form("XO1")
+XS = Form("rA", "rS", "rB", "XO1", "Rc")
+XSO = Form("rS", "rA", "rB", "XO1")
+XD = Form("rD", "rA", "rB", "XO1")
+XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc")
+XO0 = Form("rD", "rA", "OE", "XO2", "Rc")
+XDB = Form("frD", "frB", "XO1", "Rc")
+XS0 = Form("rA", "rS", "XO1", "Rc")
+X0 = Form("rA", "rB", "XO1")
+XcAB = Form("crfD", "rA", "rB", "XO1")
+XN = Form("rD", "rA", "NB", "XO1")
+XL = Form("crbD", "crbA", "crbB", "XO1")
+XL1 = Form("crfD", "crfS")
+XL2 = Form("crbD", "XO1", "Rc")
+XFL = Form("FM", "frB", "XO1", "Rc")
+XFX = Form("CRM", "rS", "XO1")
+
+MI = Form("rA", "rS", "SH", "MB", "ME", "Rc")
+MB = Form("rA", "rS", "rB", "MB", "ME", "Rc")
+
+
+class BasicPPCAssembler(Assembler):
+
    def disassemble(cls, inst, labels={}, pc=0):
        """Specialization of Assembler.disassemble that bins the instruction
        descriptors by major opcode once per class (cached under the
        'idesc cache' class attribute), so only candidates with the right
        opcode are scored."""
        cache = cls.__dict__.get('idesc cache')
        if cache is None:
            # build the opcode -> [(name, idesc), ...] cache lazily
            idescs = cls.get_idescs()
            cache = {}
            for n, i in idescs:
                cache.setdefault(i.specializations[ppc_fields['opcode']],
                                 []).append((n,i))
            setattr(cls, 'idesc cache', cache)
        matches = []
        idescs = cache[ppc_fields['opcode'].decode(inst)]
        for name, idesc in idescs:
            m = idesc.match(inst)
            if m > 0:
                matches.append((m, idesc, name))
        if matches:
            score, idesc, name = max(matches)
            return idesc.disassemble(name, inst, labels, pc)
    disassemble = classmethod(disassemble)
+
+ # "basic" means no simplified mnemonics
+
+ # I form
+ b = I(18, AA=0, LK=0)
+ ba = I(18, AA=1, LK=0)
+ bl = I(18, AA=0, LK=1)
+ bla = I(18, AA=1, LK=1)
+
+ # B form
+ bc = B(16, AA=0, LK=0)
+ bcl = B(16, AA=0, LK=1)
+ bca = B(16, AA=1, LK=0)
+ bcla = B(16, AA=1, LK=1)
+
+ # SC form
+ sc = SC(17, AA=1) # it's not really the aa field...
+
+ # D form
+ addi = DD(14)
+ addic = DD(12)
+ addicx = DD(13)
+ addis = DD(15)
+
+ andix = DS(28)
+ andisx = DS(29)
+
+ cmpi = Form("crfD", "L", "rA", "SIMM")(11)
+ cmpi.default(L=0).default(crfD=0)
+ cmpli = Form("crfD", "L", "rA", "UIMM")(10)
+ cmpli.default(L=0).default(crfD=0)
+
+ lbz = DD(34)
+ lbzu = DD(35)
+ lfd = DD(50)
+ lfdu = DD(51)
+ lfs = DD(48)
+ lfsu = DD(49)
+ lha = DD(42)
+ lhau = DD(43)
+ lhz = DD(40)
+ lhzu = DD(41)
+ lmw = DD(46)
+ lwz = DD(32)
+ lwzu = DD(33)
+
+ mulli = DD(7)
+ ori = DS(24)
+ oris = DS(25)
+
+ stb = DD(38)
+ stbu = DD(39)
+ stfd = DD(54)
+ stfdu = DD(55)
+ stfs = DD(52)
+ stfsu = DD(53)
+ sth = DD(44)
+ sthu = DD(45)
+ stmw = DD(47)
+ stw = DD(36)
+ stwu = DD(37)
+
+ subfic = DD(8)
+ twi = Form("TO", "rA", "SIMM")(3)
+ xori = DS(26)
+ xoris = DS(27)
+
+ # X form
+
+ and_ = XS(31, XO1=28, Rc=0)
+ and_x = XS(31, XO1=28, Rc=1)
+
+ andc_ = XS(31, XO1=60, Rc=0)
+ andc_x = XS(31, XO1=60, Rc=1)
+
+ # is the L bit for 64 bit compares? hmm
+ cmp = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=0)
+ cmp.default(L=0).default(crfD=0)
+ cmpl = Form("crfD", "L", "rA", "rB", "XO1")(31, XO1=32)
+ cmpl.default(L=0).default(crfD=0)
+
+ cntlzw = XS0(31, XO1=26, Rc=0)
+ cntlzwx = XS0(31, XO1=26, Rc=1)
+
+ dcba = X0(31, XO1=758)
+ dcbf = X0(31, XO1=86)
+ dcbi = X0(31, XO1=470)
+ dcbst = X0(31, XO1=54)
+ dcbt = X0(31, XO1=278)
+ dcbtst = X0(31, XO1=246)
+ dcbz = X0(31, XO1=1014)
+
+ eciwx = XD(31, XO1=310)
+ ecowx = XS(31, XO1=438, Rc=0)
+
+ eieio = X(31, XO1=854)
+
+ eqv = XS(31, XO1=284, Rc=0)
+ eqvx = XS(31, XO1=284, Rc=1)
+
+ extsb = XS0(31, XO1=954, Rc=0)
+ extsbx = XS0(31, XO1=954, Rc=1)
+
+ extsh = XS0(31, XO1=922, Rc=0)
+ extshx = XS0(31, XO1=922, Rc=1)
+
+ fabs = XDB(63, XO1=264, Rc=0)
+ fabsx = XDB(63, XO1=264, Rc=1)
+
+ fcmpo = XcAB(63, XO1=32)
+ fcmpu = XcAB(63, XO1=0)
+
+ fctiw = XDB(63, XO1=14, Rc=0)
+ fctiwx = XDB(63, XO1=14, Rc=1)
+
+ fctiwz = XDB(63, XO1=14, Rc=0)
+ fctiwzx = XDB(63, XO1=14, Rc=1)
+
+ fmr = XDB(63, XO1=72, Rc=0)
+ fmrx = XDB(63, XO1=72, Rc=1)
+
+ fnabs = XDB(63, XO1=136, Rc=0)
+ fnabsx = XDB(63, XO1=136, Rc=1)
+
+ fneg = XDB(63, XO1=40, Rc=0)
+ fnegx = XDB(63, XO1=40, Rc=1)
+
+ frsp = XDB(63, XO1=12, Rc=0)
+ frspx = XDB(63, XO1=12, Rc=1)
+
+ fsqrt = XDB(63, XO1=22, Rc=0)
+
+ icbi = X0(31, XO1=982)
+
+ lbzux = XD(31, XO1=119)
+ lbzx = XD(31, XO1=87)
+ lfdux = XD(31, XO1=631)
+ lfdx = XD(31, XO1=599)
+ lfsux = XD(31, XO1=567)
+ lfsx = XD(31, XO1=535)
+ lhaux = XD(31, XO1=375)
+ lhax = XD(31, XO1=343)
+ lhbrx = XD(31, XO1=790)
+ lhzux = XD(31, XO1=311)
+ lhzx = XD(31, XO1=279)
+ lswi = XD(31, XO1=597)
+ lswx = XD(31, XO1=533)
+ lwarx = XD(31, XO1=20)
+ lwbrx = XD(31, XO1=534)
+ lwzux = XD(31, XO1=55)
+ lwzx = XD(31, XO1=23)
+
+ mcrfs = Form("crfD", "crfS", "XO1")(63, XO1=64)
+ mcrxr = Form("crfD", "XO1")(31, XO1=512)
+ mfcr = Form("rD", "XO1")(31, XO1=19)
+ mffs = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=0)
+ mffsx = Form("frD", "XO1", "Rc")(63, XO1=583, Rc=1)
+ mfmsr = Form("rD", "XO1")(31, XO1=83)
+ mfsr = Form("rD", "SR", "XO1")(31, XO1=595)
+ mfsrin = XDB(31, XO1=659, Rc=0)
+
+ add = XO(31, XO2=266, OE=0, Rc=0)
+ addx = XO(31, XO2=266, OE=0, Rc=1)
+ addo = XO(31, XO2=266, OE=1, Rc=0)
+ addox = XO(31, XO2=266, OE=1, Rc=1)
+
+ addc = XO(31, XO2=10, OE=0, Rc=0)
+ addcx = XO(31, XO2=10, OE=0, Rc=1)
+ addco = XO(31, XO2=10, OE=1, Rc=0)
+ addcox = XO(31, XO2=10, OE=1, Rc=1)
+
+ adde = XO(31, XO2=138, OE=0, Rc=0)
+ addex = XO(31, XO2=138, OE=0, Rc=1)
+ addeo = XO(31, XO2=138, OE=1, Rc=0)
+ addeox = XO(31, XO2=138, OE=1, Rc=1)
+
+ addme = XO(31, rB=0, XO2=234, OE=0, Rc=0)
+ addmex = XO(31, rB=0, XO2=234, OE=0, Rc=1)
+ addmeo = XO(31, rB=0, XO2=234, OE=1, Rc=0)
+ addmeox = XO(31, rB=0, XO2=234, OE=1, Rc=1)
+
+ addze = XO(31, rB=0, XO2=202, OE=0, Rc=0)
+ addzex = XO(31, rB=0, XO2=202, OE=0, Rc=1)
+ addzeo = XO(31, rB=0, XO2=202, OE=1, Rc=0)
+ addzeox = XO(31, rB=0, XO2=202, OE=1, Rc=1)
+
+ bcctr = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=0)
+ bcctrl = Form("BO", "BI", "XO1", "LK")(19, XO1=528, LK=1)
+
+ bclr = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=0)
+ bclrl = Form("BO", "BI", "XO1", "LK")(19, XO1=16, LK=1)
+
+ crand = XL(19, XO1=257)
+ crandc = XL(19, XO1=129)
+ creqv = XL(19, XO1=289)
+ crnand = XL(19, XO1=225)
+ crnor = XL(19, XO1=33)
+ cror = XL(19, XO1=449)
+ crorc = XL(19, XO1=417)
+ crxor = XL(19, XO1=193)
+
+ divw = XO(31, XO2=491, OE=0, Rc=0)
+ divwx = XO(31, XO2=491, OE=0, Rc=1)
+ divwo = XO(31, XO2=491, OE=1, Rc=0)
+ divwox = XO(31, XO2=491, OE=1, Rc=1)
+
+ divwu = XO(31, XO2=459, OE=0, Rc=0)
+ divwux = XO(31, XO2=459, OE=0, Rc=1)
+ divwuo = XO(31, XO2=459, OE=1, Rc=0)
+ divwuox = XO(31, XO2=459, OE=1, Rc=1)
+
+ fadd = A(63, XO3=21, Rc=0)
+ faddx = A(63, XO3=21, Rc=1)
+ fadds = A(59, XO3=21, Rc=0)
+ faddsx = A(59, XO3=21, Rc=1)
+
+ fdiv = A(63, XO3=18, Rc=0)
+ fdivx = A(63, XO3=18, Rc=1)
+ fdivs = A(59, XO3=18, Rc=0)
+ fdivsx = A(59, XO3=18, Rc=1)
+
+ fmadd = A3(63, XO3=19, Rc=0)
+ fmaddx = A3(63, XO3=19, Rc=1)
+ fmadds = A3(59, XO3=19, Rc=0)
+ fmaddsx = A3(59, XO3=19, Rc=1)
+
+ fmsub = A3(63, XO3=28, Rc=0)
+ fmsubx = A3(63, XO3=28, Rc=1)
+ fmsubs = A3(59, XO3=28, Rc=0)
+ fmsubsx = A3(59, XO3=28, Rc=1)
+
+ fmul = A2(63, XO3=25, Rc=0)
+ fmulx = A2(63, XO3=25, Rc=1)
+ fmuls = A2(59, XO3=25, Rc=0)
+ fmulsx = A2(59, XO3=25, Rc=1)
+
+ fnmadd = A3(63, XO3=31, Rc=0)
+ fnmaddx = A3(63, XO3=31, Rc=1)
+ fnmadds = A3(59, XO3=31, Rc=0)
+ fnmaddsx = A3(59, XO3=31, Rc=1)
+
+ fnmsub = A3(63, XO3=30, Rc=0)
+ fnmsubx = A3(63, XO3=30, Rc=1)
+ fnmsubs = A3(59, XO3=30, Rc=0)
+ fnmsubsx = A3(59, XO3=30, Rc=1)
+
+ fres = A1(59, XO3=24, Rc=0)
+ fresx = A1(59, XO3=24, Rc=1)
+
+ frsp = A1(63, XO3=12, Rc=0)
+ frspx = A1(63, XO3=12, Rc=1)
+
+ frsqrte = A1(63, XO3=26, Rc=0)
+ frsqrtex = A1(63, XO3=26, Rc=1)
+
+ fsel = A3(63, XO3=23, Rc=0)
+ fselx = A3(63, XO3=23, Rc=1)
+
+ frsqrt = A1(63, XO3=22, Rc=0)
+ frsqrtx = A1(63, XO3=22, Rc=1)
+ frsqrts = A1(59, XO3=22, Rc=0)
+ frsqrtsx = A1(59, XO3=22, Rc=1)
+
+ fsub = A(63, XO3=20, Rc=0)
+ fsubx = A(63, XO3=20, Rc=1)
+ fsubs = A(59, XO3=20, Rc=0)
+ fsubsx = A(59, XO3=20, Rc=1)
+
+ isync = X(19, XO1=150)
+
+ mcrf = XL1(19)
+
+ mfspr = Form("rD", "spr", "XO1")(31, XO1=339)
+ mftb = Form("rD", "spr", "XO1")(31, XO1=371)
+
+ mtcrf = XFX(31, XO1=144)
+
+ mtfsb0 = XL2(63, XO1=70, Rc=0)
+ mtfsb0x = XL2(63, XO1=70, Rc=1)
+ mtfsb1 = XL2(63, XO1=38, Rc=0)
+ mtfsb1x = XL2(63, XO1=38, Rc=1)
+
+ mtfsf = XFL(63, XO1=711, Rc=0)
+ mtfsfx = XFL(63, XO1=711, Rc=1)
+
+ mtfsfi = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=0)
+ mtfsfix = Form("crfD", "IMM", "XO1", "Rc")(63, XO1=134, Rc=1)
+
+ mtmsr = Form("rS", "XO1")(31, XO1=146)
+
+ mtspr = Form("rS", "spr", "XO1")(31, XO1=467)
+
+ mtsr = Form("rS", "SR", "XO1")(31, XO1=210)
+ mtsrin = Form("rS", "rB", "XO1")(31, XO1=242)
+
+ mulhw = XO(31, OE=0, XO2=75, Rc=0)
+ mulhwx = XO(31, OE=0, XO2=75, Rc=1)
+
+ mulhwu = XO(31, OE=0, XO2=11, Rc=0)
+ mulhwux = XO(31, OE=0, XO2=11, Rc=1)
+
+ mullw = XO(31, OE=0, XO2=235, Rc=0)
+ mullwx = XO(31, OE=0, XO2=235, Rc=1)
+ mullwo = XO(31, OE=1, XO2=235, Rc=0)
+ mullwox = XO(31, OE=1, XO2=235, Rc=1)
+
+ nand = XS(31, XO1=476, Rc=0)
+ nandx = XS(31, XO1=476, Rc=1)
+
+ neg = XO0(31, OE=0, XO2=104, Rc=0)
+ negx = XO0(31, OE=0, XO2=104, Rc=1)
+ nego = XO0(31, OE=1, XO2=104, Rc=0)
+ negox = XO0(31, OE=1, XO2=104, Rc=1)
+
+ nor = XS(31, XO1=124, Rc=0)
+ norx = XS(31, XO1=124, Rc=1)
+
+ or_ = XS(31, XO1=444, Rc=0)
+ or_x = XS(31, XO1=444, Rc=1)
+
+ orc = XS(31, XO1=412, Rc=0)
+ orcx = XS(31, XO1=412, Rc=1)
+
+ rfi = X(19, XO1=50)
+
+ rlwimi = MI(20, Rc=0)
+ rlwimix = MI(20, Rc=1)
+
+ rlwinm = MI(21, Rc=0)
+ rlwinmx = MI(21, Rc=1)
+
+ rlwnm = MB(23, Rc=0)
+ rlwnmx = MB(23, Rc=1)
+
+ slw = XS(31, XO1=24, Rc=0)
+ slwx = XS(31, XO1=24, Rc=1)
+
+ sraw = XS(31, XO1=792, Rc=0)
+ srawx = XS(31, XO1=792, Rc=1)
+
+ srawi = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=0)
+ srawix = Form("rA", "rS", "SH", "XO1", "Rc")(31, XO1=824, Rc=1)
+
+ srw = XS(31, XO1=536, Rc=0)
+ srwx = XS(31, XO1=536, Rc=1)
+
+ stbux = XSO(31, XO1=247)
+ stbx = XSO(31, XO1=215)
+ stfdux = XSO(31, XO1=759)
+ stfdx = XSO(31, XO1=727)
+ stfiwx = XSO(31, XO1=983)
+ stfsux = XSO(31, XO1=695)
+ stfsx = XSO(31, XO1=663)
+ sthbrx = XSO(31, XO1=918)
+ sthux = XSO(31, XO1=439)
+ sthx = XSO(31, XO1=407)
+ stswi = Form("rS", "rA", "NB", "XO1")(31, XO1=725)
+ stswx = XSO(31, XO1=661)
+ stwbrx = XSO(31, XO1=662)
+ stwcxx = Form("rS", "rA", "rB", "XO1", "Rc")(31, XO1=150, Rc=1)
+ stwux = XSO(31, XO1=183)
+ stwx = XSO(31, XO1=151)
+
+ subf = XO(31, XO2=40, OE=0, Rc=0)
+ subfx = XO(31, XO2=40, OE=0, Rc=1)
+ subfo = XO(31, XO2=40, OE=1, Rc=0)
+ subfox = XO(31, XO2=40, OE=1, Rc=1)
+
+ subfc = XO(31, XO2=8, OE=0, Rc=0)
+ subfcx = XO(31, XO2=8, OE=0, Rc=1)
+ subfco = XO(31, XO2=8, OE=1, Rc=0)
+ subfcox = XO(31, XO2=8, OE=1, Rc=1)
+
+ subfe = XO(31, XO2=136, OE=0, Rc=0)
+ subfex = XO(31, XO2=136, OE=0, Rc=1)
+ subfeo = XO(31, XO2=136, OE=1, Rc=0)
+ subfeox = XO(31, XO2=136, OE=1, Rc=1)
+
+ subfme = XO0(31, OE=0, XO2=232, Rc=0)
+ subfmex = XO0(31, OE=0, XO2=232, Rc=1)
+ subfmeo = XO0(31, OE=1, XO2=232, Rc=0)
+ subfmeox= XO0(31, OE=1, XO2=232, Rc=1)
+
+ subfze = XO0(31, OE=0, XO2=200, Rc=0)
+ subfzex = XO0(31, OE=0, XO2=200, Rc=1)
+ subfzeo = XO0(31, OE=1, XO2=200, Rc=0)
+ subfzeox= XO0(31, OE=1, XO2=200, Rc=1)
+
+ sync = X(31, XO1=598)
+
+ tlbia = X(31, XO1=370)
+ tlbie = Form("rB", "XO1")(31, XO1=306)
+ tlbsync = X(31, XO1=566)
+
+ tw = Form("TO", "rA", "rB", "XO1")(31, XO1=4)
+
+ xor = XS(31, XO1=316, Rc=0)
+ xorx = XS(31, XO1=316, Rc=1)
+
+class PPCAssembler(BasicPPCAssembler):
+ BA = BasicPPCAssembler
+
+ # awkward mnemonics:
+ # mftb
+ # most of the branch mnemonics...
+
+ # F.2 Simplified Mnemonics for Subtract Instructions
+
    # subtract-immediate mnemonics: implemented as add-immediate of the
    # negated value
    def subi(self, rD, rA, value):
        self.addi(rD, rA, -value)
    def subis(self, rD, rA, value):
        self.addis(rD, rA, -value)
    def subic(self, rD, rA, value):
        self.addic(rD, rA, -value)
    def subicx(self, rD, rA, value):
        self.addicx(rD, rA, -value)

    # register subtract mnemonics: sub rD,rA,rB == subf rD,rB,rA
    # (note the swapped source-register order)
    def sub(self, rD, rA, rB):
        self.subf(rD, rB, rA)
    def subc(self, rD, rA, rB):
        self.subfc(rD, rB, rA)
    def subx(self, rD, rA, rB):
        self.subfx(rD, rB, rA)
    def subcx(self, rD, rA, rB):
        self.subfcx(rD, rB, rA)
    def subo(self, rD, rA, rB):
        self.subfo(rD, rB, rA)
    def subco(self, rD, rA, rB):
        self.subfco(rD, rB, rA)
    def subox(self, rD, rA, rB):
        self.subfox(rD, rB, rA)
    def subcox(self, rD, rA, rB):
        self.subfcox(rD, rB, rA)
+
+ # F.3 Simplified Mnemonics for Compare Instructions
+
+ cmpwi = BA.cmpi(L=0)
+ cmplwi = BA.cmpli(L=0)
+ cmpw = BA.cmp(L=0)
+ cmplw = BA.cmpl(L=0)
+
+ # F.4 Simplified Mnemonics for Rotate and Shift Instructions
+
+ def extlwi(self, rA, rS, n, b):
+ self.rlwinm(rA, rS, b, 0, n-1)
+
+ def extrwi(self, rA, rS, n, b):
+ self.rlwinm(rA, rS, b+n, 32-n, 31)
+
+ def inslwi(self, rA, rS, n, b):
+ self.rwlimi(rA, rS, 32-b, b, b + n -1)
+
+ def insrwi(self, rA, rS, n, b):
+ self.rwlimi(rA, rS, 32-(b+n), b, b + n -1)
+
+ def rotlwi(self, rA, rS, n):
+ self.rlwinm(rA, rS, n, 0, 31)
+
+ def rotrwi(self, rA, rS, n):
+ self.rlwinm(rA, rS, 32-n, 0, 31)
+
+ def rotlw(self, rA, rS, rB):
+ self.rlwnm(rA, rS, rB, 0, 31)
+
+ def slwi(self, rA, rS, n):
+ self.rlwinm(rA, rS, n, 0, 31-n)
+
+ def srwi(self, rA, rS, n):
+ self.rlwinm(rA, rS, 32-n, n, 31)
+
+
+ # F.5 Simplified Mnemonics for Branch Instructions
+
+ # there's a lot of these!
+ bt = BA.bc(BO=12)
+ bf = BA.bc(BO=4)
+ bdnz = BA.bc(BO=16, BI=0)
+ bdnzt = BA.bc(BO=8)
+ bdnzf = BA.bc(BO=0)
+ bdz = BA.bc(BO=18)
+ bdzt = BA.bc(BO=10)
+ bdzf = BA.bc(BO=2)
+
+ bta = BA.bca(BO=12)
+ bfa = BA.bca(BO=4)
+ bdnza = BA.bca(BO=16, BI=0)
+ bdnzta = BA.bca(BO=8)
+ bdnzfa = BA.bca(BO=0)
+ bdza = BA.bca(BO=18)
+ bdzta = BA.bca(BO=10)
+ bdzfa = BA.bca(BO=2)
+
+ btl = BA.bcl(BO=12)
+ bfl = BA.bcl(BO=4)
+ bdnzl = BA.bcl(BO=16, BI=0)
+ bdnztl = BA.bcl(BO=8)
+ bdnzfl = BA.bcl(BO=0)
+ bdzl = BA.bcl(BO=18)
+ bdztl = BA.bcl(BO=10)
+ bdzfl = BA.bcl(BO=2)
+
+ btla = BA.bcla(BO=12)
+ bfla = BA.bcla(BO=4)
+ bdnzla = BA.bcla(BO=16, BI=0)
+ bdnztla = BA.bcla(BO=8)
+ bdnzfla = BA.bcla(BO=0)
+ bdzla = BA.bcla(BO=18)
+ bdztla = BA.bcla(BO=10)
+ bdzfla = BA.bcla(BO=2)
+
+ blr = BA.bclr(BO=20, BI=0)
+ btlr = BA.bclr(BO=12)
+ bflr = BA.bclr(BO=4)
+ bdnzlr = BA.bclr(BO=16, BI=0)
+ bdnztlr = BA.bclr(BO=8)
+ bdnzflr = BA.bclr(BO=0)
+ bdzlr = BA.bclr(BO=18, BI=0)
+ bdztlr = BA.bclr(BO=10)
+ bdzflr = BA.bclr(BO=2)
+
+ bctr = BA.bcctr(BO=20, BI=0)
+ btctr = BA.bcctr(BO=12)
+ bfctr = BA.bcctr(BO=4)
+
+ blrl = BA.bclrl(BO=20, BI=0)
+ btlrl = BA.bclrl(BO=12)
+ bflrl = BA.bclrl(BO=4)
+ bdnzlrl = BA.bclrl(BO=16, BI=0)
+ bdnztlrl = BA.bclrl(BO=8)
+ bdnzflrl = BA.bclrl(BO=0)
+ bdzlrl = BA.bclrl(BO=18, BI=0)
+ bdztlrl = BA.bclrl(BO=10)
+ bdzflrl = BA.bclrl(BO=2)
+
+ bctrl = BA.bcctrl(BO=20, BI=0)
+ btctrl = BA.bcctrl(BO=12)
+ bfctrl = BA.bcctrl(BO=4)
+
+ # these should/could take a[n optional] crf argument, but it's a
+ # bit hard to see how to arrange that.
+
+ blt = BA.bc(BO=12, BI=0)
+ ble = BA.bc(BO=4, BI=1)
+ beq = BA.bc(BO=12, BI=2)
+ bge = BA.bc(BO=4, BI=0)
+ bgt = BA.bc(BO=12, BI=1)
+ bnl = BA.bc(BO=4, BI=0)
+ bne = BA.bc(BO=4, BI=2)
+ bng = BA.bc(BO=4, BI=1)
+ bso = BA.bc(BO=12, BI=3)
+ bns = BA.bc(BO=4, BI=3)
+ bun = BA.bc(BO=12, BI=3)
+ bnu = BA.bc(BO=4, BI=3)
+
+ blta = BA.bca(BO=12, BI=0)
+ blea = BA.bca(BO=4, BI=1)
+ beqa = BA.bca(BO=12, BI=2)
+ bgea = BA.bca(BO=4, BI=0)
+ bgta = BA.bca(BO=12, BI=1)
+ bnla = BA.bca(BO=4, BI=0)
+ bnea = BA.bca(BO=4, BI=2)
+ bnga = BA.bca(BO=4, BI=1)
+ bsoa = BA.bca(BO=12, BI=3)
+ bnsa = BA.bca(BO=4, BI=3)
+ buna = BA.bca(BO=12, BI=3)
+ bnua = BA.bca(BO=4, BI=3)
+
+ bltl = BA.bcl(BO=12, BI=0)
+ blel = BA.bcl(BO=4, BI=1)
+ beql = BA.bcl(BO=12, BI=2)
+ bgel = BA.bcl(BO=4, BI=0)
+ bgtl = BA.bcl(BO=12, BI=1)
+ bnll = BA.bcl(BO=4, BI=0)
+ bnel = BA.bcl(BO=4, BI=2)
+ bngl = BA.bcl(BO=4, BI=1)
+ bsol = BA.bcl(BO=12, BI=3)
+ bnsl = BA.bcl(BO=4, BI=3)
+ bunl = BA.bcl(BO=12, BI=3)
+ bnul = BA.bcl(BO=4, BI=3)
+
+ bltla = BA.bcla(BO=12, BI=0)
+ blela = BA.bcla(BO=4, BI=1)
+ beqla = BA.bcla(BO=12, BI=2)
+ bgela = BA.bcla(BO=4, BI=0)
+ bgtla = BA.bcla(BO=12, BI=1)
+ bnlla = BA.bcla(BO=4, BI=0)
+ bnela = BA.bcla(BO=4, BI=2)
+ bngla = BA.bcla(BO=4, BI=1)
+ bsola = BA.bcla(BO=12, BI=3)
+ bnsla = BA.bcla(BO=4, BI=3)
+ bunla = BA.bcla(BO=12, BI=3)
+ bnula = BA.bcla(BO=4, BI=3)
+
+ bltlr = BA.bclr(BO=12, BI=0)
+ blelr = BA.bclr(BO=4, BI=1)
+ beqlr = BA.bclr(BO=12, BI=2)
+ bgelr = BA.bclr(BO=4, BI=0)
+ bgtlr = BA.bclr(BO=12, BI=1)
+ bnllr = BA.bclr(BO=4, BI=0)
+ bnelr = BA.bclr(BO=4, BI=2)
+ bnglr = BA.bclr(BO=4, BI=1)
+ bsolr = BA.bclr(BO=12, BI=3)
+ bnslr = BA.bclr(BO=4, BI=3)
+ bunlr = BA.bclr(BO=12, BI=3)
+ bnulr = BA.bclr(BO=4, BI=3)
+
+ bltctr = BA.bcctr(BO=12, BI=0)
+ blectr = BA.bcctr(BO=4, BI=1)
+ beqctr = BA.bcctr(BO=12, BI=2)
+ bgectr = BA.bcctr(BO=4, BI=0)
+ bgtctr = BA.bcctr(BO=12, BI=1)
+ bnlctr = BA.bcctr(BO=4, BI=0)
+ bnectr = BA.bcctr(BO=4, BI=2)
+ bngctr = BA.bcctr(BO=4, BI=1)
+ bsoctr = BA.bcctr(BO=12, BI=3)
+ bnsctr = BA.bcctr(BO=4, BI=3)
+ bunctr = BA.bcctr(BO=12, BI=3)
+ bnuctr = BA.bcctr(BO=4, BI=3)
+
+ bltlrl = BA.bclrl(BO=12, BI=0)
+ blelrl = BA.bclrl(BO=4, BI=1)
+ beqlrl = BA.bclrl(BO=12, BI=2)
+ bgelrl = BA.bclrl(BO=4, BI=0)
+ bgtlrl = BA.bclrl(BO=12, BI=1)
+ bnllrl = BA.bclrl(BO=4, BI=0)
+ bnelrl = BA.bclrl(BO=4, BI=2)
+ bnglrl = BA.bclrl(BO=4, BI=1)
+ bsolrl = BA.bclrl(BO=12, BI=3)
+ bnslrl = BA.bclrl(BO=4, BI=3)
+ bunlrl = BA.bclrl(BO=12, BI=3)
+ bnulrl = BA.bclrl(BO=4, BI=3)
+
+ bltctrl = BA.bcctrl(BO=12, BI=0)
+ blectrl = BA.bcctrl(BO=4, BI=1)
+ beqctrl = BA.bcctrl(BO=12, BI=2)
+ bgectrl = BA.bcctrl(BO=4, BI=0)
+ bgtctrl = BA.bcctrl(BO=12, BI=1)
+ bnlctrl = BA.bcctrl(BO=4, BI=0)
+ bnectrl = BA.bcctrl(BO=4, BI=2)
+ bngctrl = BA.bcctrl(BO=4, BI=1)
+ bsoctrl = BA.bcctrl(BO=12, BI=3)
+ bnsctrl = BA.bcctrl(BO=4, BI=3)
+ bunctrl = BA.bcctrl(BO=12, BI=3)
+ bnuctrl = BA.bcctrl(BO=4, BI=3)
+
+ # whew! and we haven't even begun the predicted versions...
+
+ # F.6 Simplified Mnemonics for Condition Register
+ # Logical Instructions
+
+ crset = BA.creqv(crbA="crbD", crbB="crbD")
+ crclr = BA.crxor(crbA="crbD", crbB="crbD")
+ crmove = BA.cror(crbA="crbB")
+ crnot = BA.crnor(crbA="crbB")
+
+ # F.7 Simplified Mnemonics for Trap Instructions
+
+ trap = BA.tw(TO=31, rA=0, rB=0)
+ twlt = BA.tw(TO=16)
+ twle = BA.tw(TO=20)
+ tweq = BA.tw(TO=4)
+ twge = BA.tw(TO=12)
+ twgt = BA.tw(TO=8)
+ twnl = BA.tw(TO=12)
+ twng = BA.tw(TO=24)
+ twllt = BA.tw(TO=2)
+ twlle = BA.tw(TO=6)
+ twlge = BA.tw(TO=5)
+ twlgt = BA.tw(TO=1)
+ twlnl = BA.tw(TO=5)
+ twlng = BA.tw(TO=6)
+
+ twlti = BA.twi(TO=16)
+ twlei = BA.twi(TO=20)
+ tweqi = BA.twi(TO=4)
+ twgei = BA.twi(TO=12)
+ twgti = BA.twi(TO=8)
+ twnli = BA.twi(TO=12)
+ twnei = BA.twi(TO=24)
+ twngi = BA.twi(TO=20)
+ twllti = BA.twi(TO=2)
+ twllei = BA.twi(TO=6)
+ twlgei = BA.twi(TO=5)
+ twlgti = BA.twi(TO=1)
+ twlnli = BA.twi(TO=5)
+ twlngi = BA.twi(TO=6)
+
+ # F.8 Simplified Mnemonics for Special-Purpose
+ # Registers
+
+ mfctr = BA.mfspr(spr=9)
+ mflr = BA.mfspr(spr=8)
+ mftbl = BA.mftb(spr=268)
+ mftbu = BA.mftb(spr=269)
+ mfxer = BA.mfspr(spr=1)
+
+ mtctr = BA.mtspr(spr=9)
+ mtlr = BA.mtspr(spr=8)
+ mtxer = BA.mtspr(spr=1)
+
+ # F.9 Recommended Simplified Mnemonics
+
+ nop = BA.ori(rS=0, rA=0, UIMM=0)
+
+ li = BA.addi(rA=0)
+ lis = BA.addis(rA=0)
+
+ mr = BA.or_(rB="rS")
+ mrx = BA.or_x(rB="rS")
+
+ not_ = BA.nor(rB="rS")
+ not_x = BA.norx(rB="rS")
+
+ mtcr = BA.mtcrf(CRM=0xFF)
+
def hi(w):
    """Return the upper 16 bits of *w* (arithmetic shift)."""
    upper = w >> 16
    return upper
+
def ha(w):
    """Upper 16 bits of *w*, adjusted upward by one when bit 15 is set
    (i.e. when the low half, taken as signed, is negative) -- the
    'addis' half of an ha/la address pair."""
    return (w >> 16) + ((w >> 15) & 1)
+
def lo(w):
    """Return the low 16 bits of *w* as an unsigned value."""
    return w & 0xFFFF
+
def la(w):
    """Return the low 16 bits of *w* as a signed (sign-extended) value."""
    v = w & 0xFFFF
    if v >= 0x8000:
        v -= 0x10000    # two's-complement sign extension to 32 bits
    return v
+
class MyPPCAssembler(PPCAssembler):
    """PPCAssembler plus helpers for materializing 32-bit constants."""
    def load_word(self, rD, word):
        # rD = word, via addis/ori of the hi/lo halves; ori takes an
        # unsigned immediate, so no sign-compensation is needed
        self.addis(rD, 0, hi(word))
        self.ori(rD, rD, lo(word))
    def load_from(self, rD, addr):
        # rD = *(addr); uses ha/la so that the sign-extended lwz
        # displacement is compensated in the addis half
        self.addis(rD, 0, ha(addr))
        self.lwz(rD, rD, la(addr))
+
def b(n):
    """Render the low 32 bits of *n* as a binary string, MSB first."""
    return format(n & 0xFFFFFFFF, '032b')
+
+from pypy.jit.backend.ppc.ppcgen.regname import *
+
def main():
    # Smoke test 1: add the two int arguments and box the result via
    # PyInt_FromLong (relies on CPython int object layout).

    a = MyPPCAssembler()

    a.lwz(r5, r4, 12)
    a.lwz(r6, r4, 16)
    a.lwz(r7, r5, 8)
    a.lwz(r8, r6, 8)
    a.add(r3, r7, r8)
    a.load_word(r4, lookup("PyInt_FromLong"))
    a.mtctr(r4)
    a.bctr()

    f = a.assemble(True)
    print f(12,3)

    # Smoke test 2: read the 64-bit timebase (mftbu/mftbl, re-reading
    # until the upper half is stable) and box it as a Python long.
    a = MyPPCAssembler()
    a.label("loop")
    a.mftbu(r3)
    a.mftbl(r4)
    a.mftbu(r5)
    a.cmpw(r5, r3)
    a.bne(-16)
    a.load_word(r5, lookup("PyLong_FromUnsignedLongLong"))
    a.mtctr(r5)
    a.bctr()

    tb = a.assemble(True)
    t0 = tb()
    print [tb() - t0 for i in range(10)]
diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_field.py b/pypy/jit/backend/ppc/ppcgen/ppc_field.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/ppc_field.py
@@ -0,0 +1,91 @@
+from pypy.jit.backend.ppc.ppcgen.field import Field
+from pypy.jit.backend.ppc.ppcgen import regname
+
fields = { # bit margins are *inclusive*! (and bit 0 is
           # most-significant, 31 least significant)
    # name: (left, right[, signedness[, value wrapper -- see regname]])
    "opcode": ( 0,  5),
    "AA":     (30, 30),
    "BD":     (16, 29, 'signed'),
    "BI":     (11, 15),
    "BO":     ( 6, 10),
    "crbA":   (11, 15),
    "crbB":   (16, 20),
    "crbD":   ( 6, 10),
    "crfD":   ( 6,  8),
    "crfS":   (11, 13),
    "CRM":    (12, 19),
    "d":      (16, 31, 'signed'),
    "FM":     ( 7, 14),
    "frA":    (11, 15, 'unsigned', regname._F),
    "frB":    (16, 20, 'unsigned', regname._F),
    "frC":    (21, 25, 'unsigned', regname._F),
    "frD":    ( 6, 10, 'unsigned', regname._F),
    "frS":    ( 6, 10, 'unsigned', regname._F),
    "IMM":    (16, 19),
    "L":      (10, 10),
    "LI":     ( 6, 29, 'signed'),
    "LK":     (31, 31),
    "MB":     (21, 25),
    "ME":     (26, 30),
    "NB":     (16, 20),
    "OE":     (21, 21),
    "rA":     (11, 15, 'unsigned', regname._R),
    "rB":     (16, 20, 'unsigned', regname._R),
    "Rc":     (31, 31),
    "rD":     ( 6, 10, 'unsigned', regname._R),
    "rS":     ( 6, 10, 'unsigned', regname._R),
    "SH":     (16, 20),
    "SIMM":   (16, 31, 'signed'),
    "SR":     (12, 15),
    "spr":    (11, 20),
    "TO":     ( 6, 10),
    "UIMM":   (16, 31),
    "XO1":    (21, 30),
    "XO2":    (22, 30),
    "XO3":    (26, 30),
}
+
+
class IField(Field):
    """Field type for the branch displacements LI and BD.

    The field's right margin stops at bit 29; bits 30/31 of the
    instruction are the AA/LK flags, which encode/decode leave alone by
    working on the mask shifted left by 2.
    """
    def __init__(self, name, left, right, signedness):
        assert signedness == 'signed'   # LI/BD are always signed displacements
        super(IField, self).__init__(name, left, right, signedness)
    def encode(self, value):
        # XXX should check range
        # keep only the field's bits and force the low two (AA/LK) to zero
        value &= self.mask << 2 | 0x3
        return value & ~0x3
    def decode(self, inst):
        mask = self.mask << 2
        v = inst & mask
        # sign-extend when the field's top bit is set
        if self.signed and (~mask >> 1) & mask & v:
            # NOTE(review): self.mask is used unshifted here, unlike `mask`
            # above -- confirm this matches Field's mask convention
            return ~(~v&self.mask)
        else:
            return v
    def r(self, i, labels, pc):
        # disassembly helper: for relative branches (AA clear), show the
        # displacement and any known label names at the target
        if not ppc_fields['AA'].decode(i):
            v = self.decode(i)
            if pc+v in labels:
                return "%s (%r)"%(v, ', '.join(labels[pc+v]))
            else:
                return self.decode(i)
+
+
class spr(Field):
    """The 10-bit SPR field: its two 5-bit halves are stored swapped,
    so swap them on the way in and on the way out."""
    def encode(self, value):
        swapped = ((value & 31) << 5) | ((value >> 5) & 31)
        return super(spr, self).encode(swapped)
    def decode(self, inst):
        raw = super(spr, self).decode(inst)
        return ((raw & 31) << 5) | ((raw >> 5) & 31)
+
+# other special fields?
+
# Instantiate the field objects: the three specialized ones explicitly,
# then a plain Field for everything else declared in `fields`.
ppc_fields = {
    "LI":  IField("LI", *fields["LI"]),
    "BD":  IField("BD", *fields["BD"]),
    "spr": spr("spr", *fields["spr"]),
}

for f in fields:
    if f not in ppc_fields:
        ppc_fields[f] = Field(f, *fields[f])
diff --git a/pypy/jit/backend/ppc/ppcgen/ppc_form.py b/pypy/jit/backend/ppc/ppcgen/ppc_form.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/ppc_form.py
@@ -0,0 +1,13 @@
+from pypy.jit.backend.ppc.ppcgen.form import Form
+from pypy.jit.backend.ppc.ppcgen.ppc_field import ppc_fields
+
class PPCForm(Form):
    """Form specialized for PPC: every instruction starts with the 6-bit
    primary opcode, so it is prepended to each form's field list and
    bound positionally when the form is called."""
    fieldmap = ppc_fields

    def __init__(self, *fnames):
        super(PPCForm, self).__init__(*("opcode",) + fnames)

    def __call__(self, opcode, **specializations):
        specializations['opcode'] = opcode
        return super(PPCForm, self).__call__(**specializations)
+
diff --git a/pypy/jit/backend/ppc/ppcgen/pystructs.py b/pypy/jit/backend/ppc/ppcgen/pystructs.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/pystructs.py
@@ -0,0 +1,22 @@
class PyVarObject(object):
    # byte offset of ob_size in a CPython PyVarObject (4-byte fields assumed)
    ob_size = 8
+
class PyObject(object):
    # PyObject header offsets: refcount first, type pointer after it
    ob_refcnt = 0
    ob_type = 4
+
class PyTupleObject(object):
    # byte offset of the inline ob_item array of element pointers
    ob_item = 12
+
class PyTypeObject(object):
    # selected PyTypeObject slot offsets
    tp_name = 12
    tp_basicsize = 16
    tp_itemsize = 20
    tp_dealloc = 24
+
class PyFloatObject(object):
    # byte offset of the C double payload
    ob_fval = 8
+
class PyIntObject(object):
    # byte offset of the C long payload
    ob_ival = 8
+
diff --git a/pypy/jit/backend/ppc/ppcgen/rassemblermaker.py b/pypy/jit/backend/ppc/ppcgen/rassemblermaker.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/rassemblermaker.py
@@ -0,0 +1,63 @@
+from pypy.tool.sourcetools import compile2
+from pypy.rlib.rarithmetic import r_uint
+from pypy.jit.codegen.ppc.ppcgen.form import IDesc, IDupDesc
+
+## "opcode": ( 0, 5),
+## "rA": (11, 15, 'unsigned', regname._R),
+## "rB": (16, 20, 'unsigned', regname._R),
+## "Rc": (31, 31),
+## "rD": ( 6, 10, 'unsigned', regname._R),
+## "OE": (21, 21),
+## "XO2": (22, 30),
+
+## XO = Form("rD", "rA", "rB", "OE", "XO2", "Rc")
+
+## add = XO(31, XO2=266, OE=0, Rc=0)
+
+## def add(rD, rA, rB):
+## v = 0
+## v |= (31&(2**(5-0+1)-1)) << (32-5-1)
+## ...
+## return v
+
def make_func(name, desc):
    """Generate a fast assembler method for one instruction description.

    Builds the source of a function that ORs every field's masked,
    shifted value into an r_uint and emits the word, then compiles it
    with compile2.  Fields fixed by the description become inline
    constants; the remaining fields become positional parameters.

    NOTE(review): IDesc/IDupDesc are imported above from the
    pypy.jit.codegen.ppc path while this file lives under
    pypy.jit.backend.ppc -- confirm the import path.
    """
    sig = []           # parameter names, in field order
    fieldvalues = []   # (field, expression-string-or-constant) pairs
    for field in desc.fields:
        if field in desc.specializations:
            fieldvalues.append((field, desc.specializations[field]))
        else:
            sig.append(field.name)
            fieldvalues.append((field, field.name))
    if isinstance(desc, IDupDesc):
        # duplicated fields reuse another parameter's value (e.g. mr's rB=rS)
        for destfield, srcfield in desc.dupfields.iteritems():
            fieldvalues.append((destfield, srcfield.name))
    body = ['v = r_uint(0)']
    assert 'v' not in sig # that wouldn't be funny
    #body.append('print %r'%name + ', ' + ', '.join(["'%s:', %s"%(s, s) for s in sig]))
    for field, value in fieldvalues:
        if field.name == 'spr':
            # the spr field stores its two 5-bit halves swapped
            body.append('spr = (%s&31) << 5 | (%s >> 5 & 31)'%(value, value))
            value = 'spr'
        body.append('v |= (%3s & r_uint(%#05x)) << %d'%(value,
                                                        field.mask,
                                                        (32 - field.right - 1)))
    body.append('self.emit(v)')
    src = 'def %s(self, %s):\n %s'%(name, ', '.join(sig), '\n '.join(body))
    d = {'r_uint':r_uint}
    #print src
    exec compile2(src) in d
    return d[name]
+
def make_rassembler(cls):
    """Return an RPython-friendly clone of assembler class *cls*.

    Every IDesc attribute is replaced by a generated plain function
    (make_func); base classes are converted recursively.  The new class
    gets a default emit() that appends machine words to self.insts.
    """
    bases = [make_rassembler(b) for b in cls.__bases__]
    ns = {}
    for k, v in cls.__dict__.iteritems():
        if isinstance(v, IDesc):
            v = make_func(k, v)
        ns[k] = v
    rcls = type('R' + cls.__name__, tuple(bases), ns)
    def emit(self, value):
        self.insts.append(value)
    rcls.emit = emit
    return rcls
diff --git a/pypy/jit/backend/ppc/ppcgen/regname.py b/pypy/jit/backend/ppc/ppcgen/regname.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/regname.py
@@ -0,0 +1,18 @@
+class _R(int):
+ def __repr__(self):
+ return "r%s"%(super(_R, self).__repr__(),)
+ __str__ = __repr__
+class _F(int):
+ def __repr__(self):
+ return "fr%s"%(super(_F, self).__repr__(),)
+ __str__ = __repr__
+
# Bind r0..r31 and fr0..fr31 as wrapped ints so they repr nicely;
# the condition-register fields crf0..crf7 stay plain ints.
r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, \
   r13, r14, r15, r16, r17, r18, r19, r20, r21, r22, \
   r23, r24, r25, r26, r27, r28, r29, r30, r31 = map(_R, range(32))

fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7, fr8, fr9, fr10, fr11, fr12, \
   fr13, fr14, fr15, fr16, fr17, fr18, fr19, fr20, fr21, fr22, \
   fr23, fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31 = map(_F, range(32))

crf0, crf1, crf2, crf3, crf4, crf5, crf6, crf7 = range(8)
diff --git a/pypy/jit/backend/ppc/ppcgen/symbol_lookup.py b/pypy/jit/backend/ppc/ppcgen/symbol_lookup.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/symbol_lookup.py
@@ -0,0 +1,15 @@
+
def lookup(sym):
    """Resolve C symbol *sym* in the current process.

    On first use this builds/imports the _ppcgen.c helper next to this
    file and rebinds the module-global `lookup` to the real
    implementation (NSLookupAndBindSymbol where available -- which wants
    a leading underscore -- falling back to dlsym), so later calls skip
    this setup.
    """
    global lookup
    import py

    # compile-and-import the helper extension living next to this module
    _ppcgen = py.magic.autopath().dirpath().join('_ppcgen.c')._getpymodule()

    try:
        from _ppcgen import NSLookupAndBindSymbol

        def lookup(sym):
            # this API expects the underscore-prefixed symbol name
            return NSLookupAndBindSymbol('_' + sym)
    except ImportError:
        from _ppcgen import dlsym as lookup
    return lookup(sym)
diff --git a/pypy/jit/backend/ppc/ppcgen/test/__init__.py b/pypy/jit/backend/ppc/ppcgen/test/__init__.py
new file mode 100644
diff --git a/pypy/jit/backend/ppc/ppcgen/test/autopath.py b/pypy/jit/backend/ppc/ppcgen/test/autopath.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/test/autopath.py
@@ -0,0 +1,114 @@
+"""
+self cloning, automatic path configuration
+
+copy this into any subdirectory of pypy from which scripts need
+to be run, typically all of the test subdirs.
+The idea is that any such script simply issues
+
+ import autopath
+
+and this will make sure that the parent directory containing "pypy"
+is in sys.path.
+
+If you modify the master "autopath.py" version (in pypy/tool/autopath.py)
+you can directly run it which will copy itself on all autopath.py files
+it finds under the pypy root directory.
+
+This module always provides these attributes:
+
+ pypydir pypy root directory path
+ this_dir directory where this autopath.py resides
+
+"""
+
+
def __dirinfo(part):
    """ return (partdir, this_dir) and insert parent of partdir
    into sys.path. If the parent directories don't have the part
    an EnvironmentError is raised."""

    import sys, os
    try:
        head = this_dir = os.path.realpath(os.path.dirname(__file__))
    except NameError:
        # no __file__ when run via exec; fall back to the script path
        head = this_dir = os.path.realpath(os.path.dirname(sys.argv[0]))

    # walk upwards until we leave the directory named *part* (e.g. 'pypy')
    while head:
        partdir = head
        head, tail = os.path.split(head)
        if tail == part:
            break
    else:
        raise EnvironmentError, "'%s' missing in '%r'" % (partdir, this_dir)

    pypy_root = os.path.join(head, '')
    try:
        sys.path.remove(head)
    except ValueError:
        pass
    # make the tree containing *part* importable, with top priority
    sys.path.insert(0, head)

    # find already-imported top-level modules whose files physically live
    # inside the pypy tree and compute their proper dotted names
    munged = {}
    for name, mod in sys.modules.items():
        if '.' in name:
            continue
        fn = getattr(mod, '__file__', None)
        if not isinstance(fn, str):
            continue
        newname = os.path.splitext(os.path.basename(fn))[0]
        if not newname.startswith(part + '.'):
            continue
        path = os.path.join(os.path.dirname(os.path.realpath(fn)), '')
        if path.startswith(pypy_root) and newname != part:
            modpaths = os.path.normpath(path[len(pypy_root):]).split(os.sep)
            if newname != '__init__':
                modpaths.append(newname)
            modpath = '.'.join(modpaths)
            if modpath not in sys.modules:
                munged[modpath] = mod

    # register the dotted aliases so both spellings share module state
    for name, mod in munged.iteritems():
        if name not in sys.modules:
            sys.modules[name] = mod
        if '.' in name:
            prename = name[:name.rfind('.')]
            postname = name[len(prename)+1:]
            if prename not in sys.modules:
                __import__(prename)
            if not hasattr(sys.modules[prename], postname):
                setattr(sys.modules[prename], postname, mod)

    return partdir, this_dir
+
def __clone():
    """ clone master version of autopath.py into all subdirs """
    from os.path import join, walk
    # only the master copy in pypy/tool may be cloned outwards
    if not this_dir.endswith(join('pypy','tool')):
        raise EnvironmentError("can only clone master version "
                               "'%s'" % join(pypydir, 'tool',_myname))


    def sync_walker(arg, dirname, fnames):
        # overwrite any stale copy of autopath.py found while walking
        if _myname in fnames:
            fn = join(dirname, _myname)
            f = open(fn, 'rwb+')
            try:
                if f.read() == arg:
                    print "checkok", fn
                else:
                    print "syncing", fn
                    f = open(fn, 'w')
                    f.write(arg)
            finally:
                f.close()
    s = open(join(pypydir, 'tool', _myname), 'rb').read()
    walk(pypydir, sync_walker, s)
+
_myname = 'autopath.py'

# set guaranteed attributes

# importing this module has the side effect of fixing up sys.path
pypydir, this_dir = __dirinfo('pypy')

if __name__ == '__main__':
    __clone()
diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_field.py b/pypy/jit/backend/ppc/ppcgen/test/test_field.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/test/test_field.py
@@ -0,0 +1,66 @@
+import autopath
+
+from pypy.jit.codegen.ppc.ppcgen.field import Field
+from py.test import raises
+
+import random
+
maxppcint = 0x7fffffff   # largest 32-bit signed integer
+
class TestFields(object):
    """Randomized round-trip and range checks for Field encode/decode.

    Fix: test_decode contained the same assertion twice in a row in its
    middle loop; the duplicate is removed.
    """

    def test_decode(self):
        # this test is crappy
        field = Field("test", 0, 31)
        for i in range(100):
            j = random.randrange(maxppcint)
            assert field.decode(j) == j
        field = Field("test", 0, 31-4)
        for i in range(100):
            j = random.randrange(maxppcint)
            assert field.decode(j) == j>>4
        field = Field("test", 3, 31-4)
        for i in range(100):
            j = random.randrange(maxppcint>>3)
            assert field.decode(j) == j>>4


    def test_decode_unsigned(self):
        field = Field("test", 16, 31)
        for i in range(1000):
            hi = long(random.randrange(0x10000)) << 16
            lo = long(random.randrange(0x10000))
            assert field.decode(hi|lo) == lo


    def test_decode_signed(self):
        field = Field("test", 16, 31, 'signed')
        for i in range(1000):
            hi = long(random.randrange(0x10000)) << 16
            lo = long(random.randrange(0x10000))
            word = hi|lo
            if lo & 0x8000:
                lo |= ~0xFFFF   # expected value is sign-extended
            assert field.decode(word) == lo


    def test_error_checking_unsigned(self):
        # a 16-bit unsigned field accepts 0..65535 wherever it sits
        for b in range(0, 17):
            field = Field("test", b, 15+b)
            assert field.decode(field.encode(0)) == 0
            assert field.decode(field.encode(32768)) == 32768
            assert field.decode(field.encode(65535)) == 65535
            raises(ValueError, field.encode, -32768)
            raises(ValueError, field.encode, -1)
            raises(ValueError, field.encode, 65536)


    def test_error_checking_signed(self):
        # a 16-bit signed field accepts -32768..32767
        for b in range(0, 17):
            field = Field("test", b, 15+b, 'signed')
            assert field.decode(field.encode(0)) == 0
            assert field.decode(field.encode(-32768)) == -32768
            assert field.decode(field.encode(32767)) == 32767
            raises(ValueError, field.encode, 32768)
            raises(ValueError, field.encode, -32769)
+
diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_form.py b/pypy/jit/backend/ppc/ppcgen/test/test_form.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/test/test_form.py
@@ -0,0 +1,68 @@
import autopath  # must stay first: it fixes sys.path for the pypy imports

import random
import sys

from py.test import raises

from pypy.jit.codegen.ppc.ppcgen.assembler import Assembler
from pypy.jit.codegen.ppc.ppcgen.field import Field
from pypy.jit.codegen.ppc.ppcgen.form import Form, FormException
from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import b
+
+# 0 31
+# +-------------------------------+
+# | h | l |
+# +-------------------------------+
+# | hh | hl | lh | ll |
+# +-------------------------------+
+
# toy 16/8-bit field layout used by the forms below (see diagram above)
test_fieldmap = {
    'l' : Field('l', 16, 31),
    'h' : Field('h', 0, 15),
    'll': Field('ll', 24, 31),
    'lh': Field('lh', 16, 23),
    'hl': Field('hl', 8, 15),
    'hh': Field('hh', 0, 7),
}
+
def p(word):
    """Pack *word* as 4 big-endian bytes, for comparing assembled output."""
    import struct
    packed = struct.pack('>i', word)
    return packed
+
+
class TestForm(Form):
    # a Form wired to the toy fieldmap above
    fieldmap = test_fieldmap
+
class TestForms(object):
    """Exercises Form construction, specialization and defaults."""

    def test_bitclash(self):
        # overlapping fields must be rejected
        raises(FormException, TestForm, 'h', 'hh')
        raises(FormException, TestForm,
               Field('t1', 0, 0), Field('t2', 0, 0))

    def test_basic(self):
        class T(Assembler):
            i = TestForm('h', 'l')()
            j = i(h=1)
            k = i(l=3)
            raises(FormException, k, l=0)   # an already-fixed field cannot be re-specialized
        a = T()
        a.i(5, 6)
        assert p(a.assemble0()[0]) == '\000\005\000\006'
        a = T()
        a.j(2)
        assert p(a.assemble0()[0]) == '\000\001\000\002'
        a = T()
        a.k(4)
        assert p(a.assemble0()[0]) == '\000\004\000\003'

    def test_defdesc(self):
        class T(Assembler):
            i = TestForm('hh', 'hl', 'lh', 'll')()
            i.default(hl=0).default(hh=1)
        a = T()
        a.i(1, 2, 3, 4)
        assert p(a.assemble0()[0]) == '\001\002\003\004'
        a = T()
        a.i(1, 3, 4)   # hl defaulted to 0
        assert p(a.assemble0()[0]) == '\001\000\003\004'
        a = T()
        a.i(3, 4)      # hh defaulted to 1 as well
        assert p(a.assemble0()[0]) == '\001\000\003\004'
diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_func_builder.py b/pypy/jit/backend/ppc/ppcgen/test/test_func_builder.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/test/test_func_builder.py
@@ -0,0 +1,87 @@
+import py
+import random, sys, os
+
+from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import MyPPCAssembler
+from pypy.jit.codegen.ppc.ppcgen.symbol_lookup import lookup
+from pypy.jit.codegen.ppc.ppcgen.func_builder import make_func
+from pypy.jit.codegen.ppc.ppcgen import form, func_builder
+from pypy.jit.codegen.ppc.ppcgen.regname import *
+
class TestFuncBuilderTest(object):
    """Tests for func_builder.make_func; only runnable on a PPC Mac.

    NOTE(review): this module uses `raises` without importing it
    (test_field.py does `from py.test import raises`) -- confirm and fix
    the missing import.
    """

    def setup_class(cls):
        if (not hasattr(os, 'uname') or
            os.uname()[-1] != 'Power Macintosh'):
            py.test.skip("can't test all of ppcgen on non-PPC!")

    def test_simple(self):
        # identity: r3 (first "O" argument) is returned unchanged
        a = MyPPCAssembler()
        a.blr()
        f = make_func(a, "O", "O")
        assert f(1) == 1
        raises(TypeError, f)
        raises(TypeError, f, 1, 2)

    def test_less_simple(self):
        # tail-call PyNumber_Add on the two object arguments
        a = MyPPCAssembler()
        s = lookup("PyNumber_Add")
        a.load_word(r5, s)
        a.mtctr(r5)
        a.bctr()
        f = make_func(a, "O", "OO")
        raises(TypeError, f)
        raises(TypeError, f, 1)
        assert f(1, 2) == 3
        raises(TypeError, f, 1, 2, 3)

    def test_signature(self):
        # "i" arguments arrive unboxed in r3/r4
        a = MyPPCAssembler()
        a.add(r3, r3, r4)
        a.blr()
        f = make_func(a, "i", "ii")
        raises(TypeError, f)
        raises(TypeError, f, 1)
        assert f(1, 2) == 3
        raises(TypeError, f, 1, 2, 3)
        raises(TypeError, f, 1, "2")

    def test_signature2(self):
        # five int arguments in r3..r7; result boxed via PyInt_FromLong
        a = MyPPCAssembler()
        a.add(r3, r3, r4)
        a.add(r3, r3, r5)
        a.add(r3, r3, r6)
        a.add(r3, r3, r7)
        s = lookup("PyInt_FromLong")
        a.load_word(r0, s)
        a.mtctr(r0)
        a.bctr()
        f = make_func(a, "O", "iiiii")
        raises(TypeError, f)
        raises(TypeError, f, 1)
        assert f(1, 2, 3, 4, 5) == 1 + 2 + 3 + 4 + 5
        raises(TypeError, f, 1, 2, 3)
        raises(TypeError, f, 1, "2", 3, 4, 5)

    def test_floats(self):
        # "f" arguments arrive in fr1/fr2
        a = MyPPCAssembler()
        a.fadd(fr1, fr1, fr2)
        a.blr()
        f = make_func(a, 'f', 'ff')
        raises(TypeError, f)
        raises(TypeError, f, 1.0)
        assert f(1.0, 2.0) == 3.0
        raises(TypeError, f, 1.0, 2.0, 3.0)
        raises(TypeError, f, 1.0, 2)

    def test_fast_entry(self):
        a = MyPPCAssembler()
        a.blr()
        f = make_func(a, "O", "O")
        assert f(1) == 1
        b = MyPPCAssembler()
        from pypy.jit.codegen.ppc.ppcgen import util
        # eurgh!:
        # jump straight at f's FAST_ENTRY_LABEL inside its code buffer
        b.load_word(r0, util.access_at(id(f.code), 8) + f.FAST_ENTRY_LABEL)
        b.mtctr(r0)
        b.bctr()
        g = make_func(b, "O", "O")
        assert g(1) == 1
diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_ppc.py b/pypy/jit/backend/ppc/ppcgen/test/test_ppc.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/test/test_ppc.py
@@ -0,0 +1,212 @@
+import py
+import random, sys, os
+
+from pypy.jit.backend.ppc.ppcgen.ppc_assembler import BasicPPCAssembler, MyPPCAssembler
+from pypy.jit.backend.ppc.ppcgen.symbol_lookup import lookup
+from pypy.jit.backend.ppc.ppcgen.regname import *
+from pypy.jit.backend.ppc.ppcgen import form, pystructs
+from pypy.jit.backend.detect_cpu import autodetect_main_model
+
+
class TestDisassemble(object):
    def test_match(self):
        # an assembled `add` word must be recognized by the add descriptor
        A = BasicPPCAssembler
        a = A()
        a.add(1, 2, 3)
        inst = a.insts[-1]
        assert A.add.match(inst.assemble())
+
class TestAssemble(object):
    """Assemble-and-run test; skipped unless the host CPU is PPC."""

    def setup_class(cls):
        if autodetect_main_model() not in ["ppc", "ppc64"]:
            py.test.skip("can't test all of ppcgen on non-PPC!")

    def test_add_imm(self):
        a = MyPPCAssembler()

        # r3 = 6; r3 += 1; return r3
        a.li(3, 6)
        a.addi(3, 3, 1)
        a.blr()

        f = a.assemble()
        assert f() == 7
+
+"""
+class TestAssemble(object):
+
+ def setup_class(cls):
+ #if (not hasattr(os, 'uname') or
+ if autodetect_main_model() not in ["ppc", "ppc64"]:
+ #os.uname()[-1] in ['Power Macintosh', 'PPC64']:
+
+ py.test.skip("can't test all of ppcgen on non-PPC!")
+
+ def test_tuplelength(self):
+ a = MyPPCAssembler()
+
+ a.lwz(3, 4, pystructs.PyVarObject.ob_size)
+ a.load_word(5, lookup("PyInt_FromLong"))
+ a.mtctr(5)
+ a.bctr()
+
+ f = a.assemble()
+ assert f() == 0
+ assert f(1) == 1
+ assert f('') == 1
+
+
+ def test_tuplelength2(self):
+ a = MyPPCAssembler()
+
+ a.mflr(0)
+ a.stw(0, 1, 8)
+ a.stwu(1, 1, -80)
+ a.mr(3, 4)
+ a.load_word(5, lookup("PyTuple_Size"))
+ a.mtctr(5)
+ a.bctrl()
+ a.load_word(5, lookup("PyInt_FromLong"))
+ a.mtctr(5)
+ a.bctrl()
+ a.lwz(0, 1, 88)
+ a.addi(1, 1, 80)
+ a.mtlr(0)
+ a.blr()
+
+ f = a.assemble()
+ assert f() == 0
+ assert f(1) == 1
+ assert f('') == 1
+ assert f('', 3) == 2
+
+
+ def test_intcheck(self):
+ a = MyPPCAssembler()
+
+ a.lwz(r5, r4, pystructs.PyVarObject.ob_size)
+ a.cmpwi(r5, 1)
+ a.bne("not_one")
+ a.lwz(r5, r4, pystructs.PyTupleObject.ob_item + 0*4)
+ a.lwz(r5, r5, 4)
+ a.load_word(r6, lookup("PyInt_Type"))
+ a.cmpw(r5, r6)
+ a.bne("not_int")
+ a.li(r3, 1)
+ a.b("exit")
+ a.label("not_int")
+ a.li(r3, 0)
+ a.b("exit")
+ a.label("not_one")
+ a.li(r3, 2)
+ a.label("exit")
+ a.load_word(r5, lookup("PyInt_FromLong"))
+ a.mtctr(r5)
+ a.bctr()
+
+ f = a.assemble()
+
+ assert f() == 2
+ assert f("", "") == 2
+ assert f("") == 0
+ assert f(1) == 1
+
+
+ def test_raise(self):
+ a = MyPPCAssembler()
+
+ a.mflr(0)
+ a.stw(0, 1, 8)
+ a.stwu(1, 1, -80)
+
+ err_set = lookup("PyErr_SetObject")
+ exc = lookup("PyExc_ValueError")
+
+ a.load_word(5, err_set)
+ a.mtctr(5)
+ a.load_from(3, exc)
+ a.mr(4, 3)
+ a.bctrl()
+
+ a.li(3, 0)
+
+ a.lwz(0, 1, 88)
+ a.addi(1, 1, 80)
+ a.mtlr(0)
+ a.blr()
+
+ raises(ValueError, a.assemble())
+
+
+ def test_makestring(self):
+ a = MyPPCAssembler()
+
+ a.li(r3, 0)
+ a.li(r4, 0)
+ a.load_word(r5, lookup("PyString_FromStringAndSize"))
+ a.mtctr(r5)
+ a.bctr()
+
+ f = a.assemble()
+ assert f() == ''
+
+
+ def test_numberadd(self):
+ a = MyPPCAssembler()
+
+ a.lwz(r5, r4, pystructs.PyVarObject.ob_size)
+ a.cmpwi(r5, 2)
+ a.bne("err_out")
+
+ a.lwz(r3, r4, 12)
+ a.lwz(r4, r4, 16)
+
+ a.load_word(r5, lookup("PyNumber_Add"))
+ a.mtctr(r5)
+ a.bctr()
+
+ a.label("err_out")
+
+ a.mflr(r0)
+ a.stw(r0, r1, 8)
+ a.stwu(r1, r1, -80)
+
+ err_set = lookup("PyErr_SetObject")
+ exc = lookup("PyExc_TypeError")
+
+ a.load_word(r5, err_set)
+ a.mtctr(r5)
+ a.load_from(r3, exc)
+ a.mr(r4, r3)
+ a.bctrl()
+
+ a.li(r3, 0)
+
+ a.lwz(r0, r1, 88)
+ a.addi(r1, r1, 80)
+ a.mtlr(r0)
+ a.blr()
+
+ f = a.assemble()
+
+ raises(TypeError, f)
+ raises(TypeError, f, '', 1)
+ raises(TypeError, f, 1)
+ raises(TypeError, f, 1, 2, 3)
+ assert f(1, 2) == 3
+ assert f('a', 'b') == 'ab'
+
+
+ def test_assemblerChecks(self):
+ def testFailure(idesc, *args):
+ a = MyPPCAssembler()
+ raises(ValueError, idesc.__get__(a), *args)
+ def testSucceed(idesc, *args):
+ a = MyPPCAssembler()
+ # "assertNotRaises" :-)
+ idesc.__get__(a)(*args)
+ testFailure(MyPPCAssembler.add, 32, 31, 30)
+ testFailure(MyPPCAssembler.add, -1, 31, 30)
+ testSucceed(MyPPCAssembler.bne, -12)
+ testSucceed(MyPPCAssembler.lwz, 0, 0, 32767)
+ testSucceed(MyPPCAssembler.lwz, 0, 0, -32768)
+"""
diff --git a/pypy/jit/backend/ppc/ppcgen/test/test_rassemblermaker.py b/pypy/jit/backend/ppc/ppcgen/test/test_rassemblermaker.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/test/test_rassemblermaker.py
@@ -0,0 +1,39 @@
+from pypy.jit.codegen.ppc.ppcgen.rassemblermaker import make_rassembler
+from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import PPCAssembler
+
RPPCAssembler = make_rassembler(PPCAssembler)

# reference encoding of `add 3, 3, 4`, produced by the plain assembler
_a = PPCAssembler()
_a.add(3, 3, 4)
add_r3_r3_r4 = _a.insts[0].assemble()
+
def test_simple():
    # the generated assembler emits raw machine words into .insts
    ra = RPPCAssembler()
    ra.add(3, 3, 4)
    assert ra.insts == [add_r3_r3_r4]
+
def test_rtyped():
    # the generated methods must be RPython-translatable
    from pypy.rpython.test.test_llinterp import interpret
    def f():
        ra = RPPCAssembler()
        ra.add(3, 3, 4)
        ra.lwz(1, 1, 1) # ensure that high bit doesn't produce long but r_uint
        return ra.insts[0]
    res = interpret(f, [])
    assert res == add_r3_r3_r4
+
def test_mnemonic():
    # a simplified mnemonic (mr duplicates rS into rB) must encode the same
    # in both assemblers; plain stores instruction objects, R stores words
    mrs = []
    for A in PPCAssembler, RPPCAssembler:
        a = A()
        a.mr(3, 4)
        mrs.append(a.insts[0])
    assert mrs[0].assemble() == mrs[1]
+
def test_spr_coding():
    # the spr field's swapped 5-bit halves must be handled identically
    # by the generated code and the plain assembler
    mrs = []
    for A in PPCAssembler, RPPCAssembler:
        a = A()
        a.mtctr(3)
        mrs.append(a.insts[0])
    assert mrs[0].assemble() == mrs[1]
diff --git a/pypy/jit/backend/ppc/ppcgen/util.py b/pypy/jit/backend/ppc/ppcgen/util.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/ppcgen/util.py
@@ -0,0 +1,23 @@
+from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import MyPPCAssembler
+from pypy.jit.codegen.ppc.ppcgen.func_builder import make_func
+
+from regname import *
+
def access_at():
    # build access_at(base, index) -> word loaded from base+index;
    # the function is immediately replaced by the built callable below
    a = MyPPCAssembler()

    a.lwzx(r3, r3, r4)
    a.blr()

    return make_func(a, "i", "ii")

access_at = access_at()
+
def itoO():
    # returns its int argument unchanged; the "O" return code presumably
    # makes the caller treat the word in r3 as an object pointer -- confirm
    a = MyPPCAssembler()

    a.blr()

    return make_func(a, "O", "i")

itoO = itoO()
diff --git a/pypy/jit/backend/ppc/regalloc.py b/pypy/jit/backend/ppc/regalloc.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/regalloc.py
@@ -0,0 +1,213 @@
+from pypy.jit.codegen.ppc.instruction import \
+ gprs, fprs, crfs, ctr, \
+ NO_REGISTER, GP_REGISTER, FP_REGISTER, CR_FIELD, CT_REGISTER, \
+ CMPInsn, Spill, Unspill, stack_slot, \
+ rSCRATCH
+
+from pypy.jit.codegen.ppc.conftest import option
+
DEBUG_PRINT = option.debug_print   # verbose allocator tracing, from conftest
+
class RegisterAllocation:
    """Assigns a register or stack slot to every Var across an insn list.

    Spill policy is least-recently-used.  self.insns accumulates the
    rewritten instruction stream, with Spill/Unspill moves inserted.
    """

    def __init__(self, freeregs, initial_mapping, initial_spill_offset):
        # freeregs: {regclass: [free registers]}
        # initial_mapping: {Var: AllocationSlot} fixed by the caller
        # initial_spill_offset: rFP-relative byte offset for the next spill
        if DEBUG_PRINT:
            print
            print "RegisterAllocation __init__", initial_mapping.items()

        self.insns = []   # output list of instructions

        # registers with dead values
        self.freeregs = {}
        for regcls in freeregs:
            self.freeregs[regcls] = freeregs[regcls][:]   # copy: we mutate it

        self.var2loc = {} # maps Vars to AllocationSlots
        self.lru = []     # least-recently-used list of vars; first is oldest.
                          # contains all vars in registers, and no vars on stack

        self.spill_offset = initial_spill_offset # where to put next spilled
                                                 # value, relative to rFP,
                                                 # measured in bytes
        self.free_stack_slots = [] # a free list for stack slots

        # go through the initial mapping and initialize the data structures
        for var, loc in initial_mapping.iteritems():
            self.set(var, loc)
            if loc.is_register:
                if loc.alloc in self.freeregs[loc.regclass]:
                    self.freeregs[loc.regclass].remove(loc.alloc)
                self.lru.append(var)
            else:
                assert loc.offset >= self.spill_offset

        self.labels_to_tell_spill_offset_to = []
        self.builders_to_tell_spill_offset_to = []

    def set(self, var, loc):
        # record var's location; a var must not already have one
        assert var not in self.var2loc
        self.var2loc[var] = loc

    def forget(self, var, loc):
        # drop var's location, sanity-checked against loc
        assert self.var2loc[var] is loc
        del self.var2loc[var]

    def loc_of(self, var):
        return self.var2loc[var]

    def spill_slot(self):
        """ Returns an unused stack location. """
        if self.free_stack_slots:
            return self.free_stack_slots.pop()
        else:
            self.spill_offset -= 4   # stack grows downwards, 4 bytes per slot
            return stack_slot(self.spill_offset)

    def spill(self, reg, argtospill):
        # move argtospill from reg to a fresh stack slot, emitting the move
        if argtospill in self.lru:
            self.lru.remove(argtospill)
        self.forget(argtospill, reg)
        spillslot = self.spill_slot()
        if reg.regclass != GP_REGISTER:
            # non-GPR values are staged through the scratch GPR on the way out
            self.insns.append(reg.move_to_gpr(0))
            reg = gprs[0]
        self.insns.append(Spill(argtospill, reg, spillslot))
        self.set(argtospill, spillslot)

    def _allocate_reg(self, regclass, newarg):
        """Give newarg a register of regclass, spilling the LRU var if needed."""

        # check if there is a register available
        freeregs = self.freeregs[regclass]

        if freeregs:
            reg = freeregs.pop().make_loc()
            self.set(newarg, reg)
            if DEBUG_PRINT:
                print "allocate_reg: Putting %r into fresh register %r" % (newarg, reg)
            return reg

        # if not, find something to spill: the oldest var of the right class
        for i in range(len(self.lru)):
            argtospill = self.lru[i]
            reg = self.loc_of(argtospill)
            assert reg.is_register
            if reg.regclass == regclass:
                del self.lru[i]
                break
        else:
            assert 0   # nothing spillable in this class -- should not happen

        # Move the value we are spilling onto the stack, both in the
        # data structures and in the instructions:

        self.spill(reg, argtospill)

        if DEBUG_PRINT:
            print "allocate_reg: Spilled %r from %r to %r." % (argtospill, reg, self.loc_of(argtospill))

        # update data structures to put newarg into the register
        reg = reg.alloc.make_loc()
        self.set(newarg, reg)
        if DEBUG_PRINT:
            print "allocate_reg: Put %r in stolen reg %r." % (newarg, reg)
        return reg

    def _promote(self, arg):
        # move arg to the most-recently-used end, adding it if absent
        if arg in self.lru:
            self.lru.remove(arg)
        self.lru.append(arg)

    def allocate_for_insns(self, insns):
        from pypy.jit.codegen.ppc.rgenop import Var

        insns2 = []

        # make a pass through the instructions, loading constants into
        # Vars where needed.
        for insn in insns:
            newargs = []
            for arg in insn.reg_args:
                if not isinstance(arg, Var):
                    newarg = Var()
                    arg.load(insns2, newarg)
                    newargs.append(newarg)
                else:
                    newargs.append(arg)
            insn.reg_args[0:len(newargs)] = newargs
            insns2.append(insn)

        # Walk through instructions in forward order
        for insn in insns2:

            if DEBUG_PRINT:
                print "Processing instruction"
                print insn
                print "LRU list was:", self.lru
                print 'located at', [self.loc_of(a) for a in self.lru]

            # put things into the lru
            for arg in insn.reg_args:
                self._promote(arg)
            if insn.result:
                self._promote(insn.result)
            if DEBUG_PRINT:
                print "LRU list is now:", self.lru
                print 'located at', [self.loc_of(a) for a in self.lru if a is not insn.result]

            # We need to allocate a register for each used
            # argument that is not already in one
            for i in range(len(insn.reg_args)):
                arg = insn.reg_args[i]
                argcls = insn.reg_arg_regclasses[i]
                if DEBUG_PRINT:
                    print "Allocating register for", arg, "..."
                argloc = self.loc_of(arg)
                if DEBUG_PRINT:
                    print "currently in", argloc

                if not argloc.is_register:
                    # It has no register now because it has been spilled
                    self.forget(arg, argloc)
                    newargloc = self._allocate_reg(argcls, arg)
                    if DEBUG_PRINT:
                        print "unspilling to", newargloc
                    self.insns.append(Unspill(arg, newargloc, argloc))
                    self.free_stack_slots.append(argloc)
                elif argloc.regclass != argcls:
                    # it's in the wrong kind of register
                    # (this code is excessively confusing)
                    self.forget(arg, argloc)
                    self.freeregs[argloc.regclass].append(argloc.alloc)
                    if argloc.regclass != GP_REGISTER:
                        # stage the value through a GPR (scratch, unless a
                        # GPR is in fact the requested class)
                        if argcls == GP_REGISTER:
                            gpr = self._allocate_reg(GP_REGISTER, arg).number
                        else:
                            gpr = rSCRATCH
                        self.insns.append(
                            argloc.move_to_gpr(gpr))
                    else:
                        gpr = argloc.number
                    if argcls != GP_REGISTER:
                        newargloc = self._allocate_reg(argcls, arg)
                        self.insns.append(
                            newargloc.move_from_gpr(gpr))
                else:
                    if DEBUG_PRINT:
                        print "it was in ", argloc
                    pass

            # Need to allocate a register for the destination
            assert not insn.result or insn.result not in self.var2loc
            if insn.result_regclass != NO_REGISTER:
                if DEBUG_PRINT:
                    print "Allocating register for result %r..." % (insn.result,)
                resultreg = self._allocate_reg(insn.result_regclass, insn.result)
            insn.allocate(self)
            if DEBUG_PRINT:
                print insn
                print
            self.insns.append(insn)
        #print 'allocation done'
        #for i in self.insns:
        #    print i
        #print self.var2loc
        return self.insns
diff --git a/pypy/jit/backend/ppc/rgenop.py b/pypy/jit/backend/ppc/rgenop.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/rgenop.py
@@ -0,0 +1,1427 @@
+import py
+from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
+from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
+from pypy.jit.codegen.model import ReplayBuilder, dummy_var
+from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.lltypesystem import lloperation
+from pypy.rpython.extfunc import register_external
+from pypy.rlib.objectmodel import specialize, we_are_translated
+from pypy.jit.codegen.conftest import option
+from ctypes import POINTER, cast, c_void_p, c_int, CFUNCTYPE
+
+from pypy.jit.codegen.ppc import codebuf
+from pypy.jit.codegen.ppc.instruction import rSP, rFP, rSCRATCH, gprs
+from pypy.jit.codegen.ppc import instruction as insn
+from pypy.jit.codegen.ppc.regalloc import RegisterAllocation
+from pypy.jit.codegen.emit_moves import emit_moves, emit_moves_safe
+
+from pypy.jit.codegen.ppc.ppcgen.rassemblermaker import make_rassembler
+from pypy.jit.codegen.ppc.ppcgen.ppc_assembler import MyPPCAssembler
+
+from pypy.jit.codegen.i386.rgenop import gc_malloc_fnaddr
+from pypy.rpython.annlowlevel import llhelper
+
+class RPPCAssembler(make_rassembler(MyPPCAssembler)):
+    """RPython-compatible PPC assembler; instructions are written as
+    32-bit words into self.mc (a machine-code block attached externally,
+    see Builder.__init__ / Builder._open)."""
+    def emit(self, value):
+        # one encoded instruction word -> current machine-code block
+        self.mc.write(value)
+
+# short alias used for referencing unbound opcode methods below
+_PPC = RPPCAssembler
+
+
+_flush_icache = None
+def flush_icache(base, size):
+ global _flush_icache
+ if _flush_icache == None:
+ cpath = py.magic.autopath().dirpath().join('_flush_icache.c')
+ _flush_icache = cpath._getpymodule()._flush_icache
+ _flush_icache(base, size)
+register_external(flush_icache, [int, int], None, "LL_flush_icache")
+
+
+# number of callee-save GPRs we always save/restore (all of r13..r31)
+NSAVEDREGISTERS = 19
+
+# test-driver options (py.test --trap / --debug-print)
+DEBUG_TRAP = option.trap
+DEBUG_PRINT = option.debug_print
+
+# module-level counter; only used to give each Var a stable repr() name
+_var_index = [0]
+class Var(GenVar):
+    """A virtual value, not yet assigned a register or stack slot."""
+    conditional = False
+    def __init__(self):
+        # sequential id, purely for debugging output
+        self.__magic_index = _var_index[0]
+        _var_index[0] += 1
+    def __repr__(self):
+        return "v%d" % self.__magic_index
+    def fits_in_uimm(self):
+        # a Var's runtime value is unknown, so it never fits an immediate field
+        return False
+    def fits_in_simm(self):
+        return False
+
+class ConditionVar(Var):
+    """ Used for vars that originated as the result of a conditional
+    operation, like a == b.  Such values live in a condition-register
+    field rather than a GPR. """
+    conditional = True
+
+class IntConst(GenConst):
+    """A compile-time integer constant."""
+
+    def __init__(self, value):
+        self.value = value
+
+    def __repr__(self):
+        return 'IntConst(%d)'%self.value
+
+    @specialize.arg(1)
+    def revealconst(self, T):
+        # convert back to the requested low-level type
+        if isinstance(T, lltype.Ptr):
+            return lltype.cast_int_to_ptr(T, self.value)
+        elif T is llmemory.Address:
+            return llmemory.cast_int_to_adr(self.value)
+        else:
+            return lltype.cast_primitive(T, self.value)
+
+    def load(self, insns, var):
+        # queue an instruction loading this constant into `var`
+        insns.append(
+            insn.Insn_GPR__IMM(_PPC.load_word,
+                               var, [self]))
+
+    def load_now(self, asm, loc):
+        # emit code immediately to place this constant in `loc`
+        # (register, or stack slot relative to the frame pointer)
+        if loc.is_register:
+            assert isinstance(loc, insn.GPR)
+            asm.load_word(loc.number, self.value)
+        else:
+            #print 'load_now to', loc.offset
+            asm.load_word(rSCRATCH, self.value)
+            asm.stw(rSCRATCH, rFP, loc.offset)
+
+    def fits_in_simm(self):
+        # fits a signed 16-bit immediate field
+        return abs(self.value) < 2**15
+
+    def fits_in_uimm(self):
+        # fits an unsigned 16-bit immediate field
+        return 0 <= self.value < 2**16
+
+class AddrConst(GenConst):
+    """A compile-time address constant (an llmemory.Address)."""
+
+    def __init__(self, addr):
+        self.addr = addr
+
+    @specialize.arg(1)
+    def revealconst(self, T):
+        if T is llmemory.Address:
+            return self.addr
+        elif isinstance(T, lltype.Ptr):
+            return llmemory.cast_adr_to_ptr(self.addr, T)
+        elif T is lltype.Signed:
+            return llmemory.cast_adr_to_int(self.addr)
+        else:
+            assert 0, "XXX not implemented"
+
+    # addresses are never encoded as immediates
+    def fits_in_simm(self):
+        return False
+
+    def fits_in_uimm(self):
+        return False
+
+    def load(self, insns, var):
+        # reuse the IntConst path by converting the address to an int
+        i = IntConst(llmemory.cast_adr_to_int(self.addr))
+        insns.append(
+            insn.Insn_GPR__IMM(RPPCAssembler.load_word,
+                               var, [i]))
+
+    def load_now(self, asm, loc):
+        # emit code immediately to place this address in `loc`
+        value = llmemory.cast_adr_to_int(self.addr)
+        if loc.is_register:
+            assert isinstance(loc, insn.GPR)
+            asm.load_word(loc.number, value)
+        else:
+            #print 'load_now to', loc.offset
+            asm.load_word(rSCRATCH, value)
+            asm.stw(rSCRATCH, rFP, loc.offset)
+
+
+class JumpPatchupGenerator(object):
+    """Adapter used by emit_moves() (see prepare_for_jump): turns abstract
+    location-to-location moves into concrete Move/Spill/Unspill insns."""
+
+    def __init__(self, insns, allocator):
+        self.insns = insns
+        self.allocator = allocator
+
+    def emit_move(self, tarloc, srcloc):
+        # NOTE(review): srcvar is only looked up when DEBUG_PRINT is on,
+        # yet it is passed to Spill/Unspill below in all cases -- presumably
+        # those insns use the var only for their debug repr; confirm.
+        srcvar = None
+        if DEBUG_PRINT:
+            for v, loc in self.allocator.var2loc.iteritems():
+                if loc is srcloc:
+                    srcvar = v
+                    break
+        emit = self.insns.append
+        if tarloc == srcloc:
+            return
+        if tarloc.is_register and srcloc.is_register:
+            assert isinstance(tarloc, insn.GPR)
+            if isinstance(srcloc, insn.GPR):
+                emit(insn.Move(tarloc, srcloc))
+            else:
+                # source lives in a condition-register field
+                assert isinstance(srcloc, insn.CRF)
+                emit(srcloc.move_to_gpr(tarloc.number))
+        elif tarloc.is_register and not srcloc.is_register:
+            emit(insn.Unspill(srcvar, tarloc, srcloc))
+        elif not tarloc.is_register and srcloc.is_register:
+            emit(insn.Spill(srcvar, srcloc, tarloc))
+        elif not tarloc.is_register and not srcloc.is_register:
+            # stack -> stack: go through the scratch register r0
+            emit(insn.Unspill(srcvar, insn.gprs[0], srcloc))
+            emit(insn.Spill(srcvar, insn.gprs[0], tarloc))
+
+    def create_fresh_location(self):
+        # emit_moves needs a temporary home for cycle-breaking
+        return self.allocator.spill_slot()
+
+class StackInfo(Var):
+    # not really a Var at all, but needs to be mixable with Consts....
+    # Describes a stack slot (see get_frame_info / alloc_frame_place);
+    # offset will be assigned later, during register allocation.
+    offset = 0
+    pass
+
+def prepare_for_jump(insns, sourcevars, src2loc, target, allocator):
+    """Append to `insns` the moves required so that each source var/const
+    ends up in the location the `target` Label expects its arguments in."""
+
+    tar2src = {}     # tar var -> src var
+    tar2loc = {}
+
+    # construct mapping of targets to sources; note that "target vars"
+    # and "target locs" are the same thing right now
+    targetlocs = target.arg_locations
+    tarvars = []
+
+##     if DEBUG_PRINT:
+##         print targetlocs
+##         print allocator.var2loc
+
+    for i in range(len(targetlocs)):
+        tloc = targetlocs[i]
+        src = sourcevars[i]
+        if isinstance(src, Var):
+            tar2loc[tloc] = tloc
+            tar2src[tloc] = src
+            tarvars.append(tloc)
+            if not tloc.is_register:
+                # reserve the target's stack slot so the allocator does
+                # not hand it out as a temporary during the moves
+                if tloc in allocator.free_stack_slots:
+                    allocator.free_stack_slots.remove(tloc)
+
+    gen = JumpPatchupGenerator(insns, allocator)
+    emit_moves(gen, tarvars, tar2src, tar2loc, src2loc)
+
+    # constants are simply loaded into their target locations afterwards
+    for i in range(len(targetlocs)):
+        tloc = targetlocs[i]
+        src = sourcevars[i]
+        if not isinstance(src, Var):
+            insns.append(insn.Load(tloc, src))
+
+class Label(GenLabel):
+    """A jump target; arg_locations/startaddr are filled in when the
+    label's insn is emitted."""
+
+    def __init__(self, args_gv):
+        self.args_gv = args_gv
+        #self.startaddr = startaddr
+        #self.arg_locations = arg_locations
+        # 1 is a sentinel meaning "not yet emitted" (real offsets are <= 0)
+        self.min_stack_offset = 1
+
+# our approach to stack layout:
+
+# on function entry, the stack looks like this:
+
+# ....
+# | parameter area |
+# | linkage area | <- rSP points to the last word of the linkage area
+# +----------------+
+
+# we set things up like so:
+
+# | parameter area |
+# | linkage area | <- rFP points to where the rSP was
+# | saved registers |
+# | local variables |
+# +-----------------+ <- rSP points here, and moves around between basic blocks
+
+# points of note (as of 2006-11-09 anyway :-):
+# 1. we currently never spill to the parameter area (should fix?)
+# 2. we always save all callee-save registers
+# 3. as each basic block can move the SP around as it sees fit, we index
+# into the local variables area from the FP (frame pointer; it is not
+# usual on the PPC to have a frame pointer, but there's no reason we
+# can't have one :-)
+
+
+class Builder(GenBuilder):
+    """Generates the code of one extended basic block; collects abstract
+    insns, register-allocates them, then emits PPC machine code."""
+
+    def __init__(self, rgenop):
+        self.rgenop = rgenop
+        self.asm = RPPCAssembler()
+        self.asm.mc = None             # machine-code block; set by _open()
+        self.insns = []                # pending abstract instructions
+        self.initial_spill_offset = 0  # rFP-relative offset on entry
+        self.initial_var2loc = None    # var -> location mapping on entry
+        self.max_param_space = -1      # largest callee parameter area needed
+        self.final_jump_addr = 0       # patchable jump left by pause_writing
+
+        self.start = 0
+        self.closed = True             # no machine-code block open yet
+        self.patch_start_here = 0      # addr to patch once our code exists
+
+    # ----------------------------------------------------------------
+    # the public Builder interface:
+
+    def end(self):
+        pass
+
+    @specialize.arg(1)
+    def genop1(self, opname, gv_arg):
+        # dispatch a unary operation to the matching op_* method
+        #print opname, 'on', id(self)
+        genmethod = getattr(self, 'op_' + opname)
+        r = genmethod(gv_arg)
+        #print '->', id(r)
+        return r
+
+    @specialize.arg(1)
+    def genop2(self, opname, gv_arg1, gv_arg2):
+        # dispatch a binary operation to the matching op_* method
+        #print opname, 'on', id(self)
+        genmethod = getattr(self, 'op_' + opname)
+        r = genmethod(gv_arg1, gv_arg2)
+        #print '->', id(r)
+        return r
+
+    @specialize.arg(1)
+    def genraisingop2(self, opname, gv_arg1, gv_arg2):
+        # binary op that can overflow; returns (result, overflow-flag)
+        genmethod = getattr(self, 'raisingop_' + opname)
+        r = genmethod(gv_arg1, gv_arg2)
+        return r
+
+    @specialize.arg(1)
+    def genraisingop1(self, opname, gv_arg):
+        genmethod = getattr(self, 'raisingop_' + opname)
+        r = genmethod(gv_arg)
+        return r
+
+    def genop_call(self, sigtoken, gv_fnptr, args_gv):
+        # spill caller-save state, marshal args into argument registers,
+        # then call; the result var ends up in r3 per the ABI
+        self.insns.append(insn.SpillCalleeSaves())
+        for i in range(len(args_gv)):
+            self.insns.append(insn.LoadArg(i, args_gv[i]))
+        gv_result = Var()
+        # remember how much parameter area this block needs (4 bytes/arg)
+        self.max_param_space = max(self.max_param_space, len(args_gv)*4)
+        self.insns.append(insn.CALL(gv_result, gv_fnptr))
+        return gv_result
+
+    def genop_getfield(self, fieldtoken, gv_ptr):
+        # load a 1/2/4-byte field at a constant offset from gv_ptr
+        fieldoffset, fieldsize = fieldtoken
+        opcode = {1:_PPC.lbz, 2:_PPC.lhz, 4:_PPC.lwz}[fieldsize]
+        return self._arg_simm_op(gv_ptr, IntConst(fieldoffset), opcode)
+
+    def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
+        # store a 1/2/4-byte field at a constant offset into gv_ptr
+        gv_result = Var()
+        fieldoffset, fieldsize = fieldtoken
+        opcode = {1:_PPC.stb, 2:_PPC.sth, 4:_PPC.stw}[fieldsize]
+        self.insns.append(
+            insn.Insn_None__GPR_GPR_IMM(opcode,
+                                        [gv_value, gv_ptr, IntConst(fieldoffset)]))
+        return gv_result
+
+    def genop_getsubstruct(self, fieldtoken, gv_ptr):
+        # address of an inlined substructure = ptr + field offset
+        return self._arg_simm_op(gv_ptr, IntConst(fieldtoken[0]), _PPC.addi)
+
+    def genop_getarrayitem(self, arraytoken, gv_ptr, gv_index):
+        # indexed load; x-form when the offset needs a register,
+        # d-form (immediate) when it fits 16 signed bits
+        _, _, itemsize = arraytoken
+        opcode = {1:_PPC.lbzx,
+                  2:_PPC.lhzx,
+                  4:_PPC.lwzx}[itemsize]
+        opcodei = {1:_PPC.lbz,
+                   2:_PPC.lhz,
+                   4:_PPC.lwz}[itemsize]
+        gv_itemoffset = self.itemoffset(arraytoken, gv_index)
+        return self._arg_arg_op_with_simm(gv_ptr, gv_itemoffset, opcode, opcodei)
+
+    def genop_getarraysubstruct(self, arraytoken, gv_ptr, gv_index):
+        # address of an array element (only word-sized items supported)
+        _, _, itemsize = arraytoken
+        assert itemsize == 4
+        gv_itemoffset = self.itemoffset(arraytoken, gv_index)
+        return self._arg_arg_op_with_simm(gv_ptr, gv_itemoffset, _PPC.add, _PPC.addi,
+                                          commutative=True)
+
+    def genop_getarraysize(self, arraytoken, gv_ptr):
+        # the length word lives at a constant offset from the array start
+        lengthoffset, _, _ = arraytoken
+        return self._arg_simm_op(gv_ptr, IntConst(lengthoffset), _PPC.lwz)
+
+ def genop_setarrayitem(self, arraytoken, gv_ptr, gv_index, gv_value):
+ _, _, itemsize = arraytoken
+ gv_itemoffset = self.itemoffset(arraytoken, gv_index)
+ gv_result = Var()
+ if gv_itemoffset.fits_in_simm():
+ opcode = {1:_PPC.stb,
+ 2:_PPC.sth,
+ 4:_PPC.stw}[itemsize]
+ self.insns.append(
+ insn.Insn_None__GPR_GPR_IMM(opcode,
+ [gv_value, gv_ptr, gv_itemoffset]))
+ else:
+ opcode = {1:_PPC.stbx,
+ 2:_PPC.sthx,
+ 4:_PPC.stwx}[itemsize]
+ self.insns.append(
+ insn.Insn_None__GPR_GPR_GPR(opcode,
+ [gv_value, gv_ptr, gv_itemoffset]))
+
+    def genop_malloc_fixedsize(self, alloctoken):
+        # call the GC's malloc with a constant size
+        return self.genop_call(1, # COUGH
+                               IntConst(gc_malloc_fnaddr()),
+                               [IntConst(alloctoken)])
+
+    def genop_malloc_varsize(self, varsizealloctoken, gv_size):
+        # allocate a variable-sized object and store its length field
+        gv_itemoffset = self.itemoffset(varsizealloctoken, gv_size)
+        gv_result = self.genop_call(1, # COUGH
+                                    IntConst(gc_malloc_fnaddr()),
+                                    [gv_itemoffset])
+        lengthoffset, _, _ = varsizealloctoken
+        self.insns.append(
+            insn.Insn_None__GPR_GPR_IMM(_PPC.stw,
+                                        [gv_size, gv_result, IntConst(lengthoffset)]))
+        return gv_result
+
+    def genop_same_as(self, gv_arg):
+        # constants are materialized into a fresh Var; Vars pass through
+        if not isinstance(gv_arg, Var):
+            gv_result = Var()
+            gv_arg.load(self.insns, gv_result)
+            return gv_result
+        else:
+            return gv_arg
+
+    def genop_cast_int_to_ptr(self, kind, gv_int):
+        # ints and pointers share representation here
+        return gv_int
+
+##     def genop_debug_pdb(self):    # may take an args_gv later
+
+    def genop_get_frame_base(self):
+        # expose rFP as a value
+        gv_result = Var()
+        self.insns.append(
+            insn.LoadFramePointer(gv_result))
+        return gv_result
+
+    def get_frame_info(self, vars_gv):
+        # pin each Var to a stack slot so its location can be found later;
+        # non-Vars yield None placeholders
+        result = []
+        for v in vars_gv:
+            if isinstance(v, Var):
+                place = StackInfo()
+                self.insns.append(insn.CopyIntoStack(place, v))
+                result.append(place)
+            else:
+                result.append(None)
+        return result
+
+    def alloc_frame_place(self, kind, gv_initial_value=None):
+        # reserve a stack slot, defaulting its contents to a NULL address
+        place = StackInfo()
+        if gv_initial_value is None:
+            gv_initial_value = AddrConst(llmemory.NULL)
+        self.insns.append(insn.CopyIntoStack(place, gv_initial_value))
+        return place
+
+    def genop_absorb_place(self, place):
+        # read a previously allocated frame place back into a Var
+        var = Var()
+        self.insns.append(insn.CopyOffStack(var, place))
+        return var
+
+    def enter_next_block(self, args_gv):
+        """Close the current block at a Label; ensures every arg is a
+        distinct Var (duplicates are copied, constants materialized)."""
+        if DEBUG_PRINT:
+            print 'enter_next_block1', args_gv
+        seen = {}
+        for i in range(len(args_gv)):
+            gv = args_gv[i]
+            if isinstance(gv, Var):
+                if gv in seen:
+                    # same Var appears twice: copy it so each arg slot
+                    # gets its own location
+                    new_gv = self._arg_op(gv, _PPC.mr)
+                    args_gv[i] = new_gv
+                seen[gv] = True
+            else:
+                # constant: load it into a fresh Var
+                new_gv = Var()
+                gv.load(self.insns, new_gv)
+                args_gv[i] = new_gv
+
+        if DEBUG_PRINT:
+            print 'enter_next_block2', args_gv
+
+        r = Label(args_gv)
+        self.insns.append(insn.Label(r))
+        return r
+
+    def jump_if_false(self, gv_condition, args_gv):
+        return self._jump(gv_condition, False, args_gv)
+
+    def jump_if_true(self, gv_condition, args_gv):
+        return self._jump(gv_condition, True, args_gv)
+
+    def finish_and_return(self, sigtoken, gv_returnvar):
+        # move the return value into place, then emit the function epilogue
+        self.insns.append(insn.Return(gv_returnvar))
+        self.allocate_and_emit([])
+
+        # standard epilogue:
+
+        # restore old SP
+        self.asm.lwz(rSP, rSP, 0)
+        # restore all callee-save GPRs
+        self.asm.lmw(gprs[32-NSAVEDREGISTERS].number, rSP, -4*(NSAVEDREGISTERS+1))
+        # restore Condition Register
+        self.asm.lwz(rSCRATCH, rSP, 4)
+        self.asm.mtcr(rSCRATCH)
+        # restore Link Register and jump to it
+        self.asm.lwz(rSCRATCH, rSP, 8)
+        self.asm.mtlr(rSCRATCH)
+        self.asm.blr()
+
+        self._close()
+
+    def finish_and_goto(self, outputargs_gv, target):
+        """End this block with an unconditional jump to `target`,
+        shuffling outputargs_gv into the locations the target expects."""
+        if target.min_stack_offset == 1:
+            # target not emitted yet: flush our state so its stack layout
+            # can be fixed up later
+            self.pause_writing(outputargs_gv)
+            self.start_writing()
+        allocator = self.allocate(outputargs_gv)
+        if DEBUG_PRINT:
+            before_moves = len(self.insns)
+            print outputargs_gv
+            print target.args_gv
+        allocator.spill_offset = min(allocator.spill_offset, target.min_stack_offset)
+        prepare_for_jump(
+            self.insns, outputargs_gv, allocator.var2loc, target, allocator)
+        if DEBUG_PRINT:
+            print 'moves:'
+            for i in self.insns[before_moves:]:
+                print ' ', i
+        self.emit(allocator)
+        # grow/shrink the stack frame to match the target block's layout
+        here_size = self._stack_size(allocator.spill_offset)
+        there_size = self._stack_size(target.min_stack_offset)
+        if here_size != there_size:
+            self.emit_stack_adjustment(there_size)
+            if self.rgenop.DEBUG_SCRIBBLE:
+                # poison the slack area to catch stale reads
+                if here_size > there_size:
+                    offsets = range(there_size, here_size, 4)
+                else:
+                    offsets = range(here_size, there_size, 4)
+                for offset in offsets:
+                    self.asm.load_word(rSCRATCH, 0x23456789)
+                    self.asm.stw(rSCRATCH, rSP, -offset)
+        # jump via the count register
+        self.asm.load_word(rSCRATCH, target.startaddr)
+        self.asm.mtctr(rSCRATCH)
+        self.asm.bctr()
+        self._close()
+
+    def flexswitch(self, gv_exitswitch, args_gv):
+        # make sure the exitswitch ends the block in a register:
+        crresult = Var()
+        self.insns.append(insn.FakeUse(crresult, gv_exitswitch))
+        allocator = self.allocate_and_emit(args_gv)
+        # reserve room for up to 7 switch cases of 5 insns plus a jump
+        switch_mc = self.asm.mc.reserve(7 * 5 + 4)
+        self._close()
+        result = FlexSwitch(self.rgenop, switch_mc,
+                            allocator.loc_of(gv_exitswitch),
+                            allocator.loc_of(crresult),
+                            allocator.var2loc,
+                            allocator.spill_offset)
+        return result, result.add_default()
+
+    def start_writing(self):
+        """(Re)open this builder for code generation; if pause_writing()
+        left a patchable jump behind, patch it to reach the new code."""
+        if not self.closed:
+            return self
+        assert self.asm.mc is None
+        if self.final_jump_addr != 0:
+            mc = self.rgenop.open_mc()
+            target = mc.tell()
+            if target == self.final_jump_addr + 16:
+                # new code directly follows the 4-insn jump sequence:
+                # back up over the final bctr and just fall through
+                mc.setpos(mc.getpos()-4)
+            else:
+                # patch the load_word at final_jump_addr to point here
+                self.asm.mc = self.rgenop.ExistingCodeBlock(
+                    self.final_jump_addr, self.final_jump_addr+8)
+                self.asm.load_word(rSCRATCH, target)
+                flush_icache(self.final_jump_addr, 8)
+            self._code_start = mc.tell()
+            self.asm.mc = mc
+            self.final_jump_addr = 0
+            self.closed = False
+            return self
+        else:
+            self._open()
+            self.maybe_patch_start_here()
+            return self
+
+    def maybe_patch_start_here(self):
+        # a predecessor recorded an address whose load_word must be
+        # redirected to our real entry point once we have one
+        if self.patch_start_here:
+            mc = self.asm.mc
+            self.asm.mc = self.rgenop.ExistingCodeBlock(
+                self.patch_start_here, self.patch_start_here+8)
+            self.asm.load_word(rSCRATCH, mc.tell())
+            flush_icache(self.patch_start_here, 8)
+            self.asm.mc = mc
+            self.patch_start_here = 0
+
+    def pause_writing(self, args_gv):
+        """Flush pending insns and end with a patchable indirect jump
+        (2 nops reserved for a load_word + mtctr/bctr)."""
+        allocator = self.allocate_and_emit(args_gv)
+        self.initial_var2loc = allocator.var2loc
+        self.initial_spill_offset = allocator.spill_offset
+        self.insns = []
+        self.max_param_space = -1
+        self.final_jump_addr = self.asm.mc.tell()
+        self.closed = True
+        # the two nops will be patched into a load_word(rSCRATCH, target)
+        self.asm.nop()
+        self.asm.nop()
+        self.asm.mtctr(rSCRATCH)
+        self.asm.bctr()
+        self._close()
+        return self
+
+    # ----------------------------------------------------------------
+    # ppc-specific interface:
+
+    def itemoffset(self, arraytoken, gv_index):
+        """Compute index*itemsize + startoffset as a Var."""
+        # if gv_index is constant, this can return a constant...
+        lengthoffset, startoffset, itemsize = arraytoken
+
+        gv_offset = Var()
+        self.insns.append(
+            insn.Insn_GPR__GPR_IMM(RPPCAssembler.mulli,
+                                   gv_offset, [gv_index, IntConst(itemsize)]))
+        gv_itemoffset = Var()
+        self.insns.append(
+            insn.Insn_GPR__GPR_IMM(RPPCAssembler.addi,
+                                   gv_itemoffset, [gv_offset, IntConst(startoffset)]))
+        return gv_itemoffset
+
+    def _write_prologue(self, sigtoken):
+        """Emit the function prologue and return the input-argument Vars.
+        The first 8 args arrive in r3..r10 (per the ABI); the rest are in
+        the caller's parameter area."""
+        numargs = sigtoken     # for now
+        if DEBUG_TRAP:
+            self.asm.trap()
+        inputargs = [Var() for i in range(numargs)]
+        assert self.initial_var2loc is None
+        self.initial_var2loc = {}
+        for arg in inputargs[:8]:
+            self.initial_var2loc[arg] = gprs[3+len(self.initial_var2loc)]
+        if len(inputargs) > 8:
+            for i in range(8, len(inputargs)):
+                arg = inputargs[i]
+                self.initial_var2loc[arg] = insn.stack_slot(24 + 4 * len(self.initial_var2loc))
+        self.initial_spill_offset = self._var_offset(0)
+
+        # Standard prologue:
+
+        # Minimum stack space = 24+params+lv+4*GPRSAVE+8*FPRSAVE
+        #       params = stack space for parameters for functions we call
+        #       lv = stack space for local variables
+        #       GPRSAVE = the number of callee-save GPRs we save, currently
+        #                 NSAVEDREGISTERS which is 19, i.e. all of them
+        #       FPRSAVE = the number of callee-save FPRs we save, currently 0
+        # Initially, we set params == lv == 0 and allow each basic block to
+        # ensure it has enough space to continue.
+
+        minspace = self._stack_size(self._var_offset(0))
+        # save Link Register
+        self.asm.mflr(rSCRATCH)
+        self.asm.stw(rSCRATCH, rSP, 8)
+        # save Condition Register
+        self.asm.mfcr(rSCRATCH)
+        self.asm.stw(rSCRATCH, rSP, 4)
+        # save the callee-save GPRs
+        self.asm.stmw(gprs[32-NSAVEDREGISTERS].number, rSP, -4*(NSAVEDREGISTERS + 1))
+        # set up frame pointer
+        self.asm.mr(rFP, rSP)
+        # save stack pointer into linkage area and set stack pointer for us.
+        self.asm.stwu(rSP, rSP, -minspace)
+
+        if self.rgenop.DEBUG_SCRIBBLE:
+            # write junk into all non-argument, non rFP or rSP registers
+            self.asm.load_word(rSCRATCH, 0x12345678)
+            for i in range(min(11, 3+len(self.initial_var2loc)), 32):
+                self.asm.load_word(i, 0x12345678)
+            # scribble the part of the stack between
+            # self._var_offset(0) and minspace
+            for offset in range(self._var_offset(0), -minspace, -4):
+                self.asm.stw(rSCRATCH, rFP, offset)
+            # and then a bit more
+            for offset in range(-minspace-4, -minspace-200, -4):
+                self.asm.stw(rSCRATCH, rFP, offset)
+
+        return inputargs
+
+    def _var_offset(self, v):
+        """v represents an offset into the local variable area in bytes;
+        this returns the offset relative to rFP"""
+        return -(4*NSAVEDREGISTERS+4+v)
+
+    def _stack_size(self, lv):
+        """ Returns the required stack size to store all data, assuming
+        that there are 'param' bytes of parameters for callee functions and
+        'lv' is the largest (wrt to abs() :) rFP-relative byte offset of
+        any variable on the stack.  Plus 4 because the rFP actually points
+        into our caller's linkage area."""
+        assert lv <= 0
+        if self.max_param_space >= 0:
+            # 32 is the ABI minimum parameter area; 24 the linkage area
+            param = max(self.max_param_space, 32) + 24
+        else:
+            param = 0
+        # round up to a 16-byte boundary as the ABI requires
+        return ((4 + param - lv + 15) & ~15)
+
+    def _open(self):
+        # attach a fresh machine-code block to the assembler
+        self.asm.mc = self.rgenop.open_mc()
+        self._code_start = self.asm.mc.tell()
+        self.closed = False
+
+    def _close(self):
+        # flush the icache over everything written since _open(), then
+        # hand the block back to the rgenop
+        _code_stop = self.asm.mc.tell()
+        code_size = _code_stop - self._code_start
+        flush_icache(self._code_start, code_size)
+        self.rgenop.close_mc(self.asm.mc)
+        self.asm.mc = None
+
+    def allocate_and_emit(self, live_vars_gv):
+        allocator = self.allocate(live_vars_gv)
+        return self.emit(allocator)
+
+    def allocate(self, live_vars_gv):
+        # run register allocation over the pending abstract insns
+        assert self.initial_var2loc is not None
+        allocator = RegisterAllocation(
+            self.rgenop.freeregs,
+            self.initial_var2loc,
+            self.initial_spill_offset)
+        self.insns = allocator.allocate_for_insns(self.insns)
+        return allocator
+
+    def emit(self, allocator):
+        """Emit machine code for the allocated insns, first growing the
+        stack frame if allocation needed more spill slots than we had."""
+        in_size = self._stack_size(self.initial_spill_offset)
+        our_size = self._stack_size(allocator.spill_offset)
+        if in_size != our_size:
+            assert our_size > in_size
+            self.emit_stack_adjustment(our_size)
+            if self.rgenop.DEBUG_SCRIBBLE:
+                # poison the newly exposed stack area
+                for offset in range(in_size, our_size, 4):
+                    self.asm.load_word(rSCRATCH, 0x23456789)
+                    self.asm.stw(rSCRATCH, rSP, -offset)
+        if self.rgenop.DEBUG_SCRIBBLE:
+            # poison every register and spill slot not holding a live value
+            locs = {}
+            for _, loc in self.initial_var2loc.iteritems():
+                locs[loc] = True
+            regs = insn.gprs[3:]
+            for reg in regs:
+                if reg not in locs:
+                    self.asm.load_word(reg.number, 0x3456789)
+            self.asm.load_word(0, 0x3456789)
+            for offset in range(self._var_offset(0),
+                                self.initial_spill_offset,
+                                -4):
+                if insn.stack_slot(offset) not in locs:
+                    self.asm.stw(0, rFP, offset)
+        for insn_ in self.insns:
+            insn_.emit(self.asm)
+        # propagate the final spill offset to labels/builders that jump here
+        for label in allocator.labels_to_tell_spill_offset_to:
+            label.min_stack_offset = allocator.spill_offset
+        for builder in allocator.builders_to_tell_spill_offset_to:
+            builder.initial_spill_offset = allocator.spill_offset
+        return allocator
+
+    def emit_stack_adjustment(self, newsize):
+        # the ABI requires that at all times that r1 is valid, in the
+        # sense that it must point to the bottom of the stack and that
+        # executing SP <- *(SP) repeatedly walks the stack.
+        # this code satisfies this, although there is a 1-instruction
+        # window where such walking would find a strange intermediate
+        # "frame"
+        self.asm.addi(rSCRATCH, rFP, -newsize)
+        self.asm.sub(rSCRATCH, rSCRATCH, rSP)
+
+        # this is a pure debugging check that we avoid the situation
+        # where *(r1) == r1 which would violates the ABI rules listed
+        # above. after a while it can be removed or maybe made
+        # conditional on some --option passed to py.test
+        self.asm.tweqi(rSCRATCH, 0)
+
+        self.asm.stwux(rSP, rSP, rSCRATCH)
+        self.asm.stw(rFP, rSP, 0)
+
+    # helpers that queue one abstract insn and return its result Var.
+    # naming: _arg = GPR operand, _simm/_uimm = signed/unsigned 16-bit
+    # immediate operand.
+
+    def _arg_op(self, gv_arg, opcode):
+        gv_result = Var()
+        self.insns.append(
+            insn.Insn_GPR__GPR(opcode, gv_result, gv_arg))
+        return gv_result
+
+    def _arg_arg_op(self, gv_x, gv_y, opcode):
+        gv_result = Var()
+        self.insns.append(
+            insn.Insn_GPR__GPR_GPR(opcode, gv_result, [gv_x, gv_y]))
+        return gv_result
+
+    def _arg_simm_op(self, gv_x, gv_imm, opcode):
+        assert gv_imm.fits_in_simm()
+        gv_result = Var()
+        self.insns.append(
+            insn.Insn_GPR__GPR_IMM(opcode, gv_result, [gv_x, gv_imm]))
+        return gv_result
+
+    def _arg_uimm_op(self, gv_x, gv_imm, opcode):
+        assert gv_imm.fits_in_uimm()
+        gv_result = Var()
+        self.insns.append(
+            insn.Insn_GPR__GPR_IMM(opcode, gv_result, [gv_x, gv_imm]))
+        return gv_result
+
+    def _arg_arg_op_with_simm(self, gv_x, gv_y, opcode, opcodei,
+                              commutative=False):
+        # prefer the immediate form when one operand is a small constant
+        if gv_y.fits_in_simm():
+            return self._arg_simm_op(gv_x, gv_y, opcodei)
+        elif gv_x.fits_in_simm() and commutative:
+            return self._arg_simm_op(gv_y, gv_x, opcodei)
+        else:
+            return self._arg_arg_op(gv_x, gv_y, opcode)
+
+    def _arg_arg_op_with_uimm(self, gv_x, gv_y, opcode, opcodei,
+                              commutative=False):
+        if gv_y.fits_in_uimm():
+            return self._arg_uimm_op(gv_x, gv_y, opcodei)
+        elif gv_x.fits_in_uimm() and commutative:
+            return self._arg_uimm_op(gv_y, gv_x, opcodei)
+        else:
+            return self._arg_arg_op(gv_x, gv_y, opcode)
+
+    def _identity(self, gv_arg):
+        # used for casts that are no-ops at machine level
+        return gv_arg
+
+    # map comparison name -> (bit in the CR field, whether to negate it)
+    cmp2info = {
+        #      bit-in-crf  negated
+        'gt': (    1,         0   ),
+        'lt': (    0,         0   ),
+        'le': (    1,         1   ),
+        'ge': (    0,         1   ),
+        'eq': (    2,         0   ),
+        'ne': (    2,         1   ),
+        }
+
+    # same, with operands swapped (used when only gv_x fits an immediate)
+    cmp2info_flipped = {
+        #      bit-in-crf  negated
+        'gt': (    0,         0   ),
+        'lt': (    1,         0   ),
+        'le': (    0,         1   ),
+        'ge': (    1,         1   ),
+        'eq': (    2,         0   ),
+        'ne': (    2,         1   ),
+        }
+
+    def _compare(self, op, gv_x, gv_y):
+        """Signed compare; uses the immediate form (cmpwi) when one
+        operand is a small constant, flipping the operands if needed."""
+        #print "op", op
+        gv_result = ConditionVar()
+        if gv_y.fits_in_simm():
+            self.insns.append(
+                insn.CMPWI(self.cmp2info[op], gv_result, [gv_x, gv_y]))
+        elif gv_x.fits_in_simm():
+            self.insns.append(
+                insn.CMPWI(self.cmp2info_flipped[op], gv_result, [gv_y, gv_x]))
+        else:
+            self.insns.append(
+                insn.CMPW(self.cmp2info[op], gv_result, [gv_x, gv_y]))
+        return gv_result
+
+    def _compare_u(self, op, gv_x, gv_y):
+        # unsigned compare (cmplwi/cmplw), otherwise as _compare
+        gv_result = ConditionVar()
+        if gv_y.fits_in_uimm():
+            self.insns.append(
+                insn.CMPWLI(self.cmp2info[op], gv_result, [gv_x, gv_y]))
+        elif gv_x.fits_in_uimm():
+            self.insns.append(
+                insn.CMPWLI(self.cmp2info_flipped[op], gv_result, [gv_y, gv_x]))
+        else:
+            self.insns.append(
+                insn.CMPWL(self.cmp2info[op], gv_result, [gv_x, gv_y]))
+        return gv_result
+
+    def _jump(self, gv_condition, if_true, args_gv):
+        # conditional exit: the actual branch is emitted by the Jump insn;
+        # returns the builder for the not-taken path
+        targetbuilder = self.rgenop.newbuilder()
+
+        self.insns.append(
+            insn.Jump(gv_condition, targetbuilder, if_true, args_gv))
+
+        return targetbuilder
+
+    def _ov(self):
+        """Extract the overflow bit (OV) of the XER register into a Var;
+        used after the o-form arithmetic insns (addo, subo, ...)."""
+        # mfxer rFOO
+        # extrwi rBAR, rFOO, 1, 1
+        gv_xer = Var()
+        self.insns.append(
+            insn.Insn_GPR(_PPC.mfxer, gv_xer))
+        gv_ov = Var()
+        self.insns.append(insn.Extrwi(gv_ov, gv_xer, 1, 1))
+        return gv_ov
+
+    def op_bool_not(self, gv_arg):
+        # booleans are 0/1, so xor with 1 flips them
+        return self._arg_uimm_op(gv_arg, self.rgenop.genconst(1), RPPCAssembler.xori)
+
+    def op_int_is_true(self, gv_arg):
+        return self._compare('ne', gv_arg, self.rgenop.genconst(0))
+
+    def op_int_neg(self, gv_arg):
+        return self._arg_op(gv_arg, _PPC.neg)
+
+    def raisingop_int_neg_ovf(self, gv_arg):
+        # nego sets XER.OV on -(-2**31)
+        gv_result = self._arg_op(gv_arg, _PPC.nego)
+        gv_ov = self._ov()
+        return (gv_result, gv_ov)
+
+    def op_int_abs(self, gv_arg):
+        # branchless abs: sign = x >> 31 (all 0s or all 1s);
+        # (x ^ sign) - sign == abs(x)
+        gv_sign = self._arg_uimm_op(gv_arg, self.rgenop.genconst(31), _PPC.srawi)
+        gv_maybe_inverted = self._arg_arg_op(gv_arg, gv_sign, _PPC.xor)
+        return self._arg_arg_op(gv_sign, gv_maybe_inverted, _PPC.subf)
+
+    def raisingop_int_abs_ovf(self, gv_arg):
+        # same as op_int_abs but the final subtract is subfo, so
+        # abs(-2**31) raises the overflow bit
+        gv_sign = self._arg_uimm_op(gv_arg, self.rgenop.genconst(31), _PPC.srawi)
+        gv_maybe_inverted = self._arg_arg_op(gv_arg, gv_sign, _PPC.xor)
+        gv_result = self._arg_arg_op(gv_sign, gv_maybe_inverted, _PPC.subfo)
+        return (gv_result, self._ov())
+
+    def op_int_invert(self, gv_arg):
+        return self._arg_op(gv_arg, _PPC.not_)
+
+    def op_int_add(self, gv_x, gv_y):
+        return self._arg_arg_op_with_simm(gv_x, gv_y, _PPC.add, _PPC.addi,
+                                          commutative=True)
+
+    def raisingop_int_add_ovf(self, gv_x, gv_y):
+        # o-form add sets XER.OV; pair result with the extracted flag
+        gv_result = self._arg_arg_op(gv_x, gv_y, _PPC.addo)
+        gv_ov = self._ov()
+        return (gv_result, gv_ov)
+
+    def op_int_sub(self, gv_x, gv_y):
+        return self._arg_arg_op_with_simm(gv_x, gv_y, _PPC.sub, _PPC.subi)
+
+    def raisingop_int_sub_ovf(self, gv_x, gv_y):
+        gv_result = self._arg_arg_op(gv_x, gv_y, _PPC.subo)
+        gv_ov = self._ov()
+        return (gv_result, gv_ov)
+
+    def op_int_mul(self, gv_x, gv_y):
+        return self._arg_arg_op_with_simm(gv_x, gv_y, _PPC.mullw, _PPC.mulli,
+                                          commutative=True)
+
+    def raisingop_int_mul_ovf(self, gv_x, gv_y):
+        gv_result = self._arg_arg_op(gv_x, gv_y, _PPC.mullwo)
+        gv_ov = self._ov()
+        return (gv_result, gv_ov)
+
+    def op_int_floordiv(self, gv_x, gv_y):
+        return self._arg_arg_op(gv_x, gv_y, _PPC.divw)
+
+##     def op_int_floordiv_zer(self, gv_x, gv_y):
+
+    def op_int_mod(self, gv_x, gv_y):
+        # no hardware remainder: x mod y = x - (x/y)*y
+        gv_dividend = self.op_int_floordiv(gv_x, gv_y)
+        gv_z = self.op_int_mul(gv_dividend, gv_y)
+        return self.op_int_sub(gv_x, gv_z)
+
+##     def op_int_mod_zer(self, gv_x, gv_y):
+
+    def op_int_lt(self, gv_x, gv_y):
+        return self._compare('lt', gv_x, gv_y)
+
+    def op_int_le(self, gv_x, gv_y):
+        return self._compare('le', gv_x, gv_y)
+
+    def op_int_eq(self, gv_x, gv_y):
+        return self._compare('eq', gv_x, gv_y)
+
+    def op_int_ne(self, gv_x, gv_y):
+        return self._compare('ne', gv_x, gv_y)
+
+    def op_int_gt(self, gv_x, gv_y):
+        return self._compare('gt', gv_x, gv_y)
+
+    def op_int_ge(self, gv_x, gv_y):
+        return self._compare('ge', gv_x, gv_y)
+
+    # chars are zero-padded ints here, so the int comparisons apply
+    op_char_lt = op_int_lt
+    op_char_le = op_int_le
+    op_char_eq = op_int_eq
+    op_char_ne = op_int_ne
+    op_char_gt = op_int_gt
+    op_char_ge = op_int_ge
+
+    op_unichar_eq = op_int_eq
+    op_unichar_ne = op_int_ne
+
+    def op_int_and(self, gv_x, gv_y):
+        return self._arg_arg_op(gv_x, gv_y, _PPC.and_)
+
+    def op_int_or(self, gv_x, gv_y):
+        return self._arg_arg_op_with_uimm(gv_x, gv_y, _PPC.or_, _PPC.ori,
+                                          commutative=True)
+
+    def op_int_lshift(self, gv_x, gv_y):
+        # NOTE(review): the constant branch checks fits_in_simm() but then
+        # calls _arg_uimm_op, which asserts fits_in_uimm(); relies on the
+        # shift count being non-negative -- confirm against callers.
+        if gv_y.fits_in_simm():
+            if abs(gv_y.value) >= 32:
+                # shifting a 32-bit word by >= 32 always yields 0
+                return self.rgenop.genconst(0)
+            else:
+                return self._arg_uimm_op(gv_x, gv_y, _PPC.slwi)
+        # computing x << y when you don't know y is <= 32
+        # (we can assume y >= 0 though)
+        # here's the plan:
+        #
+        # z = nltu(y, 32)    (as per cwg)
+        # w = x << y
+        # r = w&z
+        gv_a = self._arg_simm_op(gv_y, self.rgenop.genconst(32), _PPC.subfic)
+        gv_b = self._arg_op(gv_y, _PPC.addze)
+        gv_z = self._arg_arg_op(gv_b, gv_y, _PPC.subf)
+        gv_w = self._arg_arg_op(gv_x, gv_y, _PPC.slw)
+        return self._arg_arg_op(gv_z, gv_w, _PPC.and_)
+
+##     def op_int_lshift_val(self, gv_x, gv_y):
+
+    def op_int_rshift(self, gv_x, gv_y):
+        if gv_y.fits_in_simm():
+            if abs(gv_y.value) >= 32:
+                # arithmetic shift by >= 32 == shift by 31 (sign fill)
+                gv_y = self.rgenop.genconst(31)
+            return self._arg_simm_op(gv_x, gv_y, _PPC.srawi)
+        # computing x >> y when you don't know y is <= 32
+        # (we can assume y >= 0 though)
+        # here's the plan:
+        #
+        # ntlu_y_32 = nltu(y, 32) (as per cwg)
+        # o = srawi(x, 31) & ~ntlu_y_32
+        # w = (x >> y) & ntlu_y_32
+        # r = w|o
+        gv_a = self._arg_uimm_op(gv_y, self.rgenop.genconst(32), _PPC.subfic)
+        gv_b = self._arg_op(gv_y, _PPC.addze)
+        gv_ntlu_y_32 = self._arg_arg_op(gv_b, gv_y, _PPC.subf)
+
+        gv_c = self._arg_uimm_op(gv_x, self.rgenop.genconst(31), _PPC.srawi)
+        gv_o = self._arg_arg_op(gv_c, gv_ntlu_y_32, _PPC.andc_)
+
+        gv_e = self._arg_arg_op(gv_x, gv_y, _PPC.sraw)
+        gv_w = self._arg_arg_op(gv_e, gv_ntlu_y_32, _PPC.and_)
+
+        return self._arg_arg_op(gv_o, gv_w, _PPC.or_)
+
+##     def op_int_rshift_val(self, gv_x, gv_y):
+
+    def op_int_xor(self, gv_x, gv_y):
+        return self._arg_arg_op_with_uimm(gv_x, gv_y, _PPC.xor, _PPC.xori,
+                                          commutative=True)
+
+    ## various int_*_ovfs
+
+    # unsigned ops share machine instructions with the signed ones
+    # except for division and the comparisons
+    op_uint_is_true = op_int_is_true
+    op_uint_invert = op_int_invert
+
+    op_uint_add = op_int_add
+    op_uint_sub = op_int_sub
+    op_uint_mul = op_int_mul
+
+    def op_uint_floordiv(self, gv_x, gv_y):
+        return self._arg_arg_op(gv_x, gv_y, _PPC.divwu)
+
+##     def op_uint_floordiv_zer(self, gv_x, gv_y):
+
+    def op_uint_mod(self, gv_x, gv_y):
+        # x mod y = x - (x/y)*y, unsigned variants
+        gv_dividend = self.op_uint_floordiv(gv_x, gv_y)
+        gv_z = self.op_uint_mul(gv_dividend, gv_y)
+        return self.op_uint_sub(gv_x, gv_z)
+
+##     def op_uint_mod_zer(self, gv_x, gv_y):
+
+    def op_uint_lt(self, gv_x, gv_y):
+        return self._compare_u('lt', gv_x, gv_y)
+
+    def op_uint_le(self, gv_x, gv_y):
+        return self._compare_u('le', gv_x, gv_y)
+
+    def op_uint_eq(self, gv_x, gv_y):
+        return self._compare_u('eq', gv_x, gv_y)
+
+    def op_uint_ne(self, gv_x, gv_y):
+        return self._compare_u('ne', gv_x, gv_y)
+
+    def op_uint_gt(self, gv_x, gv_y):
+        return self._compare_u('gt', gv_x, gv_y)
+
+    def op_uint_ge(self, gv_x, gv_y):
+        return self._compare_u('ge', gv_x, gv_y)
+
+    op_uint_and = op_int_and
+    op_uint_or = op_int_or
+
+    op_uint_lshift = op_int_lshift
+
+##     def op_uint_lshift_val(self, gv_x, gv_y):
+
+    def op_uint_rshift(self, gv_x, gv_y):
+        if gv_y.fits_in_simm():
+            if abs(gv_y.value) >= 32:
+                # logical shift by >= 32 always yields 0
+                return self.rgenop.genconst(0)
+            else:
+                return self._arg_simm_op(gv_x, gv_y, _PPC.srwi)
+        # computing x << y when you don't know y is <= 32
+        # (we can assume y >= 0 though, i think)
+        # here's the plan:
+        #
+        # z = ngeu(y, 32)    (as per cwg)
+        # w = x >> y
+        # r = w&z
+        gv_a = self._arg_simm_op(gv_y, self.rgenop.genconst(32), _PPC.subfic)
+        gv_b = self._arg_op(gv_y, _PPC.addze)
+        gv_z = self._arg_arg_op(gv_b, gv_y, _PPC.subf)
+        gv_w = self._arg_arg_op(gv_x, gv_y, _PPC.srw)
+        return self._arg_arg_op(gv_z, gv_w, _PPC.and_)
+##     def op_uint_rshift_val(self, gv_x, gv_y):
+
+    op_uint_xor = op_int_xor
+
+ # ... floats ...
+
+ # ... llongs, ullongs ...
+
+ # here we assume that booleans are always 1 or 0 and chars are
+ # always zero-padded.
+
+ op_cast_bool_to_int = _identity
+ op_cast_bool_to_uint = _identity
+ ## def op_cast_bool_to_float(self, gv_arg):
+ op_cast_char_to_int = _identity
+ op_cast_unichar_to_int = _identity
+ op_cast_int_to_char = _identity
+
+ op_cast_int_to_unichar = _identity
+ op_cast_int_to_uint = _identity
+ ## def op_cast_int_to_float(self, gv_arg):
+ ## def op_cast_int_to_longlong(self, gv_arg):
+ op_cast_uint_to_int = _identity
+ ## def op_cast_uint_to_float(self, gv_arg):
+ ## def op_cast_float_to_int(self, gv_arg):
+ ## def op_cast_float_to_uint(self, gv_arg):
+ ## def op_truncate_longlong_to_int(self, gv_arg):
+
+ # many pointer operations are genop_* special cases above
+
+ op_ptr_eq = op_int_eq
+ op_ptr_ne = op_int_ne
+
+ op_ptr_nonzero = op_int_is_true
+ op_ptr_ne = op_int_ne
+ op_ptr_eq = op_int_eq
+
+ def op_ptr_iszero(self, gv_arg):
+ return self._compare('eq', gv_arg, self.rgenop.genconst(0))
+
    # pointers are plain machine words, so this cast is a no-op
    op_cast_ptr_to_int = _identity

    # ... address operations ...
+
@specialize.arg(0)
def cast_int_to_whatever(T, value):
    """Convert the machine word 'value' back into a value of ll type T."""
    if isinstance(T, lltype.Ptr):
        return lltype.cast_int_to_ptr(T, value)
    if T is llmemory.Address:
        return llmemory.cast_int_to_adr(value)
    return lltype.cast_primitive(T, value)
+
@specialize.arg(0)
def cast_whatever_to_int(T, value):
    """Convert a value of ll type T into a plain machine word."""
    if isinstance(T, lltype.Ptr):
        return lltype.cast_ptr_to_int(value)
    if T is llmemory.Address:
        return llmemory.cast_adr_to_int(value)
    return lltype.cast_primitive(lltype.Signed, value)
+
class RPPCGenOp(AbstractRGenOp):
    """RGenOp implementation targeting the PowerPC backend."""

    # the set of registers we consider available for allocation;
    # we can artificially restrict it for testing purposes
    freeregs = {
        insn.GP_REGISTER: insn.gprs[3:],
        insn.FP_REGISTER: insn.fprs,
        insn.CR_FIELD: insn.crfs,
        insn.CT_REGISTER: [insn.ctr]}
    DEBUG_SCRIBBLE = option.debug_scribble
    MC_SIZE = 65536     # size of one machine-code block

    def __init__(self):
        # machine code blocks where no-one is currently writing
        self.mcs = []
        # keeps GC objects baked into generated code alive
        self.keepalive_gc_refs = []

    # ----------------------------------------------------------------
    # the public RGenOp interface

    def newgraph(self, sigtoken, name):
        # Open a fresh builder, emit the prologue, and return the
        # builder, the entry-point address and the input arguments.
        numargs = sigtoken     # for now
        builder = self.newbuilder()
        builder._open()
        entrypoint = builder.asm.mc.tell()
        inputargs_gv = builder._write_prologue(sigtoken)
        return builder, IntConst(entrypoint), inputargs_gv

    @specialize.genconst(1)
    def genconst(self, llvalue):
        """Wrap the low-level value 'llvalue' as a GenConst."""
        T = lltype.typeOf(llvalue)
        if T is llmemory.Address:
            return AddrConst(llvalue)
        elif isinstance(T, lltype.Primitive):
            return IntConst(lltype.cast_primitive(lltype.Signed, llvalue))
        elif isinstance(T, lltype.Ptr):
            lladdr = llmemory.cast_ptr_to_adr(llvalue)
            if T.TO._gckind == 'gc':
                # keep the GC object alive for as long as this rgenop lives
                self.keepalive_gc_refs.append(lltype.cast_opaque_ptr(llmemory.GCREF, llvalue))
            return AddrConst(lladdr)
        else:
            assert 0, "XXX not implemented"

##    @staticmethod
##    @specialize.genconst(0)
##    def constPrebuiltGlobal(llvalue):

    @staticmethod
    def genzeroconst(kind):
        # every kind shares the same zero: a NULL address constant
        return zero_const

    def replay(self, label):
        return ReplayBuilder(self), [dummy_var] * len(label.args_gv)

    @staticmethod
    def erasedType(T):
        if T is llmemory.Address:
            return llmemory.Address
        if isinstance(T, lltype.Primitive):
            return lltype.Signed
        elif isinstance(T, lltype.Ptr):
            return llmemory.GCREF
        else:
            assert 0, "XXX not implemented"

    @staticmethod
    @specialize.memo()
    def fieldToken(T, name):
        # (offset, size) pair describing field 'name' of struct type T
        FIELD = getattr(T, name)
        if isinstance(FIELD, lltype.ContainerType):
            fieldsize = 0      # not useful for getsubstruct
        else:
            fieldsize = llmemory.sizeof(FIELD)
        return (llmemory.offsetof(T, name), fieldsize)

    @staticmethod
    @specialize.memo()
    def allocToken(T):
        return llmemory.sizeof(T)

    @staticmethod
    @specialize.memo()
    def varsizeAllocToken(T):
        if isinstance(T, lltype.Array):
            return RPPCGenOp.arrayToken(T)
        else:
            # var-sized structs: shift the inlined array's token by
            # the offset of the array field inside the struct
            arrayfield = T._arrayfld
            ARRAYFIELD = getattr(T, arrayfield)
            arraytoken = RPPCGenOp.arrayToken(ARRAYFIELD)
            length_offset, items_offset, item_size = arraytoken
            arrayfield_offset = llmemory.offsetof(T, arrayfield)
            return (arrayfield_offset + length_offset,
                    arrayfield_offset + items_offset,
                    item_size)

    @staticmethod
    @specialize.memo()
    def arrayToken(A):
        # (length offset, items offset, item size)
        return (llmemory.ArrayLengthOffset(A),
                llmemory.ArrayItemsOffset(A),
                llmemory.ItemOffset(A.OF))

    @staticmethod
    @specialize.memo()
    def kindToken(T):
        if T is lltype.Float:
            py.test.skip("not implemented: floats in the i386^WPPC back-end")
        return None     # for now

    @staticmethod
    @specialize.memo()
    def sigToken(FUNCTYPE):
        # the signature token is just the argument count, for now
        return len(FUNCTYPE.ARGS)

    @staticmethod
    @specialize.arg(0)
    def read_frame_var(T, base, info, index):
        """Read from the stack frame of a caller.  The 'base' is the
        frame stack pointer captured by the operation generated by
        genop_get_frame_base().  The 'info' is the object returned by
        get_frame_info(); we are looking for the index-th variable
        in the list passed to get_frame_info()."""
        place = info[index]
        if isinstance(place, StackInfo):
            # the variable lives in the frame at a known offset
            assert place.offset != 0
            value = peek_word_at(base + place.offset)
            return cast_int_to_whatever(T, value)
        else:
            # the "variable" was a constant all along
            assert isinstance(place, GenConst)
            return place.revealconst(T)

    @staticmethod
    @specialize.arg(0)
    def genconst_from_frame_var(kind, base, info, index):
        # like read_frame_var(), but wraps the raw word in an IntConst
        place = info[index]
        if isinstance(place, StackInfo):
            assert place.offset != 0
            value = peek_word_at(base + place.offset)
            return IntConst(value)
        else:
            assert isinstance(place, GenConst)
            return place

    @staticmethod
    @specialize.arg(0)
    def write_frame_place(T, base, place, value):
        # store 'value' (converted to a machine word) into a frame slot
        assert place.offset != 0
        value = cast_whatever_to_int(T, value)
        poke_word_into(base + place.offset, value)

    @staticmethod
    @specialize.arg(0)
    def read_frame_place(T, base, place):
        value = peek_word_at(base + place.offset)
        return cast_int_to_whatever(T, value)

    def check_no_open_mc(self):
        pass

    # ----------------------------------------------------------------
    # ppc-specific interface:

    MachineCodeBlock = codebuf.OwningMachineCodeBlock
    ExistingCodeBlock = codebuf.ExistingCodeBlock

    def open_mc(self):
        # reuse a retired machine-code block when one is available
        if self.mcs:
            return self.mcs.pop()
        else:
            return self.MachineCodeBlock(self.MC_SIZE)   # XXX supposed infinite for now

    def close_mc(self, mc):
##        from pypy.jit.codegen.ppc.ppcgen.asmfunc import get_ppcgen
##        print '!!!!', cast(mc._data, c_void_p).value
##        print '!!!!', mc._data.contents[0]
##        get_ppcgen().flush2(cast(mc._data, c_void_p).value,
##                            mc._size*4)
        self.mcs.append(mc)

    def newbuilder(self):
        return Builder(self)
+
+# a switch can take 7 instructions:
+
+# load_word rSCRATCH, gv_case.value (really two instructions)
+# cmpw crf, rSWITCH, rSCRATCH
+# load_word rSCRATCH, targetaddr (again two instructions)
+# mtctr rSCRATCH
+# beqctr crf
+
+# yay RISC :/
+
class FlexSwitch(CodeGenSwitch):
    """A switch whose cases can be added incrementally after emission.

    Each case is a compare against the switch register followed by a
    conditional branch through the count register (see the instruction
    sequence sketched in the comments above this class).
    """

    # a fair part of this code could likely be shared with the i386
    # backend.

    def __init__(self, rgenop, mc, switch_reg, crf, var2loc, initial_spill_offset):
        self.rgenop = rgenop
        self.crf = crf                  # condition-register field used for the compares
        self.switch_reg = switch_reg    # register holding the value being switched on
        self.var2loc = var2loc
        self.initial_spill_offset = initial_spill_offset
        self.asm = RPPCAssembler()
        self.asm.mc = mc
        self.default_target_addr = 0    # 0 means "no default written yet"

    def add_case(self, gv_case):
        """Open a new builder for one case and emit its dispatch code."""
        targetbuilder = self.rgenop.newbuilder()
        targetbuilder._open()
        targetbuilder.initial_var2loc = self.var2loc
        targetbuilder.initial_spill_offset = self.initial_spill_offset
        target_addr = targetbuilder.asm.mc.tell()
        p = self.asm.mc.getpos()
        # that this works depends a bit on the fixed length of the
        # instruction sequences we use to jump around. if the code is
        # ever updated to use the branch-relative instructions (a good
        # idea, btw) this will need to be thought about again
        try:
            self._add_case(gv_case, target_addr)
        except codebuf.CodeBlockOverflow:
            # out of room: roll back, emit an unconditional jump into a
            # freshly reserved block, and retry there
            self.asm.mc.setpos(p)
            base = self.asm.mc.tell()
            mc = self.rgenop.open_mc()
            newmc = mc.reserve(7 * 5 + 4)
            self.rgenop.close_mc(mc)
            new_addr = newmc.tell()
            self.asm.load_word(rSCRATCH, new_addr)
            self.asm.mtctr(rSCRATCH)
            self.asm.bctr()
            size = self.asm.mc.tell() - base
            flush_icache(base, size)
            self.asm.mc = newmc
            self._add_case(gv_case, target_addr)
        return targetbuilder

    def _add_case(self, gv_case, target_addr):
        # compare-and-branch sequence for one case; re-emits the default
        # trampoline behind it so the default always comes last
        asm = self.asm
        base = self.asm.mc.tell()
        assert isinstance(gv_case, GenConst)
        gv_case.load_now(asm, insn.gprs[0])
        asm.cmpw(self.crf.number, rSCRATCH, self.switch_reg.number)
        asm.load_word(rSCRATCH, target_addr)
        asm.mtctr(rSCRATCH)
        asm.bcctr(12, self.crf.number*4 + 2)   # branch through CTR if the compare was equal
        if self.default_target_addr:
            self._write_default()
        size = self.asm.mc.tell() - base
        flush_icache(base, size)

    def add_default(self):
        """Open a builder for the default case and emit the jump to it."""
        targetbuilder = self.rgenop.newbuilder()
        targetbuilder._open()
        targetbuilder.initial_var2loc = self.var2loc
        targetbuilder.initial_spill_offset = self.initial_spill_offset
        base = self.asm.mc.tell()
        self.default_target_addr = targetbuilder.asm.mc.tell()
        self._write_default()
        size = self.asm.mc.tell() - base
        flush_icache(base, size)
        return targetbuilder

    def _write_default(self):
        # Write the jump-to-default sequence, then rewind the write
        # position so that a later case overwrites it; _add_case
        # re-emits it after every case.
        pos = self.asm.mc.getpos()
        self.asm.load_word(rSCRATCH, self.default_target_addr)
        self.asm.mtctr(rSCRATCH)
        self.asm.bctr()
        self.asm.mc.setpos(pos)
+
# Prebuilt global constants all go through one shared RPPCGenOp instance.
global_rgenop = RPPCGenOp()
RPPCGenOp.constPrebuiltGlobal = global_rgenop.genconst
+
def peek_word_at(addr):
    """Read one signed machine word from 'addr'.

    Now the Very Obscure Bit: when translated, 'addr' is an address;
    when not, it's an integer.  It just happens to make the test pass,
    but that's probably going to change.
    """
    if we_are_translated():
        return addr.signed[0]
    from ctypes import POINTER, c_int, c_void_p, cast
    word_ptr = cast(c_void_p(addr), POINTER(c_int))
    return word_ptr[0]
+
def poke_word_into(addr, value):
    """Store one signed machine word at 'addr'.

    Now the Very Obscure Bit: when translated, 'addr' is an address;
    when not, it's an integer.  It just happens to make the test pass,
    but that's probably going to change.
    """
    if we_are_translated():
        addr.signed[0] = value
    else:
        from ctypes import POINTER, c_int, c_void_p, cast
        word_ptr = cast(c_void_p(addr), POINTER(c_int))
        word_ptr[0] = value
+
+zero_const = AddrConst(llmemory.NULL)
diff --git a/pypy/jit/backend/ppc/runner.py b/pypy/jit/backend/ppc/runner.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/runner.py
@@ -0,0 +1,152 @@
import py
from pypy.rpython.lltypesystem import lltype, llmemory, rffi
from pypy.rpython.lltypesystem.lloperation import llop
from pypy.rpython.llinterp import LLInterpreter
from pypy.rlib.objectmodel import we_are_translated
from pypy.jit.metainterp import history, compile
# NOTE(review): the x86 imports below are placeholders borrowed from
# the x86 backend until real PPC equivalents exist -- confirm before
# relying on them.
from pypy.jit.backend.x86.assembler import Assembler386
from pypy.jit.backend.x86.arch import FORCE_INDEX_OFS
from pypy.jit.backend.x86.profagent import ProfileAgent
from pypy.jit.backend.llsupport.llmodel import AbstractLLCPU
from pypy.jit.backend.x86 import regloc
import sys

from pypy.tool.ansi_print import ansi_log
log = py.log.Producer('jitbackend')
py.log.setconsumer('jitbackend', ansi_log)
+
+
class PPC_64_CPU(AbstractLLCPU):
    """CPU interface for the PPC JIT backend.

    NOTE(review): most of this class is copied from the x86 backend and
    still uses x86 pieces (Assembler386, FORCE_INDEX_OFS, the
    '_x86_bootstrap_code' attribute) as placeholders until the PPC
    assembler exists -- confirm before relying on any of it.
    """
    debug = True
    supports_floats = False

    # generated bootstrap code takes no arguments and returns a fail index
    BOOTSTRAP_TP = lltype.FuncType([], lltype.Signed)
    dont_keepalive_stuff = False    # for tests

    def __init__(self, rtyper, stats, opts=None, translate_support_code=False,
                 gcdescr=None):
        if gcdescr is not None:
            gcdescr.force_index_ofs = FORCE_INDEX_OFS
        AbstractLLCPU.__init__(self, rtyper, stats, opts,
                               translate_support_code, gcdescr)

    def setup(self):
        # bound the number of fail args, from the options if available
        if self.opts is not None:
            failargs_limit = self.opts.failargs_limit
        else:
            failargs_limit = 1000
        self.assembler = Assembler386(self, self.translate_support_code,
                                      failargs_limit)

    def get_on_leave_jitted_hook(self):
        return self.assembler.leave_jitted_hook

    def setup_once(self):
        self.assembler.setup_once()

    def finish_once(self):
        self.assembler.finish_once()

    def compile_loop(self, inputargs, operations, looptoken, log=True):
        return self.assembler.assemble_loop(inputargs, operations, looptoken,
                                            log=log)

    def compile_bridge(self, faildescr, inputargs, operations,
                       original_loop_token, log=True):
        clt = original_loop_token.compiled_loop_token
        clt.compiling_a_bridge()
        return self.assembler.assemble_bridge(faildescr, inputargs, operations,
                                              original_loop_token, log=log)

    # ---- passing values into and out of the generated code ----

    def set_future_value_int(self, index, intvalue):
        self.assembler.fail_boxes_int.setitem(index, intvalue)

    def set_future_value_float(self, index, floatvalue):
        self.assembler.fail_boxes_float.setitem(index, floatvalue)

    def set_future_value_ref(self, index, ptrvalue):
        self.assembler.fail_boxes_ptr.setitem(index, ptrvalue)

    def get_latest_value_int(self, index):
        return self.assembler.fail_boxes_int.getitem(index)

    def get_latest_value_float(self, index):
        return self.assembler.fail_boxes_float.getitem(index)

    def get_latest_value_ref(self, index):
        return self.assembler.fail_boxes_ptr.getitem(index)

    def get_latest_value_count(self):
        return self.assembler.fail_boxes_count

    def clear_latest_values(self, count):
        # null out the ref boxes so the GC can reclaim their targets
        setitem = self.assembler.fail_boxes_ptr.setitem
        null = lltype.nullptr(llmemory.GCREF.TO)
        for index in range(count):
            setitem(index, null)

    def get_latest_force_token(self):
        return self.assembler.fail_ebp + FORCE_INDEX_OFS

    def execute_token(self, executable_token):
        # NOTE(review): '_x86_bootstrap_code' is an x86-named attribute;
        # rename once the PPC assembler sets its own.
        addr = executable_token._x86_bootstrap_code
        func = rffi.cast(lltype.Ptr(self.BOOTSTRAP_TP), addr)
        fail_index = self._execute_call(func)
        return self.get_fail_descr_from_number(fail_index)

    def _execute_call(self, func):
        # help flow objspace
        prev_interpreter = None
        if not self.translate_support_code:
            prev_interpreter = LLInterpreter.current_interpreter
            LLInterpreter.current_interpreter = self.debug_ll_interpreter
        res = 0
        try:
            res = func()
        finally:
            if not self.translate_support_code:
                LLInterpreter.current_interpreter = prev_interpreter
        return res

    @staticmethod
    def cast_ptr_to_int(x):
        adr = llmemory.cast_ptr_to_adr(x)
        # Bug fix: the original called CPU386.cast_adr_to_int, but
        # CPU386 is neither defined nor imported in this module (a
        # leftover from the x86 file this was copied from) and would
        # raise NameError when called.  Dispatch on this class instead.
        return PPC_64_CPU.cast_adr_to_int(adr)

    # scratch area handed to grab_frame_values() in force()
    all_null_registers = lltype.malloc(rffi.LONGP.TO, 24,
                                       flavor='raw', zero=True,
                                       immortal=True)

    def force(self, addr_of_force_index):
        TP = rffi.CArrayPtr(lltype.Signed)
        fail_index = rffi.cast(TP, addr_of_force_index)[0]
        assert fail_index >= 0, "already forced!"
        faildescr = self.get_fail_descr_from_number(fail_index)
        # mark the frame as forced by flipping the stored index negative
        rffi.cast(TP, addr_of_force_index)[0] = ~fail_index
        frb = self.assembler._find_failure_recovery_bytecode(faildescr)
        bytecode = rffi.cast(rffi.UCHARP, frb)
        # start of "no gc operation!" block
        fail_index_2 = self.assembler.grab_frame_values(
            bytecode,
            addr_of_force_index - FORCE_INDEX_OFS,
            self.all_null_registers)
        self.assembler.leave_jitted_hook()
        # end of "no gc operation!" block
        assert fail_index == fail_index_2
        return faildescr

    def redirect_call_assembler(self, oldlooptoken, newlooptoken):
        self.assembler.redirect_call_assembler(oldlooptoken, newlooptoken)

    def invalidate_loop(self, looptoken):
        # NOTE(review): patches an x86 JMP over each invalidate
        # position -- another x86 placeholder.
        from pypy.jit.backend.x86 import codebuf

        for addr, tgt in looptoken.compiled_loop_token.invalidate_positions:
            mc = codebuf.MachineCodeBlockWrapper()
            mc.JMP_l(tgt)
            assert mc.get_relative_pos() == 5      # [JMP] [tgt 4 bytes]
            mc.copy_to_raw_memory(addr - 1)
        # positions invalidated
        looptoken.compiled_loop_token.invalidate_positions = []
diff --git a/pypy/jit/backend/ppc/test/__init__.py b/pypy/jit/backend/ppc/test/__init__.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/test/__init__.py
@@ -0,0 +1,1 @@
+#
diff --git a/pypy/jit/backend/ppc/test/test_genc_ts.py b/pypy/jit/backend/ppc/test/test_genc_ts.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/test/test_genc_ts.py
@@ -0,0 +1,16 @@
import py
# NOTE(review): these imports still point at the old pypy.jit.codegen.*
# locations even though this file now lives under pypy.jit.backend.ppc
# -- verify the modules still exist at these paths.
from pypy.jit.codegen.i386.test.test_genc_ts import I386TimeshiftingTestMixin
from pypy.jit.timeshifter.test import test_timeshift
from pypy.jit.codegen.ppc.rgenop import RPPCGenOp
+
class PPCTimeshiftingTestMixin(I386TimeshiftingTestMixin):
    # reuse the i386 timeshifting test machinery, with the PPC RGenOp
    RGenOp = RPPCGenOp

class TestTimeshiftPPC(PPCTimeshiftingTestMixin,
                       test_timeshift.TestLLType):

    # for the individual tests see
    # ====> ../../../timeshifter/test/test_timeshift.py

    pass
+
diff --git a/pypy/jit/backend/ppc/test/test_interp.py b/pypy/jit/backend/ppc/test/test_interp.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/test/test_interp.py
@@ -0,0 +1,22 @@
from pypy.jit.codegen.ppc import codebuf, rgenop
from pypy.rpython.lltypesystem import lltype
from pypy.jit.codegen.test import rgenop_tests
from pypy.rpython.test.test_llinterp import interpret
from pypy.jit.codegen.ppc.test import test_rgenop

class LLTypeRGenOp(rgenop.RPPCGenOp):
    # RPPCGenOp variant whose machine-code blocks are simulated with
    # lltype-based implementations, so it can run on the llinterp.
    # NOTE(review): the backend's new codebuf.py (added in this commit)
    # only defines MachineCodeBlockWrapper -- confirm these two classes
    # exist at the imported location.
    MachineCodeBlock = codebuf.LLTypeMachineCodeBlock
    ExistingCodeBlock = codebuf.LLTypeExistingCodeBlock
+
def test_simple():
    """Build a tiny 'add a constant' graph and run the generation code
    through the llinterp; we only check that nothing crashes."""
    FUNC = lltype.FuncType([lltype.Signed], lltype.Signed)
    def f(n):
        rgenop = LLTypeRGenOp()
        sigtoken = rgenop.sigToken(FUNC)
        builder, gv_add_one, [gv_x] = rgenop.newgraph(sigtoken, "adder")
        builder.start_writing()
        gv_result = builder.genop2("int_add", gv_x, rgenop.genconst(n))
        builder.finish_and_return(sigtoken, gv_result)
        builder.end()
    res = interpret(f, [5], policy=rgenop_tests.GENOP_POLICY)
    # just testing that this didn't crash ('res' is intentionally unused)
diff --git a/pypy/jit/backend/ppc/test/test_interp_ts.py b/pypy/jit/backend/ppc/test/test_interp_ts.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/test/test_interp_ts.py
@@ -0,0 +1,21 @@
from pypy.jit.codegen.ppc.conftest import option
import py
from pypy.jit.codegen.i386.test.test_interp_ts import I386LLInterpTimeshiftingTestMixin
from pypy.jit.timeshifter.test import test_timeshift

def setup_module(mod):
    # the interp timeshifting tests are extremely slow; only run them
    # when explicitly requested via the conftest option
    if not option.run_interp_tests:
        py.test.skip("these tests take ages and are not really useful")
+
+
class PPCLLInterpTimeshiftingTestMixin(I386LLInterpTimeshiftingTestMixin):
    # class-body import: binds LLTypeRGenOp as the RGenOp class attribute
    from pypy.jit.codegen.ppc.test.test_interp import LLTypeRGenOp as RGenOp

class TestTimeshiftPPC(PPCLLInterpTimeshiftingTestMixin,
                       test_timeshift.TestLLType):

    # for the individual tests see
    # ====> ../../../timeshifter/test/test_timeshift.py

    pass
+
diff --git a/pypy/jit/backend/ppc/test/test_operation.py b/pypy/jit/backend/ppc/test/test_operation.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/test/test_operation.py
@@ -0,0 +1,43 @@
# NOTE(review): imports still reference pypy.jit.codegen.* although this
# file now lives under pypy.jit.backend.ppc -- verify the paths.
from pypy.jit.codegen.test.operation_tests import OperationTests
from pypy.jit.codegen.ppc.rgenop import RPPCGenOp
from pypy.rpython.memory.lltypelayout import convert_offset_to_int
from pypy.rlib.objectmodel import specialize
+
def conv(n):
    """Return n as a concrete int, converting symbolic offsets if needed."""
    if isinstance(n, int):
        return n
    return convert_offset_to_int(n)
+
+
class RGenOpPacked(RPPCGenOp):
    """Like RPPCGenOp, but produces concrete integer offsets in the
    tokens instead of symbolic llmemory offsets.  These numbers may
    not agree with your C compiler's.
    """

    @staticmethod
    @specialize.memo()
    def fieldToken(T, name):
        # (offset, size), both as plain ints
        return tuple(map(conv, RPPCGenOp.fieldToken(T, name)))

    @staticmethod
    @specialize.memo()
    def arrayToken(A):
        return tuple(map(conv, RPPCGenOp.arrayToken(A)))

    @staticmethod
    @specialize.memo()
    def allocToken(T):
        return conv(RPPCGenOp.allocToken(T))

    @staticmethod
    @specialize.memo()
    def varsizeAllocToken(A):
        return tuple(map(conv, RPPCGenOp.varsizeAllocToken(A)))
+
+
class PPCTestMixin(object):
    # plug the packed-offset RGenOp into the generic operation tests
    RGenOp = RGenOpPacked

class TestOperation(PPCTestMixin, OperationTests):
    pass
diff --git a/pypy/jit/backend/ppc/test/test_rgenop.py b/pypy/jit/backend/ppc/test/test_rgenop.py
new file mode 100644
--- /dev/null
+++ b/pypy/jit/backend/ppc/test/test_rgenop.py
@@ -0,0 +1,33 @@
import py
# NOTE(review): imports still reference pypy.jit.codegen.* although this
# file now lives under pypy.jit.backend.ppc -- verify the paths.
from pypy.jit.codegen.ppc.rgenop import RPPCGenOp
from pypy.rpython.lltypesystem import lltype
from pypy.jit.codegen.test.rgenop_tests import FUNC, FUNC2
from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTestsDirect
from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTestsCompile
from ctypes import cast, c_int, c_void_p, CFUNCTYPE
from pypy.jit.codegen.ppc import instruction as insn
+
+# for the individual tests see
+# ====> ../../test/rgenop_tests.py
+
class FewRegisters(RPPCGenOp):
    # artificially restrict allocation to three GP registers and a
    # single condition-register field, to stress the register allocator
    freeregs = {
        insn.GP_REGISTER: insn.gprs[3:6],
        insn.FP_REGISTER: insn.fprs,
        insn.CR_FIELD: insn.crfs[:1],
        insn.CT_REGISTER: [insn.ctr]}
+
class FewRegistersAndScribble(FewRegisters):
    # additionally enables RPPCGenOp.DEBUG_SCRIBBLE -- presumably makes
    # the backend scribble over unused state to expose stale reads;
    # confirm against the rgenop implementation
    DEBUG_SCRIBBLE = True
+
# Instantiate the generic rgenop test suites against the full and the
# artificially register-starved PPC backends.
class TestRPPCGenopDirect(AbstractRGenOpTestsDirect):
    RGenOp = RPPCGenOp

class TestRPPCGenopCompile(AbstractRGenOpTestsCompile):
    RGenOp = RPPCGenOp

class TestRPPCGenopNoRegs(AbstractRGenOpTestsDirect):
    RGenOp = FewRegisters

class TestRPPCGenopNoRegsAndScribble(AbstractRGenOpTestsDirect):
    RGenOp = FewRegistersAndScribble
More information about the pypy-commit
mailing list