[pypy-svn] r36635 - in pypy/branch/i386-regalloc/pypy/jit/codegen: . llgraph llgraph/test llvm llvm/test ppc ppc/test test

arigo at codespeak.net
Sat Jan 13 12:10:16 CET 2007


Author: arigo
Date: Sat Jan 13 12:09:25 2007
New Revision: 36635

Modified:
   pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/llimpl.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/rgenop.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/test/test_rgenop.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/compatibility.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/rgenop.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_exception.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_ts.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_vlist.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_operation.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_rgenop.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/model.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/instruction.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/regalloc.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/rgenop.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/test/test_rgenop.py
   pypy/branch/i386-regalloc/pypy/jit/codegen/test/rgenop_tests.py
Log:
svn merge -r36412:36633 http://codespeak.net/svn/pypy/dist/pypy/jit/codegen


Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/llimpl.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/llimpl.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/llimpl.py	Sat Jan 13 12:09:25 2007
@@ -193,7 +193,11 @@
     elif T == llmemory.Address:
         return llmemory.cast_ptr_to_adr(c.value)
     else:
-        return lltype.cast_primitive(T, c.value)
+        if lltype.typeOf(c.value) == llmemory.Address:
+            value = llmemory.cast_adr_to_int(c.value)
+        else:
+            value = c.value
+        return lltype.cast_primitive(T, value)
 
 def isconst(gv_value):
     c = from_opaque_object(gv_value)
@@ -543,3 +547,35 @@
 #setannotation(placeholder,    s_ConstOrVar, specialize_as_constant=True)
 
 setannotation(show_incremental_progress, None)
+
+# read frame var support
+
+def read_frame_var(T, base, info, index):
+    vars = info._obj.vars
+    v = vars[index]
+    if isinstance(v, flowmodel.Constant):
+        val = v.value
+    else:
+        llframe = base.ptr
+        val = llframe.bindings[v]
+    assert lltype.typeOf(val) == T
+    return val
+        
+
+class ReadFrameVarEntry(ExtRegistryEntry):
+        "Annotation and specialization for calls to 'read_frame_var'."
+        _about_ = read_frame_var
+
+        def compute_result_annotation(self, *args_s):
+            T = args_s[0].const
+            return annmodel.lltype_to_annotation(T)
+
+        # specialize as direct_call
+        def specialize_call(self, hop):
+            FUNCTYPE = lltype.FuncType([r.lowleveltype for r in hop.args_r],
+                                       hop.r_result.lowleveltype)
+            args_v = hop.inputargs(*hop.args_r)
+            funcptr = lltype.functionptr(FUNCTYPE, 'read_frame_var',
+                                         _callable=read_frame_var)
+            cfunc = hop.inputconst(lltype.Ptr(FUNCTYPE), funcptr)
+            return hop.genop('direct_call', [cfunc] + args_v, hop.r_result)

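The ReadFrameVarEntry above follows RPython's ExtRegistryEntry protocol: during
annotation the result type is taken from the constant first argument, and during
rtyping the call is lowered to a plain direct_call on a function pointer whose
_callable is read_frame_var, so the llinterpreter can still execute it.  A small
caller-side sketch of what this buys (the names base, info and the index 0 are
illustrative; the test added to llgraph/test/test_rgenop.py below shows the full
picture):

    # annotation: lltype.Signed is a constant, so the annotator knows the
    # result is a Signed
    val = read_frame_var(lltype.Signed, base, info, 0)
    # rtyping: the call above is rewritten into roughly
    #     val = direct_call(<* fn read_frame_var>, base, info, 0)
    # and executing the graph dispatches to the _callable attached to the
    # function pointer, i.e. back to read_frame_var() itself.
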
Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/rgenop.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/rgenop.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/rgenop.py	Sat Jan 13 12:09:25 2007
@@ -1,5 +1,5 @@
 from pypy.rlib.objectmodel import specialize
-from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.lltypesystem import lltype, llmemory
 from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
 from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
 from pypy.jit.codegen.llgraph import llimpl
@@ -39,6 +39,8 @@
 gv_Signed = gv_TYPE(lltype.Signed)
 gv_dummy_placeholder = LLConst(llimpl.dummy_placeholder)
 
+gv_Address = gv_TYPE(llmemory.Address)
+gv_GCREF = gv_TYPE(llmemory.GCREF)
 
 class LLLabel(GenLabel):
     def __init__(self, b, g):
@@ -220,6 +222,16 @@
         llimpl.show_incremental_progress(self.gv_f)
 
 
+    # read_frame_var support
+
+    def get_frame_base(self):
+        return LLVar(llimpl.genop(self.b, 'get_frame_base', [],
+                                  gv_Address.v))
+
+    def get_frame_info(self, vars):
+        return LLVar(llimpl.genop(self.b, 'get_frame_info', vars,
+                                  gv_GCREF.v))
+
 class RGenOp(AbstractRGenOp):
     gv_Void = gv_Void
 
@@ -302,5 +314,10 @@
     def _freeze_(self):
         return True    # no real point in using a full class in llgraph
 
+    @staticmethod
+    @specialize.arg(0)
+    def read_frame_var(T, base, info, index):
+        return llimpl.read_frame_var(T, base, info, index)
+
 
 rgenop = RGenOp()      # no real point in using a full class in llgraph

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/test/test_rgenop.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/test/test_rgenop.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llgraph/test/test_rgenop.py	Sat Jan 13 12:09:25 2007
@@ -1,9 +1,9 @@
 import py
-from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.lltypesystem import lltype, llmemory
 from pypy.jit.codegen.llgraph.rgenop import RGenOp
 from pypy.jit.codegen.llgraph.llimpl import testgengraph
 from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTests
-from pypy.rpython.test.test_llinterp import interpret
+from pypy.rpython.test.test_llinterp import gengraph, interpret
 
 
 class TestLLGraphRGenop(AbstractRGenOpTests):
@@ -23,3 +23,35 @@
 
     # for the individual tests see
     # ====> ../../test/rgenop_tests.py
+
+
+def test_read_frame_var():
+    from pypy.annotation import model as annmodel
+
+    def reader(base, info):
+        return RGenOp.read_frame_var(lltype.Signed, base, info, 0)
+
+    t, rtyper, reader_graph = gengraph(reader,
+                                       [annmodel.SomeAddress(),
+                                        annmodel.SomePtr(llmemory.GCREF)])
+    reader_ptr = rtyper.getcallable(reader_graph)
+
+    F1 = lltype.FuncType([lltype.Signed], lltype.Signed)
+    rgenop = RGenOp()
+    signed_kind = rgenop.kindToken(lltype.Signed)
+    sigtoken = rgenop.sigToken(F1)
+    gv_reader = RGenOp.constPrebuiltGlobal(reader_ptr)
+    readertoken = rgenop.sigToken(lltype.typeOf(reader_ptr).TO)
+
+    builder, gv_f, [gv_x] = rgenop.newgraph(sigtoken, "f")
+
+    gv_y = builder.genop2("int_mul", gv_x, rgenop.genconst(2))
+    gv_base = builder.get_frame_base()
+    gv_info = builder.get_frame_info([gv_y])
+    gv_z = builder.genop_call(readertoken, gv_reader, [gv_base, gv_info])
+    builder.finish_and_return(sigtoken, gv_z)
+    builder.end()
+
+    ptr = gv_f.revealconst(lltype.Ptr(F1))
+    res = testgengraph(ptr._obj.graph, [21])
+    assert res == 42

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/compatibility.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/compatibility.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/compatibility.py	Sat Jan 13 12:09:25 2007
@@ -7,7 +7,7 @@
 if llvm_version() < 2.0:
     icmp = scmp = ucmp = fcmp = 'set'
     inttoptr = trunc = zext = bitcast = 'cast'
-    shr_prefix = ('', '')
+    shr_prefix = ['', '']
     i8  = 'ubyte'
     i16 = 'short'
     i32 = 'int'
@@ -22,9 +22,12 @@
     trunc = 'trunc'
     zext = 'zext'
     bitcast = 'bitcast'
-    shr_prefix = ('l', 'a')
+    shr_prefix = ['l', 'a']
     define = 'define'
     i8  = 'i8'
     i16 = 'i16'
     i32 = 'i32'
     i64 = 'i64'
+
+i1 = 'bool'
+f64 = 'double'

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/rgenop.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/rgenop.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/rgenop.py	Sat Jan 13 12:09:25 2007
@@ -1,6 +1,7 @@
 import py, os
 from pypy.rlib.objectmodel import specialize
 from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rlib.rarithmetic import intmask
 from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
 from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
 from pypy.jit.codegen.llvm import llvmjit
@@ -8,15 +9,17 @@
 from pypy.jit.codegen.i386.rgenop import gc_malloc_fnaddr
 from pypy.jit.codegen.llvm.conftest import option
 from pypy.jit.codegen.llvm.compatibility import icmp, scmp, ucmp, fcmp, inttoptr,\
-    trunc, zext, bitcast, shr_prefix, define, i8, i16, i32
+    trunc, zext, bitcast, shr_prefix, define, i1, i8, i16, i32, f64
 
 
+pi8  = i8  + '*'
+pi32 = i32 + '*'
+u32  = i32
+
 LINENO       = option.lineno
 PRINT_SOURCE = option.print_source
 PRINT_DEBUG  = option.print_debug
 
-WORD = 4
-
 
 class ParseException(Exception):
     pass
@@ -89,56 +92,60 @@
     def __init__(self, type):
         self.n = count.n_vars
         self.type = type
-        self.signed = type is i32 or type is 'float'
+        self.signed = type is i32 or type is f64
         count.n_vars += 1
 
     def operand(self):
-        return '%s %%v%d' % (self.type, self.n)
+        return '%s %s' % (self.type, self.operand2())
 
     def operand2(self):
         return '%%v%d' % (self.n,)
 
 
 class GenericConst(GenConst):
-    #type = 'generic'
-
-    #def __init__(self, value):
-    #    self.value = value
 
     def operand(self):
-        return '%s %s' % (self.type, self.value)
-
-    def operand2(self):
-        return str(self.value)
+        return '%s %s' % (self.type, self.operand2())
 
     @specialize.arg(1)
     def revealconst(self, T):
         if isinstance(T, lltype.Ptr):
-            return lltype.cast_int_to_ptr(T, self.value)
+            return lltype.cast_int_to_ptr(T, self.get_integer_value())
         elif T is llmemory.Address:
-            return llmemory.cast_int_to_adr(self.value)
+            return llmemory.cast_int_to_adr(self.get_integer_value())
         else:
-            return lltype.cast_primitive(T, self.value)
+            return lltype.cast_primitive(T, self.get_integer_value())
 
 
 class BoolConst(GenericConst):
-    type = 'bool'
+    type = i1
     signed = False
 
     def __init__(self, value):
         self.value = bool(value)
 
+    def operand2(self):
+        if self.value:
+            return 'true'
+        else:
+            return 'false'
+
+    def get_integer_value(self):
+        return int(self.value)
+
 
 class CharConst(GenericConst):
     type = i8
     signed = False
 
     def __init__(self, value):
-        if type(value) is str:
-            self.value = ord(value)
-        else:
-            assert type(value) is int
-            self.value = value
+        self.value = ord(value)
+
+    def operand2(self):
+        return '%d' % self.value
+
+    def get_integer_value(self):
+        return self.value
 
 
 class UniCharConst(GenericConst):
@@ -148,6 +155,12 @@
     def __init__(self, value):
         self.value = unicode(value)
 
+    def operand2(self):
+        return '%s' % self.value
+
+    def get_integer_value(self):
+        return int(self.value)
+
 
 class IntConst(GenericConst):
     type = i32
@@ -156,42 +169,58 @@
     def __init__(self, value):
         self.value = int(value)
 
-    #XXX why does typeof value change in test_genc_ts.py -k test_degenerated_before_return(_2)?
-    def operand(self):
-        return '%s %d' % (self.type, int(self.value))
-
     def operand2(self):
-        return str(int(self.value))
+        return str(self.value)
+
+    def get_integer_value(self):
+        return self.value
 
 
 class UIntConst(GenericConst):
-    type = i32  #'uint'
+    type = u32
     signed = False
 
     def __init__(self, value):
-        self.value = int(value)
+        self.value = value
+
+    def operand2(self):
+        return str(self.value)
+
+    def get_integer_value(self):
+        return intmask(self.value)
 
 
 class FloatConst(GenericConst):
-    type = 'float'
+    type = f64
     signed = True
 
     def __init__(self, value):
         self.value = float(value)
 
+    def operand2(self):
+        return str(self.value)
+
+    @specialize.arg(1)
+    def revealconst(self, T):
+        assert T is lltype.Float
+        return self.value
+
 
 class AddrConst(GenConst):
-    type = i32 + '*'
+    type = pi8
     signed = False
 
     def __init__(self, addr):
         self.addr = addr
 
     def operand(self):
-        return '%s %s' % (self.type, llmemory.cast_adr_to_int(self.addr))
+        return '%s %s' % (self.type, self.operand2())
 
     def operand2(self):
-        return str(llmemory.cast_adr_to_int(self.addr))
+        s = str(llmemory.cast_adr_to_int(self.addr))
+        if s == '0':
+            s = 'null'
+        return s
 
     @specialize.arg(1)
     def revealconst(self, T):
@@ -433,52 +462,52 @@
     def op_float_neg(self, gv_x): return self._rgenop2_generic('sub', FloatConst(0.0), gv_x)
 
     def op_int_lt(self, gv_x, gv_y):
-        return self._rgenop2_generic(scmp + 'lt', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(scmp + 'lt', gv_x, gv_y, i1)
 
     def op_int_le(self, gv_x, gv_y):
-        return self._rgenop2_generic(scmp + 'le', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(scmp + 'le', gv_x, gv_y, i1)
 
     def op_int_eq(self, gv_x, gv_y):
-        return self._rgenop2_generic(icmp + 'eq' , gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(icmp + 'eq' , gv_x, gv_y, i1)
 
     def op_int_ne(self, gv_x, gv_y):
-        return self._rgenop2_generic(icmp + 'ne' , gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(icmp + 'ne' , gv_x, gv_y, i1)
 
     def op_int_gt(self, gv_x, gv_y):
-        return self._rgenop2_generic(scmp + 'gt', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(scmp + 'gt', gv_x, gv_y, i1)
 
     def op_int_ge(self, gv_x, gv_y):
-        return self._rgenop2_generic(scmp + 'ge', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(scmp + 'ge', gv_x, gv_y, i1)
 
     def op_uint_lt(self, gv_x, gv_y):
-        return self._rgenop2_generic(ucmp + 'lt', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(ucmp + 'lt', gv_x, gv_y, i1)
 
     def op_uint_le(self, gv_x, gv_y):
-        return self._rgenop2_generic(ucmp + 'le', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(ucmp + 'le', gv_x, gv_y, i1)
 
     def op_uint_gt(self, gv_x, gv_y):
-        return self._rgenop2_generic(ucmp + 'gt', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(ucmp + 'gt', gv_x, gv_y, i1)
 
     def op_uint_ge(self, gv_x, gv_y):
-        return self._rgenop2_generic(ucmp + 'ge', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(ucmp + 'ge', gv_x, gv_y, i1)
 
     def op_float_lt(self, gv_x, gv_y):
-        return self._rgenop2_generic(fcmp + 'lt', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(fcmp + 'lt', gv_x, gv_y, i1)
 
     def op_float_le(self, gv_x, gv_y): 
-        return self._rgenop2_generic(fcmp + 'le', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(fcmp + 'le', gv_x, gv_y, i1)
     
     def op_float_eq(self, gv_x, gv_y): 
-        return self._rgenop2_generic(fcmp + 'eq', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(fcmp + 'eq', gv_x, gv_y, i1)
     
     def op_float_ne(self, gv_x, gv_y): 
-        return self._rgenop2_generic(fcmp + 'ne', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(fcmp + 'ne', gv_x, gv_y, i1)
 
     def op_float_gt(self, gv_x, gv_y): 
-        return self._rgenop2_generic(fcmp + 'gt', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(fcmp + 'gt', gv_x, gv_y, i1)
 
     def op_float_ge(self, gv_x, gv_y): 
-        return self._rgenop2_generic(fcmp + 'ge', gv_x, gv_y, 'bool')
+        return self._rgenop2_generic(fcmp + 'ge', gv_x, gv_y, i1)
     
     op_unichar_eq = op_ptr_eq = op_uint_eq = op_int_eq
     op_unichar_ne = op_ptr_ne = op_uint_ne = op_int_ne
@@ -504,14 +533,15 @@
     def op_uint_invert(self, gv_x): return self._rgenop2_generic('xor', gv_x, UIntConst((1<<32)-1))
 
     def _abs(self, gv_x, nullstr='0'):
-        gv_comp    = Var('bool')
+        gv_comp    = Var(i1)
         gv_abs_pos = Var(gv_x.type)
         gv_result  = Var(gv_x.type)
-        if nullstr is '0':
-            l = ' %s=' + scmp + 'ge %s,%s'
+        if nullstr == 'null' or nullstr == '0':
+            cmp = scmp
         else:
-            l = ' %s=' + fcmp + 'ge %s,%s'
-        self.asm.append(l % (gv_comp.operand2(), gv_x.operand(), nullstr))
+            cmp = fcmp
+        self.asm.append(' %s=%sge %s,%s' % (
+            gv_comp.operand2(), cmp, gv_x.operand(), nullstr))
         self.asm.append(' %s=sub %s %s,%s' % (
             gv_abs_pos.operand2(), gv_x.type, nullstr, gv_x.operand2()))
         self.asm.append(' %s=select %s,%s,%s' % (
@@ -539,8 +569,12 @@
         if restype is gv_x.type:
             return self.genop_same_as(None, gv_x)
         gv_result = Var(restype)
+        if restype[-1] == '*':
+            t = bitcast
+        else:
+            t = zext
         self.asm.append(' %s=%s %s to %s' % (
-            gv_result.operand2(), zext, gv_x.operand(), restype))
+            gv_result.operand2(), t, gv_x.operand(), restype))
         return gv_result
 
     def _trunc_to(self, gv_x, restype=None):
@@ -552,19 +586,19 @@
             gv_result.operand2(), trunc, gv_x.operand(), restype))
         return gv_result
 
-    def _cast_to_bool(self, gv_x):      return self._cast_to(gv_x, 'bool')
+    def _cast_to_bool(self, gv_x):      return self._cast_to(gv_x, i1)
     def _cast_to_char(self, gv_x):      return self._cast_to(gv_x, i8)
     def _cast_to_unichar(self, gv_x):   return self._cast_to(gv_x, i32)
     def _cast_to_int(self, gv_x):       return self._cast_to(gv_x, i32)
-    def _cast_to_uint(self, gv_x):      return self._cast_to(gv_x, i32) #'uint')
-    def _cast_to_float(self, gv_x):     return self._cast_to(gv_x, 'float')
+    def _cast_to_uint(self, gv_x):      return self._cast_to(gv_x, u32)
+    def _cast_to_float(self, gv_x):     return self._cast_to(gv_x, f64)
 
-    def _trunc_to_bool(self, gv_x):      return self._trunc_to(gv_x, 'bool')
+    def _trunc_to_bool(self, gv_x):      return self._trunc_to(gv_x, i1)
     def _trunc_to_char(self, gv_x):      return self._trunc_to(gv_x, i8)
     def _trunc_to_unichar(self, gv_x):   return self._trunc_to(gv_x, i32)
     def _trunc_to_int(self, gv_x):       return self._trunc_to(gv_x, i32)
-    def _trunc_to_uint(self, gv_x):      return self._trunc_to(gv_x, i32) #'uint')
-    def _trunc_to_float(self, gv_x):     return self._trunc_to(gv_x, 'float')
+    def _trunc_to_uint(self, gv_x):      return self._trunc_to(gv_x, u32)
+    def _trunc_to_float(self, gv_x):     return self._trunc_to(gv_x, f64)
 
     op_cast_char_to_bool    = _trunc_to_bool
     op_cast_unichar_to_bool = _trunc_to_bool
@@ -645,104 +679,70 @@
 
     def _is_false(self, gv_x, nullstr='0'):
         log('%s Builder._is_false %s' % (self.block.label, gv_x.operand()))
-        gv_result = Var('bool')
-        if nullstr is '0':
-            l = ' %s=' + icmp + 'eq %s,%s'
+        gv_result = Var(i1)
+        if nullstr == 'null' or nullstr == '0':
+            cmp = icmp
         else:
-            l = ' %s=' + fcmp + 'eq %s,%s'
-        self.asm.append(l % (gv_result.operand2(), gv_x.operand(), nullstr))
+            cmp = fcmp
+        self.asm.append(' %s=%seq %s,%s' % (
+            gv_result.operand2(), cmp, gv_x.operand(), nullstr))
         return gv_result
 
     def _is_true(self, gv_x, nullstr='0'):
         log('%s Builder._is_true %s' % (self.block.label, gv_x.operand()))
-        gv_result = Var('bool')
-        if nullstr is '0':
-            l = ' %s=' + icmp + 'ne %s,%s'
+        gv_result = Var(i1)
+        if nullstr == 'null' or nullstr == '0':
+            cmp = icmp
         else:
-            l = ' %s=' + fcmp + 'ne %s,%s'
-        self.asm.append(l % (gv_result.operand2(), gv_x.operand(), nullstr))
+            cmp = fcmp
+        self.asm.append(' %s=%sne %s,%s' % (
+            gv_result.operand2(), cmp, gv_x.operand(), nullstr))
         return gv_result
 
     op_bool_is_true = op_char_is_true = op_unichar_is_true = op_int_is_true =\
-    op_uint_is_true = op_ptr_nonzero = _is_true
-
-    op_ptr_iszero  = _is_false
+    op_uint_is_true = _is_true
+    
+    def op_ptr_nonzero(self, gv_x):     return self._is_true(gv_x, 'null')
+    def op_ptr_iszero(self, gv_x):      return self._is_false(gv_x, 'null')
 
-    def op_float_is_true(self, gv_x):   return self._is_true(gv_x, '0.0')
+    def op_float_is_true(self, gv_x):   return self._is_true(gv_x, '0.0') #XXX fails for doubles
 
-    def genop_getfield(self, (offset, fieldsize), gv_ptr):
-        log('%s Builder.genop_getfield (%d,%d) %s' % (
-            self.block.label, offset, fieldsize, gv_ptr.operand()))
-        if fieldsize == WORD:
-            t = i32
-        else:
-            if fieldsize == 1:
-                t = i8
-            else:
-                if fieldsize != 2:
-                    logger.dump('assert fails on: fieldsize != [124]')
-                    self.rgenop._dump_partial_lines()
-                    assert fieldsize == 2
-                t = i16
+    def genop_getfield(self, fieldtoken, gv_ptr):
+        offset, fieldtype = fieldtoken
+        log('%s Builder.genop_getfield (%d,%s) %s' % (
+            self.block.label, offset, fieldtype, gv_ptr.operand()))
         gv_ptr_var = self._as_var(gv_ptr)
-        gv_p = Var(t + '*')
+        gv_p = Var(gv_ptr.type)
         self.asm.append(' %s=getelementptr %s,%s %s' % (
-            gv_p.operand2(), gv_ptr_var.operand(), i32, offset / fieldsize))
-        gv_result = Var(t)
+            gv_p.operand2(), gv_ptr_var.operand(), i32, offset))
+        gv_p2 = self._cast_to(gv_p, fieldtype + '*')
+        gv_result = Var(fieldtype)
         self.asm.append(' %s=load %s' % (
-            gv_result.operand2(), gv_p.operand()))
+            gv_result.operand2(), gv_p2.operand()))
         return gv_result
 
-    def genop_setfield(self, (offset, fieldsize), gv_ptr, gv_value):
-        log('%s Builder.genop_setfield (%d,%d) %s=%s' % (
-            self.block.label, offset, fieldsize, gv_ptr.operand(), gv_value.operand()))
-        #if fieldsize == WORD:
-        #    gv_result = Var(i32)
-        #else:
-        #    if fieldsize == 1:
-        #       gv_result = Var(i8)
-        #    else:
-        #       assert fieldsize == 2
-        #       gv_result = Var(i16)
+    def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
+        offset, fieldtype = fieldtoken
+        log('%s Builder.genop_setfield (%d,%s) %s=%s' % (
+            self.block.label, offset, fieldtype, gv_ptr.operand(), gv_value.operand()))
         gv_ptr_var = self._as_var(gv_ptr)
-        gv_p = Var(gv_value.type+'*')
+        gv_p = Var(gv_ptr.type)
         self.asm.append(' %s=getelementptr %s,%s %s' % (
-            gv_p.operand2(), gv_ptr_var.operand(), i32, offset / fieldsize))
+            gv_p.operand2(), gv_ptr_var.operand(), i32, offset))
+        gv_p2 = self._cast_to(gv_p, fieldtype + '*')
         self.asm.append(' store %s,%s' % (
-            gv_value.operand(), gv_p.operand()))
+            gv_value.operand(), gv_p2.operand()))
 
-    def genop_getsubstruct(self, (offset, fieldsize), gv_ptr):
-        log('%s Builder.genop_getsubstruct (%d,%d) %s' % (
-            self.block.label, offset, fieldsize, gv_ptr.operand()))
+    def genop_getsubstruct(self, fieldtoken, gv_ptr):
+        offset, fieldtype = fieldtoken
+        log('%s Builder.genop_getsubstruct (%d,%s) %s' % (
+            self.block.label, offset, fieldtype, gv_ptr.operand()))
         gv_ptr_var = self._as_var(gv_ptr)
         gv_sub = Var(gv_ptr.type)
-        self.asm.append(' %s=getelementptr %s,%d' % (
-            gv_sub.operand2(), gv_ptr_var.operand(), offset))
+        self.asm.append(' %s=getelementptr %s,%s %d' % (
+            gv_sub.operand2(), gv_ptr_var.operand(), i32, offset))
         return gv_sub
 
-    def genop_getarrayitem(self, arraytoken, gv_ptr, gv_index):
-        array_length_offset, array_items_offset, itemsize = arraytoken
-        log('%s Builder.genop_getarrayitem %s,%s,%s' % (
-            self.block.label, arraytoken, gv_ptr.operand(), gv_index.operand()))
-
-        gv_i = Var(gv_index.type)
-        try:
-            offset = array_items_offset / itemsize
-        except TypeError:
-            offset = 4 #XXX (get inspired by ppc backend)
-        self.asm.append(' %s=add %s,%d' % (
-            gv_i.operand2(), gv_index.operand(), offset)) #/itemsize correct?
-
-        gv_ptr_var = self._as_var(gv_ptr)
-        gv_p = Var(gv_ptr_var.type)
-        self.asm.append(' %s=getelementptr %s,%s' % (
-            gv_p.operand2(), gv_ptr_var.operand(), gv_i.operand()))
-
-        gv_result = Var(gv_ptr_var.type[:-1])
-        self.asm.append(' %s=load %s' % (
-            gv_result.operand2(), gv_p.operand()))
-        return gv_result
-
     def genop_getarraysubstruct(self, arraytoken, gv_ptr, gv_index):
         '''
         self.mc.MOV(edx, gv_ptr.operand(self))
@@ -750,28 +750,39 @@
         self.mc.LEA(eax, op)
         return self.returnvar(eax)
         '''
-        #XXX TODO
-        array_length_offset, array_items_offset, itemsize = arraytoken
-        gv_result = Var(i32)
+        #XXX WIP
         log('%s Builder.genop_getarraysubstruct %s,%s,%s' % (
-            self.block.label, arraytoken, gv_ptr, gv_index))
-        self.asm.append(' %s=%s 0 ;%s Builder.genop_getarraysubstruct %s,%s,%s' % (
-            gv_result.operand2(), gv_result.type, self.block.label, arraytoken, gv_ptr, gv_index))
+            self.block.label, arraytoken, gv_ptr.operand(), gv_index.operand()))
+
+        array_length_offset, array_items_offset, item_size, item_type = arraytoken
+
+        op_size = self._itemaddr(arraytoken, gv_index)
+
+        gv_ptr_var = self._as_var(gv_ptr)
+
+        gv_result = Var(pi8)
+        self.asm.append(' %s=getelementptr %s,%s' % (
+            gv_result.operand2(), gv_ptr_var.operand(), op_size.operand()))
+
         return gv_result
 
     def genop_getarraysize(self, arraytoken, gv_ptr):
-        '''
-        lengthoffset, startoffset, itemoffset = arraytoken
-        self.mc.MOV(edx, gv_ptr.operand(self))
-        return self.returnvar(mem(edx, lengthoffset))
-        '''
-        #XXX TODO
-        array_length_offset, array_items_offset, itemsize = arraytoken
-        gv_result = Var(i32)
         log('%s Builder.genop_getarraysize %s,%s' % (
-            self.block.label, arraytoken, gv_ptr))
-        self.asm.append(' %s=%s 0 ;%s Builder.genop_getarraysize %s,%s' % (
-            gv_result.operand2(), gv_result.type, self.block.label, arraytoken, gv_ptr))
+            self.block.label, arraytoken, gv_ptr.operand()))
+
+        array_length_offset, array_items_offset, item_size, item_type = arraytoken
+        gv_ptr_var = self._as_var(gv_ptr)
+
+        gv_p = Var(gv_ptr_var.type)
+        self.asm.append(' %s=getelementptr %s,%s %s' % (
+            gv_p.operand2(), gv_ptr_var.operand(), i32, array_length_offset))
+
+        gv_p2 = self._cast_to(gv_p, pi32)
+
+        gv_result = Var(i32)
+        self.asm.append(' %s=load %s' % (
+            gv_result.operand2(), gv_p2.operand()))
+
         return gv_result
 
     def _as_var(self, gv):
@@ -782,34 +793,56 @@
                 gv_var.operand2(), inttoptr, i32, gv.operand2(), gv_var.type))
             return gv_var
         return gv
-        
+ 
+    def genop_getarrayitem(self, arraytoken, gv_ptr, gv_index):
+        array_length_offset, array_items_offset, item_size, item_type = arraytoken
+        log('%s Builder.genop_getarrayitem %s,%s[%s]' % (
+            self.block.label, arraytoken, gv_ptr.operand(), gv_index.operand()))
+
+        gv_ptr_var = self._as_var(gv_ptr)
+
+        gv_p = Var(gv_ptr_var.type)
+        self.asm.append(' %s=getelementptr %s,%s %s' % (
+            gv_p.operand2(), gv_ptr_var.operand(), i32, array_items_offset))
+
+        gv_p2 = self._cast_to(gv_p, item_type + '*')
+
+        gv_p3 = Var(gv_p2.type)
+        self.asm.append(' %s=getelementptr %s,%s' % (
+            gv_p3.operand2(), gv_p2.operand(), gv_index.operand()))
+
+        gv_result = Var(item_type)
+        self.asm.append(' %s=load %s' % (
+            gv_result.operand2(), gv_p3.operand()))
+
+        return gv_result
+
     def genop_setarrayitem(self, arraytoken, gv_ptr, gv_index, gv_value):
-        array_length_offset, array_items_offset, itemsize = arraytoken
-        log('%s Builder.genop_setarrayitem %s,%s,%s,%s' % (
+        array_length_offset, array_items_offset, item_size, item_type = arraytoken
+        log('%s Builder.genop_setarrayitem %s,%s[%s]=%s' % (
             self.block.label, arraytoken, gv_ptr.operand(), gv_index.operand(), gv_value.operand()))
 
-        try:
-            offset = array_items_offset / itemsize
-        except TypeError:
-            offset = 4 #XXX (get inspired by ppc backend)
-        gv_i = Var(gv_index.type)
-        self.asm.append(' %s=add %s,%d ;;;;' % (
-            gv_i.operand2(), gv_index.operand(), offset)) #/itemsize correct?
-
         gv_ptr_var = self._as_var(gv_ptr)
+
         gv_p = Var(gv_ptr_var.type)
+        self.asm.append(' %s=getelementptr %s,%s %s' % (
+            gv_p.operand2(), gv_ptr_var.operand(), i32, array_items_offset))
+
+        gv_p2 = self._cast_to(gv_p, item_type + '*')
+
+        gv_p3 = Var(gv_p2.type)
         self.asm.append(' %s=getelementptr %s,%s' % (
-            gv_p.operand2(), gv_ptr_var.operand(), gv_i.operand()))
+            gv_p3.operand2(), gv_p2.operand(), gv_index.operand()))
+
         self.asm.append(' store %s,%s' % (
-            gv_value.operand(), gv_p.operand()))
+            gv_value.operand(), gv_p3.operand()))
 
     def genop_malloc_fixedsize(self, size):
         log('%s Builder.genop_malloc_fixedsize %s' % (
             self.block.label, str(size)))
-        t = i8 + '*'    #XXX or opaque* ?
-        gv_gc_malloc_fnaddr = Var('%s (%s)*' % (t, i32))
-        gv_result = Var(t)
-        #XXX or use addGlobalFunctionMapping in libllvmjit.restart()
+        gv_gc_malloc_fnaddr = Var('%s (%s)*' % (pi8, i32))
+        gv_result = Var(pi8)
+        #or use addGlobalFunctionMapping in libllvmjit.restart()
         self.asm.append(' %s=%s %s %d to %s ;gc_malloc_fnaddr' % (
             gv_gc_malloc_fnaddr.operand2(), inttoptr, i32,
             gc_malloc_fnaddr(), gv_gc_malloc_fnaddr.type))
@@ -817,19 +850,44 @@
             gv_result.operand2(), gv_gc_malloc_fnaddr.operand(), i32, size))
         return gv_result
 
+    def _itemaddr(self, arraytoken, gv_index):
+        length_offset, items_offset, item_size, item_type = arraytoken
+
+        gv_size2 = Var(i32) #i386 uses self.itemaddr here
+        self.asm.append(' %s=mul %s,%d' % (
+            gv_size2.operand2(), gv_index.operand(), item_size))
+
+        gv_size3 = Var(i32)
+        self.asm.append(' %s=add %s,%d' % (
+            gv_size3.operand2(), gv_size2.operand(), items_offset))
+
+        return gv_size3
+
     def genop_malloc_varsize(self, varsizealloctoken, gv_size):
         log('%s Builder.genop_malloc_varsize %s,%s' % (
             self.block.label, varsizealloctoken, gv_size.operand()))
-        t = i8 + '*'    #XXX or opaque* ?
-        gv_gc_malloc_fnaddr = Var('%s (%s)*' % (t, i32))
-        gv_result = Var(t)
-        #XXX or use addGlobalFunctionMapping in libllvmjit.restart()
+
+        length_offset, items_offset, item_size, item_type = varsizealloctoken
+
+        gv_gc_malloc_fnaddr = Var('%s (%s)*' % (pi8, i32))
+        #or use addGlobalFunctionMapping in libllvmjit.restart()
         self.asm.append(' %s=%s %s %d to %s ;gc_malloc_fnaddr (varsize)' % (
             gv_gc_malloc_fnaddr.operand2(), inttoptr, i32,
             gc_malloc_fnaddr(), gv_gc_malloc_fnaddr.type))
+
+        op_size = self._itemaddr(varsizealloctoken, gv_size)
+
+        gv_result = Var(pi8)
         self.asm.append(' %s=call %s(%s)' % (
-            gv_result.operand2(), gv_gc_malloc_fnaddr.operand(), gv_size.operand()))
-        #XXX TODO set length field
+            gv_result.operand2(), gv_gc_malloc_fnaddr.operand(), op_size.operand()))
+
+        gv_p = Var(gv_result.type)
+        self.asm.append(' %s=getelementptr %s,%s %s' % (
+            gv_p.operand2(), gv_result.operand(), i32, length_offset))
+
+        gv_p2 = self._cast_to(gv_p, pi32) #warning: length field hardcoded as int here
+        self.asm.append(' store %s, %s' % (gv_size.operand(), gv_p2.operand()))
+
         return gv_result
 
     def _funcsig_type(self, args_gv, restype):
@@ -839,15 +897,15 @@
         log('%s Builder.genop_call %s,%s,%s' % (
             self.block.label, sigtoken, gv_fnptr, [v.operand() for v in args_gv]))
         argtypes, restype = sigtoken
-        gv_returnvar = Var(restype)
         if isinstance(gv_fnptr, AddrConst):
-            gv_fn = Var(self._funcsig_type(args_gv, restype))
-            self.asm.append(' %s=%s %s to %s' % (
-                gv_fnptr.operand2(), bitcast, gv_fnptr.operand(), gv_fn.type))
+            gv_fn = Var(self._funcsig_type(args_gv, restype) + '*')
+            self.asm.append(' %s=%s %s %s to %s' % (
+                gv_fn.operand2(), bitcast, i32, gv_fnptr.operand2(), gv_fn.type))
             funcsig = gv_fn.operand()
         else:
             #XXX we probably need to call an address directly if we can't resolve the funcsig
             funcsig = self.rgenop.funcsig[gv_fnptr.value]
+        gv_returnvar = Var(restype)
         self.asm.append(' %s=call %s(%s)' % (
                         gv_returnvar.operand2(),
                         funcsig,
@@ -862,6 +920,7 @@
 
     def finish_and_goto(self, outputargs_gv, target):
         # 'target' is a label, which for the llvm backend is a Block
+        log('%s Builder.finish_and_goto' % self.block.label)
         gv = [v.operand() for v in outputargs_gv]
         log('%s Builder.finish_and_goto %s,%s' % (
             self.block.label, gv, target.label))
@@ -974,18 +1033,16 @@
     def kindToken(T):
         # turn the type T into the llvm approximation that we'll use here
         # XXX incomplete
-        if isinstance(T, lltype.Ptr):
-            return i32 + '*'    #or opaque* ?
-        elif T is llmemory.Address:
-            return i32 + '*'    #or apaque* ?
-        if T is lltype.Bool:
-            return 'bool'
+        if isinstance(T, lltype.Ptr) or T is llmemory.Address:
+            return pi8
+        elif T is lltype.Bool:
+            return i1 
         elif T is lltype.Char:
             return i8
         elif T is lltype.Unsigned:
-            return i32  #'uint'
+            return u32
         elif T is lltype.Float:
-            return 'float'
+            return f64
         else:
             return i32  #Signed/UniChar/Void
 
@@ -994,10 +1051,10 @@
     def fieldToken(T, name):
         FIELD = getattr(T, name)
         if isinstance(FIELD, lltype.ContainerType):
-            fieldsize = 0      # not useful for getsubstruct
+            fieldtype = pi8 # not useful for getsubstruct
         else:
-            fieldsize = llmemory.sizeof(FIELD)
-        return (llmemory.offsetof(T, name), fieldsize)
+            fieldtype = RLLVMGenOp.kindToken(FIELD)
+        return (llmemory.offsetof(T, name), fieldtype)
 
     @staticmethod
     @specialize.memo()
@@ -1015,19 +1072,20 @@
             arrayfield = T._arrayfld
             ARRAYFIELD = getattr(T, arrayfield)
             arraytoken = RLLVMGenOp.arrayToken(ARRAYFIELD)
-            length_offset, items_offset, item_size = arraytoken
+            length_offset, items_offset, item_size, item_type = arraytoken
             arrayfield_offset = llmemory.offsetof(T, arrayfield)
             return (arrayfield_offset+length_offset,
                     arrayfield_offset+items_offset,
-                    item_size)
+                    item_size,
+                    item_type)
 
     @staticmethod
     @specialize.memo()
     def arrayToken(A):
-        #XXX TODO
         return (llmemory.ArrayLengthOffset(A),
                 llmemory.ArrayItemsOffset(A),
-                llmemory.ItemOffset(A.OF))
+                llmemory.ItemOffset(A.OF),
+                RLLVMGenOp.kindToken(A.OF))
 
     @staticmethod
     @specialize.memo()

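For concreteness, the rewritten field accessors now address a field through an
i8* base plus a byte offset and a bitcast to the field's own type, instead of
dividing the offset by a field size.  A sketch of the three lines that
genop_getfield would append to self.asm for a Signed field at byte offset 4
(llvm >= 2.0 spellings, offsets already folded to plain integers as the
RGenOpPacked helper in test_operation.py does; the %vNN numbers are arbitrary):

    # Illustrative only: what genop_getfield((4, 'i32'), gv_ptr) appends,
    # assuming gv_ptr is an i8* variable named %v10.
    expected_asm = [
        ' %v11=getelementptr i8* %v10,i32 4',  # address of byte offset 4
        ' %v12=bitcast i8* %v11 to i32*',      # retype as pointer to the field
        ' %v13=load i32* %v12',                # load the field value
    ]
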
Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_exception.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_exception.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_exception.py	Sat Jan 13 12:09:25 2007
@@ -2,7 +2,6 @@
 from pypy.jit.timeshifter.test import test_exception
 from pypy.jit.codegen.llvm.test.test_genc_ts import LLVMTimeshiftingTestMixin
 
-py.test.skip("WIP")
 
 class TestException(LLVMTimeshiftingTestMixin,
                     test_exception.TestException):

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_ts.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_ts.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_ts.py	Sat Jan 13 12:09:25 2007
@@ -5,10 +5,6 @@
 from pypy.jit.codegen.llvm.llvmjit import llvm_version, MINIMAL_VERSION
 
 
-skip_passing = False
-skip_failing = True
-
-
 class LLVMTimeshiftingTestMixin(I386TimeshiftingTestMixin):
     RGenOp = RLLVMGenOp
 
@@ -29,61 +25,7 @@
     if llvm_version() < 2.0:
         test_loop_merging = skip_too_minimal #segfault
         test_two_loops_merging = skip_too_minimal #segfault
-
-    if skip_passing:
-        test_very_simple = skip
-        test_convert_const_to_redbox = skip
-        test_simple_opt_const_propagation2 = skip
-        test_simple_opt_const_propagation1 = skip
-        test_loop_folding = skip
-        test_loop_merging = skip
-        test_two_loops_merging = skip
-        test_convert_greenvar_to_redvar = skip
-        test_green_across_split = skip
-        test_merge_const_before_return = skip
-        test_merge_3_redconsts_before_return = skip
-        test_arith_plus_minus = skip
-        test_plus_minus_all_inlined = skip
-        test_call_simple = skip
-        test_call_2 = skip
-        test_call_3 = skip
-        test_call_4 = skip
-        test_void_call = skip
-        test_green_call = skip
-        test_split_on_green_return = skip
-        test_recursive_call = skip
-        test_simple_indirect_call = skip
-        test_normalize_indirect_call = skip
-        test_normalize_indirect_call_more = skip
-        test_green_red_mismatch_in_call = skip
-        test_red_call_ignored_result = skip
-        test_simple_struct = skip
-        test_simple_array = skip
-        test_setarrayitem  = skip
-        test_degenerated_before_return = skip
-        test_degenerated_before_return_2 = skip
-        test_degenerated_via_substructure = skip
-        test_red_virtual_container = skip
-        test_red_propagate = skip
-        test_red_subcontainer = skip
-        test_red_subcontainer_cast = skip
-        test_merge_structures = skip
-        test_simple_meth = skip
-        test_simple_red_meth = skip
-
-    #failing...
-    if skip_failing:
-        test_degenerated_at_return = skip
-        test_degenerate_with_voids = skip
-        test_red_array = skip
-        test_red_struct_array = skip
-        test_red_varsized_struct = skip
-        test_array_of_voids = skip
-        test_green_with_side_effects = skip
-        test_recursive_with_red_termination_condition = skip
-        test_compile_time_const_tuple = skip
-        test_residual_red_call = skip
+        test_green_char_at_merge = skip #segfault
         test_residual_red_call_with_exc = skip
-
-    test_green_char_at_merge = skip #->SomeObject() (CharRepr @rgenop.py:141 ?)
-
+    else: #needs fixing for >= 2.0
+        test_array_of_voids = skip

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_vlist.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_vlist.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_genc_vlist.py	Sat Jan 13 12:09:25 2007
@@ -3,12 +3,13 @@
 from pypy.jit.codegen.llvm.test.test_genc_ts import LLVMTimeshiftingTestMixin
 
 
-py.test.skip('WIP')
-
 class TestVList(LLVMTimeshiftingTestMixin,
                 test_vlist.TestVList):
 
     # for the individual tests see
     # ====> ../../../timeshifter/test/test_vlist.py
 
-    pass
+    def skip(self):
+        py.test.skip("WIP")
+
+    test_force = skip

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_operation.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_operation.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_operation.py	Sat Jan 13 12:09:25 2007
@@ -1,12 +1,47 @@
 import py
+from pypy.rlib.objectmodel import specialize
+from pypy.rpython.memory.lltypelayout import convert_offset_to_int
 from pypy.jit.codegen.llvm.test.test_llvmjit import skip_unsupported_platform
 from pypy.jit.codegen.i386.test.test_operation import BasicTests
 from pypy.jit.codegen.llvm.rgenop import RLLVMGenOp
 from pypy.jit.codegen.llvm.llvmjit import llvm_version, MINIMAL_VERSION
 
 
+def conv(n):
+    if not isinstance(n, int) and not isinstance(n, str):
+        n = convert_offset_to_int(n)
+    return n
+
+
+class RGenOpPacked(RLLVMGenOp):
+    """Like RLLVMGenOp, but produces concrete offsets in the tokens
+    instead of llmemory.offsets.  These numbers may not agree with
+    your C compiler's.
+    """
+
+    @staticmethod
+    @specialize.memo()
+    def fieldToken(T, name):
+        return tuple(map(conv, RLLVMGenOp.fieldToken(T, name)))
+
+    @staticmethod
+    @specialize.memo()
+    def arrayToken(A):
+        return tuple(map(conv, RLLVMGenOp.arrayToken(A)))
+
+    @staticmethod
+    @specialize.memo()
+    def allocToken(T):
+        return conv(RLLVMGenOp.allocToken(T))
+
+    @staticmethod
+    @specialize.memo()
+    def varsizeAllocToken(A):
+        return tuple(map(conv, RLLVMGenOp.varsizeAllocToken(A)))
+
+
 class LLVMTestBasicMixin(object):
-    RGenOp = RLLVMGenOp
+    RGenOp = RGenOpPacked
 
 
 class TestBasic(LLVMTestBasicMixin,
@@ -23,9 +58,11 @@
             llvm_version(), MINIMAL_VERSION))
 
     if llvm_version() < 2.0:
-        test_float_arithmetic = skip_too_minimal #segfault
         test_unsigned = skip_too_minimal #uint_invert uses incorrect xor constant?
 
+    test_float_arithmetic = skip #XXX llvmjit.execute() returns an int :-(
+    test_float_cast = skip       #XXX llvmjit.execute() returns an int :-(
+
     test_float_pow = skip
     test_unichar_array = skip
     test_char_unichar_fields = skip

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_rgenop.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_rgenop.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/llvm/test/test_rgenop.py	Sat Jan 13 12:09:25 2007
@@ -25,3 +25,5 @@
         test_fact_direct = skip_too_minimal #segfault
 
     test_fact_compile = skip #XXX Blocked block, introduced by this checkin (I don't understand)
+    test_calling_pause_direct = skip #segfault, look into later...
+    test_calling_pause_compile = skip # ditto

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/model.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/model.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/model.py	Sat Jan 13 12:09:25 2007
@@ -171,6 +171,15 @@
     def start_writing(self):
         '''Start a builder returned by jump_if_xxx(), or resumes a paused
         builder.'''
+
+
+    # read frame var support
+    
+    def get_frame_base(self):
+        pass
+
+    def get_frame_info(self):
+        pass
         
 class GenLabel(object):
     '''A "smart" label.  Represents an address of the start of a basic

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/instruction.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/instruction.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/instruction.py	Sat Jan 13 12:09:25 2007
@@ -6,11 +6,24 @@
 rFP = r2 # the ABI doesn't specify a frame pointer.  however, we want one
 
 class AllocationSlot(object):
-    pass
+    def __init__(self):
+        # The field alloc points to a singleton used by the register
+        # allocator to detect conflicts.  No two AllocationSlot
+        # instances with the same value in self.alloc can be used at
+        # once.
+        self.alloc = self
+        
+    def make_loc(self):
+        """ When we assign a variable to one of these registers, we
+        call make_loc() to get the actual location instance; that
+        instance will have its alloc field set to self.  For
+        everything but condition registers, this is self."""
+        return self
 
 class _StackSlot(AllocationSlot):
     is_register = False
     def __init__(self, offset):
+        AllocationSlot.__init__(self)
         self.offset = offset
     def __repr__(self):
         return "stack@%s"%(self.offset,)
@@ -34,10 +47,13 @@
 
 class Register(AllocationSlot):
     is_register = True
+    def __init__(self):
+        AllocationSlot.__init__(self)        
 
 class GPR(Register):
     regclass = GP_REGISTER
     def __init__(self, number):
+        Register.__init__(self)
         self.number = number
     def __repr__(self):
         return 'r' + str(self.number)
@@ -46,19 +62,46 @@
 class FPR(Register):
     regclass = FP_REGISTER
     def __init__(self, number):
+        Register.__init__(self)
         self.number = number
 
 fprs = map(GPR, range(32))
 
-class CRF(Register):
+class BaseCRF(Register):
+    """ These represent condition registers; however, we never actually
+    use these as the location of something in the register allocator.
+    Instead, we place it in an instance of CRF which indicates which
+    bits are required to extract the value.  Note that CRF().alloc will
+    always be an instance of this. """
     regclass = CR_FIELD
     def __init__(self, number):
         self.number = number
-    def move_to_gpr(self, allocator, gpr):
-        bit, negated = allocator.crfinfo[self.number]
-        return _CRF2GPR(gpr, self.number*4 + bit, negated)
+        self.alloc = self
+    def make_loc(self):
+        return CRF(self)
+
+crfs = map(BaseCRF, range(8))
 
-crfs = map(CRF, range(8))
+class CRF(Register):
+    regclass = CR_FIELD
+    def __init__(self, crf):
+        Register.__init__(self)
+        self.alloc = crf
+        self.number = crf.number
+        self.info = (-1,-1) # (bit, negated) 
+    def set_info(self, info):
+        assert len(info) == 2
+        self.info = info
+    def make_loc(self):
+        # should never call this on a CRF, only a BaseCRF
+        raise NotImplementedError
+    def move_to_gpr(self, allocator, gpr):
+        bit, negated = self.info
+        return _CRF2GPR(gpr, self.alloc.number*4 + bit, negated)
+    def move_from_gpr(self, allocator, gpr):
+        # cmp2info['ne']
+        self.set_info((2, 1))
+        return _GPR2CRF(self, gpr)
 
 class CTR(Register):
     regclass = CT_REGISTER
@@ -169,6 +212,24 @@
                      self.result_reg.number,
                      self.imm.value)
 
+class MoveCRB2GPR(Insn):
+    def __init__(self, result, gv_condition):
+        Insn.__init__(self)
+        self.result = result
+        self.result_regclass = GP_REGISTER
+        self.reg_args = [gv_condition]
+        self.reg_arg_regclasses = [CR_FIELD]
+    def allocate(self, allocator):
+        self.targetreg = allocator.loc_of(self.result)
+        self.crf = allocator.loc_of(self.reg_args[0])
+    def emit(self, asm):
+        assert isinstance(self.crf, CRF)
+        bit, negated = self.crf.info
+        asm.mfcr(self.targetreg.number)
+        asm.extrwi(self.targetreg.number, self.targetreg.number, 1, self.crf.number*4+bit)
+        if negated:
+            asm.xori(self.targetreg.number, self.targetreg.number, 1)
+
 class Insn_None__GPR_GPR_IMM(Insn):
     def __init__(self, methptr, args):
         Insn.__init__(self)
@@ -214,47 +275,56 @@
                      self.reg3.number)
 
 class CMPInsn(Insn):
-    info = (0,0) # please the annotator for tests that don't use CMPW/CMPWI
-    pass
-
-class CMPW(CMPInsn):
-    def __init__(self, info, result, args):
+    def __init__(self, info, result):
         Insn.__init__(self)
         self.info = info
-
         self.result = result
-        self.result_regclass = CR_FIELD
 
+    def allocate(self, allocator):
+        self.result_reg = allocator.loc_of(self.result)
+        assert isinstance(self.result_reg, CRF)
+        self.result_reg.set_info(self.info)
+
+class CMPW(CMPInsn):
+    def __init__(self, info, result, args):
+        CMPInsn.__init__(self, info, result)
+        self.result_regclass = CR_FIELD
         self.reg_args = args
         self.reg_arg_regclasses = [GP_REGISTER, GP_REGISTER]
 
     def allocate(self, allocator):
-        self.result_reg = allocator.loc_of(self.result)
+        CMPInsn.allocate(self, allocator)
         self.arg_reg1 = allocator.loc_of(self.reg_args[0])
         self.arg_reg2 = allocator.loc_of(self.reg_args[1])
 
     def emit(self, asm):
         asm.cmpw(self.result_reg.number, self.arg_reg1.number, self.arg_reg2.number)
 
+class CMPWL(CMPW):
+    def emit(self, asm):
+        asm.cmpwl(self.result_reg.number, self.arg_reg1.number, self.arg_reg2.number)
+
 class CMPWI(CMPInsn):
     def __init__(self, info, result, args):
-        Insn.__init__(self)
-        self.info = info
+        CMPInsn.__init__(self, info, result)
         self.imm = args[1]
-
-        self.result = result
         self.result_regclass = CR_FIELD
-
         self.reg_args = [args[0]]
         self.reg_arg_regclasses = [GP_REGISTER]
 
     def allocate(self, allocator):
-        self.result_reg = allocator.loc_of(self.result)
+        CMPInsn.allocate(self, allocator)
         self.arg_reg = allocator.loc_of(self.reg_args[0])
 
     def emit(self, asm):
+        #print "CMPWI", asm.mc.tell()
         asm.cmpwi(self.result_reg.number, self.arg_reg.number, self.imm.value)
 
+class CMPWLI(CMPW):
+    def emit(self, asm):
+        asm.cmpwli(self.result_reg.number, self.arg_reg.number, self.imm.value)
+
+
 ## class MTCTR(Insn):
 ##     def __init__(self, result, args):
 ##         Insn.__init__(self)
@@ -285,12 +355,12 @@
         self.targetbuilder = targetbuilder
     def allocate(self, allocator):
         self.crf = allocator.loc_of(self.reg_args[0])
-        self.bit, self.negated = allocator.crfinfo[self.crf.number]
 
+        assert self.targetbuilder.initial_var2loc is None
         self.targetbuilder.initial_var2loc = {}
         for gv_arg in self.jump_args_gv:
             self.targetbuilder.initial_var2loc[gv_arg] = allocator.var2loc[gv_arg]
-        self.targetbuilder.initial_spill_offset = allocator.spill_offset
+        allocator.builders_to_tell_spill_offset_to.append(self.targetbuilder)
     def emit(self, asm):
         if self.targetbuilder.start:
             asm.load_word(rSCRATCH, self.targetbuilder.start)
@@ -298,11 +368,12 @@
             self.targetbuilder.patch_start_here = asm.mc.tell()
             asm.load_word(rSCRATCH, 0)
         asm.mtctr(rSCRATCH)
-        if self.negated ^ self.jump_if_true:
+        bit, negated = self.crf.info
+        if negated ^ self.jump_if_true:
             BO = 12 # jump if relevant bit is set in the CR
         else:
             BO = 4  # jump if relevant bit is NOT set in the CR
-        asm.bcctr(BO, self.crf.number*4 + self.bit)
+        asm.bcctr(BO, self.crf.number*4 + bit)
 
 class SpillCalleeSaves(Insn):
     def __init__(self):
@@ -384,6 +455,20 @@
         self.result_regclass =  NO_REGISTER
         self.result = None
 
+class Move(AllocTimeInsn):
+    def __init__(self, dest, src):
+        self.dest = dest
+        self.src = src
+    def emit(self, asm):
+        asm.mr(self.dest.number, self.src.number)
+
+class Load(AllocTimeInsn):
+    def __init__(self, dest, const):
+        self.dest = dest
+        self.const = const
+    def emit(self, asm):
+        self.const.load_now(asm, self.dest)
+
 class Unspill(AllocTimeInsn):
     """ A special instruction inserted by our register "allocator."  It
     indicates that we need to load a value from the stack into a register
@@ -432,6 +517,14 @@
         if self.negated:
             asm.xori(self.targetreg, self.targetreg, 1)
 
+class _GPR2CRF(AllocTimeInsn):
+    def __init__(self, targetreg, fromreg):
+        AllocTimeInsn.__init__(self)
+        self.targetreg = targetreg
+        self.fromreg = fromreg
+    def emit(self, asm):
+        asm.cmpwi(self.targetreg.number, self.fromreg, 0)
+
 class _GPR2CTR(AllocTimeInsn):
     def __init__(self, fromreg):
         AllocTimeInsn.__init__(self)

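The net effect of the CRF changes: the (bit, negated) pair that used to sit in
the allocator's crfinfo side table now travels with the location itself.  A
comparison result is placed in a fresh CRF wrapper whose .alloc field points at
the shared BaseCRF singleton, and that shared singleton is what the allocator
uses to detect two variables competing for the same physical condition-register
field.  A minimal sketch, not part of this checkin (the (2, 1) pair is the
cmp2info['ne'] entry mentioned in move_from_gpr above):

    from pypy.jit.codegen.ppc.instruction import crfs

    base = crfs[3]              # shared BaseCRF singleton for CR field 3
    loc = base.make_loc()       # fresh CRF wrapper; loc.alloc is base
    loc.set_info((2, 1))        # 'ne': test bit 2 of the field, negated
    # two live variables may never occupy locations with the same .alloc
    insn = loc.move_to_gpr(None, 5)   # _CRF2GPR: extract that bit into r5
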
Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/regalloc.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/regalloc.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/regalloc.py	Sat Jan 13 12:09:25 2007
@@ -33,13 +33,11 @@
         # go through the initial mapping and initialize the data structures
         for var, loc in initial_mapping.iteritems():
             self.set(var, loc)
-            if loc.is_register and loc in self.freeregs[loc.regclass]:
-                self.freeregs[loc.regclass].remove(loc)
+            if loc.is_register and loc.alloc in self.freeregs[loc.regclass]:
+                self.freeregs[loc.regclass].remove(loc.alloc)
                 self.lru.append(var)
 
-        # crfinfo is a bit of a hack used to transmit which bit a compare
-        # instruction set to the branch instruction
-        self.crfinfo = [(0, 0)] * 8
+        self.builders_to_tell_spill_offset_to = []
 
     def set(self, var, loc):
         assert var not in self.var2loc
@@ -77,7 +75,7 @@
         freeregs = self.freeregs[regclass]
 
         if freeregs:
-            reg = freeregs.pop()
+            reg = freeregs.pop().make_loc()
             self.set(newarg, reg)
             if DEBUG_PRINT:
                 print "allocate_reg: Putting %r into fresh register %r" % (newarg, reg)
@@ -103,7 +101,7 @@
             print "allocate_reg: Spilled %r to %r." % (argtospill, self.loc_of(argtospill))
 
         # update data structures to put newarg into the register
-        self.set(newarg, reg)
+        self.set(newarg, reg.alloc.make_loc())
         if DEBUG_PRINT:
             print "allocate_reg: Put %r in stolen reg %r." % (newarg, reg)
         return reg
@@ -172,7 +170,7 @@
                     # it's in the wrong kind of register
                     # (this code is excessively confusing)
                     self.forget(arg, argloc)
-                    self.freeregs[argloc.regclass].append(argloc)
+                    self.freeregs[argloc.regclass].append(argloc.alloc)
                     if argloc.regclass != GP_REGISTER:
                         if argcls == GP_REGISTER:
                             gpr = self._allocate_reg(GP_REGISTER, arg).number
@@ -197,8 +195,6 @@
                 if DEBUG_PRINT:
                     print "Allocating register for result %r..." % (insn.result,)
                 resultreg = self._allocate_reg(insn.result_regclass, insn.result)
-                if isinstance(insn, CMPInsn):
-                    self.crfinfo[resultreg.number] = insn.info
             insn.allocate(self)
             self.insns.append(insn)
         return self.insns

Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/rgenop.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/rgenop.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/rgenop.py	Sat Jan 13 12:09:25 2007
@@ -1,6 +1,7 @@
 from pypy.jit.codegen.model import AbstractRGenOp, GenLabel, GenBuilder
 from pypy.jit.codegen.model import GenVar, GenConst, CodeGenSwitch
 from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.lltypesystem import lloperation
 from pypy.rlib.objectmodel import specialize, we_are_translated
 from pypy.jit.codegen.ppc.conftest import option
 from ctypes import POINTER, cast, c_void_p, c_int
@@ -20,12 +21,15 @@
     def emit(self, value):
         self.mc.write(value)
 
+_PPC = RPPCAssembler
+
 NSAVEDREGISTERS = 19
 
 DEBUG_TRAP = option.trap
 
 _var_index = [0]
 class Var(GenVar):
+    conditional = False
     def __init__(self):
         self.__magic_index = _var_index[0]
         _var_index[0] += 1
@@ -34,11 +38,16 @@
     def fits_in_immediate(self):
         return False
 
+class ConditionVar(Var):
+    """ Used for vars that originated as the result of a conditional
+    operation, like a == b """
+    conditional = True
+
 class IntConst(GenConst):
 
     def __init__(self, value):
         self.value = value
-
+        
     @specialize.arg(1)
     def revealconst(self, T):
         if isinstance(T, lltype.Ptr):
@@ -50,7 +59,7 @@
 
     def load(self, insns, var):
         insns.append(
-            insn.Insn_GPR__IMM(RPPCAssembler.load_word,
+            insn.Insn_GPR__IMM(_PPC.load_word,
                                var, [self]))
 
     def load_now(self, asm, loc):
@@ -101,28 +110,39 @@
 
 class JumpPatchupGenerator(object):
 
-    def __init__(self, asm, min_offset):
-        self.asm = asm
+    def __init__(self, insns, min_offset, allocator):
+        self.insns = insns
         self.min_offset = min_offset
+        self.allocator = allocator
 
     def emit_move(self, tarloc, srcloc):
         if tarloc == srcloc: return
+        emit = self.insns.append
         if tarloc.is_register and srcloc.is_register:
-            self.asm.mr(tarloc.number, srcloc.number)
+            assert isinstance(tarloc, insn.GPR)
+            if isinstance(srcloc, insn.GPR):
+                emit(insn.Move(tarloc, srcloc))
+            else:
+                assert isinstance(srcloc, insn.CRF)
+                emit(srcloc.move_to_gpr(self.allocator, tarloc.number))
         elif tarloc.is_register and not srcloc.is_register:
-            self.asm.lwz(tarloc.number, rFP, srcloc.offset)
+            emit(insn.Unspill(None, tarloc, srcloc))
+            #self.asm.lwz(tarloc.number, rFP, srcloc.offset)
         elif not tarloc.is_register and srcloc.is_register:
-            self.asm.stw(srcloc.number, rFP, tarloc.offset)
+            emit(insn.Spill(None, srcloc, tarloc))
+            #self.asm.stw(srcloc.number, rFP, tarloc.offset)
         elif not tarloc.is_register and not srcloc.is_register:
-            self.asm.lwz(rSCRATCH, rFP, srcloc.offset)
-            self.asm.stw(rSCRATCH, rFP, tarloc.offset)
+            emit(insn.Unspill(None, insn.gprs[0], srcloc))
+            emit(insn.Spill(None, insn.gprs[0], tarloc))
+            #self.asm.lwz(rSCRATCH, rFP, srcloc.offset)
+            #self.asm.stw(rSCRATCH, rFP, tarloc.offset)
 
     def create_fresh_location(self):
         r = self.min_offset
         self.min_offset -= 4
         return insn.stack_slot(r)
 
-def prepare_for_jump(asm, min_offset, sourcevars, src2loc, target):
+def prepare_for_jump(insns, min_offset, sourcevars, src2loc, target, allocator):
 
     tar2src = {}     # tar var -> src var
     tar2loc = {}
@@ -137,9 +157,9 @@
             tar2loc[tloc] = tloc
             tar2src[tloc] = src
         else:
-            src.load_now(asm, tloc)
+            insns.append(insn.Load(tloc, src))
 
-    gen = JumpPatchupGenerator(asm, min_offset)
+    gen = JumpPatchupGenerator(insns, min_offset, allocator)
     emit_moves(gen, tar2src, tar2loc, src2loc)
     return gen.min_offset
 
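prepare_for_jump now queues Move/Spill/Unspill instructions and leaves the
actual ordering to emit_moves, which is not part of this diff. Roughly, a
parallel-move scheduler like that only has to avoid clobbering a source
that is still needed and to break cycles through a scratch location; a
small stand-alone sketch of that idea (schedule_moves and the 'tmp' slot
are inventions for illustration, not the real emit_moves):

    def schedule_moves(tar2src):
        # tar2src maps target location -> source location; return (src, tar)
        # copies ordered so no value is overwritten before it has been read
        pending = dict((t, s) for t, s in tar2src.items() if t != s)
        moves = []
        while pending:
            ready = [t for t in pending if t not in pending.values()]
            if ready:
                t = ready[0]
                moves.append((pending.pop(t), t))
            else:
                # only cycles remain: park one source in the scratch slot
                t = next(iter(pending))
                s = pending[t]
                moves.append((s, 'tmp'))
                for t2 in pending:
                    if pending[t2] == s:
                        pending[t2] = 'tmp'
        return moves

    # check it on a swap by simulating the copies
    env = {'r3': 'A', 'r4': 'B', 'tmp': None}
    for src, tar in schedule_moves({'r3': 'r4', 'r4': 'r3'}):
        env[tar] = env[src]
    assert env['r3'] == 'B' and env['r4'] == 'A'
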
@@ -205,13 +225,19 @@
 
     @specialize.arg(1)
     def genop1(self, opname, gv_arg):
+        #print opname, 'on', id(self)
         genmethod = getattr(self, 'op_' + opname)
-        return genmethod(gv_arg)
+        r = genmethod(gv_arg)
+        #print '->', id(r)
+        return r
 
     @specialize.arg(1)
     def genop2(self, opname, gv_arg1, gv_arg2):
+        #print opname, 'on', id(self)
         genmethod = getattr(self, 'op_' + opname)
-        return genmethod(gv_arg1, gv_arg2)
+        r = genmethod(gv_arg1, gv_arg2)
+        #print '->', id(r)
+        return r
 
     def genop_call(self, sigtoken, gv_fnptr, args_gv):
         self.insns.append(insn.SpillCalleeSaves())
@@ -225,14 +251,14 @@
     def genop_getfield(self, fieldtoken, gv_ptr):
         gv_result = Var()
         self.insns.append(
-            insn.Insn_GPR__GPR_IMM(RPPCAssembler.lwz,
+            insn.Insn_GPR__GPR_IMM(_PPC.lwz,
                                    gv_result, [gv_ptr, IntConst(fieldtoken)]))
         return gv_result
 
     def genop_setfield(self, fieldtoken, gv_ptr, gv_value):
         gv_result = Var()
         self.insns.append(
-            insn.Insn_None__GPR_GPR_IMM(RPPCAssembler.stw,
+            insn.Insn_None__GPR_GPR_IMM(_PPC.stw,
                                         [gv_value, gv_ptr, IntConst(fieldtoken)]))
         return gv_result
 
@@ -322,8 +348,34 @@
 ##     def genop_debug_pdb(self):    # may take an args_gv later
 
     def enter_next_block(self, kinds, args_gv):
+        #print 'enter_next_block of', id(self)
         vars_gv = [v for v in args_gv if isinstance(v, Var)]
-        var2loc = self.allocate_and_emit(vars_gv).var2loc
+        #print 'initial_var2loc.keys():', [id(v) for v in self.initial_var2loc.keys()]
+        #print 'initial_var2loc.values():', [id(v) for v in self.initial_var2loc.values()]
+        allocator = self.allocate_and_emit(vars_gv)
+        var2loc = allocator.var2loc
+
+        #print '!!!!', args_gv, var2loc
+
+        self.insns = []
+
+        reallocate = False
+        for i in range(len(args_gv)):
+            v = args_gv[i]
+            if isinstance(v, Var) and isinstance(var2loc[v], insn.CRF):
+                reallocate = True
+                nv = Var()
+                self.insns.append(insn.MoveCRB2GPR(nv, v))
+                args_gv[i] = nv
+        self.initial_var2loc = var2loc
+        if reallocate:
+            allocator = self.allocate_and_emit([v for v in args_gv if isinstance(v, Var)])
+            var2loc = allocator.var2loc
+            self.insns = []
+
+        #print 'var2loc.keys():', [id(v) for v in var2loc.keys()]
+        #print 'var2loc.values():', [id(v) for v in var2loc.values()]
+        #print 'args_gv', [id(v) for v in args_gv]
 
         #print "enter_next_block:", args_gv, var2loc
 
@@ -333,6 +385,11 @@
         for gv in args_gv:
             if isinstance(gv, Var):
                 assert gv in var2loc
+##                 if gv not in var2loc:
+##                     lloperation.llop.debug_print(lltype.Void, gv)
+##                     lloperation.llop.debug_print(lltype.Void, var2loc)
+##                     lloperation.llop.debug_print(lltype.Void, args_gv)
+##                     lloperation.llop.debug_pdb(lltype.Void)
                 loc = var2loc[gv]
                 livevar2loc[gv] = loc
                 if not loc.is_register:
@@ -360,15 +417,19 @@
 
         #print livevar2loc
 
-        self.insns = []
         self.initial_var2loc = livevar2loc
+        #print 'final initial_var2loc.keys():', [id(v) for v in self.initial_var2loc.keys()]
+        #print 'final initial_var2loc.values():', [id(v) for v in self.initial_var2loc.values()]
         self.initial_spill_offset = min_stack_offset
         target_addr = self.asm.mc.tell()
-        self.emit_stack_adjustment()
         return Label(target_addr, arg_locations, min_stack_offset)
 
     def jump_if_false(self, gv_condition, args_gv):
-        return self._jump(gv_condition, False, args_gv)
+        #print 'jump_if_false', [id(v) for v in args_gv]
+        #print id(self)
+        t = self._jump(gv_condition, False, args_gv)
+        #print '->', id(t)
+        return t
 
     def jump_if_true(self, gv_condition, args_gv):
         return self._jump(gv_condition, True, args_gv)
@@ -394,11 +455,11 @@
         self._close()
 
     def finish_and_goto(self, outputargs_gv, target):
-        allocator = self.allocate_and_emit(outputargs_gv)
+        allocator = self.allocate(outputargs_gv)
         min_offset = min(allocator.spill_offset, target.min_stack_offset)
-        min_offset = prepare_for_jump(
-            self.asm, min_offset, outputargs_gv, allocator.var2loc, target)
-        self.patch_stack_adjustment(self._stack_size(min_offset))
+        allocator.spill_offset = prepare_for_jump(
+            self.insns, min_offset, outputargs_gv, allocator.var2loc, target, allocator)
+        self.emit(allocator)
         self.asm.load_word(rSCRATCH, target.startaddr)
         self.asm.mtctr(rSCRATCH)
         self.asm.bctr()
@@ -424,15 +485,17 @@
         if self.final_jump_addr != 0:
             mc = self.rgenop.open_mc()
             target = mc.tell()
-            self.asm.mc = self.rgenop.ExistingCodeBlock(self.final_jump_addr, self.final_jump_addr+8)
+            self.asm.mc = self.rgenop.ExistingCodeBlock(
+                self.final_jump_addr, self.final_jump_addr+8)
             self.asm.load_word(rSCRATCH, target)
             self.asm.mc = mc
-            self.emit_stack_adjustment()
+            self.final_jump_addr = 0
+            self.closed = False
             return self
         else:
             self._open()
+            self.closed = False
             self.maybe_patch_start_here()
-            self.emit_stack_adjustment()
             return self
 
     def maybe_patch_start_here(self):
@@ -444,14 +507,16 @@
             self.patch_start_here = 0
 
     def pause_writing(self, args_gv):
-        self.allocate_and_emit(args_gv)
+        self.initial_var2loc = self.allocate_and_emit(args_gv).var2loc
+        self.insns = []
         self.final_jump_addr = self.asm.mc.tell()
+        self.closed = True
         self.asm.nop()
         self.asm.nop()
         self.asm.mtctr(rSCRATCH)
         self.asm.bctr()
         self._close()
-        return self 
+        return self
 
     # ----------------------------------------------------------------
     # ppc-specific interface:
@@ -506,8 +571,6 @@
         # save stack pointer into linkage area and set stack pointer for us.
         self.asm.stwu(rSP, rSP, -minspace)
 
-        self.emit_stack_adjustment()
-
         return inputargs
 
     def _var_offset(self, v):
@@ -535,17 +598,28 @@
         self.asm.mc = None
 
     def allocate_and_emit(self, live_vars_gv):
+        allocator = self.allocate(live_vars_gv)
+        return self.emit(allocator)
+
+    def allocate(self, live_vars_gv):
         assert self.initial_var2loc is not None
         allocator = RegisterAllocation(
-            self.rgenop.freeregs, self.initial_var2loc, self.initial_spill_offset)
+            self.rgenop.freeregs,
+            self.initial_var2loc,
+            self.initial_spill_offset)
         self.insns = allocator.allocate_for_insns(self.insns)
-        #if self.insns:
-        self.patch_stack_adjustment(self._stack_size(allocator.spill_offset))
+        return allocator
+
+    def emit(self, allocator):
+        if allocator.spill_offset < self.initial_spill_offset:
+            self.emit_stack_adjustment(self._stack_size(allocator.spill_offset))
         for insn in self.insns:
             insn.emit(self.asm)
+        for builder in allocator.builders_to_tell_spill_offset_to:
+            builder.initial_spill_offset = allocator.spill_offset
         return allocator
 
-    def emit_stack_adjustment(self):
+    def emit_stack_adjustment(self, newsize):
         # the ABI requires that at all times that r1 is valid, in the
         # sense that it must point to the bottom of the stack and that
         # executing SP <- *(SP) repeatedly walks the stack.
@@ -560,7 +634,7 @@
         # crf0 (a very small chance of being a problem)
         self.stack_adj_addr = self.asm.mc.tell()
         #print "emit_stack_adjustment at: ", self.stack_adj_addr
-        self.asm.addi(rSCRATCH, rFP, 0) # this is the immediate that later gets patched
+        self.asm.addi(rSCRATCH, rFP, -newsize)
         self.asm.subx(rSCRATCH, rSCRATCH, rSP) # rSCRATCH should now be <= 0
         self.asm.beq(3) # if rSCRATCH == 0, there is no actual adjustment, so
                         # don't end up with the situation where *(rSP) == rSP
@@ -568,53 +642,54 @@
         self.asm.stw(rFP, rSP, 0)
         # branch to "here"
 
-    def patch_stack_adjustment(self, newsize):
-        if self.stack_adj_addr == 0:
-            return
-        #print "patch_stack_adjustment at:", self.stack_adj_addr, newsize
-        # we build an addi instruction by hand here
-        mc = self.asm.mc
-        self.asm.mc = self.rgenop.ExistingCodeBlock(self.stack_adj_addr, self.stack_adj_addr+4)
-        self.asm.addi(rSCRATCH, rFP, -newsize)
-        self.asm.mc = mc
+##     def patch_stack_adjustment(self, newsize):
+##         if self.stack_adj_addr == 0:
+##             return
+##         #print "patch_stack_adjustment at:", self.stack_adj_addr, newsize
+##         # we build an addi instruction by hand here
+##         mc = self.asm.mc
+##         self.asm.mc = self.rgenop.ExistingCodeBlock(self.stack_adj_addr, self.stack_adj_addr+4)
+##         self.asm.addi(rSCRATCH, rFP, -newsize)
+##         self.asm.mc = mc
 
-    def op_int_mul(self, gv_x, gv_y):
+    def _arg_op(self, gv_arg, opcode):
         gv_result = Var()
-        self.insns.append(
-            insn.Insn_GPR__GPR_GPR(RPPCAssembler.mullw,
-                                   gv_result, [gv_x, gv_y]))
+        self.insns.append(insn.Insn_GPR__GPR(opcode, gv_result, gv_arg))
         return gv_result
 
-    def op_int_add(self, gv_x, gv_y):
+    def _arg_arg_op_with_imm(self, gv_x, gv_y, commutative, opcode, opcodei):
         gv_result = Var()
         if gv_y.fits_in_immediate():
             self.insns.append(
-                insn.Insn_GPR__GPR_IMM(RPPCAssembler.addi,
+                insn.Insn_GPR__GPR_IMM(opcodei,
                                        gv_result, [gv_x, gv_y]))
-        elif gv_x.fits_in_immediate():
+        elif gv_x.fits_in_immediate() and commutative:
             self.insns.append(
-                insn.Insn_GPR__GPR_IMM(RPPCAssembler.addi,
+                insn.Insn_GPR__GPR_IMM(opcodei,
                                        gv_result, [gv_y, gv_x]))
         else:
             self.insns.append(
-                insn.Insn_GPR__GPR_GPR(RPPCAssembler.add,
+                insn.Insn_GPR__GPR_GPR(opcode,
                                        gv_result, [gv_x, gv_y]))
         return gv_result
 
-    def op_int_sub(self, gv_x, gv_y):
+    def _arg_arg_op(self, gv_x, gv_y, opcode):
         gv_result = Var()
         self.insns.append(
-            insn.Insn_GPR__GPR_GPR(RPPCAssembler.sub,
+            insn.Insn_GPR__GPR_GPR(opcode,
                                    gv_result, [gv_x, gv_y]))
         return gv_result
 
-    def op_int_floordiv(self, gv_x, gv_y):
+    def _arg_imm_op(self, gv_x, gv_imm, opcode):
         gv_result = Var()
         self.insns.append(
-            insn.Insn_GPR__GPR_GPR(RPPCAssembler.divw,
-                                   gv_result, [gv_x, gv_y]))
+            insn.Insn_GPR__GPR_IMM(opcode,
+                                   gv_result, [gv_x, gv_imm]))
         return gv_result
 
+    def _identity(self, gv_arg):
+        return gv_arg
+
     cmp2info = {
         #      bit-in-crf  negated
         'gt': (    1,         0   ),
@@ -624,18 +699,20 @@
         'eq': (    2,         0   ),
         'ne': (    2,         1   ),
         }
+
     cmp2info_flipped = {
         #      bit-in-crf  negated
-        'gt': (    1,         1   ),
-        'lt': (    0,         1   ),
-        'le': (    1,         0   ),
-        'ge': (    0,         0   ),
+        'gt': (    0,         0   ),
+        'lt': (    1,         0   ),
+        'le': (    0,         1   ),
+        'ge': (    1,         1   ),
         'eq': (    2,         0   ),
         'ne': (    2,         1   ),
         }
 
     def _compare(self, op, gv_x, gv_y):
-        gv_result = Var()
+        #print "op", op
+        gv_result = ConditionVar()
         if gv_y.fits_in_immediate():
             self.insns.append(
                 insn.CMPWI(self.cmp2info[op], gv_result, [gv_x, gv_y]))
@@ -647,15 +724,67 @@
                 insn.CMPW(self.cmp2info[op], gv_result, [gv_x, gv_y]))
         return gv_result
 
-    def op_int_gt(self, gv_x, gv_y):
-        return self._compare('gt', gv_x, gv_y)
+    def _compare_u(self, op, gv_x, gv_y):
+        gv_result = ConditionVar()
+        if gv_y.fits_in_immediate():
+            self.insns.append(
+                insn.CMPWLI(self.cmp2info[op], gv_result, [gv_x, gv_y]))
+        elif gv_x.fits_in_immediate():
+            self.insns.append(
+                insn.CMPWLI(self.cmp2info_flipped[op], gv_result, [gv_y, gv_x]))
+        else:
+            self.insns.append(
+                insn.CMPWL(self.cmp2info[op], gv_result, [gv_x, gv_y]))
+        return gv_result
+
+    def _jump(self, gv_condition, if_true, args_gv):
+        targetbuilder = self.rgenop.newbuilder()
+
+        self.insns.append(
+            insn.Jump(gv_condition, targetbuilder, if_true, args_gv))
+
+        return targetbuilder
+
+    def op_bool_not(self, gv_arg):
+        gv_result = Var()
+        self.insns.append(
+            insn.Insn_GPR__GPR_IMM(RPPCAssembler.subfi,
+                                   gv_result, [gv_arg, self.rgenop.genconst(1)]))
+        return gv_result
+
+    def op_int_is_true(self, gv_arg):
+        return self._compare('ne', gv_arg, self.rgenop.genconst(0))
+
+    def op_int_neg(self, gv_arg):
+        return self._arg_op(gv_arg, _PPC.neg)
+
+    ## op_int_neg_ovf(self, gv_arg) XXX
+
+    ## op_int_abs(self, gv_arg):
+    ## op_int_abs_ovf(self, gv_arg):
+
+    def op_int_invert(self, gv_arg):
+        return self._arg_op(gv_arg, _PPC.not_)
+
+    def op_int_add(self, gv_x, gv_y):
+        return self._arg_arg_op_with_imm(gv_x, gv_y, True, _PPC.add, _PPC.addi)
+
+    def op_int_sub(self, gv_x, gv_y):
+        return self._arg_arg_op_with_imm(gv_x, gv_y, False, _PPC.sub, _PPC.subi)
+
+    def op_int_mul(self, gv_x, gv_y):
+        return self._arg_arg_op_with_imm(gv_x, gv_y, True, _PPC.mullw, _PPC.mulli)
+
+    def op_int_floordiv(self, gv_x, gv_y):
+        return self._arg_arg_op(gv_x, gv_y, _PPC.divw)
+
+    ## def op_int_floordiv_zer(self, gv_x, gv_y):
+    ## def op_int_mod(self, gv_x, gv_y):
+    ## def op_int_mod_zer(self, gv_x, gv_y):
 
     def op_int_lt(self, gv_x, gv_y):
         return self._compare('lt', gv_x, gv_y)
 
-    def op_int_ge(self, gv_x, gv_y):
-        return self._compare('ge', gv_x, gv_y)
-
     def op_int_le(self, gv_x, gv_y):
         return self._compare('le', gv_x, gv_y)
 
@@ -665,34 +794,115 @@
     def op_int_ne(self, gv_x, gv_y):
         return self._compare('ne', gv_x, gv_y)
 
-    def _jump(self, gv_condition, if_true, args_gv):
-        targetbuilder = self.rgenop.newbuilder()
+    def op_int_gt(self, gv_x, gv_y):
+        return self._compare('gt', gv_x, gv_y)
 
-        self.insns.append(
-            insn.Jump(gv_condition, targetbuilder, if_true, args_gv))
+    def op_int_ge(self, gv_x, gv_y):
+        return self._compare('ge', gv_x, gv_y)
 
-        return targetbuilder
+    def op_int_and(self, gv_x, gv_y):
+        return self._arg_arg_op(gv_x, gv_y, _PPC.and_)
 
-    def op_int_is_true(self, gv_arg):
-        gv_result = Var()
-        self.insns.append(
-            insn.CMPWI(self.cmp2info['ne'], gv_result, [gv_arg, self.rgenop.genconst(0)]))
-        return gv_result
+    def op_int_or(self, gv_x, gv_y):
+        return self._arg_arg_op_with_imm(gv_x, gv_y, True, _PPC.or_, _PPC.ori)
 
-    def op_bool_not(self, gv_arg):
-        gv_result = Var()
-        self.insns.append(
-            insn.CMPWI(self.cmp2info['eq'], gv_result, [gv_arg, self.rgenop.genconst(0)]))
-        return gv_result
+    def op_int_lshift(self, gv_x, gv_y):
+        # could be messy if shift is not in 0 <= ... < 32
+        return self._arg_arg_op_with_imm(gv_x, gv_y, False, _PPC.slw, _PPC.slwi)
+    ## def op_int_lshift_val(self, gv_x, gv_y):
+    def op_int_rshift(self, gv_x, gv_y):
+        return self._arg_arg_op_with_imm(gv_x, gv_y, False, _PPC.sraw, _PPC.srawi)
+    ## def op_int_rshift_val(self, gv_x, gv_y):
 
-    def op_int_neg(self, gv_arg):
-        gv_result = Var()
-        self.insns.append(
-            insn.Insn_GPR__GPR(RPPCAssembler.neg, gv_result, gv_arg))
-        return gv_result
+    def op_int_xor(self, gv_x, gv_y):
+        return self._arg_arg_op_with_imm(gv_x, gv_y, True, _PPC.xor, _PPC.xori)
+
+    ## various int_*_ovfs
+
+    op_uint_is_true = op_int_is_true
+    op_uint_invert = op_int_invert
+
+    op_uint_add = op_int_add
+    op_uint_sub = op_int_sub
+    op_uint_mul = op_int_mul
+
+    def op_uint_floordiv(self, gv_x, gv_y):
+        return self._arg_arg_op(gv_x, gv_y, _PPC.divwu)
+
+    ## def op_uint_floordiv_zer(self, gv_x, gv_y):
+    ## def op_uint_mod(self, gv_x, gv_y):
+    ## def op_uint_mod_zer(self, gv_x, gv_y):
+
+    def op_uint_lt(self, gv_x, gv_y):
+        return self._compare_u('lt', gv_x, gv_y)
+
+    def op_uint_le(self, gv_x, gv_y):
+        return self._compare_u('le', gv_x, gv_y)
+
+    def op_uint_eq(self, gv_x, gv_y):
+        return self._compare_u('eq', gv_x, gv_y)
+
+    def op_uint_ne(self, gv_x, gv_y):
+        return self._compare_u('ne', gv_x, gv_y)
+
+    def op_uint_gt(self, gv_x, gv_y):
+        return self._compare_u('gt', gv_x, gv_y)
+
+    def op_uint_ge(self, gv_x, gv_y):
+        return self._compare_u('ge', gv_x, gv_y)
+
+    op_uint_and = op_int_and
+    op_uint_or = op_int_or
+
+    op_uint_lshift = op_int_lshift
+    def op_uint_rshift(self, gv_x, gv_y):
+        return self._arg_arg_op_with_imm(gv_x, gv_y, False, _PPC.srw, _PPC.srwi)
+
+    ## def op_uint_lshift_val(self, gv_x, gv_y):
+    ## def op_uint_rshift(self, gv_x, gv_y):
+    ## def op_uint_rshift_val(self, gv_x, gv_y):
+
+    op_uint_xor = op_int_xor
+
+    # ... floats ...
+
+    # ... llongs, ullongs ...
+
+    # here we assume that booleans are always 1 or 0 and chars are
+    # always zero-padded.
+
+    op_cast_bool_to_int = _identity
+    op_cast_bool_to_uint = _identity
+    ## def op_cast_bool_to_float(self, gv_arg):
+    op_cast_char_to_int = _identity
+    op_cast_unichar_to_int = _identity
+    ## def op_cast_int_to_char(self, gv_arg):
+    ## def op_cast_int_to_unichar(self, gv_arg):
+    op_cast_int_to_uint = _identity
+    ## def op_cast_int_to_float(self, gv_arg):
+    ## def op_cast_int_to_longlong(self, gv_arg):
+    op_cast_uint_to_int = _identity
+    ## def op_cast_uint_to_float(self, gv_arg):
+    ## def op_cast_float_to_int(self, gv_arg):
+    ## def op_cast_float_to_uint(self, gv_arg):
+    ## def op_truncate_longlong_to_int(self, gv_arg):
+
+    # many pointer operations are genop_* special cases above
+
+    op_ptr_eq = op_int_eq
+    op_ptr_ne = op_int_ne
 
     op_ptr_nonzero = op_int_is_true
-    op_ptr_iszero  = op_bool_not        # for now
+    op_ptr_ne      = op_int_ne
+    op_ptr_eq      = op_int_eq
+
+    def op_ptr_iszero(self, gv_arg):
+        return self._compare('eq', gv_arg, self.rgenop.genconst(0))
+
+    op_cast_ptr_to_int     = _identity
+    op_cast_int_to_ptr     = _identity
+
+    # ... address operations ...
 
 
 class RPPCGenOp(AbstractRGenOp):

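The corrected cmp2info_flipped table is the heart of the rgenop.py change:
when the constant operand ends up on the left, _compare emits the compare
with the operands swapped, so the bit-in-crf/negated pair has to answer the
original question about the swapped comparison. A quick brute-force check
of both tables, simulating only the three CR bits that cmpw/cmpwi produce
(cr_bits and pyops are invented names for this sketch):

    # bit 0 = LT, bit 1 = GT, bit 2 = EQ of the target CR field
    def cr_bits(a, b):
        return [a < b, a > b, a == b]

    cmp2info = {'gt': (1, 0), 'lt': (0, 0), 'le': (1, 1),
                'ge': (0, 1), 'eq': (2, 0), 'ne': (2, 1)}
    cmp2info_flipped = {'gt': (0, 0), 'lt': (1, 0), 'le': (0, 1),
                        'ge': (1, 1), 'eq': (2, 0), 'ne': (2, 1)}
    pyops = {'gt': lambda x, y: x > y, 'lt': lambda x, y: x < y,
             'le': lambda x, y: x <= y, 'ge': lambda x, y: x >= y,
             'eq': lambda x, y: x == y, 'ne': lambda x, y: x != y}

    for op, pyop in pyops.items():
        for x in range(-2, 3):
            for y in range(-2, 3):
                bit, negated = cmp2info[op]
                assert (bool(cr_bits(x, y)[bit]) ^ bool(negated)) == pyop(x, y)
                # constant on the left: the emitted compare is cmp(y, x),
                # so the flipped entry must still answer "x op y"
                bit, negated = cmp2info_flipped[op]
                assert (bool(cr_bits(y, x)[bit]) ^ bool(negated)) == pyop(x, y)

The old flipped entries for gt/lt/le/ge do not pass this check, which seems
to be exactly what the new test_flipped_cmpwi test below exercises.
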
Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/test/test_rgenop.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/test/test_rgenop.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/ppc/test/test_rgenop.py	Sat Jan 13 12:09:25 2007
@@ -1,7 +1,7 @@
 import py
 from pypy.jit.codegen.ppc.rgenop import RPPCGenOp
 from pypy.rpython.lltypesystem import lltype
-from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTests, FUNC2
+from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTests, FUNC, FUNC2
 from ctypes import cast, c_int, c_void_p, CFUNCTYPE
 from pypy.jit.codegen.ppc import instruction as insn
 
@@ -58,6 +58,50 @@
         res = fnptr(2, 1)
         assert res == 100101
 
+    def test_flipped_cmpwi(self):
+        # return
+        # 1>x + 10*(1<x) + 100*(1>=x) + 1000*(1<=x) + 10000*(1==x) + 100000*(1!=x)
+        rgenop = self.RGenOp()
+        signed_kind = rgenop.kindToken(lltype.Signed)
+        sigtoken = rgenop.sigToken(FUNC)
+        builder, gv_callable, [gv_x] = rgenop.newgraph(sigtoken,
+                                                       "multicmp")
+        gv_one = rgenop.genconst(1)
+
+        gv_gt = builder.genop2("int_gt", gv_one, gv_x)
+        gv_lt = builder.genop2("int_lt", gv_one, gv_x)
+        gv_ge = builder.genop2("int_ge", gv_one, gv_x)
+        gv_le = builder.genop2("int_le", gv_one, gv_x)
+        gv_eq = builder.genop2("int_eq", gv_one, gv_x)
+        gv_ne = builder.genop2("int_ne", gv_one, gv_x)
+
+        gv_gt2 = gv_gt
+        gv_lt2 = builder.genop2("int_mul", rgenop.genconst(10), gv_lt)
+        gv_ge2 = builder.genop2("int_mul", rgenop.genconst(100), gv_ge)
+        gv_le2 = builder.genop2("int_mul", rgenop.genconst(1000), gv_le)
+        gv_eq2 = builder.genop2("int_mul", rgenop.genconst(10000), gv_eq)
+        gv_ne2 = builder.genop2("int_mul", rgenop.genconst(100000), gv_ne)
+
+        gv_r0 = gv_gt
+        gv_r1 = builder.genop2("int_add", gv_r0, gv_lt2)
+        gv_r2 = builder.genop2("int_add", gv_r1, gv_ge2)
+        gv_r3 = builder.genop2("int_add", gv_r2, gv_le2)
+        gv_r4 = builder.genop2("int_add", gv_r3, gv_eq2)
+        gv_r5 = builder.genop2("int_add", gv_r4, gv_ne2)
+
+        builder.finish_and_return(sigtoken, gv_r5)
+        builder.end()
+        fnptr = cast(c_void_p(gv_callable.value), CFUNCTYPE(c_int))
+
+        res = fnptr(0)
+        assert res == 100101
+
+        res = fnptr(1)
+        assert res ==  11100
+
+        res = fnptr(2)
+        assert res == 101010
+
 class TestRPPCGenopNoRegs(TestRPPCGenop):
     RGenOp = FewRegisters
 

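As a plain-Python cross-check of the constants asserted in
test_flipped_cmpwi above (this is just the formula from the test's comment
evaluated directly, not part of the test itself):

    def multicmp(x):
        return ((1 > x) + 10 * (1 < x) + 100 * (1 >= x)
                + 1000 * (1 <= x) + 10000 * (1 == x) + 100000 * (1 != x))

    assert multicmp(0) == 100101
    assert multicmp(1) == 11100
    assert multicmp(2) == 101010
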
Modified: pypy/branch/i386-regalloc/pypy/jit/codegen/test/rgenop_tests.py
==============================================================================
--- pypy/branch/i386-regalloc/pypy/jit/codegen/test/rgenop_tests.py	(original)
+++ pypy/branch/i386-regalloc/pypy/jit/codegen/test/rgenop_tests.py	Sat Jan 13 12:09:25 2007
@@ -389,6 +389,101 @@
         return res
     return fact_runner
 
+def make_func_calling_pause(rgenop):
+    # def f(x):
+    #     if x > 0:
+    #          return x
+    #     else:
+    #          return -x
+    signed_kind = rgenop.kindToken(lltype.Signed)
+    sigtoken = rgenop.sigToken(FUNC)
+    builder, gv_f, [gv_x] = rgenop.newgraph(sigtoken, "abs")
+
+    gv_cond = builder.genop2("int_gt", gv_x, rgenop.genconst(0))
+
+    targetbuilder = builder.jump_if_false(gv_cond, [gv_x])
+
+    builder = builder.pause_writing([gv_x])
+
+    targetbuilder.start_writing()
+    gv_negated = targetbuilder.genop1("int_neg", gv_x)
+    targetbuilder.finish_and_return(sigtoken, gv_negated)
+
+    builder.start_writing()
+    builder.finish_and_return(sigtoken, gv_x)
+
+    return gv_f
+
+def get_func_calling_pause_runner(RGenOp):
+    def runner(x):
+        rgenop = RGenOp()
+        gv_abs = make_func_calling_pause(rgenop)
+        myabs = gv_abs.revealconst(lltype.Ptr(FUNC))
+        res = myabs(x)
+        keepalive_until_here(rgenop)    # to keep the code blocks alive
+        return res
+    return runner
+
+def make_longwinded_and(rgenop):
+    # def f(y): return 2 <= y <= 4
+    # but more like this:
+    # def f(y)
+    #     x = 2 <= y
+    #     if x:
+    #         x = y <= 4
+    #     if x:
+    #        return 1
+    #     else:
+    #        return 0
+
+    signed_kind = rgenop.kindToken(lltype.Signed)
+    sigtoken = rgenop.sigToken(FUNC)
+    builder, gv_f, [gv_y] = rgenop.newgraph(sigtoken, "abs")
+
+    gv_x = builder.genop2("int_le", rgenop.genconst(2), gv_y)
+
+    false_builder = builder.jump_if_false(gv_x, [gv_x])
+
+    gv_x2 = builder.genop2("int_le", gv_y, rgenop.genconst(4))
+
+    args_gv = [gv_x2]
+    label = builder.enter_next_block([signed_kind], args_gv)
+    [gv_x2] = args_gv
+
+    return_false_builder = builder.jump_if_false(gv_x2, [])
+
+    builder.finish_and_return(sigtoken, rgenop.genconst(1))
+
+    false_builder.start_writing()
+    false_builder.finish_and_goto([gv_x], label)
+
+    return_false_builder.start_writing()
+    return_false_builder.finish_and_return(sigtoken, rgenop.genconst(0))
+
+    return gv_f
+
+def make_condition_result_cross_link(rgenop):
+
+    signed_kind = rgenop.kindToken(lltype.Signed)
+    sigtoken = rgenop.sigToken(FUNC)
+    builder, gv_f, [gv_y] = rgenop.newgraph(sigtoken, "foo")
+
+    gv_result = builder.genop2("int_eq", gv_y, rgenop.genconst(0))
+    target1 = builder.jump_if_false(gv_result, [gv_result])
+
+    builder.finish_and_return(sigtoken, rgenop.genconst(1))
+
+    target1.start_writing()
+    target2 = target1.jump_if_false(gv_result, [])
+
+    # this return should be unreachable:
+    target1.finish_and_return(sigtoken, rgenop.genconst(2))
+
+    target2.start_writing()
+    target2.finish_and_return(sigtoken, rgenop.genconst(3))
+
+    return gv_f
+
 class AbstractRGenOpTests(test_boehm.AbstractGCTestClass):
     RGenOp = None
 
@@ -586,3 +681,54 @@
         res = fn(11)
         assert res == 39916800
 
+    def test_calling_pause_direct(self):
+        rgenop = self.RGenOp()
+        gv_abs = make_func_calling_pause(rgenop)
+        fnptr = self.cast(gv_abs, 1)
+        res = fnptr(2)
+        assert res == 2
+        res = fnptr(-42)
+        assert res == 42
+
+    def test_calling_pause_compile(self):
+        fn = self.compile(get_func_calling_pause_runner(self.RGenOp), [int])
+        res = fn(2)
+        assert res == 2
+        res = fn(-72)
+        assert res == 72
+
+    def test_longwinded_and_direct(self):
+        rgenop = self.RGenOp()
+        gv_fn = make_longwinded_and(rgenop)
+        fnptr = self.cast(gv_fn, 1)
+
+        print map(fnptr, range(6))
+
+        res = fnptr(1)
+        assert res == 0
+
+        res = fnptr(2)
+        assert res == 1
+
+        res = fnptr(3)
+        assert res == 1
+
+        res = fnptr(4)
+        assert res == 1
+
+        res = fnptr(5)
+        assert res == 0
+
+    def test_condition_result_cross_link_direct(self):
+        rgenop = self.RGenOp()
+        gv_fn = make_condition_result_cross_link(rgenop)
+        fnptr = self.cast(gv_fn, 1)
+
+        res = fnptr(-1)
+        assert res == 3
+
+        res = fnptr(0)
+        assert res == 1
+
+        res = fnptr(1)
+        assert res == 3
