[pypy-svn] r77477 - in pypy/branch/fast-forward: . lib-python/modified-2.5.2 lib-python/modified-2.7.0 pypy/config pypy/config/test pypy/interpreter pypy/jit/backend/cli pypy/jit/backend/llgraph pypy/jit/backend/llsupport pypy/jit/backend/llsupport/test pypy/jit/backend/llvm pypy/jit/backend/test pypy/jit/backend/x86 pypy/jit/backend/x86/test pypy/jit/codewriter pypy/jit/codewriter/test pypy/jit/metainterp pypy/jit/metainterp/optimizeopt pypy/jit/metainterp/test pypy/jit/tool pypy/module/array/benchmark pypy/module/pypyjit/test pypy/objspace/std pypy/rlib pypy/rlib/test pypy/rpython pypy/rpython/lltypesystem pypy/rpython/lltypesystem/test pypy/rpython/memory pypy/rpython/memory/gc pypy/rpython/memory/gc/test pypy/rpython/memory/gctransform pypy/rpython/memory/test pypy/tool pypy/translator/c pypy/translator/c/gcc pypy/translator/c/test

afa at codespeak.net afa at codespeak.net
Thu Sep 30 00:16:27 CEST 2010


Author: afa
Date: Thu Sep 30 00:16:20 2010
New Revision: 77477

Added:
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_string.py
      - copied unchanged from r77476, pypy/trunk/pypy/jit/backend/x86/test/test_string.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimize_nopspec.py
      - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/optimize_nopspec.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/string.py
      - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/optimizeopt/string.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop_nopspec.py
      - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/test/test_loop_nopspec.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resoperation.py
      - copied unchanged from r77476, pypy/trunk/pypy/jit/metainterp/test/test_resoperation.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/inspector.py
      - copied unchanged from r77476, pypy/trunk/pypy/rpython/memory/gc/inspector.py
Removed:
   pypy/branch/fast-forward/pypy/rpython/memory/gc/inspect.py
Modified:
   pypy/branch/fast-forward/   (props changed)
   pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py
   pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py
   pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py
   pypy/branch/fast-forward/pypy/config/translationoption.py
   pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py
   pypy/branch/fast-forward/pypy/interpreter/pyopcode.py
   pypy/branch/fast-forward/pypy/jit/backend/cli/method.py
   pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py
   pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py
   pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py
   pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py
   pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py
   pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py
   pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py
   pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py
   pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py
   pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py
   pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py
   pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py
   pypy/branch/fast-forward/pypy/jit/codewriter/call.py
   pypy/branch/fast-forward/pypy/jit/codewriter/codewriter.py
   pypy/branch/fast-forward/pypy/jit/codewriter/effectinfo.py
   pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py
   pypy/branch/fast-forward/pypy/jit/codewriter/support.py
   pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py
   pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py
   pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py
   pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py
   pypy/branch/fast-forward/pypy/jit/metainterp/compile.py
   pypy/branch/fast-forward/pypy/jit/metainterp/executor.py
   pypy/branch/fast-forward/pypy/jit/metainterp/gc.py
   pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py
   pypy/branch/fast-forward/pypy/jit/metainterp/history.py
   pypy/branch/fast-forward/pypy/jit/metainterp/logger.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py
   pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py
   pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py
   pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py
   pypy/branch/fast-forward/pypy/jit/metainterp/resume.py
   pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py
   pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py
   pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py
   pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py
   pypy/branch/fast-forward/pypy/jit/tool/showstats.py
   pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py
   pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c
   pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py
   pypy/branch/fast-forward/pypy/objspace/std/objspace.py
   pypy/branch/fast-forward/pypy/objspace/std/stringtype.py
   pypy/branch/fast-forward/pypy/rlib/jit.py
   pypy/branch/fast-forward/pypy/rlib/rmmap.py
   pypy/branch/fast-forward/pypy/rlib/rstring.py
   pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py
   pypy/branch/fast-forward/pypy/rpython/annlowlevel.py
   pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py
   pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py
   pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py
   pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py
   pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py
   pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py
   pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py
   pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py
   pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py
   pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py
   pypy/branch/fast-forward/pypy/tool/progressbar.py
   pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py
   pypy/branch/fast-forward/pypy/translator/c/genc.py
   pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py
Log:
Merge from trunk


Modified: pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py
==============================================================================
--- pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py	(original)
+++ pypy/branch/fast-forward/lib-python/modified-2.5.2/opcode.py	Thu Sep 30 00:16:20 2010
@@ -185,6 +185,7 @@
 # pypy modification, experimental bytecode
 def_op('CALL_LIKELY_BUILTIN', 144)    # #args + (#kwargs << 8)
 def_op('LOOKUP_METHOD', 145)          # Index in name list
+hasname.append(145)
 def_op('CALL_METHOD', 146)            # #args not including 'self'
 
 

Modified: pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py
==============================================================================
--- pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py	(original)
+++ pypy/branch/fast-forward/lib-python/modified-2.7.0/opcode.py	Thu Sep 30 00:16:20 2010
@@ -1,4 +1,3 @@
-
 """
 opcode module - potentially shared between dis and other modules which
 operate on bytecodes (e.g. peephole optimizers).
@@ -192,6 +191,7 @@
 # pypy modification, experimental bytecode
 def_op('CALL_LIKELY_BUILTIN', 200)    # #args + (#kwargs << 8)
 def_op('LOOKUP_METHOD', 201)          # Index in name list
+hasname.append(201)
 def_op('CALL_METHOD', 202)            # #args not including 'self'
 
 del def_op, name_op, jrel_op, jabs_op

Modified: pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py
==============================================================================
--- pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py	(original)
+++ pypy/branch/fast-forward/pypy/config/test/test_pypyoption.py	Thu Sep 30 00:16:20 2010
@@ -41,7 +41,7 @@
     assert not conf.translation.backendopt.none
     conf = get_pypy_config()
     set_opt_level(conf, 'mem')
-    assert conf.translation.gc == 'markcompact'
+    assert conf.translation.gcremovetypeptr
     assert not conf.translation.backendopt.none
 
 def test_set_pypy_opt_level():

Modified: pypy/branch/fast-forward/pypy/config/translationoption.py
==============================================================================
--- pypy/branch/fast-forward/pypy/config/translationoption.py	(original)
+++ pypy/branch/fast-forward/pypy/config/translationoption.py	Thu Sep 30 00:16:20 2010
@@ -11,6 +11,8 @@
 DEFL_CLEVER_MALLOC_REMOVAL_INLINE_THRESHOLD = 32.4
 DEFL_LOW_INLINE_THRESHOLD = DEFL_INLINE_THRESHOLD / 2.0
 
+DEFL_GC = "minimark"
+
 IS_64_BITS = sys.maxint > 2147483647
 
 PLATFORMS = [
@@ -105,7 +107,7 @@
     # JIT generation: use -Ojit to enable it
     BoolOption("jit", "generate a JIT",
                default=False,
-               suggests=[("translation.gc", "hybrid"),
+               suggests=[("translation.gc", DEFL_GC),
                          ("translation.gcrootfinder", "asmgcc"),
                          ("translation.list_comprehension_operations", True)]),
     ChoiceOption("jit_backend", "choose the backend for the JIT",
@@ -337,10 +339,10 @@
     '0':    'boehm       nobackendopt',
     '1':    'boehm       lowinline',
     'size': 'boehm       lowinline     remove_asserts',
-    'mem':  'markcompact lowinline     remove_asserts    removetypeptr',
-    '2':    'hybrid      extraopts',
-    '3':    'hybrid      extraopts     remove_asserts',
-    'jit':  'hybrid      extraopts     jit',
+    'mem':  DEFL_GC + '  lowinline     remove_asserts    removetypeptr',
+    '2':    DEFL_GC + '  extraopts',
+    '3':    DEFL_GC + '  extraopts     remove_asserts',
+    'jit':  DEFL_GC + '  extraopts     jit',
     }
 
 def final_check_config(config):

Modified: pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py
==============================================================================
--- pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py	(original)
+++ pypy/branch/fast-forward/pypy/interpreter/baseobjspace.py	Thu Sep 30 00:16:20 2010
@@ -12,6 +12,7 @@
 from pypy.rlib.timer import DummyTimer, Timer
 from pypy.rlib.rarithmetic import r_uint
 from pypy.rlib import jit
+from pypy.tool.sourcetools import func_with_new_name
 import os, sys, py
 
 __all__ = ['ObjSpace', 'OperationError', 'Wrappable', 'W_Root']
@@ -749,12 +750,17 @@
                                 (i, plural)))
         return items
 
+    unpackiterable_unroll = jit.unroll_safe(func_with_new_name(unpackiterable,
+                                            'unpackiterable_unroll'))
+
     def fixedview(self, w_iterable, expected_length=-1):
         """ A fixed list view of w_iterable. Don't modify the result
         """
         return make_sure_not_resized(self.unpackiterable(w_iterable,
                                                          expected_length)[:])
 
+    fixedview_unroll = fixedview
+
     def listview(self, w_iterable, expected_length=-1):
         """ A non-fixed view of w_iterable. Don't modify the result
         """

Modified: pypy/branch/fast-forward/pypy/interpreter/pyopcode.py
==============================================================================
--- pypy/branch/fast-forward/pypy/interpreter/pyopcode.py	(original)
+++ pypy/branch/fast-forward/pypy/interpreter/pyopcode.py	Thu Sep 30 00:16:20 2010
@@ -637,7 +637,7 @@
 
     def UNPACK_SEQUENCE(self, itemcount, next_instr):
         w_iterable = self.popvalue()
-        items = self.space.fixedview(w_iterable, itemcount)
+        items = self.space.fixedview_unroll(w_iterable, itemcount)
         self.pushrevvalues(itemcount, items)
 
     def STORE_ATTR(self, nameindex, next_instr):

Modified: pypy/branch/fast-forward/pypy/jit/backend/cli/method.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/cli/method.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/cli/method.py	Thu Sep 30 00:16:20 2010
@@ -207,9 +207,9 @@
 
     def _collect_types(self, operations, box2classes):
         for op in operations:
-            if op.opnum in (rop.GETFIELD_GC, rop.SETFIELD_GC):
+            if op.getopnum() in (rop.GETFIELD_GC, rop.SETFIELD_GC):
                 box = op.args[0]
-                descr = op.descr
+                descr = op.getdescr()
                 assert isinstance(descr, runner.FieldDescr)
                 box2classes.setdefault(box, []).append(descr.selfclass)
             if op in self.cliloop.guard2ops:
@@ -335,7 +335,7 @@
         while self.i < N:
             op = oplist[self.i]
             self.emit_debug(op.repr())
-            func = self.operations[op.opnum]
+            func = self.operations[op.getopnum()]
             assert func is not None
             func(self, op)
             self.i += 1
@@ -357,10 +357,10 @@
         assert op.is_guard()
         if op in self.cliloop.guard2ops:
             inputargs, suboperations = self.cliloop.guard2ops[op]
-            self.match_var_fox_boxes(op.fail_args, inputargs)
+            self.match_var_fox_boxes(op.getfailargs(), inputargs)
             self.emit_operations(suboperations)
         else:
-            self.emit_return_failed_op(op, op.fail_args)
+            self.emit_return_failed_op(op, op.getfailargs())
 
     def emit_end(self):
         assert self.branches == []
@@ -410,7 +410,7 @@
 
     def emit_ovf_op(self, op, emit_op):
         next_op = self.oplist[self.i+1]
-        if next_op.opnum == rop.GUARD_NO_OVERFLOW:
+        if next_op.getopnum() == rop.GUARD_NO_OVERFLOW:
                 self.i += 1
                 self.emit_ovf_op_and_guard(op, next_op, emit_op)
                 return
@@ -544,7 +544,7 @@
         self.emit_guard_overflow_impl(op, OpCodes.Brfalse)
 
     def emit_op_jump(self, op):
-        target_token = op.descr
+        target_token = op.getdescr()
         assert isinstance(target_token, LoopToken)
         if target_token.cliloop is self.cliloop:
             # jump to the beginning of the loop
@@ -586,7 +586,7 @@
         self.store_result(op)
 
     def emit_op_instanceof(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.TypeDescr)
         clitype = descr.get_clitype()
         op.args[0].load(self)
@@ -604,7 +604,7 @@
         self.store_result(op)
 
     def emit_op_call_impl(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.StaticMethDescr)
         delegate_type = descr.get_delegate_clitype()
         meth_invoke = descr.get_meth_info()
@@ -619,7 +619,7 @@
     emit_op_call_pure = emit_op_call
 
     def emit_op_oosend(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.MethDescr)
         clitype = descr.get_self_clitype()
         methinfo = descr.get_meth_info()
@@ -639,7 +639,7 @@
             self.store_result(op)
 
     def emit_op_getfield_gc(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.FieldDescr)
         clitype = descr.get_self_clitype()
         fieldinfo = descr.get_field_info()
@@ -653,7 +653,7 @@
     emit_op_getfield_gc_pure = emit_op_getfield_gc
 
     def emit_op_setfield_gc(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.FieldDescr)
         clitype = descr.get_self_clitype()
         fieldinfo = descr.get_field_info()
@@ -665,7 +665,7 @@
         self.il.Emit(OpCodes.Stfld, fieldinfo)
 
     def emit_op_getarrayitem_gc(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.TypeDescr)
         clitype = descr.get_array_clitype()
         itemtype = descr.get_clitype()
@@ -678,7 +678,7 @@
     emit_op_getarrayitem_gc_pure = emit_op_getarrayitem_gc
 
     def emit_op_setarrayitem_gc(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.TypeDescr)
         clitype = descr.get_array_clitype()
         itemtype = descr.get_clitype()
@@ -689,7 +689,7 @@
         self.il.Emit(OpCodes.Stelem, itemtype)
 
     def emit_op_arraylen_gc(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.TypeDescr)
         clitype = descr.get_array_clitype()
         op.args[0].load(self)
@@ -698,7 +698,7 @@
         self.store_result(op)
 
     def emit_op_new_array(self, op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, runner.TypeDescr)
         item_clitype = descr.get_clitype()
         if item_clitype is None:

Modified: pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/cli/runner.py	Thu Sep 30 00:16:20 2010
@@ -105,7 +105,7 @@
     def _attach_token_to_faildescrs(self, token, operations):
         for op in operations:
             if op.is_guard():
-                descr = op.descr
+                descr = op.getdescr()
                 assert isinstance(descr, AbstractFailDescr)
                 descr._loop_token = token
                 descr._guard_op = op
@@ -136,7 +136,7 @@
         func = cliloop.funcbox.holder.GetFunc()
         func(self.get_inputargs())
         op = self.failing_ops[self.inputargs.get_failed_op()]
-        return op.descr
+        return op.getdescr()
         
     def set_future_value_int(self, index, intvalue):
         self.get_inputargs().set_int(index, intvalue)

Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/llimpl.py	Thu Sep 30 00:16:20 2010
@@ -129,7 +129,7 @@
     'arraylen_gc'     : (('ref',), 'int'),
     'call'            : (('ref', 'varargs'), 'intorptr'),
     'call_assembler'  : (('varargs',), 'intorptr'),
-    'cond_call_gc_wb' : (('ptr', 'ptr'), None),
+    'cond_call_gc_wb' : (('ptr',), None),
     'oosend'          : (('varargs',), 'intorptr'),
     'oosend_pure'     : (('varargs',), 'intorptr'),
     'guard_true'      : (('bool',), None),
@@ -810,7 +810,7 @@
                  FLOAT: 0.0}
             return d[calldescr.typeinfo]
 
-    def op_cond_call_gc_wb(self, descr, a, b):
+    def op_cond_call_gc_wb(self, descr, a):
         py.test.skip("cond_call_gc_wb not supported")
 
     def op_oosend(self, descr, obj, *args):
@@ -1382,6 +1382,20 @@
     uni = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), string)
     uni.chars[index] = unichr(newvalue)
 
+def do_copystrcontent(src, dst, srcstart, dststart, length):
+    src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), src)
+    dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), dst)
+    assert 0 <= srcstart <= srcstart + length <= len(src.chars)
+    assert 0 <= dststart <= dststart + length <= len(dst.chars)
+    rstr.copy_string_contents(src, dst, srcstart, dststart, length)
+
+def do_copyunicodecontent(src, dst, srcstart, dststart, length):
+    src = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), src)
+    dst = lltype.cast_opaque_ptr(lltype.Ptr(rstr.UNICODE), dst)
+    assert 0 <= srcstart <= srcstart + length <= len(src.chars)
+    assert 0 <= dststart <= dststart + length <= len(dst.chars)
+    rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
+
 # ---------- call ----------
 
 _call_args_i = []

Modified: pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llgraph/runner.py	Thu Sep 30 00:16:20 2010
@@ -151,16 +151,17 @@
 
     def _compile_operations(self, c, operations, var2index):
         for op in operations:
-            llimpl.compile_add(c, op.opnum)
-            descr = op.descr
+            llimpl.compile_add(c, op.getopnum())
+            descr = op.getdescr()
             if isinstance(descr, Descr):
                 llimpl.compile_add_descr(c, descr.ofs, descr.typeinfo)
-            if isinstance(descr, history.LoopToken) and op.opnum != rop.JUMP:
+            if isinstance(descr, history.LoopToken) and op.getopnum() != rop.JUMP:
                 llimpl.compile_add_loop_token(c, descr)
             if self.is_oo and isinstance(descr, (OODescr, MethDescr)):
                 # hack hack, not rpython
-                c._obj.externalobj.operations[-1].descr = descr
-            for x in op.args:
+                c._obj.externalobj.operations[-1].setdescr(descr)
+            for i in range(op.numargs()):
+                x = op.getarg(i)
                 if isinstance(x, history.Box):
                     llimpl.compile_add_var(c, var2index[x])
                 elif isinstance(x, history.ConstInt):
@@ -173,10 +174,10 @@
                     raise Exception("'%s' args contain: %r" % (op.getopname(),
                                                                x))
             if op.is_guard():
-                faildescr = op.descr
+                faildescr = op.getdescr()
                 assert isinstance(faildescr, history.AbstractFailDescr)
                 faildescr._fail_args_types = []
-                for box in op.fail_args:
+                for box in op.getfailargs():
                     if box is None:
                         type = history.HOLE
                     else:
@@ -185,7 +186,7 @@
                 fail_index = self.get_fail_descr_number(faildescr)
                 index = llimpl.compile_add_fail(c, fail_index)
                 faildescr._compiled_fail = c, index
-                for box in op.fail_args:
+                for box in op.getfailargs():
                     if box is not None:
                         llimpl.compile_add_fail_arg(c, var2index[box])
                     else:
@@ -203,13 +204,13 @@
                                                                x))
         op = operations[-1]
         assert op.is_final()
-        if op.opnum == rop.JUMP:
-            targettoken = op.descr
+        if op.getopnum() == rop.JUMP:
+            targettoken = op.getdescr()
             assert isinstance(targettoken, history.LoopToken)
             compiled_version = targettoken._llgraph_compiled_version
             llimpl.compile_add_jump_target(c, compiled_version)
-        elif op.opnum == rop.FINISH:
-            faildescr = op.descr
+        elif op.getopnum() == rop.FINISH:
+            faildescr = op.getdescr()
             index = self.get_fail_descr_number(faildescr)
             llimpl.compile_add_fail(c, index)
         else:
@@ -280,7 +281,7 @@
     def __init__(self, *args, **kwds):
         BaseCPU.__init__(self, *args, **kwds)
         self.fielddescrof_vtable = self.fielddescrof(rclass.OBJECT, 'typeptr')
-        
+
     def fielddescrof(self, S, fieldname):
         ofs, size = symbolic.get_field_token(S, fieldname)
         token = history.getkind(getattr(S, fieldname))
@@ -504,7 +505,7 @@
             return ootype.cast_to_object(e)
         else:
             return ootype.NULL
-        
+
     def get_exc_value(self):
         if llimpl._last_exception:
             earg = llimpl._last_exception.args[1]
@@ -580,7 +581,7 @@
         x = descr.callmeth(selfbox, argboxes)
         # XXX: return None if METH.RESULT is Void
         return x
-    
+
 
 def make_getargs(ARGS):
     argsiter = unrolling_iterable(ARGS)
@@ -612,7 +613,7 @@
 class KeyManager(object):
     """
     Helper class to convert arbitrary dictionary keys to integers.
-    """    
+    """
 
     def __init__(self):
         self.keys = {}
@@ -695,7 +696,7 @@
         self.ARRAY = ARRAY = ootype.Array(TYPE)
         def create():
             return boxresult(TYPE, ootype.new(TYPE))
-        
+
         def create_array(lengthbox):
             n = lengthbox.getint()
             return boxresult(ARRAY, ootype.oonewarray(ARRAY, n))
@@ -757,7 +758,7 @@
             obj = objbox.getref(TYPE)
             value = unwrap(T, valuebox)
             setattr(obj, fieldname, value)
-            
+
         self.getfield = getfield
         self.setfield = setfield
         self._is_pointer_field = (history.getkind(T) == 'ref')

Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/gc.py	Thu Sep 30 00:16:20 2010
@@ -41,9 +41,12 @@
     moving_gc = False
     gcrootmap = None
 
-    def __init__(self, gcdescr, translator, rtyper):
-        GcLLDescription.__init__(self, gcdescr, translator, rtyper)
-        # grab a pointer to the Boehm 'malloc' function
+    @classmethod
+    def configure_boehm_once(cls):
+        """ Configure boehm only once, since we don't cache failures
+        """
+        if hasattr(cls, 'malloc_fn_ptr'):
+            return cls.malloc_fn_ptr
         from pypy.rpython.tool import rffi_platform
         compilation_info = rffi_platform.configure_boehm()
 
@@ -59,13 +62,20 @@
             GC_MALLOC = "GC_local_malloc"
         else:
             GC_MALLOC = "GC_malloc"
-
         malloc_fn_ptr = rffi.llexternal(GC_MALLOC,
                                         [lltype.Signed], # size_t, but good enough
                                         llmemory.GCREF,
                                         compilation_info=compilation_info,
                                         sandboxsafe=True,
                                         _nowrapper=True)
+        cls.malloc_fn_ptr = malloc_fn_ptr
+        cls.compilation_info = compilation_info
+        return malloc_fn_ptr
+
+    def __init__(self, gcdescr, translator, rtyper):
+        GcLLDescription.__init__(self, gcdescr, translator, rtyper)
+        # grab a pointer to the Boehm 'malloc' function
+        malloc_fn_ptr = self.configure_boehm_once()
         self.funcptr_for_new = malloc_fn_ptr
 
         # on some platform GC_init is required before any other
@@ -73,7 +83,7 @@
         # XXX move this to tests
         init_fn_ptr = rffi.llexternal("GC_init",
                                       [], lltype.Void,
-                                      compilation_info=compilation_info,
+                                      compilation_info=self.compilation_info,
                                       sandboxsafe=True,
                                       _nowrapper=True)
 
@@ -123,7 +133,7 @@
 
 
 # ____________________________________________________________
-# All code below is for the hybrid GC
+# All code below is for the hybrid or minimark GC
 
 
 class GcRefList:
@@ -157,7 +167,7 @@
 
     def alloc_gcref_list(self, n):
         # Important: the GRREF_LISTs allocated are *non-movable*.  This
-        # requires support in the gc (only the hybrid GC supports it so far).
+        # requires support in the gc (hybrid GC or minimark GC so far).
         if we_are_translated():
             list = rgc.malloc_nonmovable(self.GCREF_LIST, n)
             assert list, "malloc_nonmovable failed!"
@@ -340,8 +350,9 @@
         self.translator = translator
         self.llop1 = llop1
 
-        # we need the hybrid GC for GcRefList.alloc_gcref_list() to work
-        if gcdescr.config.translation.gc != 'hybrid':
+        # we need the hybrid or minimark GC for GcRefList.alloc_gcref_list()
+        # to work
+        if gcdescr.config.translation.gc not in ('hybrid', 'minimark'):
             raise NotImplementedError("--gc=%s not implemented with the JIT" %
                                       (gcdescr.config.translation.gc,))
 
@@ -372,8 +383,7 @@
         self.gcheaderbuilder = GCHeaderBuilder(self.HDRPTR.TO)
         (self.array_basesize, _, self.array_length_ofs) = \
              symbolic.get_array_token(lltype.GcArray(lltype.Signed), True)
-        min_ns = self.GCClass.TRANSLATION_PARAMS['min_nursery_size']
-        self.max_size_of_young_obj = self.GCClass.get_young_fixedsize(min_ns)
+        self.max_size_of_young_obj = self.GCClass.JIT_max_size_of_young_obj()
 
         # make a malloc function, with three arguments
         def malloc_basic(size, tid):
@@ -394,7 +404,7 @@
         self.GC_MALLOC_BASIC = lltype.Ptr(lltype.FuncType(
             [lltype.Signed, lltype.Signed], llmemory.GCREF))
         self.WB_FUNCPTR = lltype.Ptr(lltype.FuncType(
-            [llmemory.Address, llmemory.Address], lltype.Void))
+            [llmemory.Address], lltype.Void))
         self.write_barrier_descr = WriteBarrierDescr(self)
         #
         def malloc_array(itemsize, tid, num_elem):
@@ -540,8 +550,7 @@
             # the GC, and call it immediately
             llop1 = self.llop1
             funcptr = llop1.get_write_barrier_failing_case(self.WB_FUNCPTR)
-            funcptr(llmemory.cast_ptr_to_adr(gcref_struct),
-                    llmemory.cast_ptr_to_adr(gcref_newptr))
+            funcptr(llmemory.cast_ptr_to_adr(gcref_struct))
 
     def rewrite_assembler(self, cpu, operations):
         # Perform two kinds of rewrites in parallel:
@@ -559,12 +568,12 @@
         #
         newops = []
         for op in operations:
-            if op.opnum == rop.DEBUG_MERGE_POINT:
+            if op.getopnum() == rop.DEBUG_MERGE_POINT:
                 continue
             # ---------- replace ConstPtrs with GETFIELD_RAW ----------
             # xxx some performance issue here
-            for i in range(len(op.args)):
-                v = op.args[i]
+            for i in range(op.numargs()):
+                v = op.getarg(i)
                 if isinstance(v, ConstPtr) and bool(v.value):
                     addr = self.gcrefs.get_address_of_gcref(v.value)
                     # ^^^even for non-movable objects, to record their presence
@@ -574,30 +583,30 @@
                         newops.append(ResOperation(rop.GETFIELD_RAW,
                                                    [ConstInt(addr)], box,
                                                    self.single_gcref_descr))
-                        op.args[i] = box
+                        op.setarg(i, box)
             # ---------- write barrier for SETFIELD_GC ----------
-            if op.opnum == rop.SETFIELD_GC:
-                v = op.args[1]
+            if op.getopnum() == rop.SETFIELD_GC:
+                v = op.getarg(1)
                 if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
                                              bool(v.value)): # store a non-NULL
-                    self._gen_write_barrier(newops, op.args[0], v)
-                    op = ResOperation(rop.SETFIELD_RAW, op.args, None,
-                                      descr=op.descr)
+                    self._gen_write_barrier(newops, op.getarg(0))
+                    op = op.copy_and_change(rop.SETFIELD_RAW)
             # ---------- write barrier for SETARRAYITEM_GC ----------
-            if op.opnum == rop.SETARRAYITEM_GC:
-                v = op.args[2]
+            if op.getopnum() == rop.SETARRAYITEM_GC:
+                v = op.getarg(2)
                 if isinstance(v, BoxPtr) or (isinstance(v, ConstPtr) and
                                              bool(v.value)): # store a non-NULL
-                    self._gen_write_barrier(newops, op.args[0], v)
-                    op = ResOperation(rop.SETARRAYITEM_RAW, op.args, None,
-                                      descr=op.descr)
+                    # XXX detect when we should produce a
+                    # write_barrier_from_array
+                    self._gen_write_barrier(newops, op.getarg(0))
+                    op = op.copy_and_change(rop.SETARRAYITEM_RAW)
             # ----------
             newops.append(op)
         del operations[:]
         operations.extend(newops)
 
-    def _gen_write_barrier(self, newops, v_base, v_value):
-        args = [v_base, v_value]
+    def _gen_write_barrier(self, newops, v_base):
+        args = [v_base]
         newops.append(ResOperation(rop.COND_CALL_GC_WB, args, None,
                                    descr=self.write_barrier_descr))
 

Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/regalloc.py	Thu Sep 30 00:16:20 2010
@@ -81,6 +81,10 @@
         for v in vars:
             self.possibly_free_var(v)
 
+    def possibly_free_vars_for_op(self, op):
+        for i in range(op.numargs()):
+            self.possibly_free_var(op.getarg(i))
+
     def _check_invariants(self):
         if not we_are_translated():
             # make sure no duplicates

Modified: pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llsupport/test/test_gc.py	Thu Sep 30 00:16:20 2010
@@ -141,19 +141,20 @@
                             repr(offset_to_length), p))
         return p
 
-    def _write_barrier_failing_case(self, adr_struct, adr_newptr):
-        self.record.append(('barrier', adr_struct, adr_newptr))
+    def _write_barrier_failing_case(self, adr_struct):
+        self.record.append(('barrier', adr_struct))
 
     def get_write_barrier_failing_case(self, FPTRTYPE):
         return llhelper(FPTRTYPE, self._write_barrier_failing_case)
 
 
 class TestFramework:
+    gc = 'hybrid'
 
     def setup_method(self, meth):
         class config_:
             class translation:
-                gc = 'hybrid'
+                gc = self.gc
                 gcrootfinder = 'asmgcc'
                 gctransformer = 'framework'
                 gcremovetypeptr = False
@@ -238,7 +239,6 @@
         s_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, s)
         r_gcref = lltype.cast_opaque_ptr(llmemory.GCREF, r)
         s_adr = llmemory.cast_ptr_to_adr(s)
-        r_adr = llmemory.cast_ptr_to_adr(r)
         #
         s_hdr.tid &= ~gc_ll_descr.GCClass.JIT_WB_IF_FLAG
         gc_ll_descr.do_write_barrier(s_gcref, r_gcref)
@@ -246,7 +246,7 @@
         #
         s_hdr.tid |= gc_ll_descr.GCClass.JIT_WB_IF_FLAG
         gc_ll_descr.do_write_barrier(s_gcref, r_gcref)
-        assert self.llop1.record == [('barrier', s_adr, r_adr)]
+        assert self.llop1.record == [('barrier', s_adr)]
 
     def test_gen_write_barrier(self):
         gc_ll_descr = self.gc_ll_descr
@@ -254,22 +254,20 @@
         #
         newops = []
         v_base = BoxPtr()
-        v_value = BoxPtr()
-        gc_ll_descr._gen_write_barrier(newops, v_base, v_value)
+        gc_ll_descr._gen_write_barrier(newops, v_base)
         assert llop1.record == []
         assert len(newops) == 1
-        assert newops[0].opnum == rop.COND_CALL_GC_WB
-        assert newops[0].args[0] == v_base
-        assert newops[0].args[1] == v_value
+        assert newops[0].getopnum() == rop.COND_CALL_GC_WB
+        assert newops[0].getarg(0) == v_base
         assert newops[0].result is None
-        wbdescr = newops[0].descr
+        wbdescr = newops[0].getdescr()
         assert isinstance(wbdescr.jit_wb_if_flag, int)
         assert isinstance(wbdescr.jit_wb_if_flag_byteofs, int)
         assert isinstance(wbdescr.jit_wb_if_flag_singlebyte, int)
 
     def test_get_rid_of_debug_merge_point(self):
         operations = [
-            ResOperation(rop.DEBUG_MERGE_POINT, [], None),
+            ResOperation(rop.DEBUG_MERGE_POINT, ['dummy'], None),
             ]
         gc_ll_descr = self.gc_ll_descr
         gc_ll_descr.rewrite_assembler(None, operations)
@@ -298,13 +296,14 @@
         gc_ll_descr.gcrefs = MyFakeGCRefList()
         gc_ll_descr.rewrite_assembler(MyFakeCPU(), operations)
         assert len(operations) == 2
-        assert operations[0].opnum == rop.GETFIELD_RAW
-        assert operations[0].args == [ConstInt(43)]
-        assert operations[0].descr == gc_ll_descr.single_gcref_descr
+        assert operations[0].getopnum() == rop.GETFIELD_RAW
+        assert operations[0].getarg(0) == ConstInt(43)
+        assert operations[0].getdescr() == gc_ll_descr.single_gcref_descr
         v_box = operations[0].result
         assert isinstance(v_box, BoxPtr)
-        assert operations[1].opnum == rop.PTR_EQ
-        assert operations[1].args == [v_random_box, v_box]
+        assert operations[1].getopnum() == rop.PTR_EQ
+        assert operations[1].getarg(0) == v_random_box
+        assert operations[1].getarg(1) == v_box
         assert operations[1].result == v_result
 
     def test_rewrite_assembler_1_cannot_move(self):
@@ -336,8 +335,9 @@
         finally:
             rgc.can_move = old_can_move
         assert len(operations) == 1
-        assert operations[0].opnum == rop.PTR_EQ
-        assert operations[0].args == [v_random_box, ConstPtr(s_gcref)]
+        assert operations[0].getopnum() == rop.PTR_EQ
+        assert operations[0].getarg(0) == v_random_box
+        assert operations[0].getarg(1) == ConstPtr(s_gcref)
         assert operations[0].result == v_result
         # check that s_gcref gets added to the list anyway, to make sure
         # that the GC sees it
@@ -356,14 +356,14 @@
         gc_ll_descr.rewrite_assembler(self.fake_cpu, operations)
         assert len(operations) == 2
         #
-        assert operations[0].opnum == rop.COND_CALL_GC_WB
-        assert operations[0].args[0] == v_base
-        assert operations[0].args[1] == v_value
+        assert operations[0].getopnum() == rop.COND_CALL_GC_WB
+        assert operations[0].getarg(0) == v_base
         assert operations[0].result is None
         #
-        assert operations[1].opnum == rop.SETFIELD_RAW
-        assert operations[1].args == [v_base, v_value]
-        assert operations[1].descr == field_descr
+        assert operations[1].getopnum() == rop.SETFIELD_RAW
+        assert operations[1].getarg(0) == v_base
+        assert operations[1].getarg(1) == v_value
+        assert operations[1].getdescr() == field_descr
 
     def test_rewrite_assembler_3(self):
         # check write barriers before SETARRAYITEM_GC
@@ -379,11 +379,16 @@
         gc_ll_descr.rewrite_assembler(self.fake_cpu, operations)
         assert len(operations) == 2
         #
-        assert operations[0].opnum == rop.COND_CALL_GC_WB
-        assert operations[0].args[0] == v_base
-        assert operations[0].args[1] == v_value
+        assert operations[0].getopnum() == rop.COND_CALL_GC_WB
+        assert operations[0].getarg(0) == v_base
         assert operations[0].result is None
         #
-        assert operations[1].opnum == rop.SETARRAYITEM_RAW
-        assert operations[1].args == [v_base, v_index, v_value]
-        assert operations[1].descr == array_descr
+        assert operations[1].getopnum() == rop.SETARRAYITEM_RAW
+        assert operations[1].getarg(0) == v_base
+        assert operations[1].getarg(1) == v_index
+        assert operations[1].getarg(2) == v_value
+        assert operations[1].getdescr() == array_descr
+
+
+class TestFrameworkMiniMark(TestFramework):
+    gc = 'minimark'

Modified: pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/llvm/compile.py	Thu Sep 30 00:16:20 2010
@@ -107,7 +107,7 @@
         # store away the exception into self.backup_exc_xxx, *unless* the
         # branch starts with a further GUARD_EXCEPTION/GUARD_NO_EXCEPTION.
         if exc:
-            opnum = operations[0].opnum
+            opnum = operations[0].getopnum()
             if opnum not in (rop.GUARD_EXCEPTION, rop.GUARD_NO_EXCEPTION):
                 self._store_away_exception()
         # Normal handling of the operations follows.
@@ -115,7 +115,7 @@
             self._generate_op(op)
 
     def _generate_op(self, op):
-        opnum = op.opnum
+        opnum = op.getopnum()
         for i, name in all_operations:
             if opnum == i:
                 meth = getattr(self, name)
@@ -475,7 +475,7 @@
         return location
 
     def generate_GETFIELD_GC(self, op):
-        loc = self._generate_field_gep(op.args[0], op.descr)
+        loc = self._generate_field_gep(op.args[0], op.getdescr())
         self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "")
 
     generate_GETFIELD_GC_PURE  = generate_GETFIELD_GC
@@ -483,7 +483,7 @@
     generate_GETFIELD_RAW_PURE = generate_GETFIELD_GC
 
     def generate_SETFIELD_GC(self, op):
-        fielddescr = op.descr
+        fielddescr = op.getdescr()
         loc = self._generate_field_gep(op.args[0], fielddescr)
         assert isinstance(fielddescr, FieldDescr)
         getarg = self.cpu.getarg_by_index[fielddescr.size_index]
@@ -491,7 +491,7 @@
         llvm_rffi.LLVMBuildStore(self.builder, value_ref, loc, "")
 
     def generate_CALL(self, op):
-        calldescr = op.descr
+        calldescr = op.getdescr()
         assert isinstance(calldescr, CallDescr)
         ty_function_ptr = self.cpu.get_calldescr_ty_function_ptr(calldescr)
         v = op.args[0]
@@ -579,7 +579,7 @@
         self.vars[op.result] = llvm_rffi.LLVMBuildLoad(self.builder, loc, "")
 
     def generate_ARRAYLEN_GC(self, op):
-        arraydescr = op.descr
+        arraydescr = op.getdescr()
         assert isinstance(arraydescr, ArrayDescr)
         self._generate_len(op, arraydescr.ty_array_ptr,
                            self.cpu.const_array_index_length)
@@ -598,7 +598,7 @@
         return location
 
     def _generate_array_gep(self, op):
-        arraydescr = op.descr
+        arraydescr = op.getdescr()
         assert isinstance(arraydescr, ArrayDescr)
         location = self._generate_gep(op, arraydescr.ty_array_ptr,
                                       self.cpu.const_array_index_array)
@@ -612,7 +612,7 @@
 
     def generate_SETARRAYITEM_GC(self, op):
         loc = self._generate_array_gep(op)
-        arraydescr = op.descr
+        arraydescr = op.getdescr()
         assert isinstance(arraydescr, ArrayDescr)
         getarg = self.cpu.getarg_by_index[arraydescr.itemsize_index]
         value_ref = getarg(self, op.args[2])
@@ -660,7 +660,7 @@
         return res
 
     def generate_NEW(self, op):
-        sizedescr = op.descr
+        sizedescr = op.getdescr()
         assert isinstance(sizedescr, SizeDescr)
         res = self._generate_new(self.cpu._make_const_int(sizedescr.size))
         self.vars[op.result] = res
@@ -695,7 +695,7 @@
         self.vars[op.result] = res
 
     def generate_NEW_ARRAY(self, op):
-        arraydescr = op.descr
+        arraydescr = op.getdescr()
         assert isinstance(arraydescr, ArrayDescr)
         self._generate_new_array(op, arraydescr.ty_array_ptr,
                                  self.cpu._make_const_int(arraydescr.itemsize),

Modified: pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/test/runner_test.py	Thu Sep 30 00:16:20 2010
@@ -1,5 +1,6 @@
 import py, sys, random, os, struct, operator
 from pypy.jit.metainterp.history import (AbstractFailDescr,
+                                         AbstractDescr,
                                          BasicFailDescr,
                                          BoxInt, Box, BoxPtr,
                                          LoopToken,
@@ -39,7 +40,7 @@
             else:
                 raise NotImplementedError(box)
         res = self.cpu.execute_token(looptoken)
-        if res is operations[-1].descr:
+        if res is operations[-1].getdescr():
             self.guard_failed = False
         else:
             self.guard_failed = True
@@ -74,10 +75,11 @@
                       ResOperation(rop.FINISH, results, None,
                                    descr=BasicFailDescr(0))]
         if operations[0].is_guard():
-            operations[0].fail_args = []
+            operations[0].setfailargs([])
             if not descr:
                 descr = BasicFailDescr(1)
-        operations[0].descr = descr
+        if descr is not None:
+            operations[0].setdescr(descr)
         inputargs = []
         for box in valueboxes:
             if isinstance(box, Box) and box not in inputargs:
@@ -116,7 +118,7 @@
             ResOperation(rop.JUMP, [i1], None, descr=looptoken),
             ]
         inputargs = [i0]
-        operations[2].fail_args = [i1]
+        operations[2].setfailargs([i1])
         
         self.cpu.compile_loop(inputargs, operations, looptoken)
         self.cpu.set_future_value_int(0, 2)
@@ -137,7 +139,7 @@
             ResOperation(rop.JUMP, [i1], None, descr=looptoken),
             ]
         inputargs = [i0]
-        operations[2].fail_args = [None, None, i1, None]
+        operations[2].setfailargs([None, None, i1, None])
         
         self.cpu.compile_loop(inputargs, operations, looptoken)
         self.cpu.set_future_value_int(0, 2)
@@ -160,7 +162,7 @@
             ResOperation(rop.JUMP, [i1], None, descr=looptoken),
             ]
         inputargs = [i0]
-        operations[2].fail_args = [i1]
+        operations[2].setfailargs([i1])
         wr_i1 = weakref.ref(i1)
         wr_guard = weakref.ref(operations[2])
         self.cpu.compile_loop(inputargs, operations, looptoken)
@@ -184,7 +186,7 @@
             ResOperation(rop.JUMP, [i1], None, descr=looptoken),
             ]
         inputargs = [i0]
-        operations[2].fail_args = [i1]
+        operations[2].setfailargs([i1])
         self.cpu.compile_loop(inputargs, operations, looptoken)
 
         i1b = BoxInt()
@@ -194,7 +196,7 @@
             ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
             ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
         ]
-        bridge[1].fail_args = [i1b]
+        bridge[1].setfailargs([i1b])
 
         self.cpu.compile_bridge(faildescr1, [i1b], bridge)        
 
@@ -218,7 +220,7 @@
             ResOperation(rop.JUMP, [i1], None, descr=looptoken),
             ]
         inputargs = [i0]
-        operations[2].fail_args = [None, i1, None]
+        operations[2].setfailargs([None, i1, None])
         self.cpu.compile_loop(inputargs, operations, looptoken)
 
         i1b = BoxInt()
@@ -228,7 +230,7 @@
             ResOperation(rop.GUARD_TRUE, [i3], None, descr=faildescr2),
             ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
         ]
-        bridge[1].fail_args = [i1b]
+        bridge[1].setfailargs([i1b])
 
         self.cpu.compile_bridge(faildescr1, [i1b], bridge)        
 
@@ -251,7 +253,7 @@
             ResOperation(rop.JUMP, [i1], None, descr=looptoken),
             ]
         inputargs = [i0]
-        operations[2].fail_args = [None, i1, None]
+        operations[2].setfailargs([None, i1, None])
         self.cpu.compile_loop(inputargs, operations, looptoken)
 
         self.cpu.set_future_value_int(0, 2)
@@ -317,7 +319,7 @@
                          descr=BasicFailDescr()),
             ResOperation(rop.JUMP, [z, t], None, descr=looptoken),
             ]
-        operations[-2].fail_args = [t, z]
+        operations[-2].setfailargs([t, z])
         cpu.compile_loop([x, y], operations, looptoken)
         self.cpu.set_future_value_int(0, 0)
         self.cpu.set_future_value_int(1, 10)
@@ -363,7 +365,7 @@
                     ResOperation(rop.FINISH, [v_res], None,
                                  descr=BasicFailDescr(2)),
                     ]
-                ops[1].fail_args = []
+                ops[1].setfailargs([])
             else:
                 v_exc = self.cpu.ts.BoxRef()
                 ops = [
@@ -372,7 +374,7 @@
                                  descr=BasicFailDescr(1)),
                     ResOperation(rop.FINISH, [], None, descr=BasicFailDescr(2)),
                     ]
-                ops[1].fail_args = [v_res]
+                ops[1].setfailargs([v_res])
             #
             looptoken = LoopToken()
             self.cpu.compile_loop([v1, v2], ops, looptoken)
@@ -814,6 +816,23 @@
         r = self.execute_operation(rop.STRGETITEM, [s_box, BoxInt(4)], 'int')
         assert r.value == 153
 
+    def test_copystrcontent(self):
+        s_box = self.alloc_string("abcdef")
+        for s_box in [s_box, s_box.constbox()]:
+            for srcstart_box in [BoxInt(2), ConstInt(2)]:
+                for dststart_box in [BoxInt(3), ConstInt(3)]:
+                    for length_box in [BoxInt(4), ConstInt(4)]:
+                        for r_box_is_const in [False, True]:
+                            r_box = self.alloc_string("!???????!")
+                            if r_box_is_const:
+                                r_box = r_box.constbox()
+                                self.execute_operation(rop.COPYSTRCONTENT,
+                                                       [s_box, r_box,
+                                                        srcstart_box,
+                                                        dststart_box,
+                                                        length_box], 'void')
+                                assert self.look_string(r_box) == "!??cdef?!"
+
     def test_do_unicode_basic(self):
         u = self.cpu.bh_newunicode(5)
         self.cpu.bh_unicodesetitem(u, 4, 123)
@@ -909,8 +928,8 @@
                 ResOperation(rop.GUARD_TRUE, [i2], None),
                 ResOperation(rop.JUMP, jumpargs, None, descr=looptoken),
                 ]
-            operations[2].fail_args = inputargs[:]
-            operations[2].descr = faildescr
+            operations[2].setfailargs(inputargs[:])
+            operations[2].setdescr(faildescr)
             #
             self.cpu.compile_loop(inputargs, operations, looptoken)
             #
@@ -975,7 +994,7 @@
             ResOperation(rop.GUARD_TRUE, [i2], None, descr=faildescr1),
             ResOperation(rop.FINISH, fboxes, None, descr=faildescr2),
             ]
-        operations[-2].fail_args = fboxes
+        operations[-2].setfailargs(fboxes)
         looptoken = LoopToken()
         self.cpu.compile_loop(fboxes, operations, looptoken)
 
@@ -1098,7 +1117,7 @@
                                          descr=BasicFailDescr(4)),
                             ResOperation(rop.FINISH, [], None,
                                          descr=BasicFailDescr(5))]
-                        operations[1].fail_args = []
+                        operations[1].setfailargs([])
                         looptoken = LoopToken()
                         # Use "set" to unique-ify inputargs
                         unique_testcase_list = list(set(testcase))
@@ -1197,6 +1216,10 @@
         s_box = BoxPtr(lltype.cast_opaque_ptr(llmemory.GCREF, s))
         return s_box
 
+    def look_string(self, string_box):
+        s = string_box.getref(lltype.Ptr(rstr.STR))
+        return ''.join(s.chars)
+
     def alloc_unicode(self, unicode):
         u = rstr.mallocunicode(len(unicode))
         for i in range(len(unicode)):
@@ -1404,15 +1427,15 @@
         assert not excvalue
 
     def test_cond_call_gc_wb(self):
-        def func_void(a, b):
-            record.append((a, b))
+        def func_void(a):
+            record.append(a)
         record = []
         #
         S = lltype.GcStruct('S', ('tid', lltype.Signed))
-        FUNC = self.FuncType([lltype.Ptr(S), lltype.Signed], lltype.Void)
+        FUNC = self.FuncType([lltype.Ptr(S)], lltype.Void)
         func_ptr = llhelper(lltype.Ptr(FUNC), func_void)
         funcbox = self.get_funcbox(self.cpu, func_ptr)
-        class WriteBarrierDescr:
+        class WriteBarrierDescr(AbstractDescr):
             jit_wb_if_flag = 4096
             jit_wb_if_flag_byteofs = struct.pack("i", 4096).index('\x10')
             jit_wb_if_flag_singlebyte = 0x10
@@ -1430,10 +1453,10 @@
             sgcref = lltype.cast_opaque_ptr(llmemory.GCREF, s)
             del record[:]
             self.execute_operation(rop.COND_CALL_GC_WB,
-                                   [BoxPtr(sgcref), ConstInt(-2121)],
+                                   [BoxPtr(sgcref)],
                                    'void', descr=WriteBarrierDescr())
             if cond:
-                assert record == [(s, -2121)]
+                assert record == [s]
             else:
                 assert record == []
 
@@ -1462,7 +1485,7 @@
         ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
         ResOperation(rop.FINISH, [i0], None, descr=BasicFailDescr(0))
         ]
-        ops[2].fail_args = [i1, i0]
+        ops[2].setfailargs([i1, i0])
         looptoken = LoopToken()
         self.cpu.compile_loop([i0, i1], ops, looptoken)
         self.cpu.set_future_value_int(0, 20)
@@ -1506,7 +1529,7 @@
         ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
         ResOperation(rop.FINISH, [i2], None, descr=BasicFailDescr(0))
         ]
-        ops[2].fail_args = [i1, i2, i0]
+        ops[2].setfailargs([i1, i2, i0])
         looptoken = LoopToken()
         self.cpu.compile_loop([i0, i1], ops, looptoken)
         self.cpu.set_future_value_int(0, 20)
@@ -1551,7 +1574,7 @@
         ResOperation(rop.GUARD_NOT_FORCED, [], None, descr=faildescr),
         ResOperation(rop.FINISH, [f2], None, descr=BasicFailDescr(0))
         ]
-        ops[2].fail_args = [i1, f2, i0]
+        ops[2].setfailargs([i1, f2, i0])
         looptoken = LoopToken()
         self.cpu.compile_loop([i0, i1], ops, looptoken)
         self.cpu.set_future_value_int(0, 20)
@@ -1824,7 +1847,7 @@
         f2 = float_add(f0, f1)
         finish(f2)'''
         loop = parse(ops)
-        done_number = self.cpu.get_fail_descr_number(loop.operations[-1].descr)
+        done_number = self.cpu.get_fail_descr_number(loop.operations[-1].getdescr())
         looptoken = LoopToken()
         looptoken.outermost_jitdriver_sd = FakeJitDriverSD()
         self.cpu.compile_loop(loop.inputargs, loop.operations, looptoken)

Modified: pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/test/test_ll_random.py	Thu Sep 30 00:16:20 2010
@@ -464,7 +464,7 @@
         self.put(builder, args, descr)
         op = ResOperation(rop.GUARD_NO_EXCEPTION, [], None,
                           descr=BasicFailDescr())
-        op.fail_args = fail_subset
+        op.setfailargs(fail_subset)
         builder.loop.operations.append(op)
 
 # 5. Non raising-call and GUARD_EXCEPTION
@@ -486,7 +486,7 @@
         exc_box = ConstAddr(llmemory.cast_ptr_to_adr(vtableptr), builder.cpu)
         op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
                           descr=BasicFailDescr())
-        op.fail_args = builder.subset_of_intvars(r)
+        op.setfailargs(builder.subset_of_intvars(r))
         op._exc_box = None
         builder.should_fail_by = op
         builder.guard_op = op
@@ -507,7 +507,7 @@
         exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
         op = ResOperation(rop.GUARD_EXCEPTION, [exc_box], BoxPtr(),
                           descr=BasicFailDescr())
-        op.fail_args = fail_subset
+        op.setfailargs(fail_subset)
         builder.loop.operations.append(op)
 
 # 4. raising call and guard_no_exception
@@ -524,7 +524,7 @@
         op = ResOperation(rop.GUARD_NO_EXCEPTION, [], BoxPtr(),
                           descr=BasicFailDescr())
         op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
-        op.fail_args = builder.subset_of_intvars(r)
+        op.setfailargs(builder.subset_of_intvars(r))
         builder.should_fail_by = op
         builder.guard_op = op
         builder.loop.operations.append(op)
@@ -548,7 +548,7 @@
         op = ResOperation(rop.GUARD_EXCEPTION, [other_box], BoxPtr(),
                           descr=BasicFailDescr())
         op._exc_box = ConstAddr(llmemory.cast_ptr_to_adr(exc), builder.cpu)
-        op.fail_args = builder.subset_of_intvars(r)
+        op.setfailargs(builder.subset_of_intvars(r))
         builder.should_fail_by = op
         builder.guard_op = op
         builder.loop.operations.append(op)

Modified: pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/test/test_random.py	Thu Sep 30 00:16:20 2010
@@ -86,7 +86,7 @@
 
     def process_operation(self, s, op, names, subops):
         args = []
-        for v in op.args:
+        for v in op.getarglist():
             if v in names:
                 args.append(names[v])
 ##            elif isinstance(v, ConstAddr):
@@ -105,11 +105,11 @@
                 args.append('ConstInt(%d)' % v.value)
             else:
                 raise NotImplementedError(v)
-        if op.descr is None:
+        if op.getdescr() is None:
             descrstr = ''
         else:
             try:
-                descrstr = ', ' + op.descr._random_info
+                descrstr = ', ' + op.getdescr()._random_info
             except AttributeError:
                 descrstr = ', descr=...'
         print >>s, '        ResOperation(rop.%s, [%s], %s%s),' % (
@@ -129,7 +129,7 @@
 
         def print_loop_prebuilt(ops):
             for op in ops:
-                for arg in op.args:
+                for arg in op.getarglist():
                     if isinstance(arg, ConstPtr):
                         if arg not in names:
                             writevar(arg, 'const_ptr')
@@ -191,7 +191,7 @@
         if self.should_fail_by is None:
             fail_args = self.loop.operations[-1].args
         else:
-            fail_args = self.should_fail_by.fail_args
+            fail_args = self.should_fail_by.getfailargs()
         for i, v in enumerate(fail_args):
             if isinstance(v, (BoxFloat, ConstFloat)):
                 print >>s, ('    assert cpu.get_latest_value_float(%d) == %r'
@@ -284,8 +284,8 @@
             builder.intvars[:] = original_intvars
         else:
             op = ResOperation(rop.GUARD_NO_OVERFLOW, [], None)
-        op.descr = BasicFailDescr()
-        op.fail_args = fail_subset
+        op.setdescr(BasicFailDescr())
+        op.setfailargs(fail_subset)
         builder.loop.operations.append(op)
 
 class BinaryOvfOperation(AbstractOvfOperation, BinaryOperation):
@@ -345,8 +345,8 @@
     def produce_into(self, builder, r):
         op, passing = self.gen_guard(builder, r)
         builder.loop.operations.append(op)
-        op.descr = BasicFailDescr()
-        op.fail_args = builder.subset_of_intvars(r)
+        op.setdescr(BasicFailDescr())
+        op.setfailargs(builder.subset_of_intvars(r))
         if not passing:
             builder.should_fail_by = op
             builder.guard_op = op
@@ -553,7 +553,7 @@
         endvars = []
         used_later = {}
         for op in loop.operations:
-            for v in op.args:
+            for v in op.getarglist():
                 used_later[v] = True
         for v in startvars:
             if v not in used_later:
@@ -577,11 +577,11 @@
 
     def get_fail_args(self):
         if self.should_fail_by.is_guard():
-            assert self.should_fail_by.fail_args is not None
-            return self.should_fail_by.fail_args
+            assert self.should_fail_by.getfailargs() is not None
+            return self.should_fail_by.getfailargs()
         else:
-            assert self.should_fail_by.opnum == rop.FINISH
-            return self.should_fail_by.args
+            assert self.should_fail_by.getopnum() == rop.FINISH
+            return self.should_fail_by.getarglist()
 
     def clear_state(self):
         for v, S, fields in self.prebuilt_ptr_consts:
@@ -606,7 +606,7 @@
             else:
                 raise NotImplementedError(box)
         fail = cpu.execute_token(self.loop.token)
-        assert fail is self.should_fail_by.descr
+        assert fail is self.should_fail_by.getdescr()
         for i, v in enumerate(self.get_fail_args()):
             if isinstance(v, (BoxFloat, ConstFloat)):
                 value = cpu.get_latest_value_float(i)
@@ -620,7 +620,7 @@
         exc = cpu.grab_exc_value()
         if (self.guard_op is not None and
             self.guard_op.is_guard_exception()):
-            if self.guard_op.opnum == rop.GUARD_NO_EXCEPTION:
+            if self.guard_op.getopnum() == rop.GUARD_NO_EXCEPTION:
                 assert exc
         else:
             assert not exc
@@ -633,26 +633,26 @@
             else:
                 op = ResOperation(rop.GUARD_EXCEPTION, [guard_op._exc_box],
                                   BoxPtr())
-            op.descr = BasicFailDescr()
-            op.fail_args = []
+            op.setdescr(BasicFailDescr())
+            op.setfailargs([])
             return op
 
         if self.dont_generate_more:
             return False
         r = self.r
         guard_op = self.guard_op
-        fail_args = guard_op.fail_args
-        fail_descr = guard_op.descr
+        fail_args = guard_op.getfailargs()
+        fail_descr = guard_op.getdescr()
         op = self.should_fail_by
-        if not op.fail_args:
+        if not op.getfailargs():
             return False
         # generate the branch: a sequence of operations that ends in a FINISH
         subloop = DummyLoop([])
         if guard_op.is_guard_exception():
             subloop.operations.append(exc_handling(guard_op))
         bridge_builder = self.builder.fork(self.builder.cpu, subloop,
-                                           op.fail_args[:])
-        self.generate_ops(bridge_builder, r, subloop, op.fail_args[:])
+                                           op.getfailargs()[:])
+        self.generate_ops(bridge_builder, r, subloop, op.getfailargs()[:])
         # note that 'self.guard_op' now points to the guard that will fail in
         # this new bridge, while 'guard_op' still points to the guard that
         # has just failed.

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/assembler.py	Thu Sep 30 00:16:20 2010
@@ -181,6 +181,7 @@
         self.malloc_fixedsize_slowpath1 = 0
         self.malloc_fixedsize_slowpath2 = 0
         self.pending_guard_tokens = None
+        self.memcpy_addr = 0
         self.setup_failure_recovery()
         self._debug = False
         self.debug_counter_descr = cpu.fielddescrof(DEBUG_COUNTER, 'i')
@@ -212,6 +213,7 @@
                 ll_new_unicode = gc_ll_descr.get_funcptr_for_newunicode()
                 self.malloc_unicode_func_addr = rffi.cast(lltype.Signed,
                                                           ll_new_unicode)
+            self.memcpy_addr = self.cpu.cast_ptr_to_int(codebuf.memcpy_fn)
             self.mc = MachineCodeBlockWrapper(self, self.mc_size, self.cpu.profile_agent)
             self._build_failure_recovery(False)
             self._build_failure_recovery(True)
@@ -390,8 +392,8 @@
     def _find_debug_merge_point(self, operations):
 
         for op in operations:
-            if op.opnum == rop.DEBUG_MERGE_POINT:
-                funcname = op.args[0]._get_str()
+            if op.getopnum() == rop.DEBUG_MERGE_POINT:
+                funcname = op.getarg(0)._get_str()
                 break
         else:
             funcname = "<loop %d>" % len(self.loop_run_counters)
@@ -419,7 +421,6 @@
             mc.MOV_ri(X86_64_SCRATCH_REG.value, adr_new_target)
             mc.JMP_r(X86_64_SCRATCH_REG.value)
 
-        mc.valgrind_invalidated()
         mc.done()
 
     def _inject_debugging_code(self, operations):
@@ -475,7 +476,6 @@
         # align, e.g. for Mac OS X        
         aligned_words = align_stack_words(words+2)-2 # 2 = EIP+EBP
         mc.writeimm32(-WORD * aligned_words)
-        mc.valgrind_invalidated()
         mc.done()
 
     def _call_header(self):
@@ -598,7 +598,6 @@
         target = newlooptoken._x86_direct_bootstrap_code
         mc = codebuf.InMemoryCodeBuilder(oldadr, oldadr + 16)
         mc.JMP(imm(target))
-        mc.valgrind_invalidated()
         mc.done()
 
     def _assemble_bootstrap_code(self, inputargs, arglocs):
@@ -684,25 +683,25 @@
             self.mc.POP(loc)
 
     def regalloc_perform(self, op, arglocs, resloc):
-        genop_list[op.opnum](self, op, arglocs, resloc)
+        genop_list[op.getopnum()](self, op, arglocs, resloc)
 
     def regalloc_perform_discard(self, op, arglocs):
-        genop_discard_list[op.opnum](self, op, arglocs)
+        genop_discard_list[op.getopnum()](self, op, arglocs)
 
     def regalloc_perform_with_guard(self, op, guard_op, faillocs,
                                     arglocs, resloc, current_depths):
-        faildescr = guard_op.descr
+        faildescr = guard_op.getdescr()
         assert isinstance(faildescr, AbstractFailDescr)
         faildescr._x86_current_depths = current_depths
-        failargs = guard_op.fail_args
-        guard_opnum = guard_op.opnum
+        failargs = guard_op.getfailargs()
+        guard_opnum = guard_op.getopnum()
         guard_token = self.implement_guard_recovery(guard_opnum,
                                                     faildescr, failargs,
                                                     faillocs)
         if op is None:
             dispatch_opnum = guard_opnum
         else:
-            dispatch_opnum = op.opnum
+            dispatch_opnum = op.getopnum()
         res = genop_guard_list[dispatch_opnum](self, op, guard_op, guard_token,
                                                arglocs, resloc)
         faildescr._x86_adr_jump_offset = res
@@ -712,8 +711,8 @@
         self.regalloc_perform_with_guard(None, guard_op, faillocs, arglocs,
                                          resloc, current_depths)
 
-    def load_effective_addr(self, sizereg, baseofs, scale, result):
-        self.mc.LEA(result, addr_add(imm(0), sizereg, baseofs, scale))
+    def load_effective_addr(self, sizereg, baseofs, scale, result, frm=imm(0)):
+        self.mc.LEA(result, addr_add(frm, sizereg, baseofs, scale))
 
     def _unaryop(asmop):
         def genop_unary(self, op, arglocs, resloc):
@@ -728,7 +727,7 @@
     def _cmpop(cond, rev_cond):
         def genop_cmp(self, op, arglocs, result_loc):
             rl = result_loc.lowest8bits()
-            if isinstance(op.args[0], Const):
+            if isinstance(op.getarg(0), Const):
                 self.mc.CMP(arglocs[1], arglocs[0])
                 self.mc.SET_ir(rx86.Conditions[rev_cond], rl.value)
             else:
@@ -758,8 +757,8 @@
 
     def _cmpop_guard(cond, rev_cond, false_cond, false_rev_cond):
         def genop_cmp_guard(self, op, guard_op, guard_token, arglocs, result_loc):
-            guard_opnum = guard_op.opnum
-            if isinstance(op.args[0], Const):
+            guard_opnum = guard_op.getopnum()
+            if isinstance(op.getarg(0), Const):
                 self.mc.CMP(arglocs[1], arglocs[0])
                 if guard_opnum == rop.GUARD_FALSE:
                     return self.implement_guard(guard_token, rev_cond)
@@ -776,7 +775,7 @@
     def _cmpop_guard_float(cond, false_cond, need_jp):
         def genop_cmp_guard_float(self, op, guard_op, guard_token, arglocs,
                                   result_loc):
-            guard_opnum = guard_op.opnum
+            guard_opnum = guard_op.getopnum()
             self.mc.UCOMISD(arglocs[0], arglocs[1])
             # 16 is enough space for the rel8 jumps below and the rel32
             # jump in implement_guard
@@ -945,7 +944,7 @@
     genop_guard_float_ge = _cmpop_guard_float("AE", "B", False)
 
     def genop_guard_float_ne(self, op, guard_op, guard_token, arglocs, result_loc):
-        guard_opnum = guard_op.opnum
+        guard_opnum = guard_op.getopnum()
         self.mc.UCOMISD(arglocs[0], arglocs[1])
         # 16 is enough space for the rel8 jumps below and the rel32
         # jump in implement_guard
@@ -973,7 +972,7 @@
         self.mc.CVTSI2SD(resloc, arglocs[0])
 
     def genop_guard_int_is_true(self, op, guard_op, guard_token, arglocs, resloc):
-        guard_opnum = guard_op.opnum
+        guard_opnum = guard_op.getopnum()
         self.mc.CMP(arglocs[0], imm(0))
         if guard_opnum == rop.GUARD_TRUE:
             return self.implement_guard(guard_token, 'Z')
@@ -987,7 +986,7 @@
         self.mc.MOVZX8(resloc, rl)
 
     def genop_guard_int_is_zero(self, op, guard_op, guard_token, arglocs, resloc):
-        guard_opnum = guard_op.opnum
+        guard_opnum = guard_op.getopnum()
         self.mc.CMP(arglocs[0], imm(0))
         if guard_opnum == rop.GUARD_TRUE:
             return self.implement_guard(guard_token, 'NZ')
@@ -1031,7 +1030,7 @@
         if self.cpu.vtable_offset is not None:
             assert isinstance(loc, RegLoc)
             assert isinstance(loc_vtable, ImmedLoc)
-            self.mc.MOV_mi((loc.value, self.cpu.vtable_offset), loc_vtable.value)
+            self.mc.MOV(mem(loc, self.cpu.vtable_offset), loc_vtable)
 
     # XXX genop_new is abused for all varsized mallocs with Boehm, for now
     # (instead of genop_new_array, genop_newstr, genop_newunicode)
@@ -1123,7 +1122,7 @@
         assert isinstance(baseofs, ImmedLoc)
         assert isinstance(scale_loc, ImmedLoc)
         dest_addr = AddressLoc(base_loc, ofs_loc, scale_loc.value, baseofs.value)
-        if op.args[2].type == FLOAT:
+        if op.getarg(2).type == FLOAT:
             self.mc.MOVSD(dest_addr, value_loc)
         else:
             if IS_X86_64 and scale_loc.value == 3:
@@ -1219,7 +1218,7 @@
         return addr
 
     def _gen_guard_overflow(self, guard_op, guard_token):
-        guard_opnum = guard_op.opnum
+        guard_opnum = guard_op.getopnum()
         if guard_opnum == rop.GUARD_NO_OVERFLOW:
             return self.implement_guard(guard_token, 'O')
         elif guard_opnum == rop.GUARD_OVERFLOW:
@@ -1247,8 +1246,8 @@
     genop_guard_guard_isnull = genop_guard_guard_false
 
     def genop_guard_guard_value(self, ign_1, guard_op, guard_token, locs, ign_2):
-        if guard_op.args[0].type == FLOAT:
-            assert guard_op.args[1].type == FLOAT
+        if guard_op.getarg(0).type == FLOAT:
+            assert guard_op.getarg(1).type == FLOAT
             self.mc.UCOMISD(locs[0], locs[1])
         else:
             self.mc.CMP(locs[0], locs[1])
@@ -1639,8 +1638,8 @@
         assert isinstance(sizeloc, ImmedLoc)
         size = sizeloc.value
 
-        if isinstance(op.args[0], Const):
-            x = imm(op.args[0].getint())
+        if isinstance(op.getarg(0), Const):
+            x = imm(op.getarg(0).getint())
         else:
             x = arglocs[1]
         if x is eax:
@@ -1659,7 +1658,7 @@
     
     def genop_guard_call_may_force(self, op, guard_op, guard_token,
                                    arglocs, result_loc):
-        faildescr = guard_op.descr
+        faildescr = guard_op.getdescr()
         fail_index = self.cpu.get_fail_descr_number(faildescr)
         self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
         self.genop_call(op, arglocs, result_loc)
@@ -1668,10 +1667,10 @@
 
     def genop_guard_call_assembler(self, op, guard_op, guard_token,
                                    arglocs, result_loc):
-        faildescr = guard_op.descr
+        faildescr = guard_op.getdescr()
         fail_index = self.cpu.get_fail_descr_number(faildescr)
         self.mc.MOV_bi(FORCE_INDEX_OFS, fail_index)
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, LoopToken)
         assert len(arglocs) - 2 == len(descr._x86_arglocs[0])
         #
@@ -1756,7 +1755,7 @@
     def genop_discard_cond_call_gc_wb(self, op, arglocs):
         # use 'mc._mc' directly instead of 'mc', to avoid
         # bad surprizes if the code buffer is mostly full
-        descr = op.descr
+        descr = op.getdescr()
         if we_are_translated():
             cls = self.cpu.gc_ll_descr.has_write_barrier_class()
             assert cls is not None and isinstance(descr, cls)
@@ -1768,6 +1767,7 @@
         jz_location = self.mc.get_relative_pos()
         # the following is supposed to be the slow path, so whenever possible
         # we choose the most compact encoding over the most efficient one.
+        # XXX improve a bit, particularly for IS_X86_64.
         for i in range(len(arglocs)-1, -1, -1):
             loc = arglocs[i]
             if isinstance(loc, RegLoc):
@@ -1780,12 +1780,11 @@
                     self.mc.PUSH_i32(loc.getint())
         
         if IS_X86_64:
-            # We clobber these registers to pass the arguments, but that's
+            # We clobber this register to pass the arguments, but that's
             # okay, because consider_cond_call_gc_wb makes sure that any
             # caller-save registers with values in them are present in arglocs,
             # so they are saved on the stack above and restored below 
             self.mc.MOV_rs(edi.value, 0)
-            self.mc.MOV_rs(esi.value, 8)
 
         # misaligned stack in the call, but it's ok because the write barrier
         # is not going to call anything more.  Also, this assumes that the
@@ -1866,6 +1865,7 @@
         offset = self.mc.get_relative_pos() - jmp_adr
         assert 0 < offset <= 127
         self.mc.overwrite(jmp_adr-1, [chr(offset)])
+        # on 64-bits, 'tid' is a value that fits in 31 bits
         self.mc.MOV_mi((eax.value, 0), tid)
         self.mc.MOV(heap(nursery_free_adr), edx)
         

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/codebuf.py	Thu Sep 30 00:16:20 2010
@@ -1,6 +1,6 @@
 
 import os, sys
-from pypy.rpython.lltypesystem import lltype, rffi
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi
 from pypy.translator.tool.cbuild import ExternalCompilationInfo
 from pypy.jit.backend.x86.rx86 import X86_32_CodeBuilder, X86_64_CodeBuilder
 from pypy.jit.backend.x86.regloc import LocationCodeBuilder
@@ -29,6 +29,9 @@
         self._pos = 0
 
     def overwrite(self, pos, listofchars):
+        """ Overwrite a specified position with a given list of chars
+        (position is relative
+        """
         make_sure_not_resized(listofchars)
         assert pos + len(listofchars) <= self._size
         for c in listofchars:
@@ -49,35 +52,38 @@
         self.writechar(chr(n))
 
     def get_relative_pos(self):
+        """ Current position, relative to code start
+        """
         return self._pos
 
     def tell(self):
+        """ Tell the current address at machine code block
+        """
         baseaddr = rffi.cast(lltype.Signed, self._data)
         return baseaddr + self._pos
 
-    def seekback(self, count):
-        pos = self._pos - count
-        self._pos = pos
-        self._last_dump_start = pos
-
     def done(self):
-        # normally, no special action is needed here
+        """ Called at the end of writing of each piece of machine code.
+        Even though this function doesn't do much, it's extremely important
+        to call this for all tools to work, like valgrind or machine code
+        dumping
+        """
+        self.valgrind_invalidated()
         if machine_code_dumper.enabled:
             machine_code_dumper.dump_range(self, self._last_dump_start,
                                            self._pos)
             self._last_dump_start = self._pos
 
-    def redone(self, frm, to):
-        if machine_code_dumper.enabled:
-            baseaddr = rffi.cast(lltype.Signed, self._data)
-            machine_code_dumper.dump_range(self, frm - baseaddr, to - baseaddr)
-
     def log(self, msg):
+        """ Insert information into machine code dumper, if enabled
+        """
         if machine_code_dumper.enabled:
             machine_code_dumper.dump(self, 'LOG', self._pos, msg)
 
     def valgrind_invalidated(self):
-        # mark the range of the InMemoryCodeBuilder as invalidated for Valgrind
+        """ Mark the range of the InMemoryCodeBuilder as invalidated
+        for Valgrind
+        """
         from pypy.jit.backend.x86 import valgrind
         valgrind.discard_translations(self._data, self._size)
 
@@ -146,7 +152,7 @@
             # Hack to make sure that mcs are not within 32-bits of one
             # another for testing purposes
             from pypy.rlib.rmmap import hint
-            hint.pos += 0xFFFFFFFF
+            hint.pos += 0x80000000 - map_size
             
         self._init(data, map_size)
 
@@ -158,6 +164,12 @@
 
 # ____________________________________________________________
 
+memcpy_fn = rffi.llexternal('memcpy', [llmemory.Address, llmemory.Address,
+                                       rffi.SIZE_T], lltype.Void,
+                            sandboxsafe=True, _nowrapper=True)
+
+# ____________________________________________________________
+
 if sys.platform == 'win32':
     ensure_sse2_floats = lambda : None
 else:

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/regalloc.py	Thu Sep 30 00:16:20 2010
@@ -224,7 +224,7 @@
         assert tmpreg not in nonfloatlocs
         assert xmmtmp not in floatlocs
         # note: we need to make a copy of inputargs because possibly_free_vars
-        # is also used on op.args, which is a non-resizable list
+        # is also used on op args, which is a non-resizable list
         self.possibly_free_vars(list(inputargs))
         return nonfloatlocs, floatlocs
 
@@ -234,6 +234,12 @@
         else:
             self.rm.possibly_free_var(var)
 
+    def possibly_free_vars_for_op(self, op):
+        for i in range(op.numargs()):
+            var = op.getarg(i)
+            if var is not None: # xxx kludgy
+                self.possibly_free_var(var)
+
     def possibly_free_vars(self, vars):
         for var in vars:
             if var is not None: # xxx kludgy
@@ -262,12 +268,12 @@
                                               selected_reg, need_lower_byte)
 
     def _compute_loop_consts(self, inputargs, jump, looptoken):
-        if jump.opnum != rop.JUMP or jump.descr is not looptoken:
+        if jump.getopnum() != rop.JUMP or jump.getdescr() is not looptoken:
             loop_consts = {}
         else:
             loop_consts = {}
             for i in range(len(inputargs)):
-                if inputargs[i] is jump.args[i]:
+                if inputargs[i] is jump.getarg(i):
                     loop_consts[inputargs[i]] = i
         return loop_consts
 
@@ -301,7 +307,7 @@
             if reg not in used:
                 self.xrm.free_regs.append(reg)
         # note: we need to make a copy of inputargs because possibly_free_vars
-        # is also used on op.args, which is a non-resizable list
+        # is also used on op args, which is a non-resizable list
         self.possibly_free_vars(list(inputargs))
         self.rm._check_invariants()
         self.xrm._check_invariants()
@@ -312,7 +318,7 @@
         self.assembler.regalloc_perform(op, arglocs, result_loc)
 
     def locs_for_fail(self, guard_op):
-        return [self.loc(v) for v in guard_op.fail_args]
+        return [self.loc(v) for v in guard_op.getfailargs()]
 
     def perform_with_guard(self, op, guard_op, arglocs, result_loc):
         faillocs = self.locs_for_fail(guard_op)
@@ -324,7 +330,7 @@
                                                    current_depths)
         if op.result is not None:
             self.possibly_free_var(op.result)
-        self.possibly_free_vars(guard_op.fail_args)
+        self.possibly_free_vars(guard_op.getfailargs())
 
     def perform_guard(self, guard_op, arglocs, result_loc):
         faillocs = self.locs_for_fail(guard_op)
@@ -338,7 +344,7 @@
         self.assembler.regalloc_perform_guard(guard_op, faillocs, arglocs,
                                               result_loc,
                                               current_depths)
-        self.possibly_free_vars(guard_op.fail_args)        
+        self.possibly_free_vars(guard_op.getfailargs())        
 
     def PerformDiscard(self, op, arglocs):
         if not we_are_translated():
@@ -346,24 +352,24 @@
         self.assembler.regalloc_perform_discard(op, arglocs)
 
     def can_merge_with_next_guard(self, op, i, operations):
-        if op.opnum == rop.CALL_MAY_FORCE or op.opnum == rop.CALL_ASSEMBLER:
-            assert operations[i + 1].opnum == rop.GUARD_NOT_FORCED
+        if op.getopnum() == rop.CALL_MAY_FORCE or op.getopnum() == rop.CALL_ASSEMBLER:
+            assert operations[i + 1].getopnum() == rop.GUARD_NOT_FORCED
             return True
         if not op.is_comparison():
             if op.is_ovf():
-                if (operations[i + 1].opnum != rop.GUARD_NO_OVERFLOW and
-                    operations[i + 1].opnum != rop.GUARD_OVERFLOW):
+                if (operations[i + 1].getopnum() != rop.GUARD_NO_OVERFLOW and
+                    operations[i + 1].getopnum() != rop.GUARD_OVERFLOW):
                     print "int_xxx_ovf not followed by guard_(no)_overflow"
                     raise AssertionError
                 return True
             return False
-        if (operations[i + 1].opnum != rop.GUARD_TRUE and
-            operations[i + 1].opnum != rop.GUARD_FALSE):
+        if (operations[i + 1].getopnum() != rop.GUARD_TRUE and
+            operations[i + 1].getopnum() != rop.GUARD_FALSE):
             return False
-        if operations[i + 1].args[0] is not op.result:
+        if operations[i + 1].getarg(0) is not op.result:
             return False
         if (self.longevity[op.result][1] > i + 1 or
-            op.result in operations[i + 1].fail_args):
+            op.result in operations[i + 1].getfailargs()):
             return False
         return True
 
@@ -376,13 +382,13 @@
             self.xrm.position = i
             if op.has_no_side_effect() and op.result not in self.longevity:
                 i += 1
-                self.possibly_free_vars(op.args)
+                self.possibly_free_vars_for_op(op)
                 continue
             if self.can_merge_with_next_guard(op, i, operations):
-                oplist_with_guard[op.opnum](self, op, operations[i + 1])
+                oplist_with_guard[op.getopnum()](self, op, operations[i + 1])
                 i += 1
             else:
-                oplist[op.opnum](self, op)
+                oplist[op.getopnum()](self, op)
             if op.result is not None:
                 self.possibly_free_var(op.result)
             self.rm._check_invariants()
@@ -402,19 +408,20 @@
             op = operations[i]
             if op.result is not None:
                 start_live[op.result] = i
-            for arg in op.args:
+            for j in range(op.numargs()):
+                arg = op.getarg(j)
                 if isinstance(arg, Box):
                     if arg not in start_live:
-                        print "Bogus arg in operation %d at %d" % (op.opnum, i)
+                        print "Bogus arg in operation %d at %d" % (op.getopnum(), i)
                         raise AssertionError
                     longevity[arg] = (start_live[arg], i)
             if op.is_guard():
-                for arg in op.fail_args:
+                for arg in op.getfailargs():
                     if arg is None: # hole
                         continue
                     assert isinstance(arg, Box)
                     if arg not in start_live:
-                        print "Bogus arg in guard %d at %d" % (op.opnum, i)
+                        print "Bogus arg in guard %d at %d" % (op.getopnum(), i)
                         raise AssertionError
                     longevity[arg] = (start_live[arg], i)
         for arg in inputargs:
@@ -432,9 +439,9 @@
         return self.rm.loc(v)
 
     def _consider_guard(self, op):
-        loc = self.rm.make_sure_var_in_reg(op.args[0])
+        loc = self.rm.make_sure_var_in_reg(op.getarg(0))
         self.perform_guard(op, [loc], None)
-        self.rm.possibly_free_var(op.args[0])
+        self.rm.possibly_free_var(op.getarg(0))
 
     consider_guard_true = _consider_guard
     consider_guard_false = _consider_guard
@@ -442,52 +449,54 @@
     consider_guard_isnull = _consider_guard
 
     def consider_finish(self, op):
-        locs = [self.loc(v) for v in op.args]
-        locs_are_ref = [v.type == REF for v in op.args]
-        fail_index = self.assembler.cpu.get_fail_descr_number(op.descr)
+        locs = [self.loc(op.getarg(i)) for i in range(op.numargs())]
+        locs_are_ref = [op.getarg(i).type == REF for i in range(op.numargs())]
+        fail_index = self.assembler.cpu.get_fail_descr_number(op.getdescr())
         self.assembler.generate_failure(fail_index, locs, self.exc,
                                         locs_are_ref)
-        self.possibly_free_vars(op.args)
+        self.possibly_free_vars_for_op(op)
 
     def consider_guard_no_exception(self, op):
         self.perform_guard(op, [], None)
 
     def consider_guard_exception(self, op):
-        loc = self.rm.make_sure_var_in_reg(op.args[0])
+        loc = self.rm.make_sure_var_in_reg(op.getarg(0))
         box = TempBox()
-        loc1 = self.rm.force_allocate_reg(box, op.args)
+        args = op.getarglist()
+        loc1 = self.rm.force_allocate_reg(box, args)
         if op.result in self.longevity:
             # this means, is it ever used
-            resloc = self.rm.force_allocate_reg(op.result, op.args + [box])
+            resloc = self.rm.force_allocate_reg(op.result, args + [box])
         else:
             resloc = None
         self.perform_guard(op, [loc, loc1], resloc)
-        self.rm.possibly_free_vars(op.args)
+        self.rm.possibly_free_vars_for_op(op)
         self.rm.possibly_free_var(box)
 
     consider_guard_no_overflow = consider_guard_no_exception
     consider_guard_overflow    = consider_guard_no_exception
 
     def consider_guard_value(self, op):
-        x = self.make_sure_var_in_reg(op.args[0])
-        y = self.loc(op.args[1])
+        x = self.make_sure_var_in_reg(op.getarg(0))
+        y = self.loc(op.getarg(1))
         self.perform_guard(op, [x, y], None)
-        self.possibly_free_vars(op.args)
+        self.possibly_free_vars_for_op(op)
 
     def consider_guard_class(self, op):
-        assert isinstance(op.args[0], Box)
-        x = self.rm.make_sure_var_in_reg(op.args[0])
-        y = self.loc(op.args[1])
+        assert isinstance(op.getarg(0), Box)
+        x = self.rm.make_sure_var_in_reg(op.getarg(0))
+        y = self.loc(op.getarg(1))
         self.perform_guard(op, [x, y], None)
-        self.rm.possibly_free_vars(op.args)
+        self.rm.possibly_free_vars_for_op(op)
 
     consider_guard_nonnull_class = consider_guard_class
 
     def _consider_binop_part(self, op):
-        x = op.args[0]
-        argloc = self.loc(op.args[1])
-        loc = self.rm.force_result_in_reg(op.result, x, op.args)
-        self.rm.possibly_free_var(op.args[1])
+        x = op.getarg(0)
+        argloc = self.loc(op.getarg(1))
+        args = op.getarglist()
+        loc = self.rm.force_result_in_reg(op.result, x, args)
+        self.rm.possibly_free_var(op.getarg(1))
         return loc, argloc
 
     def _consider_binop(self, op):
@@ -510,26 +519,27 @@
     consider_int_add_ovf = _consider_binop_with_guard
 
     def consider_int_neg(self, op):
-        res = self.rm.force_result_in_reg(op.result, op.args[0])
+        res = self.rm.force_result_in_reg(op.result, op.getarg(0))
         self.Perform(op, [res], res)
 
     consider_int_invert = consider_int_neg
 
     def consider_int_lshift(self, op):
-        if isinstance(op.args[1], Const):
-            loc2 = self.rm.convert_to_imm(op.args[1])
+        if isinstance(op.getarg(1), Const):
+            loc2 = self.rm.convert_to_imm(op.getarg(1))
         else:
-            loc2 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx)
-        loc1 = self.rm.force_result_in_reg(op.result, op.args[0], op.args)
+            loc2 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
+        args = op.getarglist()
+        loc1 = self.rm.force_result_in_reg(op.result, op.getarg(0), args)
         self.Perform(op, [loc1, loc2], loc1)
-        self.rm.possibly_free_vars(op.args)
+        self.rm.possibly_free_vars_for_op(op)
 
     consider_int_rshift  = consider_int_lshift
     consider_uint_rshift = consider_int_lshift
 
     def _consider_int_div_or_mod(self, op, resultreg, trashreg):
-        l0 = self.rm.make_sure_var_in_reg(op.args[0], selected_reg=eax)
-        l1 = self.rm.make_sure_var_in_reg(op.args[1], selected_reg=ecx)
+        l0 = self.rm.make_sure_var_in_reg(op.getarg(0), selected_reg=eax)
+        l1 = self.rm.make_sure_var_in_reg(op.getarg(1), selected_reg=ecx)
         l2 = self.rm.force_allocate_reg(op.result, selected_reg=resultreg)
         # the register (eax or edx) not holding what we are looking for
         # will be just trash after that operation
@@ -538,7 +548,7 @@
         assert l0 is eax
         assert l1 is ecx
         assert l2 is resultreg
-        self.rm.possibly_free_vars(op.args)
+        self.rm.possibly_free_vars_for_op(op)
         self.rm.possibly_free_var(tmpvar)
 
     def consider_int_mod(self, op):
@@ -552,17 +562,18 @@
     consider_uint_floordiv = consider_int_floordiv
 
     def _consider_compop(self, op, guard_op):
-        vx = op.args[0]
-        vy = op.args[1]
+        vx = op.getarg(0)
+        vy = op.getarg(1)
         arglocs = [self.loc(vx), self.loc(vy)]
         if (vx in self.rm.reg_bindings or vy in self.rm.reg_bindings or
             isinstance(vx, Const) or isinstance(vy, Const)):
             pass
         else:
             arglocs[0] = self.rm.make_sure_var_in_reg(vx)
-        self.rm.possibly_free_vars(op.args)
+        args = op.getarglist()
+        self.rm.possibly_free_vars(args)
         if guard_op is None:
-            loc = self.rm.force_allocate_reg(op.result, op.args,
+            loc = self.rm.force_allocate_reg(op.result, args,
                                              need_lower_byte=True)
             self.Perform(op, arglocs, loc)
         else:
@@ -582,10 +593,11 @@
     consider_ptr_ne = _consider_compop
 
     def _consider_float_op(self, op):
-        loc1 = self.xrm.loc(op.args[1])
-        loc0 = self.xrm.force_result_in_reg(op.result, op.args[0], op.args)
+        loc1 = self.xrm.loc(op.getarg(1))
+        args = op.getarglist()
+        loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0), args)
         self.Perform(op, [loc0, loc1], loc0)
-        self.xrm.possibly_free_vars(op.args)
+        self.xrm.possibly_free_vars_for_op(op)
 
     consider_float_add = _consider_float_op
     consider_float_sub = _consider_float_op
@@ -593,11 +605,12 @@
     consider_float_truediv = _consider_float_op
 
     def _consider_float_cmp(self, op, guard_op):
-        loc0 = self.xrm.make_sure_var_in_reg(op.args[0], op.args,
+        args = op.getarglist()
+        loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), args,
                                              imm_fine=False)
-        loc1 = self.xrm.loc(op.args[1])
+        loc1 = self.xrm.loc(op.getarg(1))
         arglocs = [loc0, loc1]
-        self.xrm.possibly_free_vars(op.args)
+        self.xrm.possibly_free_vars_for_op(op)
         if guard_op is None:
             res = self.rm.force_allocate_reg(op.result, need_lower_byte=True)
             self.Perform(op, arglocs, res)
@@ -612,26 +625,26 @@
     consider_float_ge = _consider_float_cmp
 
     def consider_float_neg(self, op):
-        loc0 = self.xrm.force_result_in_reg(op.result, op.args[0])
+        loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0))
         self.Perform(op, [loc0], loc0)
-        self.xrm.possibly_free_var(op.args[0])
+        self.xrm.possibly_free_var(op.getarg(0))
 
     def consider_float_abs(self, op):
-        loc0 = self.xrm.force_result_in_reg(op.result, op.args[0])
+        loc0 = self.xrm.force_result_in_reg(op.result, op.getarg(0))
         self.Perform(op, [loc0], loc0)
-        self.xrm.possibly_free_var(op.args[0])
+        self.xrm.possibly_free_var(op.getarg(0))
 
     def consider_cast_float_to_int(self, op):
-        loc0 = self.xrm.make_sure_var_in_reg(op.args[0], imm_fine=False)
+        loc0 = self.xrm.make_sure_var_in_reg(op.getarg(0), imm_fine=False)
         loc1 = self.rm.force_allocate_reg(op.result)
         self.Perform(op, [loc0], loc1)
-        self.xrm.possibly_free_var(op.args[0])
+        self.xrm.possibly_free_var(op.getarg(0))
 
     def consider_cast_int_to_float(self, op):
-        loc0 = self.rm.loc(op.args[0])
+        loc0 = self.rm.loc(op.getarg(0))
         loc1 = self.xrm.force_allocate_reg(op.result)
         self.Perform(op, [loc0], loc1)
-        self.rm.possibly_free_var(op.args[0])
+        self.rm.possibly_free_var(op.getarg(0))
 
     def _call(self, op, arglocs, force_store=[], guard_not_forced_op=None):
         save_all_regs = guard_not_forced_op is not None
@@ -650,11 +663,11 @@
             self.Perform(op, arglocs, resloc)
 
     def _consider_call(self, op, guard_not_forced_op=None):
-        calldescr = op.descr
+        calldescr = op.getdescr()
         assert isinstance(calldescr, BaseCallDescr)
-        assert len(calldescr.arg_classes) == len(op.args) - 1
+        assert len(calldescr.arg_classes) == op.numargs() - 1
         size = calldescr.get_result_size(self.translate_support_code)
-        self._call(op, [imm(size)] + [self.loc(arg) for arg in op.args],
+        self._call(op, [imm(size)] + [self.loc(op.getarg(i)) for i in range(op.numargs())],
                    guard_not_forced_op=guard_not_forced_op)
 
     def consider_call(self, op):
@@ -665,30 +678,27 @@
         self._consider_call(op, guard_op)
 
     def consider_call_assembler(self, op, guard_op):
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, LoopToken)
         jd = descr.outermost_jitdriver_sd
         assert jd is not None
         size = jd.portal_calldescr.get_result_size(self.translate_support_code)
         vable_index = jd.index_of_virtualizable
         if vable_index >= 0:
-            self.rm._sync_var(op.args[vable_index])
-            vable = self.fm.loc(op.args[vable_index])
+            self.rm._sync_var(op.getarg(vable_index))
+            vable = self.fm.loc(op.getarg(vable_index))
         else:
             vable = imm(0)
         self._call(op, [imm(size), vable] +
-                   [self.loc(arg) for arg in op.args],
+                   [self.loc(op.getarg(i)) for i in range(op.numargs())],
                    guard_not_forced_op=guard_op)
         
     def consider_cond_call_gc_wb(self, op):
         assert op.result is None
-        loc_newvalue = self.rm.make_sure_var_in_reg(op.args[1], op.args)
-        # ^^^ we force loc_newvalue in a reg (unless it's a Const),
-        # because it will be needed anyway by the following setfield_gc.
-        # It avoids loading it twice from the memory.
-        loc_base = self.rm.make_sure_var_in_reg(op.args[0], op.args,
+        args = op.getarglist()
+        loc_base = self.rm.make_sure_var_in_reg(op.getarg(0), args,
                                                 imm_fine=False)
-        arglocs = [loc_base, loc_newvalue]
+        arglocs = [loc_base]
         # add eax, ecx and edx as extra "arguments" to ensure they are
         # saved and restored.  Fish in self.rm to know which of these
         # registers really need to be saved (a bit of a hack).  Moreover,
@@ -700,7 +710,7 @@
                 and self.rm.stays_alive(v)):
                 arglocs.append(reg)
         self.PerformDiscard(op, arglocs)
-        self.rm.possibly_free_vars(op.args)
+        self.rm.possibly_free_vars_for_op(op)
 
     def _fastpath_malloc(self, op, descr):
         assert isinstance(descr, BaseSizeDescr)
@@ -725,15 +735,15 @@
 
     def consider_new(self, op):
         gc_ll_descr = self.assembler.cpu.gc_ll_descr
-        if gc_ll_descr.can_inline_malloc(op.descr):
-            self._fastpath_malloc(op, op.descr)
+        if gc_ll_descr.can_inline_malloc(op.getdescr()):
+            self._fastpath_malloc(op, op.getdescr())
         else:
-            args = gc_ll_descr.args_for_new(op.descr)
+            args = gc_ll_descr.args_for_new(op.getdescr())
             arglocs = [imm(x) for x in args]
             return self._call(op, arglocs)
 
     def consider_new_with_vtable(self, op):
-        classint = op.args[0].getint()
+        classint = op.getarg(0).getint()
         descrsize = heaptracker.vtable2descr(self.assembler.cpu, classint)
         if self.assembler.cpu.gc_ll_descr.can_inline_malloc(descrsize):
             self._fastpath_malloc(op, descrsize)
@@ -742,34 +752,34 @@
         else:
             args = self.assembler.cpu.gc_ll_descr.args_for_new(descrsize)
             arglocs = [imm(x) for x in args]
-            arglocs.append(self.loc(op.args[0]))
+            arglocs.append(self.loc(op.getarg(0)))
             return self._call(op, arglocs)
 
     def consider_newstr(self, op):
         gc_ll_descr = self.assembler.cpu.gc_ll_descr
         if gc_ll_descr.get_funcptr_for_newstr is not None:
             # framework GC
-            loc = self.loc(op.args[0])
+            loc = self.loc(op.getarg(0))
             return self._call(op, [loc])
         # boehm GC (XXX kill the following code at some point)
         ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.STR, self.translate_support_code)
         assert itemsize == 1
-        return self._malloc_varsize(ofs_items, ofs, 0, op.args[0],
+        return self._malloc_varsize(ofs_items, ofs, 0, op.getarg(0),
                                     op.result)
 
     def consider_newunicode(self, op):
         gc_ll_descr = self.assembler.cpu.gc_ll_descr
         if gc_ll_descr.get_funcptr_for_newunicode is not None:
             # framework GC
-            loc = self.loc(op.args[0])
+            loc = self.loc(op.getarg(0))
             return self._call(op, [loc])
         # boehm GC (XXX kill the following code at some point)
         ofs_items, itemsize, ofs = symbolic.get_array_token(rstr.UNICODE, self.translate_support_code)
         if itemsize == 4:
-            return self._malloc_varsize(ofs_items, ofs, 2, op.args[0],
+            return self._malloc_varsize(ofs_items, ofs, 2, op.getarg(0),
                                         op.result)
         elif itemsize == 2:
-            return self._malloc_varsize(ofs_items, ofs, 1, op.args[0],
+            return self._malloc_varsize(ofs_items, ofs, 1, op.getarg(0),
                                         op.result)
         else:
             assert False, itemsize
@@ -784,7 +794,7 @@
         else:
             tempbox = None
             other_loc = imm(ofs_items + (v.getint() << scale))
-        self._call(ResOperation(rop.NEW, [v], res_v),
+        self._call(ResOperation(rop.NEW, [], res_v),
                    [other_loc], [v])
         loc = self.rm.make_sure_var_in_reg(v, [res_v])
         assert self.loc(res_v) == eax
@@ -792,22 +802,22 @@
         self.rm.possibly_free_var(v)
         if tempbox is not None:
             self.rm.possibly_free_var(tempbox)
-        self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [], None),
+        self.PerformDiscard(ResOperation(rop.SETFIELD_GC, [None, None], None),
                             [eax, imm(ofs_length), imm(WORD), loc])
 
     def consider_new_array(self, op):
         gc_ll_descr = self.assembler.cpu.gc_ll_descr
         if gc_ll_descr.get_funcptr_for_newarray is not None:
             # framework GC
-            args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.descr)
+            args = self.assembler.cpu.gc_ll_descr.args_for_new_array(op.getdescr())
             arglocs = [imm(x) for x in args]
-            arglocs.append(self.loc(op.args[0]))
+            arglocs.append(self.loc(op.getarg(0)))
             return self._call(op, arglocs)
         # boehm GC (XXX kill the following code at some point)
         scale_of_field, basesize, ofs_length, _ = (
-            self._unpack_arraydescr(op.descr))
+            self._unpack_arraydescr(op.getdescr()))
         return self._malloc_varsize(basesize, ofs_length, scale_of_field,
-                                    op.args[0], op.result)
+                                    op.getarg(0), op.result)
 
     def _unpack_arraydescr(self, arraydescr):
         assert isinstance(arraydescr, BaseArrayDescr)
@@ -829,50 +839,54 @@
         return imm(ofs), imm(size), ptr
 
     def consider_setfield_gc(self, op):
-        ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.descr)
+        ofs_loc, size_loc, ptr = self._unpack_fielddescr(op.getdescr())
         assert isinstance(size_loc, ImmedLoc)
         if size_loc.value == 1:
             need_lower_byte = True
         else:
             need_lower_byte = False
-        base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
-        value_loc = self.make_sure_var_in_reg(op.args[1], op.args,
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+        value_loc = self.make_sure_var_in_reg(op.getarg(1), args,
                                               need_lower_byte=need_lower_byte)
-        self.possibly_free_vars(op.args)
+        self.possibly_free_vars(args)
         self.PerformDiscard(op, [base_loc, ofs_loc, size_loc, value_loc])
 
     consider_setfield_raw = consider_setfield_gc
 
     def consider_strsetitem(self, op):
-        base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
-        ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
-        value_loc = self.rm.make_sure_var_in_reg(op.args[2], op.args,
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+        ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+        value_loc = self.rm.make_sure_var_in_reg(op.getarg(2), args,
                                                  need_lower_byte=True)
-        self.rm.possibly_free_vars(op.args)
+        self.rm.possibly_free_vars_for_op(op)
         self.PerformDiscard(op, [base_loc, ofs_loc, value_loc])
 
     consider_unicodesetitem = consider_strsetitem
 
     def consider_setarrayitem_gc(self, op):
-        scale, ofs, _, ptr = self._unpack_arraydescr(op.descr)
-        base_loc  = self.rm.make_sure_var_in_reg(op.args[0], op.args)
+        scale, ofs, _, ptr = self._unpack_arraydescr(op.getdescr())
+        args = op.getarglist()
+        base_loc  = self.rm.make_sure_var_in_reg(op.getarg(0), args)
         if scale == 0:
             need_lower_byte = True
         else:
             need_lower_byte = False
-        value_loc = self.make_sure_var_in_reg(op.args[2], op.args,
+        value_loc = self.make_sure_var_in_reg(op.getarg(2), args,
                                           need_lower_byte=need_lower_byte)
-        ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
-        self.possibly_free_vars(op.args)
+        ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+        self.possibly_free_vars(args)
         self.PerformDiscard(op, [base_loc, ofs_loc, value_loc,
                                  imm(scale), imm(ofs)])
 
     consider_setarrayitem_raw = consider_setarrayitem_gc
 
     def consider_getfield_gc(self, op):
-        ofs_loc, size_loc, _ = self._unpack_fielddescr(op.descr)
-        base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
-        self.rm.possibly_free_vars(op.args)
+        ofs_loc, size_loc, _ = self._unpack_fielddescr(op.getdescr())
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+        self.rm.possibly_free_vars(args)
         result_loc = self.force_allocate_reg(op.result)
         self.Perform(op, [base_loc, ofs_loc, size_loc], result_loc)
 
@@ -881,10 +895,11 @@
     consider_getfield_gc_pure = consider_getfield_gc
 
     def consider_getarrayitem_gc(self, op):
-        scale, ofs, _, _ = self._unpack_arraydescr(op.descr)
-        base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
-        ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
-        self.rm.possibly_free_vars(op.args)
+        scale, ofs, _, _ = self._unpack_arraydescr(op.getdescr())
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+        ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+        self.rm.possibly_free_vars_for_op(op)
         result_loc = self.force_allocate_reg(op.result)
         self.Perform(op, [base_loc, ofs_loc, imm(scale), imm(ofs)], result_loc)
 
@@ -893,8 +908,8 @@
 
     def consider_int_is_true(self, op, guard_op):
         # doesn't need arg to be in a register
-        argloc = self.loc(op.args[0])
-        self.rm.possibly_free_var(op.args[0])
+        argloc = self.loc(op.getarg(0))
+        self.rm.possibly_free_var(op.getarg(0))
         if guard_op is not None:
             self.perform_with_guard(op, guard_op, [argloc], None)
         else:
@@ -904,42 +919,81 @@
     consider_int_is_zero = consider_int_is_true
 
     def consider_same_as(self, op):
-        argloc = self.loc(op.args[0])
-        self.possibly_free_var(op.args[0])
+        argloc = self.loc(op.getarg(0))
+        self.possibly_free_var(op.getarg(0))
         resloc = self.force_allocate_reg(op.result)
         self.Perform(op, [argloc], resloc)
     #consider_cast_ptr_to_int = consider_same_as
 
     def consider_strlen(self, op):
-        base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
-        self.rm.possibly_free_vars(op.args)
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+        self.rm.possibly_free_vars_for_op(op)
         result_loc = self.rm.force_allocate_reg(op.result)
         self.Perform(op, [base_loc], result_loc)
 
     consider_unicodelen = consider_strlen
 
     def consider_arraylen_gc(self, op):
-        arraydescr = op.descr
+        arraydescr = op.getdescr()
         assert isinstance(arraydescr, BaseArrayDescr)
         ofs = arraydescr.get_ofs_length(self.translate_support_code)
-        base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
-        self.rm.possibly_free_vars(op.args)
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+        self.rm.possibly_free_vars_for_op(op)
         result_loc = self.rm.force_allocate_reg(op.result)
         self.Perform(op, [base_loc, imm(ofs)], result_loc)
 
     def consider_strgetitem(self, op):
-        base_loc = self.rm.make_sure_var_in_reg(op.args[0], op.args)
-        ofs_loc = self.rm.make_sure_var_in_reg(op.args[1], op.args)
-        self.rm.possibly_free_vars(op.args)
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(op.getarg(0), args)
+        ofs_loc = self.rm.make_sure_var_in_reg(op.getarg(1), args)
+        self.rm.possibly_free_vars_for_op(op)
         result_loc = self.rm.force_allocate_reg(op.result)
         self.Perform(op, [base_loc, ofs_loc], result_loc)
 
     consider_unicodegetitem = consider_strgetitem
 
+    def consider_copystrcontent(self, op):
+        # compute the source address
+        args = op.getarglist()
+        base_loc = self.rm.make_sure_var_in_reg(args[0], args)
+        ofs_loc = self.rm.make_sure_var_in_reg(args[2], args)
+        self.rm.possibly_free_var(args[0])
+        self.rm.possibly_free_var(args[2])
+        srcaddr_box = TempBox()
+        srcaddr_loc = self.rm.force_allocate_reg(srcaddr_box)
+        self._gen_address_inside_string(base_loc, ofs_loc, srcaddr_loc)
+        # compute the destination address
+        base_loc = self.rm.make_sure_var_in_reg(args[1], args)
+        ofs_loc = self.rm.make_sure_var_in_reg(args[3], args)
+        self.rm.possibly_free_var(args[1])
+        self.rm.possibly_free_var(args[3])
+        dstaddr_box = TempBox()
+        dstaddr_loc = self.rm.force_allocate_reg(dstaddr_box)
+        self._gen_address_inside_string(base_loc, ofs_loc, dstaddr_loc)
+        # call memcpy()
+        length_loc = self.loc(args[4])
+        self.rm.before_call()
+        self.xrm.before_call()
+        self.assembler._emit_call(imm(self.assembler.memcpy_addr),
+                                  [dstaddr_loc, srcaddr_loc, length_loc])
+        self.rm.possibly_free_var(args[4])
+        self.rm.possibly_free_var(dstaddr_box)
+        self.rm.possibly_free_var(srcaddr_box)
+
+    def _gen_address_inside_string(self, baseloc, ofsloc, resloc):
+        cpu = self.assembler.cpu
+        ofs_items, itemsize, _ = symbolic.get_array_token(rstr.STR,
+                                                  self.translate_support_code)
+        assert itemsize == 1
+        self.assembler.load_effective_addr(ofsloc, ofs_items, 0,
+                                           resloc, baseloc)
+
     def consider_jump(self, op):
         assembler = self.assembler
         assert self.jump_target_descr is None
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, LoopToken)
         self.jump_target_descr = descr
         nonfloatlocs, floatlocs = assembler.target_arglocs(self.jump_target_descr)
@@ -951,17 +1005,20 @@
         xmmtmp = X86XMMRegisterManager.all_regs[0]
         xmmtmploc = self.xrm.force_allocate_reg(box1, selected_reg=xmmtmp)
         # Part about non-floats
-        src_locations = [self.loc(arg) for arg in op.args if arg.type != FLOAT]
+        # XXX we don't need a copy, we could just use the original list
+        src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) 
+                         if op.getarg(i).type != FLOAT]
         assert tmploc not in nonfloatlocs
         dst_locations = [loc for loc in nonfloatlocs if loc is not None]
         remap_frame_layout(assembler, src_locations, dst_locations, tmploc)
         # Part about floats
-        src_locations = [self.loc(arg) for arg in op.args if arg.type == FLOAT]
+        src_locations = [self.loc(op.getarg(i)) for i in range(op.numargs()) 
+                         if op.getarg(i).type == FLOAT]
         dst_locations = [loc for loc in floatlocs if loc is not None]
         remap_frame_layout(assembler, src_locations, dst_locations, xmmtmp)
         self.rm.possibly_free_var(box)
         self.xrm.possibly_free_var(box1)
-        self.possibly_free_vars(op.args)
+        self.possibly_free_vars_for_op(op)
         assembler.closing_jump(self.jump_target_descr)
 
     def consider_debug_merge_point(self, op):
@@ -1002,12 +1059,21 @@
 def add_none_argument(fn):
     return lambda self, op: fn(self, op, None)
 
+def is_comparison_or_ovf_op(opnum):
+    from pypy.jit.metainterp.resoperation import opclasses, AbstractResOp
+    cls = opclasses[opnum]
+    # hack hack: in theory these are instance methods, but they don't use
+    # any instance field, so we can use a fake object
+    class Fake(cls):
+        pass
+    op = Fake(None)
+    return op.is_comparison() or op.is_ovf()
+
 for name, value in RegAlloc.__dict__.iteritems():
     if name.startswith('consider_'):
         name = name[len('consider_'):]
         num = getattr(rop, name.upper())
-        if (ResOperation(num, [], None).is_comparison()
-            or ResOperation(num, [], None).is_ovf()
+        if (is_comparison_or_ovf_op(num)
             or num == rop.CALL_MAY_FORCE or num == rop.CALL_ASSEMBLER):
             oplist_with_guard[num] = value
             oplist[num] = add_none_argument(value)

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/regloc.py	Thu Sep 30 00:16:20 2010
@@ -33,6 +33,8 @@
     def value_a(self): raise AssertionError("value_a undefined")
     def value_m(self): raise AssertionError("value_m undefined")
 
+    def find_unused_reg(self): return eax
+
 class StackLoc(AssemblerLocation):
     _immutable_ = True
     def __init__(self, position, ebp_offset, num_words, type):
@@ -88,6 +90,12 @@
     def assembler(self):
         return '%' + repr(self)
 
+    def find_unused_reg(self):
+        if self.value == eax.value:
+            return edx
+        else:
+            return eax
+
 class ImmedLoc(AssemblerLocation):
     _immutable_ = True
     width = WORD
@@ -137,6 +145,12 @@
                 self._location_code = 'a'
                 self.loc_a = (base_loc.value, scaled_loc.value, scale, static_offset)
 
+    def __repr__(self):
+        dict = {'j': 'value', 'm': 'loc_m', 'a': 'loc_a'}
+        attr = dict.get(self._location_code, '?')
+        info = getattr(self, attr, '?')
+        return '<AddressLoc %r: %s>' % (self._location_code, info)
+
     def location_code(self):
         return self._location_code
 
@@ -146,6 +160,21 @@
     def value_m(self):
         return self.loc_m
 
+    def find_unused_reg(self):
+        if self._location_code == 'm':
+            if self.loc_m[0] == eax.value:
+                return edx
+        elif self._location_code == 'a':
+            if self.loc_a[0] == eax.value:
+                if self.loc_a[1] == edx.value:
+                    return ecx
+                return edx
+            if self.loc_a[1] == eax.value:
+                if self.loc_a[0] == edx.value:
+                    return ecx
+                return edx
+        return eax
+
 class ConstFloatLoc(AssemblerLocation):
     # XXX: We have to use this class instead of just AddressLoc because
     # AddressLoc is "untyped" and also we to have need some sort of unique
@@ -159,6 +188,9 @@
         self.value = address
         self.const_id = const_id
 
+    def __repr__(self):
+        return '<ConstFloatLoc(%s, %s)>' % (self.value, self.const_id)
+
     def _getregkey(self):
         # XXX: 1000 is kind of magic: We just don't want to be confused
         # with any registers
@@ -206,6 +238,32 @@
     _scratch_register_value = 0
 
     def _binaryop(name):
+
+        def insn_with_64_bit_immediate(self, loc1, loc2):
+            # These are the worst cases:
+            val2 = loc2.value_i()
+            code1 = loc1.location_code()
+            if (code1 == 'j'
+                or (code1 == 'm' and not rx86.fits_in_32bits(loc1.value_m()[1]))
+                or (code1 == 'a' and not rx86.fits_in_32bits(loc1.value_a()[3]))):
+                # INSN_ji, and both operands are 64-bit; or INSN_mi or INSN_ai
+                # and the constant offset in the address is 64-bit.
+                # Hopefully this doesn't happen too often
+                freereg = loc1.find_unused_reg()
+                self.PUSH_r(freereg.value)
+                self.MOV_ri(freereg.value, val2)
+                INSN(self, loc1, freereg)
+                self.POP_r(freereg.value)
+            else:
+                # For this case, the scratch register should not be needed beyond this point.
+                self._load_scratch(val2)
+                INSN(self, loc1, X86_64_SCRATCH_REG)
+
+        def invoke(self, codes, val1, val2):
+            methname = name + "_" + codes
+            _rx86_getattr(self, methname)(val1, val2)
+        invoke._annspecialcase_ = 'specialize:arg(1)'
+
         def INSN(self, loc1, loc2):
             code1 = loc1.location_code()
             code2 = loc2.location_code()
@@ -218,38 +276,39 @@
             if loc1 is X86_64_SCRATCH_REG and not name.startswith("MOV"):
                 assert code2 not in ('j', 'i')
 
-            for possible_code1 in unrolling_location_codes:
-                if code1 == possible_code1:
-                    for possible_code2 in unrolling_location_codes:
-                        if code2 == possible_code2:
+            for possible_code2 in unrolling_location_codes:
+                if code2 == possible_code2:
+                    val2 = getattr(loc2, "value_" + possible_code2)()
+                    #
+                    # Fake out certain operations for x86_64
+                    if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2):
+                        insn_with_64_bit_immediate(self, loc1, loc2)
+                        return
+                    #
+                    # Regular case
+                    for possible_code1 in unrolling_location_codes:
+                        if code1 == possible_code1:
                             val1 = getattr(loc1, "value_" + possible_code1)()
-                            val2 = getattr(loc2, "value_" + possible_code2)()
-                            # Fake out certain operations for x86_64
-                            if self.WORD == 8 and possible_code2 == 'i' and not rx86.fits_in_32bits(val2):
-                                if possible_code1 == 'j':
-                                    # This is the worst case: INSN_ji, and both operands are 64-bit
-                                    # Hopefully this doesn't happen too often
-                                    self.PUSH_r(eax.value)
-                                    self.MOV_ri(eax.value, val1)
-                                    self.MOV_ri(X86_64_SCRATCH_REG.value, val2)
-                                    methname = name + "_mr"
-                                    _rx86_getattr(self, methname)((eax.value, 0), X86_64_SCRATCH_REG.value)
-                                    self.POP_r(eax.value)
-                                else:
-                                    self.MOV_ri(X86_64_SCRATCH_REG.value, val2)
-                                    methname = name + "_" + possible_code1 + "r"
-                                    _rx86_getattr(self, methname)(val1, X86_64_SCRATCH_REG.value)
-                            elif self.WORD == 8 and possible_code1 == 'j':
-                                reg_offset = self._addr_as_reg_offset(val1)
-                                methname = name + "_" + "m" + possible_code2
-                                _rx86_getattr(self, methname)(reg_offset, val2)
+                            # More faking out of certain operations for x86_64
+                            if self.WORD == 8 and possible_code1 == 'j':
+                                val1 = self._addr_as_reg_offset(val1)
+                                invoke(self, "m" + possible_code2, val1, val2)
                             elif self.WORD == 8 and possible_code2 == 'j':
-                                reg_offset = self._addr_as_reg_offset(val2)
-                                methname = name + "_" + possible_code1 + "m"
-                                _rx86_getattr(self, methname)(val1, reg_offset)
+                                val2 = self._addr_as_reg_offset(val2)
+                                invoke(self, possible_code1 + "m", val1, val2)
+                            elif possible_code1 == 'm' and not rx86.fits_in_32bits(val1[1]):
+                                val1 = self._fix_static_offset_64_m(val1)
+                                invoke(self, "a" + possible_code2, val1, val2)
+                            elif possible_code2 == 'm' and not rx86.fits_in_32bits(val2[1]):
+                                val2 = self._fix_static_offset_64_m(val2)
+                                invoke(self, possible_code1 + "a", val1, val2)
                             else:
-                                methname = name + "_" + possible_code1 + possible_code2
-                                _rx86_getattr(self, methname)(val1, val2)
+                                if possible_code1 == 'a' and not rx86.fits_in_32bits(val1[3]):
+                                    val1 = self._fix_static_offset_64_a(val1)
+                                if possible_code2 == 'a' and not rx86.fits_in_32bits(val2[3]):
+                                    val2 = self._fix_static_offset_64_a(val2)
+                                invoke(self, possible_code1 + possible_code2, val1, val2)
+                            return
 
         return func_with_new_name(INSN, "INSN_" + name)
 
@@ -260,7 +319,7 @@
                 if code == possible_code:
                     val = getattr(loc, "value_" + possible_code)()
                     if self.WORD == 8 and possible_code == 'i' and not rx86.fits_in_32bits(val):
-                        self.MOV_ri(X86_64_SCRATCH_REG.value, val)
+                        self._load_scratch(val)
                         _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value)
                     else:
                         methname = name + "_" + possible_code
@@ -280,7 +339,7 @@
                             _rx86_getattr(self, name + "_l")(val)
                         else:
                             assert self.WORD == 8
-                            self.MOV_ri(X86_64_SCRATCH_REG.value, val)
+                            self._load_scratch(val)
                             _rx86_getattr(self, name + "_r")(X86_64_SCRATCH_REG.value)
                     else:
                         methname = name + "_" + possible_code
@@ -317,6 +376,40 @@
         self.MOV_ri(X86_64_SCRATCH_REG.value, addr)
         return (X86_64_SCRATCH_REG.value, 0)
 
+    def _fix_static_offset_64_m(self, (basereg, static_offset)):
+        # For cases where an AddressLoc has the location_code 'm', but
+        # where the static offset does not fit in 32-bits.  We have to fall
+        # back to the X86_64_SCRATCH_REG.  Note that this returns a location
+        # encoded as mode 'a'.  These are all possibly rare cases; don't try
+        # to reuse a past value of the scratch register at all.
+        self._scratch_register_known = False
+        self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset)
+        return (basereg, X86_64_SCRATCH_REG.value, 0, 0)
+
+    def _fix_static_offset_64_a(self, (basereg, scalereg,
+                                       scale, static_offset)):
+        # For cases where an AddressLoc has the location_code 'a', but
+        # where the static offset does not fit in 32-bits.  We have to fall
+        # back to the X86_64_SCRATCH_REG.  In one case it is even more
+        # annoying.  These are all possibly rare cases; don't try to reuse a
+        # past value of the scratch register at all.
+        self._scratch_register_known = False
+        self.MOV_ri(X86_64_SCRATCH_REG.value, static_offset)
+        #
+        if basereg != rx86.NO_BASE_REGISTER:
+            self.LEA_ra(X86_64_SCRATCH_REG.value,
+                        (basereg, X86_64_SCRATCH_REG.value, 0, 0))
+        return (X86_64_SCRATCH_REG.value, scalereg, scale, 0)
+
+    def _load_scratch(self, value):
+        if (self._scratch_register_known
+            and value == self._scratch_register_value):
+            return
+        if self._reuse_scratch_register:
+            self._scratch_register_known = True
+            self._scratch_register_value = value
+        self.MOV_ri(X86_64_SCRATCH_REG.value, value)
+
     def begin_reuse_scratch_register(self):
         # Flag the beginning of a block where it is okay to reuse the value
         # of the scratch register. In theory we shouldn't have to do this if

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/rx86.py	Thu Sep 30 00:16:20 2010
@@ -506,6 +506,7 @@
     LEA32_rb = insn(rex_w, '\x8D', register(1,8),stack_bp(2,force_32bits=True))
     LEA_ra = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_scaled_reg_plus_const(2))
     LEA_rm = insn(rex_w, '\x8D', register(1, 8), mem_reg_plus_const(2))
+    LEA_rj = insn(rex_w, '\x8D', register(1, 8), '\x05', immediate(2))
 
     CALL_l = insn('\xE8', relative(1))
     CALL_r = insn(rex_nw, '\xFF', register(1), chr(0xC0 | (2<<3)))

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/conftest.py	Thu Sep 30 00:16:20 2010
@@ -5,3 +5,6 @@
 def pytest_runtest_setup(item):
     if cpu not in ('x86', 'x86_64'):
         py.test.skip("x86/x86_64 tests skipped: cpu is %r" % (cpu,))
+    if cpu == 'x86_64':
+        from pypy.rpython.lltypesystem import ll2ctypes
+        ll2ctypes.do_allocation_in_far_regions()

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_recompilation.py	Thu Sep 30 00:16:20 2010
@@ -47,7 +47,7 @@
         finish(i3, i4, i5, i6, i7, i8, i9, descr=fdescr2)
         '''
         bridge = self.attach_bridge(ops, loop, -2)
-        descr = loop.operations[2].descr
+        descr = loop.operations[2].getdescr()
         new = descr._x86_bridge_frame_depth
         assert descr._x86_bridge_param_depth == 0        
         # XXX: Maybe add enough ops to force stack on 64-bit as well?
@@ -114,8 +114,8 @@
         assert loop.token._x86_param_depth == 0
         # XXX: Maybe add enough ops to force stack on 64-bit as well?
         if IS_X86_32:
-            assert guard_op.descr._x86_bridge_frame_depth > loop_frame_depth
-        assert guard_op.descr._x86_bridge_param_depth == 0
+            assert guard_op.getdescr()._x86_bridge_frame_depth > loop_frame_depth
+        assert guard_op.getdescr()._x86_bridge_param_depth == 0
         self.cpu.set_future_value_int(0, 0)
         self.cpu.set_future_value_int(1, 0)
         self.cpu.set_future_value_int(2, 0)

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regalloc.py	Thu Sep 30 00:16:20 2010
@@ -9,7 +9,7 @@
 from pypy.jit.backend.llsupport.descr import GcCache
 from pypy.jit.backend.detect_cpu import getcpuclass
 from pypy.jit.backend.x86.regalloc import RegAlloc, X86RegisterManager,\
-     FloatConstants
+     FloatConstants, is_comparison_or_ovf_op
 from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64
 from pypy.jit.metainterp.test.oparser import parse
 from pypy.rpython.lltypesystem import lltype, llmemory, rffi
@@ -17,6 +17,11 @@
 from pypy.rpython.lltypesystem import rclass, rstr
 from pypy.jit.backend.x86.rx86 import *
 
+def test_is_comparison_or_ovf_op():
+    assert not is_comparison_or_ovf_op(rop.INT_ADD)
+    assert is_comparison_or_ovf_op(rop.INT_ADD_OVF)
+    assert is_comparison_or_ovf_op(rop.INT_EQ)
+
 CPU = getcpuclass()
 class MockGcDescr(GcCache):
     def get_funcptr_for_new(self):
@@ -159,8 +164,8 @@
         assert guard_op.is_guard()
         bridge = self.parse(ops, **kwds)
         assert ([box.type for box in bridge.inputargs] ==
-                [box.type for box in guard_op.fail_args])
-        faildescr = guard_op.descr
+                [box.type for box in guard_op.getfailargs()])
+        faildescr = guard_op.getdescr()
         self.cpu.compile_bridge(faildescr, bridge.inputargs, bridge.operations)
         return bridge
 
@@ -607,7 +612,7 @@
         '''
         bridge = self.attach_bridge(ops, loop, -2)
 
-        assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2)
+        assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
 
         self.cpu.set_future_value_int(0, 4)
         self.cpu.set_future_value_int(1, 7)        
@@ -630,7 +635,7 @@
         '''
         bridge = self.attach_bridge(ops, loop, -2)
 
-        assert loop.operations[-2].descr._x86_bridge_param_depth == self.expected_param_depth(2)
+        assert loop.operations[-2].getdescr()._x86_bridge_param_depth == self.expected_param_depth(2)
 
         self.cpu.set_future_value_int(0, 4)
         self.cpu.set_future_value_int(1, 7)        

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_regloc.py	Thu Sep 30 00:16:20 2010
@@ -58,23 +58,250 @@
     expected_ofs = pos_addr - (neg_addr+5)
     assert s.getvalue() == '\xE9' + struct.pack("<i", expected_ofs)
 
-def test_reuse_scratch_register():
-    if not IS_X86_64:
-        py.test.skip()
-
-    base_addr = 0xFEDCBA9876543210
-    cb = LocationCodeBuilder64()
-    cb.begin_reuse_scratch_register()
-    cb.MOV(ecx, heap(base_addr))
-    cb.MOV(ecx, heap(base_addr + 8))
-    cb.end_reuse_scratch_register()
-
-    expected_instructions = (
-            # mov r11, 0xFEDCBA9876543210
-            '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' +
-            # mov rcx, [r11]
-            '\x49\x8B\x0B' +
-            # mov rcx, [r11+8]
-            '\x49\x8B\x4B\x08'
-    )
-    assert cb.getvalue() == expected_instructions
+
+class Test64Bits:
+
+    def setup_class(cls):
+        if not IS_X86_64:
+            py.test.skip()
+
+    def test_reuse_scratch_register(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.begin_reuse_scratch_register()
+        cb.MOV(ecx, heap(base_addr))
+        cb.MOV(ecx, heap(base_addr + 8))
+        cb.end_reuse_scratch_register()
+
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE' +
+                # mov rcx, [r11]
+                '\x49\x8B\x0B' +
+                # mov rcx, [r11+8]
+                '\x49\x8B\x4B\x08'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    # ------------------------------------------------------------
+
+    def test_64bit_address_1(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.CMP(ecx, AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr))
+        # this case is a CMP_rj
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # cmp rcx, [r11]
+                '\x49\x3B\x0B'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_64bit_address_2(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(ecx, AddressLoc(ImmedLoc(0), edx, 3, base_addr))
+        # this case is a CMP_ra
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov rcx, [r11+8*rdx]
+                '\x49\x8B\x0C\xD3'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_64bit_address_3(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(ecx, AddressLoc(edx, ImmedLoc(0), 0, base_addr))
+        # this case is a MOV_rm
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov rcx, [rdx+r11]
+                '\x4A\x8B\x0C\x1A'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_64bit_address_4(self):
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.begin_reuse_scratch_register()
+        assert cb._reuse_scratch_register is True
+        assert cb._scratch_register_known is False
+        cb.MOV(ecx, AddressLoc(edx, esi, 2, base_addr))
+        assert cb._reuse_scratch_register is True
+        assert cb._scratch_register_known is False
+        # this case is a MOV_ra
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # lea r11, [rdx+r11]
+                '\x4E\x8D\x1C\x1A'
+                # mov rcx, [r11+4*rsi]
+                '\x49\x8B\x0C\xB3'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    # ------------------------------------------------------------
+
+    def test_MOV_immed32_into_64bit_address_1(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ji
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11], -0x01234567
+                '\x49\xC7\x03\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed32_into_64bit_address_2(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11+8*rdx], -0x01234567
+                '\x49\xC7\x04\xD3\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed32_into_64bit_address_3(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(edx, ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_mi
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [rdx+r11], -0x01234567
+                '\x4A\xC7\x04\x1A\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed32_into_64bit_address_4(self):
+        immed = -0x01234567
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(edx, esi, 2, base_addr), ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # lea r11, [rdx+r11]
+                '\x4E\x8D\x1C\x1A'
+                # mov [r11+4*rsi], -0x01234567
+                '\x49\xC7\x04\xB3\x99\xBA\xDC\xFE'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    # ------------------------------------------------------------
+
+    def test_MOV_immed64_into_64bit_address_1(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ji
+        #
+        expected_instructions = (
+                # push rax
+                '\x50'
+                # mov rax, 0x0123456789ABCDEF
+                '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11], rax
+                '\x49\x89\x03'
+                # pop rax
+                '\x58'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed64_into_64bit_address_2(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(ImmedLoc(0), edx, 3, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # push rax
+                '\x50'
+                # mov rax, 0x0123456789ABCDEF
+                '\x48\xB8\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [r11+8*rdx], rax
+                '\x49\x89\x04\xD3'
+                # pop rax
+                '\x58'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed64_into_64bit_address_3(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(eax, ImmedLoc(0), 0, base_addr),
+               ImmedLoc(immed))
+        # this case is a MOV_mi
+        #
+        expected_instructions = (
+                # push rdx
+                '\x52'
+                # mov rdx, 0x0123456789ABCDEF
+                '\x48\xBA\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # mov [rax+r11], rdx
+                '\x4A\x89\x14\x18'
+                # pop rdx
+                '\x5A'
+        )
+        assert cb.getvalue() == expected_instructions
+
+    def test_MOV_immed64_into_64bit_address_4(self):
+        immed = 0x0123456789ABCDEF
+        base_addr = 0xFEDCBA9876543210
+        cb = LocationCodeBuilder64()
+        cb.MOV(AddressLoc(edx, eax, 2, base_addr), ImmedLoc(immed))
+        # this case is a MOV_ai
+        #
+        expected_instructions = (
+                # push rcx
+                '\x51'
+                # mov rcx, 0x0123456789ABCDEF
+                '\x48\xB9\xEF\xCD\xAB\x89\x67\x45\x23\x01'
+                # mov r11, 0xFEDCBA9876543210
+                '\x49\xBB\x10\x32\x54\x76\x98\xBA\xDC\xFE'
+                # lea r11, [rdx+r11]
+                '\x4E\x8D\x1C\x1A'
+                # mov [r11+4*rax], rcx
+                '\x49\x89\x0C\x83'
+                # pop rcx
+                '\x59'
+        )
+        assert cb.getvalue() == expected_instructions

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_runner.py	Thu Sep 30 00:16:20 2010
@@ -265,7 +265,7 @@
                         ResOperation(rop.FINISH, [ConstInt(0)], None,
                                      descr=BasicFailDescr()),
                         ]
-                    ops[-2].fail_args = [i1]
+                    ops[-2].setfailargs([i1])
                     looptoken = LoopToken()
                     self.cpu.compile_loop([b], ops, looptoken)
                     if op == rop.INT_IS_TRUE:
@@ -314,7 +314,7 @@
                         ResOperation(rop.FINISH, [ConstInt(0)], None,
                                      descr=BasicFailDescr()),
                         ]
-                    ops[-2].fail_args = [i1]
+                    ops[-2].setfailargs([i1])
                     inputargs = [i for i in (a, b) if isinstance(i, Box)]
                     looptoken = LoopToken()
                     self.cpu.compile_loop(inputargs, ops, looptoken)
@@ -353,7 +353,7 @@
             ResOperation(rop.JUMP, [i1], None, descr=looptoken),
             ]
         inputargs = [i0]
-        operations[3].fail_args = [i1]
+        operations[3].setfailargs([i1])
         self.cpu.compile_loop(inputargs, operations, looptoken)
         name, loopaddress, loopsize = agent.functions[0]
         assert name == "Loop # 0: hello"
@@ -368,7 +368,7 @@
             ResOperation(rop.DEBUG_MERGE_POINT, [FakeString("bye")], None),
             ResOperation(rop.JUMP, [i1b], None, descr=looptoken),
         ]
-        bridge[1].fail_args = [i1b]
+        bridge[1].setfailargs([i1b])
 
         self.cpu.compile_bridge(faildescr1, [i1b], bridge)        
         name, address, size = agent.functions[1]
@@ -462,7 +462,7 @@
                 cmp_result = BoxInt()
                 ops.append(ResOperation(float_op, args, cmp_result))
                 ops.append(ResOperation(guard_op, [cmp_result], None, descr=BasicFailDescr()))
-                ops[-1].fail_args = [failed]
+                ops[-1].setfailargs([failed])
 
             ops.append(ResOperation(rop.FINISH, [finished], None, descr=BasicFailDescr()))
 

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_zrpy_gc.py	Thu Sep 30 00:16:20 2010
@@ -18,6 +18,7 @@
 from pypy.jit.backend.llsupport.gc import GcLLDescr_framework
 from pypy.tool.udir import udir
 from pypy.jit.backend.x86.arch import IS_X86_64
+from pypy.config.translationoption import DEFL_GC
 import py.test
 
 class X(object):
@@ -126,7 +127,8 @@
 
 # ______________________________________________________________________
 
-class TestCompileHybrid(object):
+class TestCompileFramework(object):
+    # Test suite using (so far) the minimark GC.
     def setup_class(cls):
         funcs = []
         name_to_func = {}
@@ -175,13 +177,13 @@
         OLD_DEBUG = GcLLDescr_framework.DEBUG
         try:
             GcLLDescr_framework.DEBUG = True
-            cls.cbuilder = compile(get_entry(allfuncs), "hybrid",
+            cls.cbuilder = compile(get_entry(allfuncs), DEFL_GC,
                                    gcrootfinder="asmgcc", jit=True)
         finally:
             GcLLDescr_framework.DEBUG = OLD_DEBUG
 
     def run(self, name, n=2000):
-        pypylog = udir.join('TestCompileHybrid.log')
+        pypylog = udir.join('TestCompileFramework.log')
         res = self.cbuilder.cmdexec("%s %d" %(name, n),
                                     env={'PYPYLOG': ':%s' % pypylog})
         assert int(res) == 20
@@ -189,7 +191,7 @@
     def run_orig(self, name, n, x):
         self.main_allfuncs(name, n, x)
 
-    def define_compile_hybrid_1(cls):
+    def define_compile_framework_1(cls):
         # a moving GC.  Supports malloc_varsize_nonmovable.  Simple test, works
         # without write_barriers and root stack enumeration.
         def f(n, x, *args):
@@ -199,10 +201,10 @@
             return (n, x) + args
         return None, f, None
 
-    def test_compile_hybrid_1(self):
-        self.run('compile_hybrid_1')
+    def test_compile_framework_1(self):
+        self.run('compile_framework_1')
 
-    def define_compile_hybrid_2(cls):
+    def define_compile_framework_2(cls):
         # More complex test, requires root stack enumeration but
         # not write_barriers.
         def f(n, x, *args):
@@ -215,10 +217,10 @@
             return (n, x) + args
         return None, f, None
 
-    def test_compile_hybrid_2(self):
-        self.run('compile_hybrid_2')
+    def test_compile_framework_2(self):
+        self.run('compile_framework_2')
 
-    def define_compile_hybrid_3(cls):
+    def define_compile_framework_3(cls):
         # Third version of the test.  Really requires write_barriers.
         def f(n, x, *args):
             x.next = None
@@ -241,13 +243,13 @@
 
 
 
-    def test_compile_hybrid_3(self):
+    def test_compile_framework_3(self):
         x_test = X()
         x_test.foo = 5
-        self.run_orig('compile_hybrid_3', 6, x_test)     # check that it does not raise CheckError
-        self.run('compile_hybrid_3')
+        self.run_orig('compile_framework_3', 6, x_test)     # check that it does not raise CheckError
+        self.run('compile_framework_3')
 
-    def define_compile_hybrid_3_extra(cls):
+    def define_compile_framework_3_extra(cls):
         # Extra version of the test, with tons of live vars around the residual
         # call that all contain a GC pointer.
         @dont_look_inside
@@ -287,11 +289,11 @@
             return n, None, x0, x1, x2, x3, x4, x5, x6, x7, None, None
         return before, f, None
 
-    def test_compile_hybrid_3_extra(self):
-        self.run_orig('compile_hybrid_3_extra', 6, None)     # check that it does not raise CheckError
-        self.run('compile_hybrid_3_extra')
+    def test_compile_framework_3_extra(self):
+        self.run_orig('compile_framework_3_extra', 6, None)     # check that it does not raise CheckError
+        self.run('compile_framework_3_extra')
 
-    def define_compile_hybrid_4(cls):
+    def define_compile_framework_4(cls):
         # Fourth version of the test, with __del__.
         from pypy.rlib.debug import debug_print
         class Counter:
@@ -311,10 +313,10 @@
             return (n, x) + args
         return before, f, None
 
-    def test_compile_hybrid_4(self):
-        self.run('compile_hybrid_4')
+    def test_compile_framework_4(self):
+        self.run('compile_framework_4')
 
-    def define_compile_hybrid_5(cls):
+    def define_compile_framework_5(cls):
         # Test string manipulation.
         def f(n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s):
             n -= x.foo
@@ -324,10 +326,10 @@
             check(len(s) == 1*5 + 2*45 + 3*450 + 4*500)
         return None, f, after
 
-    def test_compile_hybrid_5(self):
-        self.run('compile_hybrid_5')
+    def test_compile_framework_5(self):
+        self.run('compile_framework_5')
 
-    def define_compile_hybrid_7(cls):
+    def define_compile_framework_7(cls):
         # Array of pointers (test the write barrier for setarrayitem_gc)
         def before(n, x):
             return n, x, None, None, None, None, None, None, None, None, [X(123)], None
@@ -391,10 +393,10 @@
             check(l[15].x == 142)
         return before, f, after
 
-    def test_compile_hybrid_7(self):
-        self.run('compile_hybrid_7')
+    def test_compile_framework_7(self):
+        self.run('compile_framework_7')
 
-    def define_compile_hybrid_external_exception_handling(cls):
+    def define_compile_framework_external_exception_handling(cls):
         def before(n, x):
             x = X(0)
             return n, x, None, None, None, None, None, None, None, None, None, None        
@@ -427,10 +429,10 @@
 
         return before, f, None
 
-    def test_compile_hybrid_external_exception_handling(self):
-        self.run('compile_hybrid_external_exception_handling')
+    def test_compile_framework_external_exception_handling(self):
+        self.run('compile_framework_external_exception_handling')
             
-    def define_compile_hybrid_bug1(self):
+    def define_compile_framework_bug1(self):
         @purefunction
         def nonmoving():
             x = X(1)
@@ -453,10 +455,10 @@
 
         return None, f, None
 
-    def test_compile_hybrid_bug1(self):
-        self.run('compile_hybrid_bug1', 200)
+    def test_compile_framework_bug1(self):
+        self.run('compile_framework_bug1', 200)
 
-    def define_compile_hybrid_vref(self):
+    def define_compile_framework_vref(self):
         from pypy.rlib.jit import virtual_ref, virtual_ref_finish
         class A:
             pass
@@ -469,10 +471,10 @@
             return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
         return None, f, None
 
-    def test_compile_hybrid_vref(self):
-        self.run('compile_hybrid_vref', 200)
+    def test_compile_framework_vref(self):
+        self.run('compile_framework_vref', 200)
 
-    def define_compile_hybrid_float(self):
+    def define_compile_framework_float(self):
         # test for a bug: the fastpath_malloc does not save and restore
         # xmm registers around the actual call to the slow path
         class A:
@@ -519,5 +521,5 @@
             return n, x, x0, x1, x2, x3, x4, x5, x6, x7, l, s
         return None, f, None
 
-    def test_compile_hybrid_float(self):
-        self.run('compile_hybrid_float')
+    def test_compile_framework_float(self):
+        self.run('compile_framework_float')

Modified: pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/backend/x86/test/test_ztranslation.py	Thu Sep 30 00:16:20 2010
@@ -8,6 +8,7 @@
 from pypy.jit.codewriter.policy import StopAtXPolicy
 from pypy.translator.translator import TranslationContext
 from pypy.jit.backend.x86.arch import IS_X86_32, IS_X86_64
+from pypy.config.translationoption import DEFL_GC
 
 class TestTranslationX86(CCompiledMixin):
     CPUClass = getcpuclass()
@@ -118,7 +119,7 @@
 
     def _get_TranslationContext(self):
         t = TranslationContext()
-        t.config.translation.gc = 'hybrid'
+        t.config.translation.gc = DEFL_GC   # 'hybrid' or 'minimark'
         t.config.translation.gcrootfinder = 'asmgcc'
         t.config.translation.list_comprehension_operations = True
         t.config.translation.gcremovetypeptr = True

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/assembler.py	Thu Sep 30 00:16:20 2010
@@ -232,3 +232,11 @@
                 return
             addr = llmemory.cast_ptr_to_adr(value)
             self.list_of_addr2name.append((addr, name))
+
+    def finished(self):
+        # Helper called at the end of assembling.  Registers the extra
+        # functions shown in _callinfo_for_oopspec.
+        from pypy.jit.codewriter.effectinfo import _callinfo_for_oopspec
+        for _, func in _callinfo_for_oopspec.values():
+            func = heaptracker.int2adr(func)
+            self.see_raw_object(func.ptr)

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/call.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/call.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/call.py	Thu Sep 30 00:16:20 2010
@@ -185,7 +185,7 @@
                                          FUNC.RESULT)
         return (fnaddr, calldescr)
 
-    def getcalldescr(self, op):
+    def getcalldescr(self, op, oopspecindex=EffectInfo.OS_NONE):
         """Return the calldescr that describes all calls done by 'op'.
         This returns a calldescr that we can put in the corresponding
         call operation in the calling jitcode.  It gets an effectinfo
@@ -226,7 +226,8 @@
             extraeffect = EffectInfo.EF_CANNOT_RAISE
         #
         effectinfo = effectinfo_from_writeanalyze(
-            self.readwrite_analyzer.analyze(op), self.cpu, extraeffect)
+            self.readwrite_analyzer.analyze(op), self.cpu, extraeffect,
+            oopspecindex)
         #
         if pure or loopinvariant:
             assert effectinfo is not None

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/codewriter.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/codewriter.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/codewriter.py	Thu Sep 30 00:16:20 2010
@@ -73,6 +73,7 @@
             count += 1
             if not count % 500:
                 log.info("Produced %d jitcodes" % count)
+        self.assembler.finished()
         heaptracker.finish_registering(self.cpu)
         log.info("there are %d JitCode instances." % count)
 

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/effectinfo.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/effectinfo.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/effectinfo.py	Thu Sep 30 00:16:20 2010
@@ -15,13 +15,32 @@
     EF_LOOPINVARIANT                   = 3 #special: call it only once per loop
     EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE = 4 #can raise and force virtualizables
 
+    # the 'oopspecindex' field is one of the following values:
+    OS_NONE                     = 0    # normal case, no oopspec
+    OS_ARRAYCOPY                = 1    # "list.ll_arraycopy"
+    OS_STR_CONCAT               = 2    # "stroruni.concat"
+    OS_UNI_CONCAT               = 3    # "stroruni.concat"
+    OS_STR_SLICE                = 4    # "stroruni.slice"
+    OS_UNI_SLICE                = 5    # "stroruni.slice"
+    OS_STR_EQUAL                = 6    # "stroruni.equal"
+    OS_UNI_EQUAL                = 7    # "stroruni.equal"
+    OS_STREQ_SLICE_CHECKNULL    = 8    # s2!=NULL and s1[x:x+length]==s2
+    OS_STREQ_SLICE_NONNULL      = 9    # s1[x:x+length]==s2   (assert s2!=NULL)
+    OS_STREQ_SLICE_CHAR         = 10   # s1[x:x+length]==char
+    OS_STREQ_NONNULL            = 11   # s1 == s2    (assert s1!=NULL,s2!=NULL)
+    OS_STREQ_NONNULL_CHAR       = 12   # s1 == char  (assert s1!=NULL)
+    OS_STREQ_CHECKNULL_CHAR     = 13   # s1!=NULL and s1==char
+    OS_STREQ_LENGTHOK           = 14   # s1 == s2    (assert len(s1)==len(s2))
+
     def __new__(cls, readonly_descrs_fields,
                 write_descrs_fields, write_descrs_arrays,
-                extraeffect=EF_CAN_RAISE):
+                extraeffect=EF_CAN_RAISE,
+                oopspecindex=OS_NONE):
         key = (frozenset(readonly_descrs_fields),
                frozenset(write_descrs_fields),
                frozenset(write_descrs_arrays),
-               extraeffect)
+               extraeffect,
+               oopspecindex)
         if key in cls._cache:
             return cls._cache[key]
         result = object.__new__(cls)
@@ -29,6 +48,7 @@
         result.write_descrs_fields = write_descrs_fields
         result.write_descrs_arrays = write_descrs_arrays
         result.extraeffect = extraeffect
+        result.oopspecindex = oopspecindex
         cls._cache[key] = result
         return result
 
@@ -36,7 +56,8 @@
         return self.extraeffect >= self.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE
 
 def effectinfo_from_writeanalyze(effects, cpu,
-                                 extraeffect=EffectInfo.EF_CAN_RAISE):
+                                 extraeffect=EffectInfo.EF_CAN_RAISE,
+                                 oopspecindex=EffectInfo.OS_NONE):
     from pypy.translator.backendopt.writeanalyze import top_set
     if effects is top_set:
         return None
@@ -73,7 +94,8 @@
     return EffectInfo(readonly_descrs_fields,
                       write_descrs_fields,
                       write_descrs_arrays,
-                      extraeffect)
+                      extraeffect,
+                      oopspecindex)
 
 def consider_struct(TYPE, fieldname):
     if fieldType(TYPE, fieldname) is lltype.Void:
@@ -104,3 +126,33 @@
     def analyze_simple_operation(self, op):
         return op.opname in ('jit_force_virtualizable',
                              'jit_force_virtual')
+
+# ____________________________________________________________
+
+_callinfo_for_oopspec = {} # {oopspecindex: (calldescr, func_as_int)}
+
+def callinfo_for_oopspec(oopspecindex):
+    """A function that returns the calldescr and the function
+    address (as an int) of one of the OS_XYZ functions defined above.
+    Don't use this if there might be several implementations of the same
+    OS_XYZ specialized by type, e.g. OS_ARRAYCOPY."""
+    try:
+        return _callinfo_for_oopspec[oopspecindex]
+    except KeyError:
+        return (None, 0)
+
+
+def _funcptr_for_oopspec_memo(oopspecindex):
+    from pypy.jit.codewriter import heaptracker
+    _, func_as_int = callinfo_for_oopspec(oopspecindex)
+    funcadr = heaptracker.int2adr(func_as_int)
+    return funcadr.ptr
+_funcptr_for_oopspec_memo._annspecialcase_ = 'specialize:memo'
+
+def funcptr_for_oopspec(oopspecindex):
+    """A memo function that returns a pointer to the function described
+    by OS_XYZ (as a real low-level function pointer)."""
+    funcptr = _funcptr_for_oopspec_memo(oopspecindex)
+    assert funcptr
+    return funcptr
+funcptr_for_oopspec._annspecialcase_ = 'specialize:arg(0)'

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/jtransform.py	Thu Sep 30 00:16:20 2010
@@ -1,16 +1,18 @@
 import py, sys
-from pypy.rpython.lltypesystem import lltype, rstr, rclass
+from pypy.rpython.lltypesystem import lltype, llmemory, rstr, rclass
 from pypy.rpython import rlist
 from pypy.jit.metainterp.history import getkind
 from pypy.objspace.flow.model import SpaceOperation, Variable, Constant
 from pypy.objspace.flow.model import Block, Link, c_last_exception
 from pypy.jit.codewriter.flatten import ListOfKind, IndirectCallTargets
 from pypy.jit.codewriter import support, heaptracker
+from pypy.jit.codewriter.effectinfo import EffectInfo, _callinfo_for_oopspec
 from pypy.jit.codewriter.policy import log
 from pypy.jit.metainterp.typesystem import deref, arrayItem
 from pypy.rlib import objectmodel
 from pypy.rlib.jit import _we_are_jitted
 from pypy.translator.simplify import get_funcobj
+from pypy.translator.unsimplify import varoftype
 
 
 def transform_graph(graph, cpu=None, callcontrol=None, portal_jd=None):
@@ -248,11 +250,13 @@
         kind = self.callcontrol.guess_call_kind(op)
         return getattr(self, 'handle_%s_indirect_call' % kind)(op)
 
-    def rewrite_call(self, op, namebase, initialargs):
+    def rewrite_call(self, op, namebase, initialargs, args=None):
         """Turn 'i0 = direct_call(fn, i1, i2, ref1, ref2)'
            into 'i0 = xxx_call_ir_i(fn, descr, [i1,i2], [ref1,ref2])'.
            The name is one of '{residual,direct}_call_{r,ir,irf}_{i,r,f,v}'."""
-        lst_i, lst_r, lst_f = self.make_three_lists(op.args[1:])
+        if args is None:
+            args = op.args[1:]
+        lst_i, lst_r, lst_f = self.make_three_lists(args)
         reskind = getkind(op.result.concretetype)[0]
         if lst_f or reskind == 'f': kinds = 'irf'
         elif lst_i: kinds = 'ir'
@@ -310,6 +314,8 @@
         # dispatch to various implementations depending on the oopspec_name
         if oopspec_name.startswith('list.') or oopspec_name == 'newlist':
             prepare = self._handle_list_call
+        elif oopspec_name.startswith('stroruni.'):
+            prepare = self._handle_stroruni_call
         elif oopspec_name.startswith('virtual_ref'):
             prepare = self._handle_virtual_ref_call
         else:
@@ -982,10 +988,7 @@
         return extraop + [op]
 
     def do_fixed_list_ll_arraycopy(self, op, args, arraydescr):
-        calldescr = self.callcontrol.getcalldescr(op)
-        return SpaceOperation('arraycopy',
-                              [calldescr, op.args[0]] + args + [arraydescr],
-                              op.result)
+        return self._handle_oopspec_call(op, args, EffectInfo.OS_ARRAYCOPY)
 
     # ---------- resizable lists ----------
 
@@ -1023,6 +1026,92 @@
                               [args[0], lengthdescr], op.result)
 
     # ----------
+    # Strings and Unicodes.
+
+    def _handle_oopspec_call(self, op, args, oopspecindex):
+        calldescr = self.callcontrol.getcalldescr(op, oopspecindex)
+        if isinstance(op.args[0].value, str):
+            pass  # for tests only
+        else:
+            func = heaptracker.adr2int(
+                llmemory.cast_ptr_to_adr(op.args[0].value))
+            _callinfo_for_oopspec[oopspecindex] = calldescr, func
+        op1 = self.rewrite_call(op, 'residual_call',
+                                [op.args[0], calldescr],
+                                args=args)
+        if self.callcontrol.calldescr_canraise(calldescr):
+            op1 = [op1, SpaceOperation('-live-', [], None)]
+        return op1
+
+    def _register_extra_helper(self, oopspecindex, oopspec_name,
+                               argtypes, resulttype):
+        # a bit hackish
+        if oopspecindex in _callinfo_for_oopspec:
+            return
+        c_func, TP = support.builtin_func_for_spec(self.cpu.rtyper,
+                                                   oopspec_name, argtypes,
+                                                   resulttype)
+        op = SpaceOperation('pseudo_call',
+                            [c_func] + [varoftype(T) for T in argtypes],
+                            varoftype(resulttype))
+        calldescr = self.callcontrol.getcalldescr(op, oopspecindex)
+        func = heaptracker.adr2int(
+            llmemory.cast_ptr_to_adr(c_func.value))
+        _callinfo_for_oopspec[oopspecindex] = calldescr, func
+
+    def _handle_stroruni_call(self, op, oopspec_name, args):
+        if args[0].concretetype.TO == rstr.STR:
+            dict = {"stroruni.concat": EffectInfo.OS_STR_CONCAT,
+                    "stroruni.slice":  EffectInfo.OS_STR_SLICE,
+                    "stroruni.equal":  EffectInfo.OS_STR_EQUAL,
+                    }
+        elif args[0].concretetype.TO == rstr.UNICODE:
+            dict = {"stroruni.concat": EffectInfo.OS_UNI_CONCAT,
+                    "stroruni.slice":  EffectInfo.OS_UNI_SLICE,
+                    "stroruni.equal":  EffectInfo.OS_UNI_EQUAL,
+                    }
+        else:
+            assert 0, "args[0].concretetype must be STR or UNICODE"
+        #
+        if oopspec_name == "stroruni.equal":
+            SoU = args[0].concretetype     # Ptr(STR) or Ptr(UNICODE)
+            for otherindex, othername, argtypes, resulttype in [
+
+                (EffectInfo.OS_STREQ_SLICE_CHECKNULL,
+                     "str.eq_slice_checknull",
+                     [SoU, lltype.Signed, lltype.Signed, SoU],
+                     lltype.Signed),
+                (EffectInfo.OS_STREQ_SLICE_NONNULL,
+                     "str.eq_slice_nonnull",
+                     [SoU, lltype.Signed, lltype.Signed, SoU],
+                     lltype.Signed),
+                (EffectInfo.OS_STREQ_SLICE_CHAR,
+                     "str.eq_slice_char",
+                     [SoU, lltype.Signed, lltype.Signed, lltype.Char],
+                     lltype.Signed),
+                (EffectInfo.OS_STREQ_NONNULL,
+                     "str.eq_nonnull",
+                     [SoU, SoU],
+                     lltype.Signed),
+                (EffectInfo.OS_STREQ_NONNULL_CHAR,
+                     "str.eq_nonnull_char",
+                     [SoU, lltype.Char],
+                     lltype.Signed),
+                (EffectInfo.OS_STREQ_CHECKNULL_CHAR,
+                     "str.eq_checknull_char",
+                     [SoU, lltype.Char],
+                     lltype.Signed),
+                (EffectInfo.OS_STREQ_LENGTHOK,
+                     "str.eq_lengthok",
+                     [SoU, SoU],
+                     lltype.Signed),
+                ]:
+                self._register_extra_helper(otherindex, othername,
+                                            argtypes, resulttype)
+        #
+        return self._handle_oopspec_call(op, args, dict[oopspec_name])
+
+    # ----------
     # VirtualRefs.
 
     def _handle_virtual_ref_call(self, op, oopspec_name, args):

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/support.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/support.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/support.py	Thu Sep 30 00:16:20 2010
@@ -275,10 +275,86 @@
 
     # ---------- strings and unicode ----------
 
-    _ll_5_string_copy_contents = ll_rstr.copy_string_contents
-
     _ll_1_str_str2unicode = ll_rstr.LLHelpers.ll_str2unicode
-    _ll_5_unicode_copy_contents = ll_rstr.copy_unicode_contents
+
+    def _ll_4_str_eq_slice_checknull(s1, start, length, s2):
+        """str1[start : start + length] == str2."""
+        if not s2:
+            return 0
+        chars2 = s2.chars
+        if len(chars2) != length:
+            return 0
+        j = 0
+        chars1 = s1.chars
+        while j < length:
+            if chars1[start + j] != chars2[j]:
+                return 0
+            j += 1
+        return 1
+
+    def _ll_4_str_eq_slice_nonnull(s1, start, length, s2):
+        """str1[start : start + length] == str2, assuming str2 != NULL."""
+        chars2 = s2.chars
+        if len(chars2) != length:
+            return 0
+        j = 0
+        chars1 = s1.chars
+        while j < length:
+            if chars1[start + j] != chars2[j]:
+                return 0
+            j += 1
+        return 1
+
+    def _ll_4_str_eq_slice_char(s1, start, length, c2):
+        """str1[start : start + length] == c2."""
+        if length != 1:
+            return 0
+        if s1.chars[start] != c2:
+            return 0
+        return 1
+
+    def _ll_2_str_eq_nonnull(s1, s2):
+        len1 = len(s1.chars)
+        len2 = len(s2.chars)
+        if len1 != len2:
+            return 0
+        j = 0
+        chars1 = s1.chars
+        chars2 = s2.chars
+        while j < len1:
+            if chars1[j] != chars2[j]:
+                return 0
+            j += 1
+        return 1
+
+    def _ll_2_str_eq_nonnull_char(s1, c2):
+        chars = s1.chars
+        if len(chars) != 1:
+            return 0
+        if chars[0] != c2:
+            return 0
+        return 1
+
+    def _ll_2_str_eq_checknull_char(s1, c2):
+        if not s1:
+            return 0
+        chars = s1.chars
+        if len(chars) != 1:
+            return 0
+        if chars[0] != c2:
+            return 0
+        return 1
+
+    def _ll_2_str_eq_lengthok(s1, s2):
+        j = 0
+        chars1 = s1.chars
+        chars2 = s2.chars
+        len1 = len(chars1)
+        while j < len1:
+            if chars1[j] != chars2[j]:
+                return 0
+            j += 1
+        return 1
 
     # ---------- malloc with del ----------
 

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/test/test_jtransform.py	Thu Sep 30 00:16:20 2010
@@ -1,11 +1,16 @@
+import py
 import random
 from pypy.objspace.flow.model import FunctionGraph, Block, Link
 from pypy.objspace.flow.model import SpaceOperation, Variable, Constant
 from pypy.jit.codewriter.jtransform import Transformer
 from pypy.jit.metainterp.history import getkind
-from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr
+from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr, rlist
 from pypy.translator.unsimplify import varoftype
-from pypy.jit.codewriter import heaptracker
+from pypy.jit.codewriter import heaptracker, effectinfo
+from pypy.jit.codewriter.flatten import ListOfKind
+
+def const(x):
+    return Constant(x, lltype.typeOf(x))
 
 class FakeRTyper:
     class type_system: name = 'lltypesystem'
@@ -17,6 +22,8 @@
         return ('calldescr', FUNC, ARGS, RESULT)
     def fielddescrof(self, STRUCT, name):
         return ('fielddescr', STRUCT, name)
+    def arraydescrof(self, ARRAY):
+        return FakeDescr(('arraydescr', ARRAY))
     def sizeof(self, STRUCT):
         return FakeDescr(('sizedescr', STRUCT))
 
@@ -67,6 +74,14 @@
     def calldescr_canraise(self, calldescr):
         return False
 
+class FakeBuiltinCallControl:
+    def guess_call_kind(self, op):
+        return 'builtin'
+    def getcalldescr(self, op, oopspecindex):
+        return 'calldescr-%d' % oopspecindex
+    def calldescr_canraise(self, calldescr):
+        return False
+
 
 def test_optimize_goto_if_not():
     v1 = Variable()
@@ -107,7 +122,7 @@
     assert block.operations == []
     assert block.exitswitch == ('int_gt', v1, v2)
     assert block.exits == exits
-    assert exits[1].args == [Constant(True, lltype.Bool)]
+    assert exits[1].args == [const(True)]
 
 def test_optimize_goto_if_not__unknownop():
     v3 = Variable(); v3.concretetype = lltype.Bool
@@ -159,8 +174,8 @@
            'float_gt': ('float_gt', 'float_lt'),
            }
     v3 = varoftype(lltype.Signed)
-    for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]:
-        for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]:
+    for v1 in [varoftype(lltype.Signed), const(42)]:
+        for v2 in [varoftype(lltype.Signed), const(43)]:
             for name1, name2 in ops.items():
                 op = SpaceOperation(name1, [v1, v2], v3)
                 op1 = Transformer(FakeCPU()).rewrite_operation(op)
@@ -177,8 +192,8 @@
 
 def test_symmetric_int_add_ovf():
     v3 = varoftype(lltype.Signed)
-    for v1 in [varoftype(lltype.Signed), Constant(42, lltype.Signed)]:
-        for v2 in [varoftype(lltype.Signed), Constant(43, lltype.Signed)]:
+    for v1 in [varoftype(lltype.Signed), const(42)]:
+        for v2 in [varoftype(lltype.Signed), const(43)]:
             op = SpaceOperation('int_add_nonneg_ovf', [v1, v2], v3)
             oplist = Transformer(FakeCPU()).rewrite_operation(op)
             op0, op1 = oplist
@@ -218,7 +233,7 @@
 def get_direct_call_op(argtypes, restype):
     FUNC = lltype.FuncType(argtypes, restype)
     fnptr = lltype.functionptr(FUNC, "g")    # no graph
-    c_fnptr = Constant(fnptr, concretetype=lltype.typeOf(fnptr))
+    c_fnptr = const(fnptr)
     vars = [varoftype(TYPE) for TYPE in argtypes]
     v_result = varoftype(restype)
     op = SpaceOperation('direct_call', [c_fnptr] + vars, v_result)
@@ -465,7 +480,7 @@
     v1 = varoftype(lltype.Signed)
     v2 = varoftype(lltype.Signed)
     v3 = varoftype(lltype.Bool)
-    c0 = Constant(0, lltype.Signed)
+    c0 = const(0)
     #
     for opname, reducedname in [('int_eq', 'int_is_zero'),
                                 ('int_ne', 'int_is_true')]:
@@ -488,7 +503,7 @@
     v1 = varoftype(rclass.OBJECTPTR)
     v2 = varoftype(rclass.OBJECTPTR)
     v3 = varoftype(lltype.Bool)
-    c0 = Constant(lltype.nullptr(rclass.OBJECT), rclass.OBJECTPTR)
+    c0 = const(lltype.nullptr(rclass.OBJECT))
     #
     for opname, reducedname in [('ptr_eq', 'ptr_iszero'),
                                 ('ptr_ne', 'ptr_nonzero')]:
@@ -511,7 +526,7 @@
     v1 = varoftype(rclass.NONGCOBJECTPTR)
     v2 = varoftype(rclass.NONGCOBJECTPTR)
     v3 = varoftype(lltype.Bool)
-    c0 = Constant(lltype.nullptr(rclass.NONGCOBJECT), rclass.NONGCOBJECTPTR)
+    c0 = const(lltype.nullptr(rclass.NONGCOBJECT))
     #
     for opname, reducedname in [('ptr_eq', 'int_is_zero'),
                                 ('ptr_ne', 'int_is_true')]:
@@ -656,3 +671,119 @@
     oplist = tr.rewrite_operation(op)
     assert oplist[0].opname == 'inline_call_ir_i'
     assert oplist[0].args[0] == 'somejitcode'
+
+def test_str_newstr():
+    c_STR = Constant(rstr.STR, lltype.Void)
+    c_flavor = Constant({'flavor': 'gc'}, lltype.Void)
+    v1 = varoftype(lltype.Signed)
+    v2 = varoftype(lltype.Ptr(rstr.STR))
+    op = SpaceOperation('malloc_varsize', [c_STR, c_flavor, v1], v2)
+    op1 = Transformer().rewrite_operation(op)
+    assert op1.opname == 'newstr'
+    assert op1.args == [v1]
+    assert op1.result == v2
+
+def test_str_concat():
+    # test that the oopspec is present and correctly transformed
+    PSTR = lltype.Ptr(rstr.STR)
+    FUNC = lltype.FuncType([PSTR, PSTR], PSTR)
+    func = lltype.functionptr(FUNC, 'll_strconcat',
+                              _callable=rstr.LLHelpers.ll_strconcat)
+    v1 = varoftype(PSTR)
+    v2 = varoftype(PSTR)
+    v3 = varoftype(PSTR)
+    op = SpaceOperation('direct_call', [const(func), v1, v2], v3)
+    tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
+    op1 = tr.rewrite_operation(op)
+    assert op1.opname == 'residual_call_r_r'
+    assert op1.args[0].value == func
+    assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_CONCAT
+    assert op1.args[2] == ListOfKind('ref', [v1, v2])
+    assert op1.result == v3
+
+def test_unicode_concat():
+    # test that the oopspec is present and correctly transformed
+    PSTR = lltype.Ptr(rstr.UNICODE)
+    FUNC = lltype.FuncType([PSTR, PSTR], PSTR)
+    func = lltype.functionptr(FUNC, 'll_strconcat',
+                              _callable=rstr.LLHelpers.ll_strconcat)
+    v1 = varoftype(PSTR)
+    v2 = varoftype(PSTR)
+    v3 = varoftype(PSTR)
+    op = SpaceOperation('direct_call', [const(func), v1, v2], v3)
+    tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
+    op1 = tr.rewrite_operation(op)
+    assert op1.opname == 'residual_call_r_r'
+    assert op1.args[0].value == func
+    assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_CONCAT
+    assert op1.args[2] == ListOfKind('ref', [v1, v2])
+    assert op1.result == v3
+    #
+    # check the callinfo_for_oopspec
+    got = effectinfo.callinfo_for_oopspec(effectinfo.EffectInfo.OS_UNI_CONCAT)
+    assert got[0] == op1.args[1]    # the calldescr
+    assert heaptracker.int2adr(got[1]) == llmemory.cast_ptr_to_adr(func)
+
+def test_str_slice():
+    # test that the oopspec is present and correctly transformed
+    PSTR = lltype.Ptr(rstr.STR)
+    INT = lltype.Signed
+    FUNC = lltype.FuncType([PSTR, INT, INT], PSTR)
+    func = lltype.functionptr(FUNC, '_ll_stringslice',
+                            _callable=rstr.LLHelpers._ll_stringslice)
+    v1 = varoftype(PSTR)
+    v2 = varoftype(INT)
+    v3 = varoftype(INT)
+    v4 = varoftype(PSTR)
+    op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4)
+    tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
+    op1 = tr.rewrite_operation(op)
+    assert op1.opname == 'residual_call_ir_r'
+    assert op1.args[0].value == func
+    assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_STR_SLICE
+    assert op1.args[2] == ListOfKind('int', [v2, v3])
+    assert op1.args[3] == ListOfKind('ref', [v1])
+    assert op1.result == v4
+
+def test_unicode_slice():
+    # test that the oopspec is present and correctly transformed
+    PUNICODE = lltype.Ptr(rstr.UNICODE)
+    INT = lltype.Signed
+    FUNC = lltype.FuncType([PUNICODE, INT, INT], PUNICODE)
+    func = lltype.functionptr(FUNC, '_ll_stringslice',
+                            _callable=rstr.LLHelpers._ll_stringslice)
+    v1 = varoftype(PUNICODE)
+    v2 = varoftype(INT)
+    v3 = varoftype(INT)
+    v4 = varoftype(PUNICODE)
+    op = SpaceOperation('direct_call', [const(func), v1, v2, v3], v4)
+    tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
+    op1 = tr.rewrite_operation(op)
+    assert op1.opname == 'residual_call_ir_r'
+    assert op1.args[0].value == func
+    assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_UNI_SLICE
+    assert op1.args[2] == ListOfKind('int', [v2, v3])
+    assert op1.args[3] == ListOfKind('ref', [v1])
+    assert op1.result == v4
+
+def test_list_ll_arraycopy():
+    from pypy.rlib.rgc import ll_arraycopy
+    LIST = lltype.GcArray(lltype.Signed)
+    PLIST = lltype.Ptr(LIST)
+    INT = lltype.Signed
+    FUNC = lltype.FuncType([PLIST]*2+[INT]*3, lltype.Void)
+    func = lltype.functionptr(FUNC, 'll_arraycopy', _callable=ll_arraycopy)
+    v1 = varoftype(PLIST)
+    v2 = varoftype(PLIST)
+    v3 = varoftype(INT)
+    v4 = varoftype(INT)
+    v5 = varoftype(INT)
+    v6 = varoftype(lltype.Void)
+    op = SpaceOperation('direct_call', [const(func), v1, v2, v3, v4, v5], v6)
+    tr = Transformer(FakeCPU(), FakeBuiltinCallControl())
+    op1 = tr.rewrite_operation(op)
+    assert op1.opname == 'residual_call_ir_v'
+    assert op1.args[0].value == func
+    assert op1.args[1] == 'calldescr-%d' % effectinfo.EffectInfo.OS_ARRAYCOPY
+    assert op1.args[2] == ListOfKind('int', [v3, v4, v5])
+    assert op1.args[3] == ListOfKind('ref', [v1, v2])

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/test/test_list.py	Thu Sep 30 00:16:20 2010
@@ -36,10 +36,16 @@
 
 class FakeCallControl:
     class getcalldescr(AbstractDescr):
-        def __init__(self, op):
+        def __init__(self, op, oopspecindex=0):
             self.op = op
+            self.oopspecindex = oopspecindex
         def __repr__(self):
-            return '<CallDescr>'
+            if self.oopspecindex == 0:
+                return '<CallDescr>'
+            else:
+                return '<CallDescrOS%d>' % self.oopspecindex
+    def calldescr_canraise(self, calldescr):
+        return False
 
 def builtin_test(oopspec_name, args, RESTYPE, expected):
     v_result = varoftype(RESTYPE)
@@ -99,7 +105,7 @@
                   varoftype(lltype.Signed), 
                   varoftype(lltype.Signed)],
                  lltype.Void, """
-                     arraycopy <CallDescr>, $'myfunc', %r0, %r1, %i0, %i1, %i2, <ArrayDescr>
+                     residual_call_ir_v $'myfunc', <CallDescrOS1>, I[%i0, %i1, %i2], R[%r0, %r1]
                  """)
 
 def test_fixed_getitem():

Modified: pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/codewriter/test/test_support.py	Thu Sep 30 00:16:20 2010
@@ -1,7 +1,8 @@
 import py
 from pypy.rpython.lltypesystem import lltype
+from pypy.rpython.annlowlevel import llstr
 from pypy.objspace.flow.model import Variable, Constant, SpaceOperation
-from pypy.jit.codewriter.support import decode_builtin_call
+from pypy.jit.codewriter.support import decode_builtin_call, LLtypeHelpers
 
 def newconst(x):
     return Constant(x, lltype.typeOf(x))
@@ -65,3 +66,70 @@
     assert opargs == [newconst(myarray), newconst(2), vc, vi]
     #impl = runner.get_oopspec_impl('spam.foobar', lltype.Ptr(A))
     #assert impl(myarray, 2, 'A', 5) == 42 * ord('A')
+
+def test_streq_slice_checknull():
+    p1 = llstr("hello world")
+    p2 = llstr("wor")
+    func = LLtypeHelpers._ll_4_str_eq_slice_checknull.im_func
+    assert func(p1, 6, 3, p2) == True
+    assert func(p1, 6, 2, p2) == False
+    assert func(p1, 5, 3, p2) == False
+    assert func(p1, 2, 1, llstr(None)) == False
+
+def test_streq_slice_nonnull():
+    p1 = llstr("hello world")
+    p2 = llstr("wor")
+    func = LLtypeHelpers._ll_4_str_eq_slice_nonnull.im_func
+    assert func(p1, 6, 3, p2) == True
+    assert func(p1, 6, 2, p2) == False
+    assert func(p1, 5, 3, p2) == False
+    py.test.raises(AttributeError, func, p1, 2, 1, llstr(None))
+
+def test_streq_slice_char():
+    p1 = llstr("hello world")
+    func = LLtypeHelpers._ll_4_str_eq_slice_char.im_func
+    assert func(p1, 6, 3, "w") == False
+    assert func(p1, 6, 0, "w") == False
+    assert func(p1, 6, 1, "w") == True
+    assert func(p1, 6, 1, "x") == False
+
+def test_streq_nonnull():
+    p1 = llstr("wor")
+    p2 = llstr("wor")
+    assert p1 != p2
+    func = LLtypeHelpers._ll_2_str_eq_nonnull.im_func
+    assert func(p1, p1) == True
+    assert func(p1, p2) == True
+    assert func(p1, llstr("wrl")) == False
+    assert func(p1, llstr("world")) == False
+    assert func(p1, llstr("w")) == False
+    py.test.raises(AttributeError, func, p1, llstr(None))
+    py.test.raises(AttributeError, func, llstr(None), p2)
+
+def test_streq_nonnull_char():
+    func = LLtypeHelpers._ll_2_str_eq_nonnull_char.im_func
+    assert func(llstr("wor"), "x") == False
+    assert func(llstr("w"), "x") == False
+    assert func(llstr(""), "x") == False
+    assert func(llstr("x"), "x") == True
+    py.test.raises(AttributeError, func, llstr(None), "x")
+
+def test_streq_checknull_char():
+    func = LLtypeHelpers._ll_2_str_eq_checknull_char.im_func
+    assert func(llstr("wor"), "x") == False
+    assert func(llstr("w"), "x") == False
+    assert func(llstr(""), "x") == False
+    assert func(llstr("x"), "x") == True
+    assert func(llstr(None), "x") == False
+
+def test_streq_lengthok():
+    p1 = llstr("wor")
+    p2 = llstr("wor")
+    assert p1 != p2
+    func = LLtypeHelpers._ll_2_str_eq_lengthok.im_func
+    assert func(p1, p1) == True
+    assert func(p1, p2) == True
+    assert func(p1, llstr("wrl")) == False
+    py.test.raises(IndexError, func, p1, llstr("w"))
+    py.test.raises(AttributeError, func, p1, llstr(None))
+    py.test.raises(AttributeError, func, llstr(None), p2)

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/blackhole.py	Thu Sep 30 00:16:20 2010
@@ -1024,10 +1024,6 @@
     def bhimpl_arraylen_gc(cpu, array, arraydescr):
         return cpu.bh_arraylen_gc(arraydescr, array)
 
-    @arguments("cpu", "d", "i", "r", "r", "i", "i", "i", "d")
-    def bhimpl_arraycopy(cpu, calldescr, func, x1, x2, x3, x4, x5, arraydescr):
-        cpu.bh_call_v(func, calldescr, [x3, x4, x5], [x1, x2], None)
-
     @arguments("cpu", "r", "d", "d", "i", returns="i")
     def bhimpl_getarrayitem_vable_i(cpu, vable, fielddescr, arraydescr, index):
         array = cpu.bh_getfield_gc_r(vable, fielddescr)

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/compile.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/compile.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/compile.py	Thu Sep 30 00:16:20 2010
@@ -51,7 +51,7 @@
 def compile_new_loop(metainterp, old_loop_tokens, greenkey, start):
     """Try to compile a new loop by closing the current history back
     to the first operation.
-    """    
+    """
     history = metainterp.history
     loop = create_empty_loop(metainterp)
     loop.greenkey = greenkey
@@ -65,7 +65,7 @@
     jitdriver_sd = metainterp.jitdriver_sd
     loop_token = make_loop_token(len(loop.inputargs), jitdriver_sd)
     loop.token = loop_token
-    loop.operations[-1].descr = loop_token     # patch the target of the JUMP
+    loop.operations[-1].setdescr(loop_token)     # patch the target of the JUMP
     try:
         old_loop_token = jitdriver_sd.warmstate.optimize_loop(
             metainterp_sd, old_loop_tokens, loop)
@@ -133,7 +133,7 @@
     metainterp_sd.profiler.end_backend()
     if not we_are_translated():
         metainterp_sd.stats.compiled()
-    metainterp_sd.log("compiled new bridge")            
+    metainterp_sd.log("compiled new bridge")
 
 # ____________________________________________________________
 
@@ -177,7 +177,7 @@
 
 class TerminatingLoopToken(LoopToken):
     terminating = True
-    
+
     def __init__(self, nargs, finishdescr):
         self.specnodes = [prebuiltNotSpecNode]*nargs
         self.finishdescr = finishdescr
@@ -233,14 +233,14 @@
         self.metainterp_sd = metainterp_sd
 
     def store_final_boxes(self, guard_op, boxes):
-        guard_op.fail_args = boxes
-        self.guard_opnum = guard_op.opnum
+        guard_op.setfailargs(boxes)
+        self.guard_opnum = guard_op.getopnum()
 
     def make_a_counter_per_value(self, guard_value_op):
-        assert guard_value_op.opnum == rop.GUARD_VALUE
-        box = guard_value_op.args[0]
+        assert guard_value_op.getopnum() == rop.GUARD_VALUE
+        box = guard_value_op.getarg(0)
         try:
-            i = guard_value_op.fail_args.index(box)
+            i = guard_value_op.getfailargs().index(box)
         except ValueError:
             return     # xxx probably very rare
         else:
@@ -508,7 +508,7 @@
 def compile_new_bridge(metainterp, old_loop_tokens, resumekey):
     """Try to compile a new bridge leading from the beginning of the history
     to some existing place.
-    """    
+    """
     # The history contains new operations to attach as the code for the
     # failure of 'resumekey.guard_op'.
     #
@@ -540,13 +540,14 @@
     op = new_loop.operations[-1]
     if not isinstance(target_loop_token, TerminatingLoopToken):
         # normal case
-        op.descr = target_loop_token     # patch the jump target
+        op.setdescr(target_loop_token)     # patch the jump target
     else:
         # The target_loop_token is a pseudo loop token,
         # e.g. loop_tokens_done_with_this_frame_void[0]
         # Replace the operation with the real operation we want, i.e. a FINISH
         descr = target_loop_token.finishdescr
-        new_op = ResOperation(rop.FINISH, op.args, None, descr=descr)
+        args = op.getarglist()
+        new_op = ResOperation(rop.FINISH, args, None, descr=descr)
         new_loop.operations[-1] = new_op
 
 # ____________________________________________________________
@@ -597,6 +598,6 @@
         ResOperation(rop.GUARD_NO_EXCEPTION, [], None, descr=faildescr),
         ResOperation(rop.FINISH, finishargs, None, descr=jd.portal_finishtoken)
         ]
-    operations[1].fail_args = []
+    operations[1].setfailargs([])
     cpu.compile_loop(inputargs, operations, loop_token)
     return loop_token

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/executor.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/executor.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/executor.py	Thu Sep 30 00:16:20 2010
@@ -2,7 +2,7 @@
 """
 
 import py
-from pypy.rpython.lltypesystem import lltype, llmemory
+from pypy.rpython.lltypesystem import lltype, llmemory, rstr
 from pypy.rpython.ootypesystem import ootype
 from pypy.rpython.lltypesystem.lloperation import llop
 from pypy.rlib.rarithmetic import ovfcheck, r_uint, intmask
@@ -165,12 +165,6 @@
 def do_new_with_vtable(cpu, _, clsbox):
     return BoxPtr(exec_new_with_vtable(cpu, clsbox))
 
-def do_arraycopy(cpu, _, calldescr, funcbox, x1box, x2box,
-                 x3box, x4box, x5box, arraydescr):
-    cpu.bh_call_v(funcbox.getint(), calldescr,
-                  [x3box.getint(), x4box.getint(), x5box.getint()],
-                  [x1box.getref_base(), x2box.getref_base()], None)
-
 def do_int_add_ovf(cpu, metainterp, box1, box2):
     # the overflow operations can be called without a metainterp, if an
     # overflow cannot occur
@@ -209,6 +203,24 @@
 def do_same_as(cpu, _, box):
     return box.clonebox()
 
+def do_copystrcontent(cpu, _, srcbox, dstbox,
+                      srcstartbox, dststartbox, lengthbox):
+    src = srcbox.getptr(lltype.Ptr(rstr.STR))
+    dst = dstbox.getptr(lltype.Ptr(rstr.STR))
+    srcstart = srcstartbox.getint()
+    dststart = dststartbox.getint()
+    length = lengthbox.getint()
+    rstr.copy_string_contents(src, dst, srcstart, dststart, length)
+
+def do_copyunicodecontent(cpu, _, srcbox, dstbox,
+                          srcstartbox, dststartbox, lengthbox):
+    src = srcbox.getptr(lltype.Ptr(rstr.UNICODE))
+    dst = dstbox.getptr(lltype.Ptr(rstr.UNICODE))
+    srcstart = srcstartbox.getint()
+    dststart = dststartbox.getint()
+    length = lengthbox.getint()
+    rstr.copy_unicode_contents(src, dst, srcstart, dststart, length)
+
 # ____________________________________________________________
 
 ##def do_force_token(cpu):

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/gc.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/gc.py	Thu Sep 30 00:16:20 2010
@@ -19,6 +19,9 @@
 class GC_hybrid(GcDescription):
     malloc_zero_filled = True
 
+class GC_minimark(GcDescription):
+    malloc_zero_filled = True
+
 
 def get_description(config):
     name = config.translation.gc

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/graphpage.py	Thu Sep 30 00:16:20 2010
@@ -17,13 +17,13 @@
     for graph, highlight in graphs:
         for op in graph.get_operations():
             if is_interesting_guard(op):
-                graphs.append((SubGraph(op.descr._debug_suboperations),
+                graphs.append((SubGraph(op.getdescr()._debug_suboperations),
                                highlight))
     graphpage = ResOpGraphPage(graphs, errmsg)
     graphpage.display()
 
 def is_interesting_guard(op):
-    return hasattr(op.descr, '_debug_suboperations')
+    return hasattr(op.getdescr(), '_debug_suboperations')
 
 
 class ResOpGraphPage(GraphPage):
@@ -76,7 +76,7 @@
             for i, op in enumerate(graph.get_operations()):
                 if is_interesting_guard(op):
                     self.mark_starter(graphindex, i+1)
-                if op.opnum == rop.DEBUG_MERGE_POINT:
+                if op.getopnum() == rop.DEBUG_MERGE_POINT:
                     if not last_was_mergepoint:
                         last_was_mergepoint = True
                         self.mark_starter(graphindex, i)
@@ -155,7 +155,7 @@
             op = operations[opindex]
             lines.append(repr(op))
             if is_interesting_guard(op):
-                tgt = op.descr._debug_suboperations[0]
+                tgt = op.getdescr()._debug_suboperations[0]
                 tgt_g, tgt_i = self.all_operations[tgt]
                 self.genedge((graphindex, opstartindex),
                              (tgt_g, tgt_i),
@@ -167,8 +167,8 @@
                 self.genedge((graphindex, opstartindex),
                              (graphindex, opindex))
                 break
-        if op.opnum == rop.JUMP:
-            tgt = op.descr
+        if op.getopnum() == rop.JUMP:
+            tgt = op.getdescr()
             tgt_g = -1
             if tgt is None:
                 tgt_g = graphindex
@@ -191,7 +191,8 @@
     def getlinks(self):
         boxes = {}
         for op in self.all_operations:
-            for box in op.args + [op.result]:
+            args = op.getarglist() + [op.result]
+            for box in args:
                 if getattr(box, 'is_box', False):
                     boxes[box] = True
         links = {}

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/history.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/history.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/history.py	Thu Sep 30 00:16:20 2010
@@ -532,7 +532,7 @@
 class BoxFloat(Box):
     type = FLOAT
     _attrs_ = ('value',)
-    
+
     def __init__(self, floatval=0.0):
         assert isinstance(floatval, float)
         self.value = floatval
@@ -685,6 +685,19 @@
         return llmemory.cast_adr_to_int(adr, "emulated")
     return i
 
+def get_const_ptr_for_string(s):
+    from pypy.rpython.annlowlevel import llstr
+    if not we_are_translated():
+        try:
+            return _const_ptr_for_string[s]
+        except KeyError:
+            pass
+    result = ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF, llstr(s)))
+    if not we_are_translated():
+        _const_ptr_for_string[s] = result
+    return result
+_const_ptr_for_string = {}
+
 # ____________________________________________________________
 
 # The TreeLoop class contains a loop or a generalized loop, i.e. a tree
@@ -759,33 +772,34 @@
         assert len(seen) == len(inputargs), (
                "duplicate Box in the Loop.inputargs")
         TreeLoop.check_consistency_of_branch(operations, seen)
-        
+
     @staticmethod
     def check_consistency_of_branch(operations, seen):
         "NOT_RPYTHON"
         for op in operations:
-            for box in op.args:
+            for i in range(op.numargs()):
+                box = op.getarg(i)
                 if isinstance(box, Box):
                     assert box in seen
             if op.is_guard():
-                assert op.descr is not None
-                if hasattr(op.descr, '_debug_suboperations'):
-                    ops = op.descr._debug_suboperations
+                assert op.getdescr() is not None
+                if hasattr(op.getdescr(), '_debug_suboperations'):
+                    ops = op.getdescr()._debug_suboperations
                     TreeLoop.check_consistency_of_branch(ops, seen.copy())
-                for box in op.fail_args or []:
+                for box in op.getfailargs() or []:
                     if box is not None:
                         assert isinstance(box, Box)
                         assert box in seen
             else:
-                assert op.fail_args is None
+                assert op.getfailargs() is None
             box = op.result
             if box is not None:
                 assert isinstance(box, Box)
                 assert box not in seen
                 seen[box] = True
         assert operations[-1].is_final()
-        if operations[-1].opnum == rop.JUMP:
-            target = operations[-1].descr
+        if operations[-1].getopnum() == rop.JUMP:
+            target = operations[-1].getdescr()
             if target is not None:
                 assert isinstance(target, LoopToken)
 
@@ -793,7 +807,8 @@
         # RPython-friendly
         print '%r: inputargs =' % self, self._dump_args(self.inputargs)
         for op in self.operations:
-            print '\t', op.getopname(), self._dump_args(op.args), \
+            args = op.getarglist()
+            print '\t', op.getopname(), self._dump_args(args), \
                   self._dump_box(op.result)
 
     def _dump_args(self, boxes):
@@ -809,14 +824,14 @@
         return '<%s>' % (self.name,)
 
 def _list_all_operations(result, operations, omit_finish=True):
-    if omit_finish and operations[-1].opnum == rop.FINISH:
+    if omit_finish and operations[-1].getopnum() == rop.FINISH:
         # xxx obscure
         return
     result.extend(operations)
     for op in operations:
-        if op.is_guard() and op.descr:
-            if hasattr(op.descr, '_debug_suboperations'):
-                ops = op.descr._debug_suboperations
+        if op.is_guard() and op.getdescr():
+            if hasattr(op.getdescr(), '_debug_suboperations'):
+                ops = op.getdescr()._debug_suboperations
                 _list_all_operations(result, ops, omit_finish)
 
 # ____________________________________________________________
@@ -885,7 +900,7 @@
         self.aborted_count += 1
 
     def entered(self):
-        self.enter_count += 1        
+        self.enter_count += 1
 
     def compiled(self):
         self.compiled_count += 1
@@ -898,7 +913,7 @@
 
     def add_new_loop(self, loop):
         self.loops.append(loop)
-        
+
     # test read interface
 
     def get_all_loops(self):

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/logger.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/logger.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/logger.py	Thu Sep 30 00:16:20 2010
@@ -79,27 +79,27 @@
             debug_print('[' + args + ']')
         for i in range(len(operations)):
             op = operations[i]
-            if op.opnum == rop.DEBUG_MERGE_POINT:
-                loc = op.args[0]._get_str()
+            if op.getopnum() == rop.DEBUG_MERGE_POINT:
+                loc = op.getarg(0)._get_str()
                 debug_print("debug_merge_point('%s')" % (loc,))
                 continue
-            args = ", ".join([self.repr_of_arg(memo, arg) for arg in op.args])
+            args = ", ".join([self.repr_of_arg(memo, op.getarg(i)) for i in range(op.numargs())])
             if op.result is not None:
                 res = self.repr_of_arg(memo, op.result) + " = "
             else:
                 res = ""
             is_guard = op.is_guard()
-            if op.descr is not None:
-                descr = op.descr
+            if op.getdescr() is not None:
+                descr = op.getdescr()
                 if is_guard and self.guard_number:
                     index = self.metainterp_sd.cpu.get_fail_descr_number(descr)
                     r = "<Guard%d>" % index
                 else:
                     r = self.repr_of_descr(descr)
                 args += ', descr=' +  r
-            if is_guard and op.fail_args is not None:
+            if is_guard and op.getfailargs() is not None:
                 fail_args = ' [' + ", ".join([self.repr_of_arg(memo, arg)
-                                              for arg in op.fail_args]) + ']'
+                                              for arg in op.getfailargs()]) + ']'
             else:
                 fail_args = ''
             debug_print(res + op.getopname() +

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimize.py	Thu Sep 30 00:16:20 2010
@@ -43,7 +43,7 @@
     finder.find_nodes_bridge(bridge)
     for old_loop_token in old_loop_tokens:
         if finder.bridge_matches(old_loop_token.specnodes):
-            bridge.operations[-1].descr = old_loop_token   # patch jump target
+            bridge.operations[-1].setdescr(old_loop_token)   # patch jump target
             optimize_bridge_1(metainterp_sd, bridge)
             return old_loop_token
     return None

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizefindnode.py	Thu Sep 30 00:16:20 2010
@@ -144,7 +144,7 @@
 
     def find_nodes(self, operations):
         for op in operations:
-            opnum = op.opnum
+            opnum = op.getopnum()
             for value, func in find_nodes_ops:
                 if opnum == value:
                     func(self, op)
@@ -154,77 +154,79 @@
 
     def find_nodes_default(self, op):
         if op.is_always_pure():
-            for arg in op.args:
+            for i in range(op.numargs()):
+                arg = op.getarg(i)
                 if self.get_constant_box(arg) is None:
                     break
             else:
                 # all constant arguments: we can constant-fold
-                argboxes = [self.get_constant_box(arg) for arg in op.args]
+                argboxes = [self.get_constant_box(op.getarg(i))
+                            for i in range(op.numargs())]
                 resbox = execute_nonspec(self.cpu, None,
-                                         op.opnum, argboxes, op.descr)
+                                         op.getopnum(), argboxes, op.getdescr())
                 self.set_constant_node(op.result, resbox.constbox())
         # default case: mark the arguments as escaping
-        for box in op.args:
-            self.getnode(box).mark_escaped()
+        for i in range(op.numargs()):
+            self.getnode(op.getarg(i)).mark_escaped()
 
     def find_nodes_no_escape(self, op):
         pass    # for operations that don't escape their arguments
 
     find_nodes_PTR_EQ        = find_nodes_no_escape
     find_nodes_PTR_NE        = find_nodes_no_escape
-    find_nodes_INSTANCEOF    = find_nodes_no_escape
+    ##find_nodes_INSTANCEOF    = find_nodes_no_escape
     find_nodes_GUARD_NONNULL = find_nodes_no_escape
     find_nodes_GUARD_ISNULL  = find_nodes_no_escape
 
     def find_nodes_NEW_WITH_VTABLE(self, op):
         instnode = InstanceNode()
-        box = op.args[0]
+        box = op.getarg(0)
         assert isinstance(box, Const)
         instnode.knownclsbox = box
         self.nodes[op.result] = instnode
 
     def find_nodes_NEW(self, op):
         instnode = InstanceNode()
-        instnode.structdescr = op.descr
+        instnode.structdescr = op.getdescr()
         self.nodes[op.result] = instnode
 
     def find_nodes_NEW_ARRAY(self, op):
-        lengthbox = op.args[0]
+        lengthbox = op.getarg(0)
         lengthbox = self.get_constant_box(lengthbox)
         if lengthbox is None:
             return     # var-sized arrays are not virtual
         arraynode = InstanceNode()
         arraynode.arraysize = lengthbox.getint()
-        arraynode.arraydescr = op.descr
+        arraynode.arraydescr = op.getdescr()
         self.nodes[op.result] = arraynode
 
     def find_nodes_ARRAYLEN_GC(self, op):
-        arraynode = self.getnode(op.args[0])
+        arraynode = self.getnode(op.getarg(0))
         if arraynode.arraydescr is not None:
             resbox = ConstInt(arraynode.arraysize)
             self.set_constant_node(op.result, resbox)
 
     def find_nodes_GUARD_CLASS(self, op):
-        instnode = self.getnode(op.args[0])
+        instnode = self.getnode(op.getarg(0))
         if instnode.fromstart:    # only useful (and safe) in this case
-            box = op.args[1]
+            box = op.getarg(1)
             assert isinstance(box, Const)
             instnode.knownclsbox = box
 
     def find_nodes_GUARD_VALUE(self, op):
-        instnode = self.getnode(op.args[0])
+        instnode = self.getnode(op.getarg(0))
         if instnode.fromstart:    # only useful (and safe) in this case
-            box = op.args[1]
+            box = op.getarg(1)
             assert isinstance(box, Const)
             instnode.knownvaluebox = box
 
     def find_nodes_SETFIELD_GC(self, op):
-        instnode = self.getnode(op.args[0])
-        fieldnode = self.getnode(op.args[1])
+        instnode = self.getnode(op.getarg(0))
+        fieldnode = self.getnode(op.getarg(1))
         if instnode.escaped:
             fieldnode.mark_escaped()
             return     # nothing to be gained from tracking the field
-        field = op.descr
+        field = op.getdescr()
         assert isinstance(field, AbstractValue)
         if instnode.curfields is None:
             instnode.curfields = {}
@@ -232,10 +234,10 @@
         instnode.add_escape_dependency(fieldnode)
 
     def find_nodes_GETFIELD_GC(self, op):
-        instnode = self.getnode(op.args[0])
+        instnode = self.getnode(op.getarg(0))
         if instnode.escaped:
             return     # nothing to be gained from tracking the field
-        field = op.descr
+        field = op.getdescr()
         assert isinstance(field, AbstractValue)
         if instnode.curfields is not None and field in instnode.curfields:
             fieldnode = instnode.curfields[field]
@@ -254,13 +256,13 @@
     find_nodes_GETFIELD_GC_PURE = find_nodes_GETFIELD_GC
 
     def find_nodes_SETARRAYITEM_GC(self, op):
-        indexbox = op.args[1]
+        indexbox = op.getarg(1)
         indexbox = self.get_constant_box(indexbox)
         if indexbox is None:
             self.find_nodes_default(op)            # not a Const index
             return
-        arraynode = self.getnode(op.args[0])
-        itemnode = self.getnode(op.args[2])
+        arraynode = self.getnode(op.getarg(0))
+        itemnode = self.getnode(op.getarg(2))
         if arraynode.escaped:
             itemnode.mark_escaped()
             return     # nothing to be gained from tracking the item
@@ -270,12 +272,12 @@
         arraynode.add_escape_dependency(itemnode)
 
     def find_nodes_GETARRAYITEM_GC(self, op):
-        indexbox = op.args[1]
+        indexbox = op.getarg(1)
         indexbox = self.get_constant_box(indexbox)
         if indexbox is None:
             self.find_nodes_default(op)            # not a Const index
             return
-        arraynode = self.getnode(op.args[0])
+        arraynode = self.getnode(op.getarg(0))
         if arraynode.escaped:
             return     # nothing to be gained from tracking the item
         index = indexbox.getint()
@@ -298,13 +300,15 @@
     def find_nodes_JUMP(self, op):
         # only set up the 'unique' field of the InstanceNodes;
         # real handling comes later (build_result_specnodes() for loops).
-        for box in op.args:
+        for i in range(op.numargs()):
+            box = op.getarg(i)
             self.getnode(box).set_unique_nodes()
 
     def find_nodes_FINISH(self, op):
         # only for bridges, and only for the ones that end in a 'return'
         # or 'raise'; all other cases end with a JUMP.
-        for box in op.args:
+        for i in range(op.numargs()):
+            box = op.getarg(i)
             self.getnode(box).unique = UNIQUE_NO
 
 find_nodes_ops = _findall(NodeFinder, 'find_nodes_')
@@ -315,16 +319,17 @@
 class PerfectSpecializationFinder(NodeFinder):
     node_fromstart = InstanceNode(fromstart=True)
 
-    def find_nodes_loop(self, loop):
+    def find_nodes_loop(self, loop, build_specnodes=True):
         self._loop = loop
         self.setup_input_nodes(loop.inputargs)
         self.find_nodes(loop.operations)
-        self.build_result_specnodes(loop)
+        if build_specnodes:
+            self.build_result_specnodes(loop)
 
     def show(self):
         from pypy.jit.metainterp.viewnode import viewnodes, view
         op = self._loop.operations[-1]
-        assert op.opnum == rop.JUMP
+        assert op.getopnum() == rop.JUMP
         exitnodes = [self.getnode(arg) for arg in op.args]
         viewnodes(self.inputnodes, exitnodes)
         if hasattr(self._loop.token, "specnodes"):
@@ -343,14 +348,14 @@
         # Build the list of specnodes based on the result
         # computed by NodeFinder.find_nodes().
         op = loop.operations[-1]
-        assert op.opnum == rop.JUMP
-        assert len(self.inputnodes) == len(op.args)
+        assert op.getopnum() == rop.JUMP
+        assert len(self.inputnodes) == op.numargs()
         while True:
             self.restart_needed = False
             specnodes = []
-            for i in range(len(op.args)):
+            for i in range(op.numargs()):
                 inputnode = self.inputnodes[i]
-                exitnode = self.getnode(op.args[i])
+                exitnode = self.getnode(op.getarg(i))
                 specnodes.append(self.intersect(inputnode, exitnode))
             if not self.restart_needed:
                 break
@@ -562,9 +567,9 @@
 
     def bridge_matches(self, nextloop_specnodes):
         jump_op = self.jump_op
-        assert len(jump_op.args) == len(nextloop_specnodes)
+        assert jump_op.numargs() == len(nextloop_specnodes)
         for i in range(len(nextloop_specnodes)):
-            exitnode = self.getnode(jump_op.args[i])
+            exitnode = self.getnode(jump_op.getarg(i))
             if not nextloop_specnodes[i].matches_instance_node(exitnode):
                 return False
         return True

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/__init__.py	Thu Sep 30 00:16:20 2010
@@ -3,6 +3,7 @@
 from pypy.jit.metainterp.optimizeopt.intbounds import OptIntBounds
 from pypy.jit.metainterp.optimizeopt.virtualize import OptVirtualize
 from pypy.jit.metainterp.optimizeopt.heap import OptHeap
+from pypy.jit.metainterp.optimizeopt.string import OptString
 
 def optimize_loop_1(metainterp_sd, loop, virtuals=True):
     """Optimize loop.operations to make it match the input of loop.specnodes
@@ -13,6 +14,7 @@
     optimizations = [OptIntBounds(),
                      OptRewrite(),
                      OptVirtualize(),
+                     OptString(),
                      OptHeap(),
                     ]
     optimizer = Optimizer(metainterp_sd, loop, optimizations, virtuals)
@@ -23,4 +25,3 @@
     expect 'specnodes' on the bridge.
     """
     optimize_loop_1(metainterp_sd, bridge, False)
-        

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/heap.py	Thu Sep 30 00:16:20 2010
@@ -45,7 +45,7 @@
             op = self.lazy_setfields.get(descr, None)
             if op is None:
                 return None
-            return self.getvalue(op.args[1])
+            return self.getvalue(op.getarg(1))
         return d.get(value, None)
 
     def cache_arrayitem_value(self, descr, value, indexvalue, fieldvalue, write=False):
@@ -105,7 +105,7 @@
         if op.is_guard():
             self.optimizer.pendingfields = self.force_lazy_setfields_for_guard()
             return
-        opnum = op.opnum
+        opnum = op.getopnum()
         if (opnum == rop.SETFIELD_GC or
             opnum == rop.SETARRAYITEM_GC or
             opnum == rop.DEBUG_MERGE_POINT):
@@ -117,7 +117,7 @@
             if opnum == rop.CALL_ASSEMBLER:
                 effectinfo = None
             else:
-                effectinfo = op.descr.get_extra_info()
+                effectinfo = op.getdescr().get_extra_info()
             if effectinfo is not None:
                 # XXX we can get the wrong complexity here, if the lists
                 # XXX stored on effectinfo are large
@@ -142,7 +142,7 @@
                 return
             self.force_all_lazy_setfields()
         elif op.is_final() or (not we_are_translated() and
-                               op.opnum < 0):   # escape() operations
+                               op.getopnum() < 0):   # escape() operations
             self.force_all_lazy_setfields()
         self.clean_caches()
 
@@ -166,10 +166,11 @@
             # - is_comparison() for cases like "int_eq/setfield_gc/guard_true"
             # - CALL_MAY_FORCE: "call_may_force/setfield_gc/guard_not_forced"
             # - is_ovf(): "int_add_ovf/setfield_gc/guard_no_overflow"
-            opnum = prevop.opnum
+            opnum = prevop.getopnum()
+            lastop_args = lastop.getarglist()
             if ((prevop.is_comparison() or opnum == rop.CALL_MAY_FORCE
                  or prevop.is_ovf())
-                and prevop.result not in lastop.args):
+                and prevop.result not in lastop_args):
                 newoperations[-2] = lastop
                 newoperations[-1] = prevop
 
@@ -189,9 +190,9 @@
             # the only really interesting case that we need to handle in the
             # guards' resume data is that of a virtual object that is stored
             # into a field of a non-virtual object.
-            value = self.getvalue(op.args[0])
+            value = self.getvalue(op.getarg(0))
             assert not value.is_virtual()      # it must be a non-virtual
-            fieldvalue = self.getvalue(op.args[1])
+            fieldvalue = self.getvalue(op.getarg(1))
             if fieldvalue.is_virtual():
                 # this is the case that we leave to resume.py
                 pendingfields.append((descr, value.box,
@@ -202,20 +203,20 @@
 
     def force_lazy_setfield_if_necessary(self, op, value, write=False):
         try:
-            op1 = self.lazy_setfields[op.descr]
+            op1 = self.lazy_setfields[op.getdescr()]
         except KeyError:
             if write:
-                self.lazy_setfields_descrs.append(op.descr)
+                self.lazy_setfields_descrs.append(op.getdescr())
         else:
-            if self.getvalue(op1.args[0]) is not value:
-                self.force_lazy_setfield(op.descr)
+            if self.getvalue(op1.getarg(0)) is not value:
+                self.force_lazy_setfield(op.getdescr())
 
     def optimize_GETFIELD_GC(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         self.force_lazy_setfield_if_necessary(op, value)
         # check if the field was read from another getfield_gc just before
         # or has been written to recently
-        fieldvalue = self.read_cached_field(op.descr, value)
+        fieldvalue = self.read_cached_field(op.getdescr(), value)
         if fieldvalue is not None:
             self.make_equal_to(op.result, fieldvalue)
             return
@@ -225,38 +226,38 @@
         self.emit_operation(op) # FIXME: These might need constant propagation?
         # then remember the result of reading the field
         fieldvalue = self.getvalue(op.result)
-        self.cache_field_value(op.descr, value, fieldvalue)
+        self.cache_field_value(op.getdescr(), value, fieldvalue)
 
     def optimize_SETFIELD_GC(self, op):
-        value = self.getvalue(op.args[0])
-        fieldvalue = self.getvalue(op.args[1])
+        value = self.getvalue(op.getarg(0))
+        fieldvalue = self.getvalue(op.getarg(1))
         self.force_lazy_setfield_if_necessary(op, value, write=True)
-        self.lazy_setfields[op.descr] = op
+        self.lazy_setfields[op.getdescr()] = op
         # remember the result of future reads of the field
-        self.cache_field_value(op.descr, value, fieldvalue, write=True)
+        self.cache_field_value(op.getdescr(), value, fieldvalue, write=True)
 
     def optimize_GETARRAYITEM_GC(self, op):
-        value = self.getvalue(op.args[0])
-        indexvalue = self.getvalue(op.args[1])
-        fieldvalue = self.read_cached_arrayitem(op.descr, value, indexvalue)
+        value = self.getvalue(op.getarg(0))
+        indexvalue = self.getvalue(op.getarg(1))
+        fieldvalue = self.read_cached_arrayitem(op.getdescr(), value, indexvalue)
         if fieldvalue is not None:
             self.make_equal_to(op.result, fieldvalue)
             return
         ###self.optimizer.optimize_default(op)
         self.emit_operation(op) # FIXME: These might need constant propagation?
         fieldvalue = self.getvalue(op.result)
-        self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue)
+        self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue)
 
     def optimize_SETARRAYITEM_GC(self, op):
         self.emit_operation(op)
-        value = self.getvalue(op.args[0])
-        fieldvalue = self.getvalue(op.args[2])
-        indexvalue = self.getvalue(op.args[1])
-        self.cache_arrayitem_value(op.descr, value, indexvalue, fieldvalue,
+        value = self.getvalue(op.getarg(0))
+        fieldvalue = self.getvalue(op.getarg(2))
+        indexvalue = self.getvalue(op.getarg(1))
+        self.cache_arrayitem_value(op.getdescr(), value, indexvalue, fieldvalue,
                                    write=True)
 
     def propagate_forward(self, op):
-        opnum = op.opnum
+        opnum = op.getopnum()
         for value, func in optimize_ops:
             if opnum == value:
                 func(self, op)

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/intbounds.py	Thu Sep 30 00:16:20 2010
@@ -10,7 +10,7 @@
        remove redundant guards"""
 
     def propagate_forward(self, op):
-        opnum = op.opnum
+        opnum = op.getopnum()
         for value, func in optimize_ops:
             if opnum == value:
                 func(self, op)
@@ -31,7 +31,7 @@
             op = self.optimizer.producer[box]
         except KeyError:
             return
-        opnum = op.opnum
+        opnum = op.getopnum()
         for value, func in propagate_bounds_ops:
             if opnum == value:
                 func(self, op)
@@ -39,14 +39,14 @@
 
     def optimize_GUARD_TRUE(self, op):
         self.emit_operation(op)
-        self.propagate_bounds_backward(op.args[0])
+        self.propagate_bounds_backward(op.getarg(0))
 
     optimize_GUARD_FALSE = optimize_GUARD_TRUE
     optimize_GUARD_VALUE = optimize_GUARD_TRUE
 
     def optimize_INT_AND(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
 
         r = self.getvalue(op.result)
@@ -60,74 +60,74 @@
                 r.intbound.intersect(IntBound(0,val))
 
     def optimize_INT_SUB(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op.result)
         r.intbound.intersect(v1.intbound.sub_bound(v2.intbound))
 
     def optimize_INT_ADD(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op.result)
         r.intbound.intersect(v1.intbound.add_bound(v2.intbound))
 
     def optimize_INT_MUL(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         self.emit_operation(op)
         r = self.getvalue(op.result)
         r.intbound.intersect(v1.intbound.mul_bound(v2.intbound))
 
     def optimize_INT_ADD_OVF(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         resbound = v1.intbound.add_bound(v2.intbound)
         if resbound.has_lower and resbound.has_upper and \
-           self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+           self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
             # Transform into INT_ADD and remove guard
-            op.opnum = rop.INT_ADD
+            op = op.copy_and_change(rop.INT_ADD)
             self.skip_nextop()
-            self.optimize_INT_ADD(op)
+            self.optimize_INT_ADD(op) # emit the op
         else:
             self.emit_operation(op)
             r = self.getvalue(op.result)
             r.intbound.intersect(resbound)
 
     def optimize_INT_SUB_OVF(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         resbound = v1.intbound.sub_bound(v2.intbound)
         if resbound.has_lower and resbound.has_upper and \
-               self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+               self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
             # Transform into INT_SUB and remove guard
-            op.opnum = rop.INT_SUB
+            op = op.copy_and_change(rop.INT_SUB)
             self.skip_nextop()
-            self.optimize_INT_SUB(op)
+            self.optimize_INT_SUB(op) # emit the op
         else:
             self.emit_operation(op)
             r = self.getvalue(op.result)
             r.intbound.intersect(resbound)
 
     def optimize_INT_MUL_OVF(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         resbound = v1.intbound.mul_bound(v2.intbound)
         if resbound.has_lower and resbound.has_upper and \
-               self.nextop().opnum == rop.GUARD_NO_OVERFLOW:
+               self.nextop().getopnum() == rop.GUARD_NO_OVERFLOW:
             # Transform into INT_MUL and remove guard
-            op.opnum = rop.INT_MUL
+            op = op.copy_and_change(rop.INT_MUL)
             self.skip_nextop()
-            self.optimize_INT_MUL(op)
+            self.optimize_INT_MUL(op) # emit the op
         else:
             self.emit_operation(op)
             r = self.getvalue(op.result)
             r.intbound.intersect(resbound)
 
     def optimize_INT_LT(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.intbound.known_lt(v2.intbound):
             self.make_constant_int(op.result, 1)
         elif v1.intbound.known_ge(v2.intbound):
@@ -136,8 +136,8 @@
             self.emit_operation(op)
 
     def optimize_INT_GT(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.intbound.known_gt(v2.intbound):
             self.make_constant_int(op.result, 1)
         elif v1.intbound.known_le(v2.intbound):
@@ -146,8 +146,8 @@
             self.emit_operation(op)
 
     def optimize_INT_LE(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.intbound.known_le(v2.intbound):
             self.make_constant_int(op.result, 1)
         elif v1.intbound.known_gt(v2.intbound):
@@ -156,8 +156,8 @@
             self.emit_operation(op)
 
     def optimize_INT_GE(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.intbound.known_ge(v2.intbound):
             self.make_constant_int(op.result, 1)
         elif v1.intbound.known_lt(v2.intbound):
@@ -166,8 +166,8 @@
             self.emit_operation(op)
 
     def optimize_INT_EQ(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.intbound.known_gt(v2.intbound):
             self.make_constant_int(op.result, 0)
         elif v1.intbound.known_lt(v2.intbound):
@@ -176,8 +176,8 @@
             self.emit_operation(op)
 
     def optimize_INT_NE(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.intbound.known_gt(v2.intbound):
             self.make_constant_int(op.result, 1)
         elif v1.intbound.known_lt(v2.intbound):
@@ -192,115 +192,114 @@
 
     optimize_STRLEN = optimize_ARRAYLEN_GC
 
-    def make_int_lt(self, args):
-        v1 = self.getvalue(args[0])
-        v2 = self.getvalue(args[1])
+    def make_int_lt(self, box1, box2):
+        v1 = self.getvalue(box1)
+        v2 = self.getvalue(box2)
         if v1.intbound.make_lt(v2.intbound):
-            self.propagate_bounds_backward(args[0])
+            self.propagate_bounds_backward(box1)
         if v2.intbound.make_gt(v1.intbound):
-            self.propagate_bounds_backward(args[1])
+            self.propagate_bounds_backward(box2)
 
-
-    def make_int_le(self, args):
-        v1 = self.getvalue(args[0])
-        v2 = self.getvalue(args[1])
+    def make_int_le(self, box1, box2):
+        v1 = self.getvalue(box1)
+        v2 = self.getvalue(box2)
         if v1.intbound.make_le(v2.intbound):
-            self.propagate_bounds_backward(args[0])
+            self.propagate_bounds_backward(box1)
         if v2.intbound.make_ge(v1.intbound):
-            self.propagate_bounds_backward(args[1])
+            self.propagate_bounds_backward(box2)
 
-    def make_int_gt(self, args):
-        self.make_int_lt([args[1], args[0]])
+    def make_int_gt(self, box1, box2):
+        self.make_int_lt(box2, box1)
 
-    def make_int_ge(self, args):
-        self.make_int_le([args[1], args[0]])
+    def make_int_ge(self, box1, box2):
+        self.make_int_le(box2, box1)
 
     def propagate_bounds_INT_LT(self, op):
         r = self.getvalue(op.result)
         if r.is_constant():
             if r.box.same_constant(CONST_1):
-                self.make_int_lt(op.args)
+                self.make_int_lt(op.getarg(0), op.getarg(1))
             else:
-                self.make_int_ge(op.args)
+                self.make_int_ge(op.getarg(0), op.getarg(1))
 
     def propagate_bounds_INT_GT(self, op):
         r = self.getvalue(op.result)
         if r.is_constant():
             if r.box.same_constant(CONST_1):
-                self.make_int_gt(op.args)
+                self.make_int_gt(op.getarg(0), op.getarg(1))
             else:
-                self.make_int_le(op.args)
+                self.make_int_le(op.getarg(0), op.getarg(1))
 
     def propagate_bounds_INT_LE(self, op):
         r = self.getvalue(op.result)
         if r.is_constant():
             if r.box.same_constant(CONST_1):
-                self.make_int_le(op.args)
+                self.make_int_le(op.getarg(0), op.getarg(1))
             else:
-                self.make_int_gt(op.args)
+                self.make_int_gt(op.getarg(0), op.getarg(1))
 
     def propagate_bounds_INT_GE(self, op):
         r = self.getvalue(op.result)
         if r.is_constant():
             if r.box.same_constant(CONST_1):
-                self.make_int_ge(op.args)
+                self.make_int_ge(op.getarg(0), op.getarg(1))
             else:
-                self.make_int_lt(op.args)
+                self.make_int_lt(op.getarg(0), op.getarg(1))
 
     def propagate_bounds_INT_EQ(self, op):
         r = self.getvalue(op.result)
         if r.is_constant():
             if r.box.same_constant(CONST_1):
-                v1 = self.getvalue(op.args[0])
-                v2 = self.getvalue(op.args[1])
+                v1 = self.getvalue(op.getarg(0))
+                v2 = self.getvalue(op.getarg(1))
                 if v1.intbound.intersect(v2.intbound):
-                    self.propagate_bounds_backward(op.args[0])
+                    self.propagate_bounds_backward(op.getarg(0))
                 if v2.intbound.intersect(v1.intbound):
-                    self.propagate_bounds_backward(op.args[1])
+                    self.propagate_bounds_backward(op.getarg(1))
 
     def propagate_bounds_INT_NE(self, op):
         r = self.getvalue(op.result)
         if r.is_constant():
             if r.box.same_constant(CONST_0):
-                v1 = self.getvalue(op.args[0])
-                v2 = self.getvalue(op.args[1])
+                v1 = self.getvalue(op.getarg(0))
+                v2 = self.getvalue(op.getarg(1))
                 if v1.intbound.intersect(v2.intbound):
-                    self.propagate_bounds_backward(op.args[0])
+                    self.propagate_bounds_backward(op.getarg(0))
                 if v2.intbound.intersect(v1.intbound):
-                    self.propagate_bounds_backward(op.args[1])
+                    self.propagate_bounds_backward(op.getarg(1))
 
     def propagate_bounds_INT_ADD(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         r = self.getvalue(op.result)
         b = r.intbound.sub_bound(v2.intbound)
         if v1.intbound.intersect(b):
-            self.propagate_bounds_backward(op.args[0])
+            self.propagate_bounds_backward(op.getarg(0))
         b = r.intbound.sub_bound(v1.intbound)
         if v2.intbound.intersect(b):
-            self.propagate_bounds_backward(op.args[1])
+            self.propagate_bounds_backward(op.getarg(1))
 
     def propagate_bounds_INT_SUB(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         r = self.getvalue(op.result)
         b = r.intbound.add_bound(v2.intbound)
         if v1.intbound.intersect(b):
-            self.propagate_bounds_backward(op.args[0])
+            self.propagate_bounds_backward(op.getarg(0))
         b = r.intbound.sub_bound(v1.intbound).mul(-1)
         if v2.intbound.intersect(b):
-            self.propagate_bounds_backward(op.args[1])
+            self.propagate_bounds_backward(op.getarg(1))
 
     def propagate_bounds_INT_MUL(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         r = self.getvalue(op.result)
         b = r.intbound.div_bound(v2.intbound)
         if v1.intbound.intersect(b):
-            self.propagate_bounds_backward(op.args[0])
+            self.propagate_bounds_backward(op.getarg(0))
         b = r.intbound.div_bound(v1.intbound)
         if v2.intbound.intersect(b):
-            self.propagate_bounds_backward(op.args[1])
+            self.propagate_bounds_backward(op.getarg(1))
 
     propagate_bounds_INT_ADD_OVF  = propagate_bounds_INT_ADD
     propagate_bounds_INT_SUB_OVF  = propagate_bounds_INT_SUB

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/optimizer.py	Thu Sep 30 00:16:20 2010
@@ -12,17 +12,19 @@
 from pypy.rpython.lltypesystem import lltype
 from pypy.jit.metainterp.history import AbstractDescr, make_hashable_int
 from pypy.jit.metainterp.optimizeopt.intutils import IntBound, IntUnbounded
+from pypy.tool.pairtype import extendabletype
 
 LEVEL_UNKNOWN    = '\x00'
 LEVEL_NONNULL    = '\x01'
 LEVEL_KNOWNCLASS = '\x02'     # might also mean KNOWNARRAYDESCR, for arrays
-LEVEL_CONSTANT   = '\x03'        
+LEVEL_CONSTANT   = '\x03'
 
 import sys
 MAXINT = sys.maxint
 MININT = -sys.maxint - 1
-        
+
 class OptValue(object):
+    __metaclass__ = extendabletype
     _attrs_ = ('box', 'known_class', 'last_guard_index', 'level', 'intbound')
     last_guard_index = -1
 
@@ -36,7 +38,7 @@
         if isinstance(box, Const):
             self.make_constant(box)
         # invariant: box is a Const if and only if level == LEVEL_CONSTANT
-        
+
     def force_box(self):
         return self.box
 
@@ -126,6 +128,7 @@
     def setitem(self, index, value):
         raise NotImplementedError
 
+
 class ConstantValue(OptValue):
     def __init__(self, box):
         self.make_constant(box)
@@ -134,6 +137,7 @@
 CONST_1      = ConstInt(1)
 CVAL_ZERO    = ConstantValue(CONST_0)
 CVAL_ZERO_FLOAT = ConstantValue(ConstFloat(0.0))
+CVAL_UNINITIALIZED_ZERO = ConstantValue(CONST_0)
 llhelper.CVAL_NULLREF = ConstantValue(llhelper.CONST_NULL)
 oohelper.CVAL_NULLREF = ConstantValue(oohelper.CONST_NULL)
 
@@ -171,7 +175,7 @@
 
     def new_const_item(self, arraydescr):
         return self.optimizer.new_const_item(arraydescr)
-    
+
     def pure(self, opnum, args, result):
         op = ResOperation(opnum, args, result)
         self.optimizer.pure_operations[self.optimizer.make_args_key(op)] = op
@@ -184,7 +188,7 @@
 
     def setup(self, virtuals):
         pass
-    
+
 class Optimizer(Optimization):
 
     def __init__(self, metainterp_sd, loop, optimizations=None, virtuals=True):
@@ -249,6 +253,7 @@
         return None
 
     def make_equal_to(self, box, value):
+        assert isinstance(value, OptValue)
         assert box not in self.values
         self.values[box] = value
 
@@ -306,9 +311,12 @@
         # accumulate counters
         self.resumedata_memo.update_counters(self.metainterp_sd.profiler)
 
+    def send_extra_operation(self, op):
+        self.first_optimization.propagate_forward(op)
+
     def propagate_forward(self, op):
         self.producer[op.result] = op
-        opnum = op.opnum
+        opnum = op.getopnum()
         for value, func in optimize_ops:
             if opnum == value:
                 func(self, op)
@@ -323,15 +331,15 @@
         self._emit_operation(op)
 
     def _emit_operation(self, op):
-        for i in range(len(op.args)):
-            arg = op.args[i]
+        for i in range(op.numargs()):
+            arg = op.getarg(i)
             if arg in self.values:
                 box = self.values[arg].force_box()
-                op.args[i] = box
+                op.setarg(i, box)
         self.metainterp_sd.profiler.count(jitprof.OPT_OPS)
         if op.is_guard():
             self.metainterp_sd.profiler.count(jitprof.OPT_GUARDS)
-            self.store_final_boxes_in_guard(op)
+            op = self.store_final_boxes_in_guard(op)
         elif op.can_raise():
             self.exception_might_have_happened = True
         elif op.returns_bool_result():
@@ -340,7 +348,7 @@
 
     def store_final_boxes_in_guard(self, op):
         ###pendingfields = self.heap_op_optimizer.force_lazy_setfields_for_guard()
-        descr = op.descr
+        descr = op.getdescr()
         assert isinstance(descr, compile.ResumeGuardDescr)
         modifier = resume.ResumeDataVirtualAdder(descr, self.resumedata_memo)
         newboxes = modifier.finish(self.values, self.pendingfields)
@@ -348,49 +356,54 @@
             compile.giveup()
         descr.store_final_boxes(op, newboxes)
         #
-        if op.opnum == rop.GUARD_VALUE:
-            if self.getvalue(op.args[0]) in self.bool_boxes:
+        if op.getopnum() == rop.GUARD_VALUE:
+            if self.getvalue(op.getarg(0)) in self.bool_boxes:
                 # Hack: turn guard_value(bool) into guard_true/guard_false.
                 # This is done after the operation is emitted to let
                 # store_final_boxes_in_guard set the guard_opnum field of the
                 # descr to the original rop.GUARD_VALUE.
-                constvalue = op.args[1].getint()
+                constvalue = op.getarg(1).getint()
                 if constvalue == 0:
                     opnum = rop.GUARD_FALSE
                 elif constvalue == 1:
                     opnum = rop.GUARD_TRUE
                 else:
                     raise AssertionError("uh?")
-                op.opnum = opnum
-                op.args = [op.args[0]]
+                newop = ResOperation(opnum, [op.getarg(0)], op.result, descr)
+                newop.setfailargs(op.getfailargs())
+                return newop
             else:
                 # a real GUARD_VALUE.  Make it use one counter per value.
                 descr.make_a_counter_per_value(op)
+        return op
 
     def make_args_key(self, op):
-        args = op.args[:]
-        for i in range(len(args)):
-            arg = args[i]
+        args = []
+        for i in range(op.numargs()):
+            arg = op.getarg(i)
             if arg in self.values:
-                args[i] = self.values[arg].get_key_box()
-        args.append(ConstInt(op.opnum))
+                args.append(self.values[arg].get_key_box())
+            else:
+                args.append(arg)
+        args.append(ConstInt(op.getopnum()))
         return args
-            
+
     def optimize_default(self, op):
         canfold = op.is_always_pure()
         is_ovf = op.is_ovf()
         if is_ovf:
             nextop = self.loop.operations[self.i + 1]
-            canfold = nextop.opnum == rop.GUARD_NO_OVERFLOW
+            canfold = nextop.getopnum() == rop.GUARD_NO_OVERFLOW
         if canfold:
-            for arg in op.args:
-                if self.get_constant_box(arg) is None:
+            for i in range(op.numargs()):
+                if self.get_constant_box(op.getarg(i)) is None:
                     break
             else:
                 # all constant arguments: constant-fold away
-                argboxes = [self.get_constant_box(arg) for arg in op.args]
+                argboxes = [self.get_constant_box(op.getarg(i))
+                            for i in range(op.numargs())]
                 resbox = execute_nonspec(self.cpu, None,
-                                         op.opnum, argboxes, op.descr)
+                                         op.getopnum(), argboxes, op.getdescr())
                 self.make_constant(op.result, resbox.constbox())
                 if is_ovf:
                     self.i += 1 # skip next operation, it is the unneeded guard
@@ -399,8 +412,8 @@
             # did we do the exact same operation already?
             args = self.make_args_key(op)
             oldop = self.pure_operations.get(args, None)
-            if oldop is not None and oldop.descr is op.descr:
-                assert oldop.opnum == op.opnum
+            if oldop is not None and oldop.getdescr() is op.getdescr():
+                assert oldop.getopnum() == op.getopnum()
                 self.make_equal_to(op.result, self.getvalue(oldop.result))
                 if is_ovf:
                     self.i += 1 # skip next operation, it is the unneeded guard

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/rewrite.py	Thu Sep 30 00:16:20 2010
@@ -14,7 +14,7 @@
         if self.find_rewritable_bool(op, args):
             return
 
-        opnum = op.opnum
+        opnum = op.getopnum()
         for value, func in optimize_ops:
             if opnum == value:
                 func(self, op)
@@ -24,7 +24,7 @@
         
     def try_boolinvers(self, op, targs):
         oldop = self.optimizer.pure_operations.get(targs, None)
-        if oldop is not None and oldop.descr is op.descr:
+        if oldop is not None and oldop.getdescr() is op.getdescr():
             value = self.getvalue(oldop.result)
             if value.is_constant():
                 if value.box.same_constant(CONST_1):
@@ -39,7 +39,7 @@
 
     def find_rewritable_bool(self, op, args):
         try:
-            oldopnum = opboolinvers[op.opnum]
+            oldopnum = opboolinvers[op.getopnum()]
             targs = [args[0], args[1], ConstInt(oldopnum)]
             if self.try_boolinvers(op, targs):
                 return True
@@ -47,17 +47,17 @@
             pass
 
         try:
-            oldopnum = opboolreflex[op.opnum] # FIXME: add INT_ADD, INT_MUL
+            oldopnum = opboolreflex[op.getopnum()] # FIXME: add INT_ADD, INT_MUL
             targs = [args[1], args[0], ConstInt(oldopnum)]
             oldop = self.optimizer.pure_operations.get(targs, None)
-            if oldop is not None and oldop.descr is op.descr:
+            if oldop is not None and oldop.getdescr() is op.getdescr():
                 self.make_equal_to(op.result, self.getvalue(oldop.result))
                 return True
         except KeyError:
             pass
 
         try:
-            oldopnum = opboolinvers[opboolreflex[op.opnum]]
+            oldopnum = opboolinvers[opboolreflex[op.getopnum()]]
             targs = [args[1], args[0], ConstInt(oldopnum)]
             if self.try_boolinvers(op, targs):
                 return True
@@ -67,16 +67,16 @@
         return False
 
     def optimize_INT_AND(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.is_null() or v2.is_null():
             self.make_constant_int(op.result, 0)
         else:
             self.emit_operation(op)
 
     def optimize_INT_OR(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v1.is_null():
             self.make_equal_to(op.result, v2)
         elif v2.is_null():
@@ -85,20 +85,20 @@
             self.emit_operation(op)
 
     def optimize_INT_SUB(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
         if v2.is_constant() and v2.box.getint() == 0:
             self.make_equal_to(op.result, v1)
         else:
             self.emit_operation(op)
 
         # Synthesize the reverse ops for optimize_default to reuse
-        self.pure(rop.INT_ADD, [op.result, op.args[1]], op.args[0])
-        self.pure(rop.INT_SUB, [op.args[0], op.result], op.args[1])
+        self.pure(rop.INT_ADD, [op.result, op.getarg(1)], op.getarg(0))
+        self.pure(rop.INT_SUB, [op.getarg(0), op.result], op.getarg(1))
 
     def optimize_INT_ADD(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
 
         # If one side of the op is 0 the result is the other side.
         if v1.is_constant() and v1.box.getint() == 0:
@@ -109,12 +109,12 @@
             self.emit_operation(op)
 
         # Synthesize the reverse op for optimize_default to reuse
-        self.pure(rop.INT_SUB, [op.result, op.args[1]], op.args[0])
-        self.pure(rop.INT_SUB, [op.result, op.args[0]], op.args[1])
+        self.pure(rop.INT_SUB, [op.result, op.getarg(1)], op.getarg(0))
+        self.pure(rop.INT_SUB, [op.result, op.getarg(0)], op.getarg(1))
 
     def optimize_INT_MUL(self, op):
-        v1 = self.getvalue(op.args[0])
-        v2 = self.getvalue(op.args[1])
+        v1 = self.getvalue(op.getarg(0))
+        v2 = self.getvalue(op.getarg(1))
 
         # If one side of the op is 1 the result is the other side.
         if v1.is_constant() and v1.box.getint() == 1:
@@ -128,18 +128,21 @@
             self.emit_operation(op)
 
     def optimize_CALL_PURE(self, op):
-        for arg in op.args:
+        for i in range(op.numargs()):
+            arg = op.getarg(i)
             if self.get_constant_box(arg) is None:
                 break
         else:
             # all constant arguments: constant-fold away
-            self.make_constant(op.result, op.args[0])
+            self.make_constant(op.result, op.getarg(0))
             return
         # replace CALL_PURE with just CALL
-        self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
-                                         op.descr))
+        args = op.getarglist()[1:]
+        self.emit_operation(ResOperation(rop.CALL, args, op.result,
+                                         op.getdescr()))
+
     def optimize_guard(self, op, constbox, emit_operation=True):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         if value.is_constant():
             box = value.box
             assert isinstance(box, Const)
@@ -151,7 +154,7 @@
         value.make_constant(constbox)
 
     def optimize_GUARD_ISNULL(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         if value.is_null():
             return
         elif value.is_nonnull():
@@ -160,7 +163,7 @@
         value.make_constant(self.optimizer.cpu.ts.CONST_NULL)
 
     def optimize_GUARD_NONNULL(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         if value.is_nonnull():
             return
         elif value.is_null():
@@ -169,25 +172,25 @@
         value.make_nonnull(len(self.optimizer.newoperations) - 1)
 
     def optimize_GUARD_VALUE(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         emit_operation = True
         if value.last_guard_index != -1:
             # there already has been a guard_nonnull or guard_class or
             # guard_nonnull_class on this value, which is rather silly.
             # replace the original guard with a guard_value
             old_guard_op = self.optimizer.newoperations[value.last_guard_index]
-            old_opnum = old_guard_op.opnum
-            old_guard_op.opnum = rop.GUARD_VALUE
-            old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+            new_guard_op = old_guard_op.copy_and_change(rop.GUARD_VALUE,
+                                             args = [old_guard_op.getarg(0), op.getarg(1)])
+            self.optimizer.newoperations[value.last_guard_index] = new_guard_op
             # hack hack hack.  Change the guard_opnum on
-            # old_guard_op.descr so that when resuming,
+            # new_guard_op.getdescr() so that when resuming,
             # the operation is not skipped by pyjitpl.py.
-            descr = old_guard_op.descr
+            descr = new_guard_op.getdescr()
             assert isinstance(descr, compile.ResumeGuardDescr)
             descr.guard_opnum = rop.GUARD_VALUE
-            descr.make_a_counter_per_value(old_guard_op)
+            descr.make_a_counter_per_value(new_guard_op)
             emit_operation = False
-        constbox = op.args[1]
+        constbox = op.getarg(1)
         assert isinstance(constbox, Const)
         self.optimize_guard(op, constbox, emit_operation)
 
@@ -198,8 +201,8 @@
         self.optimize_guard(op, CONST_0)
 
     def optimize_GUARD_CLASS(self, op):
-        value = self.getvalue(op.args[0])
-        expectedclassbox = op.args[1]
+        value = self.getvalue(op.getarg(0))
+        expectedclassbox = op.getarg(1)
         assert isinstance(expectedclassbox, Const)
         realclassbox = value.get_constant_class(self.optimizer.cpu)
         if realclassbox is not None:
@@ -213,15 +216,16 @@
             # there already has been a guard_nonnull or guard_class or
             # guard_nonnull_class on this value.
             old_guard_op = self.optimizer.newoperations[value.last_guard_index]
-            if old_guard_op.opnum == rop.GUARD_NONNULL:
+            if old_guard_op.getopnum() == rop.GUARD_NONNULL:
                 # it was a guard_nonnull, which we replace with a
                 # guard_nonnull_class.
-                old_guard_op.opnum = rop.GUARD_NONNULL_CLASS
-                old_guard_op.args = [old_guard_op.args[0], op.args[1]]
+                new_guard_op = old_guard_op.copy_and_change (rop.GUARD_NONNULL_CLASS,
+                                         args = [old_guard_op.getarg(0), op.getarg(1)])
+                self.optimizer.newoperations[value.last_guard_index] = new_guard_op
                 # hack hack hack.  Change the guard_opnum on
-                # old_guard_op.descr so that when resuming,
+                # new_guard_op.getdescr() so that when resuming,
                 # the operation is not skipped by pyjitpl.py.
-                descr = old_guard_op.descr
+                descr = new_guard_op.getdescr()
                 assert isinstance(descr, compile.ResumeGuardDescr)
                 descr.guard_opnum = rop.GUARD_NONNULL_CLASS
                 emit_operation = False
@@ -239,18 +243,18 @@
         self.optimizer.exception_might_have_happened = False
 
     def optimize_CALL_LOOPINVARIANT(self, op):
-        funcvalue = self.getvalue(op.args[0])
+        funcvalue = self.getvalue(op.getarg(0))
         if not funcvalue.is_constant():
             self.emit_operation(op)
             return
-        key = make_hashable_int(op.args[0].getint())
+        key = make_hashable_int(op.getarg(0).getint())
         resvalue = self.optimizer.loop_invariant_results.get(key, None)
         if resvalue is not None:
             self.make_equal_to(op.result, resvalue)
             return
         # change the op to be a normal call, from the backend's point of view
         # there is no reason to have a separate operation for this
-        op.opnum = rop.CALL
+        op = op.copy_and_change(rop.CALL)
         self.emit_operation(op)
         resvalue = self.getvalue(op.result)
         self.optimizer.loop_invariant_results[key] = resvalue
@@ -265,17 +269,17 @@
             self.emit_operation(op)
 
     def optimize_INT_IS_TRUE(self, op):
-        if self.getvalue(op.args[0]) in self.optimizer.bool_boxes:
-            self.make_equal_to(op.result, self.getvalue(op.args[0]))
+        if self.getvalue(op.getarg(0)) in self.optimizer.bool_boxes:
+            self.make_equal_to(op.result, self.getvalue(op.getarg(0)))
             return
-        self._optimize_nullness(op, op.args[0], True)
+        self._optimize_nullness(op, op.getarg(0), True)
 
     def optimize_INT_IS_ZERO(self, op):
-        self._optimize_nullness(op, op.args[0], False)
+        self._optimize_nullness(op, op.getarg(0), False)
 
     def _optimize_oois_ooisnot(self, op, expect_isnot):
-        value0 = self.getvalue(op.args[0])
-        value1 = self.getvalue(op.args[1])
+        value0 = self.getvalue(op.getarg(0))
+        value1 = self.getvalue(op.getarg(1))
         if value0.is_virtual():
             if value1.is_virtual():
                 intres = (value0 is value1) ^ expect_isnot
@@ -285,9 +289,9 @@
         elif value1.is_virtual():
             self.make_constant_int(op.result, expect_isnot)
         elif value1.is_null():
-            self._optimize_nullness(op, op.args[0], expect_isnot)
+            self._optimize_nullness(op, op.getarg(0), expect_isnot)
         elif value0.is_null():
-            self._optimize_nullness(op, op.args[1], expect_isnot)
+            self._optimize_nullness(op, op.getarg(1), expect_isnot)
         elif value0 is value1:
             self.make_constant_int(op.result, not expect_isnot)
         else:
@@ -307,17 +311,17 @@
     def optimize_PTR_EQ(self, op):
         self._optimize_oois_ooisnot(op, False)
 
-    def optimize_INSTANCEOF(self, op):
-        value = self.getvalue(op.args[0])
-        realclassbox = value.get_constant_class(self.optimizer.cpu)
-        if realclassbox is not None:
-            checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr)
-            result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu,
-                                                      realclassbox, 
-                                                      checkclassbox)
-            self.make_constant_int(op.result, result)
-            return
-        self.emit_operation(op)
+##    def optimize_INSTANCEOF(self, op):
+##        value = self.getvalue(op.args[0])
+##        realclassbox = value.get_constant_class(self.optimizer.cpu)
+##        if realclassbox is not None:
+##            checkclassbox = self.optimizer.cpu.typedescr2classbox(op.descr)
+##            result = self.optimizer.cpu.ts.subclassOf(self.optimizer.cpu,
+##                                                      realclassbox, 
+##                                                      checkclassbox)
+##            self.make_constant_int(op.result, result)
+##            return
+##        self.emit_operation(op)
 
 optimize_ops = _findall(OptRewrite, 'optimize_')
         

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeopt/virtualize.py	Thu Sep 30 00:16:20 2010
@@ -188,12 +188,12 @@
                 itemboxes.append(itemvalue.get_key_box())
             modifier.register_virtual_fields(self.keybox, itemboxes)
             for itemvalue in self._items:
-                if itemvalue is not self.constvalue:
-                    itemvalue.get_args_for_fail(modifier)
+                itemvalue.get_args_for_fail(modifier)
 
     def _make_virtual(self, modifier):
         return modifier.make_varray(self.arraydescr)
 
+
 class __extend__(SpecNode):
     def setup_virtual_node(self, optimizer, box, newinputargs):
         raise NotImplementedError
@@ -258,7 +258,7 @@
     def setup(self, virtuals):
         if not virtuals:
             return
-        
+
         inputargs = self.optimizer.loop.inputargs
         specnodes = self.optimizer.loop.token.specnodes
         assert len(inputargs) == len(specnodes)
@@ -285,18 +285,18 @@
     def optimize_JUMP(self, op):
         orgop = self.optimizer.loop.operations[-1]
         exitargs = []
-        target_loop_token = orgop.descr
+        target_loop_token = orgop.getdescr()
         assert isinstance(target_loop_token, LoopToken)
         specnodes = target_loop_token.specnodes
-        assert len(op.args) == len(specnodes)
+        assert op.numargs() == len(specnodes)
         for i in range(len(specnodes)):
-            value = self.getvalue(op.args[i])
+            value = self.getvalue(op.getarg(i))
             specnodes[i].teardown_virtual_node(self, value, exitargs)
-        op.args = exitargs[:]
+        op = op.copy_and_change(op.getopnum(), args=exitargs[:])
         self.emit_operation(op)
 
     def optimize_VIRTUAL_REF(self, op):
-        indexbox = op.args[1]
+        indexbox = op.getarg(1)
         #
         # get some constants
         vrefinfo = self.optimizer.metainterp_sd.virtualref_info
@@ -322,17 +322,16 @@
         # typically a PyPy PyFrame, and now is the end of its execution, so
         # forcing it now does not have catastrophic effects.
         vrefinfo = self.optimizer.metainterp_sd.virtualref_info
-        # op.args[1] should really never point to null here
+        # op.getarg(1) should really never point to null here
         # - set 'forced' to point to the real object
-        op1 = ResOperation(rop.SETFIELD_GC, op.args, None,
-                          descr = vrefinfo.descr_forced)
-        self.optimize_SETFIELD_GC(op1)
+        seo = self.optimizer.send_extra_operation
+        seo(ResOperation(rop.SETFIELD_GC, op.getarglist(), None,
+                         descr = vrefinfo.descr_forced))
         # - set 'virtual_token' to TOKEN_NONE
-        args = [op.args[0], ConstInt(vrefinfo.TOKEN_NONE)]
-        op1 = ResOperation(rop.SETFIELD_GC, args, None,
-                      descr = vrefinfo.descr_virtual_token)
-        self.optimize_SETFIELD_GC(op1)
-        # Note that in some cases the virtual in op.args[1] has been forced
+        args = [op.getarg(0), ConstInt(vrefinfo.TOKEN_NONE)]
+        seo(ResOperation(rop.SETFIELD_GC, args, None,
+                         descr = vrefinfo.descr_virtual_token))
+        # Note that in some cases the virtual in op.getarg(1) has been forced
         # already.  This is fine.  In that case, and *if* a residual
         # CALL_MAY_FORCE suddenly turns out to access it, then it will
         # trigger a ResumeGuardForcedDescr.handle_async_forcing() which
@@ -340,11 +339,11 @@
         # was already forced).
 
     def optimize_GETFIELD_GC(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         if value.is_virtual():
             # optimizefindnode should ensure that fieldvalue is found
             assert isinstance(value, AbstractVirtualValue)
-            fieldvalue = value.getfield(op.descr, None)
+            fieldvalue = value.getfield(op.getdescr(), None)
             assert fieldvalue is not None
             self.make_equal_to(op.result, fieldvalue)
         else:
@@ -357,36 +356,36 @@
     optimize_GETFIELD_GC_PURE = optimize_GETFIELD_GC
 
     def optimize_SETFIELD_GC(self, op):
-        value = self.getvalue(op.args[0])
-        fieldvalue = self.getvalue(op.args[1])
+        value = self.getvalue(op.getarg(0))
         if value.is_virtual():
-            value.setfield(op.descr, fieldvalue)
+            fieldvalue = self.getvalue(op.getarg(1))
+            value.setfield(op.getdescr(), fieldvalue)
         else:
             value.ensure_nonnull()
             ###self.heap_op_optimizer.optimize_SETFIELD_GC(op, value, fieldvalue)
             self.emit_operation(op)
 
     def optimize_NEW_WITH_VTABLE(self, op):
-        self.make_virtual(op.args[0], op.result, op)
+        self.make_virtual(op.getarg(0), op.result, op)
 
     def optimize_NEW(self, op):
-        self.make_vstruct(op.descr, op.result, op)
+        self.make_vstruct(op.getdescr(), op.result, op)
 
     def optimize_NEW_ARRAY(self, op):
-        sizebox = self.get_constant_box(op.args[0])
+        sizebox = self.get_constant_box(op.getarg(0))
         if sizebox is not None:
             # if the original 'op' did not have a ConstInt as argument,
             # build a new one with the ConstInt argument
-            if not isinstance(op.args[0], ConstInt):
+            if not isinstance(op.getarg(0), ConstInt):
                 op = ResOperation(rop.NEW_ARRAY, [sizebox], op.result,
-                                  descr=op.descr)
-            self.make_varray(op.descr, sizebox.getint(), op.result, op)
+                                  descr=op.getdescr())
+            self.make_varray(op.getdescr(), sizebox.getint(), op.result, op)
         else:
-            ###self.optimize_default(op)
+            self.getvalue(op.result).ensure_nonnull()
             self.emit_operation(op)
 
     def optimize_ARRAYLEN_GC(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         if value.is_virtual():
             self.make_constant_int(op.result, value.getlength())
         else:
@@ -395,9 +394,9 @@
             self.emit_operation(op)
 
     def optimize_GETARRAYITEM_GC(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         if value.is_virtual():
-            indexbox = self.get_constant_box(op.args[1])
+            indexbox = self.get_constant_box(op.getarg(1))
             if indexbox is not None:
                 itemvalue = value.getitem(indexbox.getint())
                 self.make_equal_to(op.result, itemvalue)
@@ -411,41 +410,18 @@
     optimize_GETARRAYITEM_GC_PURE = optimize_GETARRAYITEM_GC
 
     def optimize_SETARRAYITEM_GC(self, op):
-        value = self.getvalue(op.args[0])
+        value = self.getvalue(op.getarg(0))
         if value.is_virtual():
-            indexbox = self.get_constant_box(op.args[1])
+            indexbox = self.get_constant_box(op.getarg(1))
             if indexbox is not None:
-                value.setitem(indexbox.getint(), self.getvalue(op.args[2]))
+                value.setitem(indexbox.getint(), self.getvalue(op.getarg(2)))
                 return
         value.ensure_nonnull()
         ###self.heap_op_optimizer.optimize_SETARRAYITEM_GC(op, value, fieldvalue)
         self.emit_operation(op)
 
-    def optimize_ARRAYCOPY(self, op):
-        source_value = self.getvalue(op.args[2])
-        dest_value = self.getvalue(op.args[3])
-        source_start_box = self.get_constant_box(op.args[4])
-        dest_start_box = self.get_constant_box(op.args[5])
-        length = self.get_constant_box(op.args[6])
-        if (source_value.is_virtual() and source_start_box and dest_start_box
-            and length and dest_value.is_virtual()):
-            # XXX optimize the case where dest value is not virtual,
-            #     but we still can avoid a mess
-            source_start = source_start_box.getint()
-            dest_start = dest_start_box.getint()
-            for index in range(length.getint()):
-                val = source_value.getitem(index + source_start)
-                dest_value.setitem(index + dest_start, val)
-            return
-        if length and length.getint() == 0:
-            return # 0-length arraycopy
-        descr = op.args[0]
-        assert isinstance(descr, AbstractDescr)
-        self.emit_operation(ResOperation(rop.CALL, op.args[1:], op.result,
-                                         descr))
-
     def propagate_forward(self, op):
-        opnum = op.opnum
+        opnum = op.getopnum()
         for value, func in optimize_ops:
             if opnum == value:
                 func(self, op)

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/optimizeutil.py	Thu Sep 30 00:16:20 2010
@@ -14,6 +14,11 @@
 
 def _findall(Class, name_prefix):
     result = []
+    for name in dir(Class):
+        if name.startswith(name_prefix):
+            opname = name[len(name_prefix):]
+            if opname.isupper():
+                assert hasattr(resoperation.rop, opname)
     for value, name in resoperation.opname.items():
         if hasattr(Class, name_prefix + name):
             result.append((value, getattr(Class, name_prefix + name)))

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/pyjitpl.py	Thu Sep 30 00:16:20 2010
@@ -159,7 +159,7 @@
         if got_type == history.INT:
             self.registers_i[target_index] = resultbox
         elif got_type == history.REF:
-            #debug_print(' ->', 
+            #debug_print(' ->',
             #            llmemory.cast_ptr_to_adr(resultbox.getref_base()))
             self.registers_r[target_index] = resultbox
         elif got_type == history.FLOAT:
@@ -421,14 +421,6 @@
     def opimpl_arraylen_gc(self, arraybox, arraydescr):
         return self.execute_with_descr(rop.ARRAYLEN_GC, arraydescr, arraybox)
 
-    @arguments("descr", "box", "box", "box", "box", "box", "box", "descr")
-    def opimpl_arraycopy(self, calldescr, fnptr, sourcebox, destbox,
-                         source_startbox, dest_startbox, lengthbox,
-                         arraydescr):
-        self.execute_with_descr(rop.ARRAYCOPY, arraydescr, calldescr, fnptr,
-                                sourcebox, destbox, source_startbox,
-                                dest_startbox, lengthbox)
-
     @arguments("orgpc", "box", "descr", "box")
     def opimpl_check_neg_index(self, orgpc, arraybox, arraydescr, indexbox):
         negbox = self.metainterp.execute_and_record(
@@ -446,7 +438,7 @@
     def opimpl_newlist(self, structdescr, lengthdescr, itemsdescr, arraydescr,
                        sizebox):
         sbox = self.metainterp.execute_and_record(rop.NEW, structdescr)
-        self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr, 
+        self.metainterp.execute_and_record(rop.SETFIELD_GC, lengthdescr,
                                            sbox, sizebox)
         abox = self.metainterp.execute_and_record(rop.NEW_ARRAY, arraydescr,
                                                   sizebox)
@@ -1004,7 +996,7 @@
             resumedescr = compile.ResumeGuardDescr(metainterp_sd,
                                                    original_greenkey)
         guard_op = metainterp.history.record(opnum, moreargs, None,
-                                             descr=resumedescr)       
+                                             descr=resumedescr)
         virtualizable_boxes = None
         if metainterp.jitdriver_sd.virtualizable_info is not None:
             virtualizable_boxes = metainterp.virtualizable_boxes
@@ -1463,7 +1455,7 @@
             resbox = self._record_helper_nonpure_varargs(opnum, resbox, descr, argboxes)
         return resbox
 
-    def _record_helper_pure(self, opnum, resbox, descr, *argboxes): 
+    def _record_helper_pure(self, opnum, resbox, descr, *argboxes):
         canfold = self._all_constants(*argboxes)
         if canfold:
             resbox = resbox.constbox()       # ensure it is a Const
@@ -1472,7 +1464,7 @@
             resbox = resbox.nonconstbox()    # ensure it is a Box
             return self._record_helper_nonpure_varargs(opnum, resbox, descr, list(argboxes))
 
-    def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes): 
+    def _record_helper_pure_varargs(self, opnum, resbox, descr, argboxes):
         canfold = self._all_constants_varargs(argboxes)
         if canfold:
             resbox = resbox.constbox()       # ensure it is a Const
@@ -1485,7 +1477,7 @@
         assert resbox is None or isinstance(resbox, Box)
         # record the operation
         profiler = self.staticdata.profiler
-        profiler.count_ops(opnum, RECORDED_OPS)        
+        profiler.count_ops(opnum, RECORDED_OPS)
         op = self.history.record(opnum, argboxes, resbox, descr)
         self.attach_debug_info(op)
         return resbox
@@ -1667,7 +1659,7 @@
 
         # Search in current_merge_points for original_boxes with compatible
         # green keys, representing the beginning of the same loop as the one
-        # we end now. 
+        # we end now.
 
         num_green_args = self.jitdriver_sd.num_green_args
         for j in range(len(self.current_merge_points)-1, -1, -1):
@@ -1922,7 +1914,7 @@
         vrefbox = self.virtualref_boxes[i+1]
         # record VIRTUAL_REF_FINISH just before the current CALL_MAY_FORCE
         call_may_force_op = self.history.operations.pop()
-        assert call_may_force_op.opnum == rop.CALL_MAY_FORCE
+        assert call_may_force_op.getopnum() == rop.CALL_MAY_FORCE
         self.history.record(rop.VIRTUAL_REF_FINISH,
                             [vrefbox, virtualbox], None)
         self.history.operations.append(call_may_force_op)
@@ -2088,10 +2080,10 @@
         """ Patch a CALL into a CALL_PURE.
         """
         op = self.history.operations[-1]
-        assert op.opnum == rop.CALL
+        assert op.getopnum() == rop.CALL
         resbox_as_const = resbox.constbox()
-        for arg in op.args:
-            if not isinstance(arg, Const):
+        for i in range(op.numargs()):
+            if not isinstance(op.getarg(i), Const):
                 break
         else:
             # all-constants: remove the CALL operation now and propagate a
@@ -2100,8 +2092,8 @@
             return resbox_as_const
         # not all constants (so far): turn CALL into CALL_PURE, which might
         # be either removed later by optimizeopt or turned back into CALL.
-        op.opnum = rop.CALL_PURE
-        op.args = [resbox_as_const] + op.args
+        newop = op.copy_and_change(rop.CALL_PURE, args=[resbox_as_const]+op.getarglist())
+        self.history.operations[-1] = newop
         return resbox
 
     def direct_assembler_call(self, targetjitdriver_sd):
@@ -2109,10 +2101,11 @@
         patching the CALL_MAY_FORCE that occurred just now.
         """
         op = self.history.operations.pop()
-        assert op.opnum == rop.CALL_MAY_FORCE
+        assert op.getopnum() == rop.CALL_MAY_FORCE
         num_green_args = targetjitdriver_sd.num_green_args
-        greenargs = op.args[1:num_green_args+1]
-        args = op.args[num_green_args+1:]
+        arglist = op.getarglist()
+        greenargs = arglist[1:num_green_args+1]
+        args = arglist[num_green_args+1:]
         assert len(args) == targetjitdriver_sd.num_red_args
         vinfo = targetjitdriver_sd.virtualizable_info
         if vinfo is not None:
@@ -2122,9 +2115,7 @@
             # ^^^ and not "+=", which makes 'args' a resizable list
         warmrunnerstate = targetjitdriver_sd.warmstate
         token = warmrunnerstate.get_assembler_token(greenargs, args)
-        op.opnum = rop.CALL_ASSEMBLER
-        op.args = args
-        op.descr = token
+        op = op.copy_and_change(rop.CALL_ASSEMBLER, args=args, descr=token)
         self.history.operations.append(op)
 
 # ____________________________________________________________

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/resoperation.py	Thu Sep 30 00:16:20 2010
@@ -1,42 +1,90 @@
 from pypy.rlib.objectmodel import we_are_translated
 from pypy.rlib.debug import make_sure_not_resized
 
-class ResOperation(object):
-    """The central ResOperation class, representing one operation."""
+def ResOperation(opnum, args, result, descr=None):
+    cls = opclasses[opnum]
+    op = cls(result)
+    op.initarglist(args)
+    if descr is not None:
+        assert isinstance(op, ResOpWithDescr)
+        op.setdescr(descr)
+    return op
+
 
-    # for 'guard_*'
-    fail_args = None
+class AbstractResOp(object):
+    """The central ResOperation class, representing one operation."""
 
     # debug
     name = ""
     pc = 0
 
-    def __init__(self, opnum, args, result, descr=None):
-        make_sure_not_resized(args)
-        assert isinstance(opnum, int)
-        self.opnum = opnum
-        self.args = list(args)
-        make_sure_not_resized(self.args)
-        assert not isinstance(result, list)
+    def __init__(self, result):
         self.result = result
-        self.setdescr(descr)
+
+    # methods implemented by each concrete class
+    # ------------------------------------------
+    
+    def getopnum(self):
+        raise NotImplementedError
+
+    # methods implemented by the arity mixins
+    # ---------------------------------------
+
+    def initarglist(self, args):
+        "This is supposed to be called only just after the ResOp has been created"
+        raise NotImplementedError
+
+    def getarglist(self):
+        raise NotImplementedError
+
+    def getarg(self, i):
+        raise NotImplementedError
+
+    def setarg(self, i, box):
+        raise NotImplementedError
+
+    def numargs(self):
+        raise NotImplementedError
+
+
+    # methods implemented by GuardResOp
+    # ---------------------------------
+
+    def getfailargs(self):
+        return None
+
+    def setfailargs(self, fail_args):
+        raise NotImplementedError
+
+    # methods implemented by ResOpWithDescr
+    # -------------------------------------
+
+    def getdescr(self):
+        return None
 
     def setdescr(self, descr):
-        # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt
-        # instance provided by the backend holding details about the type
-        # of the operation.  It must inherit from AbstractDescr.  The
-        # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(),
-        # cpu.calldescrof(), and cpu.typedescrof().
-        from pypy.jit.metainterp.history import check_descr
-        check_descr(descr)
-        self.descr = descr
+        raise NotImplementedError
+
+    # common methods
+    # --------------
+
+    def copy_and_change(self, opnum, args=None, result=None, descr=None):
+        "shallow copy: the returned operation is meant to be used in place of self"
+        if args is None:
+            args = self.getarglist()
+        if result is None:
+            result = self.result
+        if descr is None:
+            descr = self.getdescr()
+        newop = ResOperation(opnum, args, result, descr)
+        return newop
 
     def clone(self):
-        descr = self.descr
+        args = self.getarglist()
+        descr = self.getdescr()
         if descr is not None:
             descr = descr.clone_if_mutable()
-        op = ResOperation(self.opnum, self.args, self.result, descr)
-        op.fail_args = self.fail_args
+        op = ResOperation(self.getopnum(), args, self.result, descr)
         if not we_are_translated():
             op.name = self.name
             op.pc = self.pc
@@ -55,82 +103,271 @@
             prefix = "%s:%s   " % (self.name, self.pc)
         else:
             prefix = ""
-        if self.descr is None or we_are_translated():
+        args = self.getarglist()
+        descr = self.getdescr()
+        if descr is None or we_are_translated():
             return '%s%s%s(%s)' % (prefix, sres, self.getopname(),
-                                 ', '.join([str(a) for a in self.args]))
+                                 ', '.join([str(a) for a in args]))
         else:
             return '%s%s%s(%s, descr=%r)' % (prefix, sres, self.getopname(),
-                            ', '.join([str(a) for a in self.args]), self.descr)
+                                             ', '.join([str(a) for a in args]), descr)
 
     def getopname(self):
         try:
-            return opname[self.opnum].lower()
+            return opname[self.getopnum()].lower()
         except KeyError:
-            return '<%d>' % self.opnum
+            return '<%d>' % self.getopnum()
 
     def is_guard(self):
-        return rop._GUARD_FIRST <= self.opnum <= rop._GUARD_LAST
+        return rop._GUARD_FIRST <= self.getopnum() <= rop._GUARD_LAST
 
     def is_foldable_guard(self):
-        return rop._GUARD_FOLDABLE_FIRST <= self.opnum <= rop._GUARD_FOLDABLE_LAST
+        return rop._GUARD_FOLDABLE_FIRST <= self.getopnum() <= rop._GUARD_FOLDABLE_LAST
 
     def is_guard_exception(self):
-        return (self.opnum == rop.GUARD_EXCEPTION or
-                self.opnum == rop.GUARD_NO_EXCEPTION)
+        return (self.getopnum() == rop.GUARD_EXCEPTION or
+                self.getopnum() == rop.GUARD_NO_EXCEPTION)
 
     def is_guard_overflow(self):
-        return (self.opnum == rop.GUARD_OVERFLOW or
-                self.opnum == rop.GUARD_NO_OVERFLOW)
+        return (self.getopnum() == rop.GUARD_OVERFLOW or
+                self.getopnum() == rop.GUARD_NO_OVERFLOW)
 
     def is_always_pure(self):
-        return rop._ALWAYS_PURE_FIRST <= self.opnum <= rop._ALWAYS_PURE_LAST
+        return rop._ALWAYS_PURE_FIRST <= self.getopnum() <= rop._ALWAYS_PURE_LAST
 
     def has_no_side_effect(self):
-        return rop._NOSIDEEFFECT_FIRST <= self.opnum <= rop._NOSIDEEFFECT_LAST
+        return rop._NOSIDEEFFECT_FIRST <= self.getopnum() <= rop._NOSIDEEFFECT_LAST
 
     def can_raise(self):
-        return rop._CANRAISE_FIRST <= self.opnum <= rop._CANRAISE_LAST
+        return rop._CANRAISE_FIRST <= self.getopnum() <= rop._CANRAISE_LAST
 
     def is_ovf(self):
-        return rop._OVF_FIRST <= self.opnum <= rop._OVF_LAST
+        return rop._OVF_FIRST <= self.getopnum() <= rop._OVF_LAST
 
     def is_comparison(self):
         return self.is_always_pure() and self.returns_bool_result()
 
     def is_final(self):
-        return rop._FINAL_FIRST <= self.opnum <= rop._FINAL_LAST
+        return rop._FINAL_FIRST <= self.getopnum() <= rop._FINAL_LAST
 
     def returns_bool_result(self):
-        opnum = self.opnum
+        opnum = self.getopnum()
         if we_are_translated():
             assert opnum >= 0
         elif opnum < 0:
             return False     # for tests
         return opboolresult[opnum]
 
+
+# ===================
+# Top of the hierarchy
+# ===================
+
+class PlainResOp(AbstractResOp):
+    pass
+
+class ResOpWithDescr(AbstractResOp):
+
+    _descr = None
+
+    def getdescr(self):
+        return self._descr
+
+    def setdescr(self, descr):
+        # for 'call', 'new', 'getfield_gc'...: the descr is a prebuilt
+        # instance provided by the backend holding details about the type
+        # of the operation.  It must inherit from AbstractDescr.  The
+        # backend provides it with cpu.fielddescrof(), cpu.arraydescrof(),
+        # cpu.calldescrof(), and cpu.typedescrof().
+        from pypy.jit.metainterp.history import check_descr
+        check_descr(descr)
+        self._descr = descr
+
+class GuardResOp(ResOpWithDescr):
+
+    _fail_args = None
+
+    def getfailargs(self):
+        return self._fail_args
+
+    def setfailargs(self, fail_args):
+        self._fail_args = fail_args
+
+    def copy_and_change(self, opnum, args=None, result=None, descr=None):
+        newop = AbstractResOp.copy_and_change(self, opnum, args, result, descr)
+        newop.setfailargs(self.getfailargs())
+        return newop
+
+    def clone(self):
+        newop = AbstractResOp.clone(self)
+        newop.setfailargs(self.getfailargs())
+        return newop
+
+
+# ============
+# arity mixins
+# ============
+
+class NullaryOp(object):
+    _mixin_ = True
+
+    def initarglist(self, args):
+        assert len(args) == 0
+
+    def getarglist(self):
+        return []
+
+    def numargs(self):
+        return 0
+
+    def getarg(self, i):
+        raise IndexError
+    
+    def setarg(self, i, box):
+        raise IndexError
+
+
+class UnaryOp(object):
+    _mixin_ = True
+    _arg0 = None
+
+    def initarglist(self, args):
+        assert len(args) == 1
+        self._arg0, = args
+
+    def getarglist(self):
+        return [self._arg0]
+
+    def numargs(self):
+        return 1
+
+    def getarg(self, i):
+        if i == 0:
+            return self._arg0
+        else:
+            raise IndexError
+    
+    def setarg(self, i, box):
+        if i == 0:
+            self._arg0 = box
+        else:
+            raise IndexError
+
+
+class BinaryOp(object):
+    _mixin_ = True
+    _arg0 = None
+    _arg1 = None
+
+    def initarglist(self, args):
+        assert len(args) == 2
+        self._arg0, self._arg1 = args
+
+    def getarglist(self):
+        return [self._arg0, self._arg1]
+
+    def numargs(self):
+        return 2
+
+    def getarg(self, i):
+        if i == 0:
+            return self._arg0
+        elif i == 1:
+            return self._arg1
+        else:
+            raise IndexError
+    
+    def setarg(self, i, box):
+        if i == 0:
+            self._arg0 = box
+        elif i == 1:
+            self._arg1 = box
+        else:
+            raise IndexError
+
+    def getarglist(self):
+        return [self._arg0, self._arg1]
+
+
+class TernaryOp(object):
+    _mixin_ = True
+    _arg0 = None
+    _arg1 = None
+    _arg2 = None
+
+    def initarglist(self, args):
+        assert len(args) == 3
+        self._arg0, self._arg1, self._arg2 = args
+
+    def getarglist(self):
+        return [self._arg0, self._arg1, self._arg2]
+
+    def numargs(self):
+        return 3
+
+    def getarg(self, i):
+        if i == 0:
+            return self._arg0
+        elif i == 1:
+            return self._arg1
+        elif i == 2:
+            return self._arg2
+        else:
+            raise IndexError
+    
+    def setarg(self, i, box):
+        if i == 0:
+            self._arg0 = box
+        elif i == 1:
+            self._arg1 = box
+        elif i == 2:
+            self._arg2 = box
+        else:
+            raise IndexError
+
+class N_aryOp(object):
+    _mixin_ = True
+    _args = None
+
+    def initarglist(self, args):
+        self._args = args
+
+    def getarglist(self):
+        return self._args
+
+    def numargs(self):
+        return len(self._args)
+
+    def getarg(self, i):
+        return self._args[i]
+    
+    def setarg(self, i, box):
+        self._args[i] = box
+
+
 # ____________________________________________________________
 
 _oplist = [
     '_FINAL_FIRST',
-    'JUMP',
-    'FINISH',
+    'JUMP/*d',
+    'FINISH/*d',
     '_FINAL_LAST',
 
     '_GUARD_FIRST',
     '_GUARD_FOLDABLE_FIRST',
-    'GUARD_TRUE',
-    'GUARD_FALSE',
-    'GUARD_VALUE',
-    'GUARD_CLASS',
-    'GUARD_NONNULL',
-    'GUARD_ISNULL',
-    'GUARD_NONNULL_CLASS',
+    'GUARD_TRUE/1d',
+    'GUARD_FALSE/1d',
+    'GUARD_VALUE/2d',
+    'GUARD_CLASS/2d',
+    'GUARD_NONNULL/1d',
+    'GUARD_ISNULL/1d',
+    'GUARD_NONNULL_CLASS/2d',
     '_GUARD_FOLDABLE_LAST',
-    'GUARD_NO_EXCEPTION',
-    'GUARD_EXCEPTION',
-    'GUARD_NO_OVERFLOW',
-    'GUARD_OVERFLOW',
-    'GUARD_NOT_FORCED',
+    'GUARD_NO_EXCEPTION/0d',
+    'GUARD_EXCEPTION/1d',
+    'GUARD_NO_OVERFLOW/0d',
+    'GUARD_OVERFLOW/0d',
+    'GUARD_NOT_FORCED/0d',
     '_GUARD_LAST', # ----- end of guard operations -----
 
     '_NOSIDEEFFECT_FIRST', # ----- start of no_side_effect operations -----
@@ -213,24 +450,25 @@
     'SETARRAYITEM_RAW/3d',
     'SETFIELD_GC/2d',
     'SETFIELD_RAW/2d',
-    'ARRAYCOPY/7d',      # removed before it's passed to the backend
     'NEWSTR/1',
     'STRSETITEM/3',
     'UNICODESETITEM/3',
     'NEWUNICODE/1',
-    #'RUNTIMENEW/1',     # ootype operation
-    'COND_CALL_GC_WB',  # [objptr, newvalue]   (for the write barrier)
+    #'RUNTIMENEW/1',     # ootype operation    
+    'COND_CALL_GC_WB/1d',  # [objptr]   (for the write barrier)
     'DEBUG_MERGE_POINT/1',      # debugging only
     'VIRTUAL_REF_FINISH/2',   # removed before it's passed to the backend
+    'COPYSTRCONTENT/5',       # src, dst, srcstart, dststart, length
+    'COPYUNICODECONTENT/5',
 
     '_CANRAISE_FIRST', # ----- start of can_raise operations -----
-    'CALL',
-    'CALL_ASSEMBLER',
-    'CALL_MAY_FORCE',
-    'CALL_LOOPINVARIANT',
+    'CALL/*d',
+    'CALL_ASSEMBLER/*d',
+    'CALL_MAY_FORCE/*d',
+    'CALL_LOOPINVARIANT/*d',
     #'OOSEND',                     # ootype operation
     #'OOSEND_PURE',                # ootype operation
-    'CALL_PURE',             # removed before it's passed to the backend
+    'CALL_PURE/*d',             # removed before it's passed to the backend
                              # CALL_PURE(result, func, arg_1,..,arg_n)
     '_CANRAISE_LAST', # ----- end of can_raise operations -----
 
@@ -247,6 +485,7 @@
 class rop(object):
     pass
 
+opclasses = []   # mapping numbers to the concrete ResOp class
 opname = {}      # mapping numbers to the original names, for debugging
 oparity = []     # mapping numbers to the arity of the operation or -1
 opwithdescr = [] # mapping numbers to a flag "takes a descr"
@@ -261,16 +500,62 @@
             name, arity = name.split('/')
             withdescr = 'd' in arity
             boolresult = 'b' in arity
-            arity = int(arity.rstrip('db'))
+            arity = arity.rstrip('db')
+            if arity == '*':
+                arity = -1
+            else:
+                arity = int(arity)
         else:
             arity, withdescr, boolresult = -1, True, False       # default
         setattr(rop, name, i)
         if not name.startswith('_'):
             opname[i] = name
+            cls = create_class_for_op(name, i, arity, withdescr)
+        else:
+            cls = None
+        opclasses.append(cls)
         oparity.append(arity)
         opwithdescr.append(withdescr)
         opboolresult.append(boolresult)
-    assert len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist)
+    assert len(opclasses)==len(oparity)==len(opwithdescr)==len(opboolresult)==len(_oplist)
+
+def get_base_class(mixin, base):
+    try:
+        return get_base_class.cache[(mixin, base)]
+    except KeyError:
+        arity_name = mixin.__name__[:-2]  # remove the trailing "Op"
+        name = arity_name + base.__name__ # something like BinaryPlainResOp
+        bases = (mixin, base)
+        cls = type(name, bases, {})
+        get_base_class.cache[(mixin, base)] = cls
+        return cls
+get_base_class.cache = {}
+
+def create_class_for_op(name, opnum, arity, withdescr):
+    arity2mixin = {
+        0: NullaryOp,
+        1: UnaryOp,
+        2: BinaryOp,
+        3: TernaryOp
+        }
+    
+    is_guard = name.startswith('GUARD')
+    if is_guard:
+        assert withdescr
+        baseclass = GuardResOp
+    elif withdescr:
+        baseclass = ResOpWithDescr
+    else:
+        baseclass = PlainResOp
+    mixin = arity2mixin.get(arity, N_aryOp)
+
+    def getopnum(self):
+        return opnum
+
+    cls_name = '%s_OP' % name
+    bases = (get_base_class(mixin, baseclass),)
+    dic = {'getopnum': getopnum}
+    return type(cls_name, bases, dic)
 
 setup(__name__ == '__main__')   # print out the table when run directly
 del _oplist

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/resume.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/resume.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/resume.py	Thu Sep 30 00:16:20 2010
@@ -4,10 +4,12 @@
 from pypy.jit.metainterp.history import INT, REF, FLOAT, HOLE
 from pypy.jit.metainterp.resoperation import rop
 from pypy.jit.metainterp import jitprof
-from pypy.rpython.lltypesystem import lltype, llmemory, rffi
+from pypy.jit.codewriter.effectinfo import EffectInfo, callinfo_for_oopspec
+from pypy.jit.codewriter.effectinfo import funcptr_for_oopspec
+from pypy.rpython.lltypesystem import lltype, llmemory, rffi, rstr
 from pypy.rlib import rarithmetic
 from pypy.rlib.objectmodel import we_are_translated, specialize
-from pypy.rlib.debug import have_debug_prints
+from pypy.rlib.debug import have_debug_prints, ll_assert
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
 
 # Logic to encode the chain of frames and the state of the boxes at a
@@ -253,6 +255,15 @@
     def make_varray(self, arraydescr):
         return VArrayInfo(arraydescr)
 
+    def make_vstrplain(self):
+        return VStrPlainInfo()
+
+    def make_vstrconcat(self):
+        return VStrConcatInfo()
+
+    def make_vstrslice(self):
+        return VStrSliceInfo()
+
     def register_virtual_fields(self, virtualbox, fieldboxes):
         tagged = self.liveboxes_from_env.get(virtualbox, UNASSIGNEDVIRTUAL)
         self.liveboxes[virtualbox] = tagged
@@ -397,9 +408,7 @@
 
 
 class AbstractVirtualInfo(object):
-    #def allocate(self, metainterp):
-    #    raise NotImplementedError
-    #def setfields(self, decoder, struct):
+    #def allocate(self, decoder, index):
     #    raise NotImplementedError
     def equals(self, fieldnums):
         return tagged_list_eq(self.fieldnums, fieldnums)
@@ -419,6 +428,7 @@
         for i in range(len(self.fielddescrs)):
             descr = self.fielddescrs[i]
             decoder.setfield(descr, struct, self.fieldnums[i])
+        return struct
 
     def debug_prints(self):
         assert len(self.fielddescrs) == len(self.fieldnums)
@@ -433,8 +443,10 @@
         self.known_class = known_class
 
     @specialize.argtype(1)
-    def allocate(self, decoder):
-        return decoder.allocate_with_vtable(self.known_class)
+    def allocate(self, decoder, index):
+        struct = decoder.allocate_with_vtable(self.known_class)
+        decoder.virtuals_cache[index] = struct
+        return self.setfields(decoder, struct)
 
     def debug_prints(self):
         debug_print("\tvirtualinfo", self.known_class.repr_rpython())
@@ -446,8 +458,10 @@
         self.typedescr = typedescr
 
     @specialize.argtype(1)
-    def allocate(self, decoder):
-        return decoder.allocate_struct(self.typedescr)
+    def allocate(self, decoder, index):
+        struct = decoder.allocate_struct(self.typedescr)
+        decoder.virtuals_cache[index] = struct
+        return self.setfields(decoder, struct)
 
     def debug_prints(self):
         debug_print("\tvstructinfo", self.typedescr.repr_rpython())
@@ -459,14 +473,11 @@
         #self.fieldnums = ...
 
     @specialize.argtype(1)
-    def allocate(self, decoder):
+    def allocate(self, decoder, index):
         length = len(self.fieldnums)
-        return decoder.allocate_array(self.arraydescr, length)
-
-    @specialize.argtype(1)
-    def setfields(self, decoder, array):
         arraydescr = self.arraydescr
-        length = len(self.fieldnums)
+        array = decoder.allocate_array(arraydescr, length)
+        decoder.virtuals_cache[index] = array
         # NB. the check for the kind of array elements is moved out of the loop
         if arraydescr.is_array_of_pointers():
             for i in range(length):
@@ -480,12 +491,65 @@
             for i in range(length):
                 decoder.setarrayitem_int(arraydescr, array, i,
                                          self.fieldnums[i])
+        return array
 
     def debug_prints(self):
         debug_print("\tvarrayinfo", self.arraydescr)
         for i in self.fieldnums:
             debug_print("\t\t", str(untag(i)))
 
+
+class VStrPlainInfo(AbstractVirtualInfo):
+    """Stands for the string made out of the characters of all fieldnums."""
+
+    @specialize.argtype(1)
+    def allocate(self, decoder, index):
+        length = len(self.fieldnums)
+        string = decoder.allocate_string(length)
+        decoder.virtuals_cache[index] = string
+        for i in range(length):
+            decoder.string_setitem(string, i, self.fieldnums[i])
+        return string
+
+    def debug_prints(self):
+        debug_print("\tvstrplaininfo length", len(self.fieldnums))
+
+
+class VStrConcatInfo(AbstractVirtualInfo):
+    """Stands for the string made out of the concatenation of two
+    other strings."""
+
+    @specialize.argtype(1)
+    def allocate(self, decoder, index):
+        # xxx for blackhole resuming, this will build all intermediate
+        # strings and throw them away immediately, which is a bit sub-
+        # efficient.  Not sure we care.
+        left, right = self.fieldnums
+        string = decoder.concat_strings(left, right)
+        decoder.virtuals_cache[index] = string
+        return string
+
+    def debug_prints(self):
+        debug_print("\tvstrconcatinfo")
+        for i in self.fieldnums:
+            debug_print("\t\t", str(untag(i)))
+
+
+class VStrSliceInfo(AbstractVirtualInfo):
+    """Stands for the string made out of slicing another string."""
+
+    @specialize.argtype(1)
+    def allocate(self, decoder, index):
+        largerstr, start, length = self.fieldnums
+        string = decoder.slice_string(largerstr, start, length)
+        decoder.virtuals_cache[index] = string
+        return string
+
+    def debug_prints(self):
+        debug_print("\tvstrsliceinfo")
+        for i in self.fieldnums:
+            debug_print("\t\t", str(untag(i)))
+
 # ____________________________________________________________
 
 class AbstractResumeDataReader(object):
@@ -496,7 +560,8 @@
     blackholing and want the best performance.
     """
     _mixin_ = True
-    virtuals = None
+    rd_virtuals = None
+    virtuals_cache = None
     virtual_default = None
 
     def _init(self, cpu, storage):
@@ -508,17 +573,29 @@
         self._prepare_virtuals(storage.rd_virtuals)
         self._prepare_pendingfields(storage.rd_pendingfields)
 
+    def getvirtual(self, index):
+        # Returns the index'th virtual, building it lazily if needed.
+        # Note that this may be called recursively; that's why the
+        # allocate() methods must fill in the cache as soon as they
+        # have the object, before they fill its fields.
+        v = self.virtuals_cache[index]
+        if not v:
+            v = self.rd_virtuals[index].allocate(self, index)
+            ll_assert(v == self.virtuals_cache[index], "resume.py: bad cache")
+        return v
+
+    def force_all_virtuals(self):
+        rd_virtuals = self.rd_virtuals
+        if rd_virtuals:
+            for i in range(len(rd_virtuals)):
+                if rd_virtuals[i] is not None:
+                    self.getvirtual(i)
+        return self.virtuals_cache
+
     def _prepare_virtuals(self, virtuals):
         if virtuals:
-            self.virtuals = [self.virtual_default] * len(virtuals)
-            for i in range(len(virtuals)):
-                vinfo = virtuals[i]
-                if vinfo is not None:
-                    self.virtuals[i] = vinfo.allocate(self)
-            for i in range(len(virtuals)):
-                vinfo = virtuals[i]
-                if vinfo is not None:
-                    vinfo.setfields(self, self.virtuals[i])
+            self.rd_virtuals = virtuals
+            self.virtuals_cache = [self.virtual_default] * len(virtuals)
 
     def _prepare_pendingfields(self, pendingfields):
         if pendingfields is not None:
@@ -622,6 +699,32 @@
         return self.metainterp.execute_and_record(rop.NEW_ARRAY,
                                                   arraydescr, ConstInt(length))
 
+    def allocate_string(self, length):
+        return self.metainterp.execute_and_record(rop.NEWSTR,
+                                                  None, ConstInt(length))
+
+    def string_setitem(self, strbox, index, charnum):
+        charbox = self.decode_box(charnum, INT)
+        self.metainterp.execute_and_record(rop.STRSETITEM, None,
+                                           strbox, ConstInt(index), charbox)
+
+    def concat_strings(self, str1num, str2num):
+        calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_CONCAT)
+        str1box = self.decode_box(str1num, REF)
+        str2box = self.decode_box(str2num, REF)
+        return self.metainterp.execute_and_record_varargs(
+            rop.CALL, [ConstInt(func), str1box, str2box], calldescr)
+
+    def slice_string(self, strnum, startnum, lengthnum):
+        calldescr, func = callinfo_for_oopspec(EffectInfo.OS_STR_SLICE)
+        strbox = self.decode_box(strnum, REF)
+        startbox = self.decode_box(startnum, INT)
+        lengthbox = self.decode_box(lengthnum, INT)
+        stopbox = self.metainterp.execute_and_record(rop.INT_ADD, None,
+                                                     startbox, lengthbox)
+        return self.metainterp.execute_and_record_varargs(
+            rop.CALL, [ConstInt(func), strbox, startbox, stopbox], calldescr)
+
     def setfield(self, descr, structbox, fieldnum):
         if descr.is_pointer_field():
             kind = REF
@@ -663,9 +766,7 @@
             else:
                 box = self.consts[num]
         elif tag == TAGVIRTUAL:
-            virtuals = self.virtuals
-            assert virtuals is not None
-            box = virtuals[num]
+            box = self.getvirtual(num)
         elif tag == TAGINT:
             box = ConstInt(num)
         else:
@@ -750,7 +851,7 @@
     resumereader.handling_async_forcing()
     vrefinfo = metainterp_sd.virtualref_info
     resumereader.consume_vref_and_vable(vrefinfo, vinfo)
-    return resumereader.virtuals
+    return resumereader.force_all_virtuals()
 
 class ResumeDataDirectReader(AbstractResumeDataReader):
     unique_id = lambda: None
@@ -768,7 +869,9 @@
             # special case for resuming after a GUARD_NOT_FORCED: we already
             # have the virtuals
             self.resume_after_guard_not_forced = 2
-            self.virtuals = all_virtuals
+            self.virtuals_cache = all_virtuals
+            # self.rd_virtuals can remain None, because virtuals_cache is
+            # already filled
 
     def handling_async_forcing(self):
         self.resume_after_guard_not_forced = 1
@@ -839,6 +942,31 @@
     def allocate_array(self, arraydescr, length):
         return self.cpu.bh_new_array(arraydescr, length)
 
+    def allocate_string(self, length):
+        return self.cpu.bh_newstr(length)
+
+    def string_setitem(self, str, index, charnum):
+        char = self.decode_int(charnum)
+        self.cpu.bh_strsetitem(str, index, char)
+
+    def concat_strings(self, str1num, str2num):
+        str1 = self.decode_ref(str1num)
+        str2 = self.decode_ref(str2num)
+        str1 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str1)
+        str2 = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str2)
+        funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_CONCAT)
+        result = funcptr(str1, str2)
+        return lltype.cast_opaque_ptr(llmemory.GCREF, result)
+
+    def slice_string(self, strnum, startnum, lengthnum):
+        str = self.decode_ref(strnum)
+        start = self.decode_int(startnum)
+        length = self.decode_int(lengthnum)
+        str = lltype.cast_opaque_ptr(lltype.Ptr(rstr.STR), str)
+        funcptr = funcptr_for_oopspec(EffectInfo.OS_STR_SLICE)
+        result = funcptr(str, start, start + length)
+        return lltype.cast_opaque_ptr(llmemory.GCREF, result)
+
     def setfield(self, descr, struct, fieldnum):
         if descr.is_pointer_field():
             newvalue = self.decode_ref(fieldnum)
@@ -881,9 +1009,7 @@
                 return self.cpu.ts.NULLREF
             return self.consts[num].getref_base()
         elif tag == TAGVIRTUAL:
-            virtuals = self.virtuals
-            assert virtuals is not None
-            return virtuals[num]
+            return self.getvirtual(num)
         else:
             assert tag == TAGBOX
             if num < 0:

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/simple_optimize.py	Thu Sep 30 00:16:20 2010
@@ -9,17 +9,14 @@
 
 def transform(op):
     from pypy.jit.metainterp.history import AbstractDescr
-    # change ARRAYCOPY to call, so we don't have to pass around
-    # unnecessary information to the backend.  Do the same with VIRTUAL_REF_*.
-    if op.opnum == rop.ARRAYCOPY:
-        descr = op.args[0]
-        assert isinstance(descr, AbstractDescr)
-        op = ResOperation(rop.CALL, op.args[1:], op.result, descr=descr)
-    elif op.opnum == rop.CALL_PURE:
-        op = ResOperation(rop.CALL, op.args[1:], op.result, op.descr)
-    elif op.opnum == rop.VIRTUAL_REF:
-        op = ResOperation(rop.SAME_AS, [op.args[0]], op.result)
-    elif op.opnum == rop.VIRTUAL_REF_FINISH:
+    # Rename CALL_PURE to CALL.
+    # Simplify the VIRTUAL_REF_* so that they don't show up in the backend.
+    if op.getopnum() == rop.CALL_PURE:
+        op = ResOperation(rop.CALL, op.getarglist()[1:], op.result,
+                          op.getdescr())
+    elif op.getopnum() == rop.VIRTUAL_REF:
+        op = ResOperation(rop.SAME_AS, [op.getarg(0)], op.result)
+    elif op.getopnum() == rop.VIRTUAL_REF_FINISH:
         return []
     return [op]
 
@@ -36,7 +33,7 @@
         newoperations = []
         for op in loop.operations:
             if op.is_guard():
-                descr = op.descr
+                descr = op.getdescr()
                 assert isinstance(descr, compile.ResumeGuardDescr)
                 modifier = resume.ResumeDataVirtualAdder(descr, memo)
                 newboxes = modifier.finish(EMPTY_VALUES)

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/oparser.py	Thu Sep 30 00:16:20 2010
@@ -5,28 +5,39 @@
 
 from pypy.jit.metainterp.history import TreeLoop, BoxInt, ConstInt,\
      ConstObj, ConstPtr, Box, BasicFailDescr, BoxFloat, ConstFloat,\
-     LoopToken
-from pypy.jit.metainterp.resoperation import rop, ResOperation
+     LoopToken, get_const_ptr_for_string
+from pypy.jit.metainterp.resoperation import rop, ResOperation, ResOpWithDescr, N_aryOp
 from pypy.jit.metainterp.typesystem import llhelper
 from pypy.jit.codewriter.heaptracker import adr2int
 from pypy.rpython.lltypesystem import lltype, llmemory
 from pypy.rpython.ootypesystem import ootype
-from pypy.rpython.annlowlevel import llstr
 
 class ParseError(Exception):
     pass
 
-
 class Boxes(object):
     pass
 
+class ESCAPE_OP(N_aryOp, ResOpWithDescr):
+
+    OPNUM = -123
+
+    def __init__(self, opnum, args, result, descr=None):
+        assert opnum == self.OPNUM
+        self.result = result
+        self.initarglist(args)
+        self.setdescr(descr)
+
+    def getopnum(self):
+        return self.OPNUM
+
 class ExtendedTreeLoop(TreeLoop):
 
     def getboxes(self):
         def opboxes(operations):
             for op in operations:
                 yield op.result
-                for box in op.args:
+                for box in op.getarglist():
                     yield box
         def allboxes():
             for box in self.inputargs:
@@ -52,7 +63,8 @@
 
 class OpParser(object):
     def __init__(self, input, cpu, namespace, type_system, boxkinds,
-                 invent_fail_descr=default_fail_descr):
+                 invent_fail_descr=default_fail_descr,
+                 nonstrict=False):
         self.input = input
         self.vars = {}
         self.cpu = cpu
@@ -64,6 +76,7 @@
         else:
             self._cache = {}
         self.invent_fail_descr = invent_fail_descr
+        self.nonstrict = nonstrict
         self.looptoken = LoopToken()
 
     def get_const(self, name, typ):
@@ -122,11 +135,14 @@
         vars = []
         for elem in elements:
             elem = elem.strip()
-            box = self.box_for_var(elem)
-            vars.append(box)
-            self.vars[elem] = box
+            vars.append(self.newvar(elem))
         return vars
 
+    def newvar(self, elem):
+        box = self.box_for_var(elem)
+        self.vars[elem] = box
+        return box
+
     def is_float(self, arg):
         try:
             float(arg)
@@ -145,8 +161,7 @@
             if arg.startswith('"') or arg.startswith("'"):
                 # XXX ootype
                 info = arg.strip("'\"")
-                return ConstPtr(lltype.cast_opaque_ptr(llmemory.GCREF,
-                                                       llstr(info)))
+                return get_const_ptr_for_string(info)
             if arg.startswith('ConstClass('):
                 name = arg[len('ConstClass('):-1]
                 return self.get_const(name, 'class')
@@ -160,6 +175,8 @@
             elif arg.startswith('ConstPtr('):
                 name = arg[len('ConstPtr('):-1]
                 return self.get_const(name, 'ptr')
+            if arg not in self.vars and self.nonstrict:
+                self.newvar(arg)
             return self.vars[arg]
 
     def parse_op(self, line):
@@ -171,7 +188,7 @@
             opnum = getattr(rop, opname.upper())
         except AttributeError:
             if opname == 'escape':
-                opnum = -123
+                opnum = ESCAPE_OP.OPNUM
             else:
                 raise ParseError("unknown op: %s" % opname)
         endnum = line.rfind(')')
@@ -184,7 +201,8 @@
             if opname == 'debug_merge_point':
                 allargs = [argspec]
             else:
-                allargs = argspec.split(",")
+                allargs = [arg for arg in argspec.split(",")
+                           if arg != '']
 
             poss_descr = allargs[-1].strip()
             if poss_descr.startswith('descr='):
@@ -199,7 +217,7 @@
         if rop._GUARD_FIRST <= opnum <= rop._GUARD_LAST:
             i = line.find('[', endnum) + 1
             j = line.find(']', i)
-            if i <= 0 or j <= 0:
+            if (i <= 0 or j <= 0) and not self.nonstrict:
                 raise ParseError("missing fail_args for guard operation")
             fail_args = []
             if i < j:
@@ -228,6 +246,12 @@
                     descr = self.looptoken
         return opnum, args, descr, fail_args
 
+    def create_op(self, opnum, args, result, descr):
+        if opnum == ESCAPE_OP.OPNUM:
+            return ESCAPE_OP(opnum, args, result, descr)
+        else:
+            return ResOperation(opnum, args, result, descr)
+
     def parse_result_op(self, line):
         res, op = line.split("=", 1)
         res = res.strip()
@@ -237,14 +261,16 @@
             raise ParseError("Double assign to var %s in line: %s" % (res, line))
         rvar = self.box_for_var(res)
         self.vars[res] = rvar
-        res = ResOperation(opnum, args, rvar, descr)
-        res.fail_args = fail_args
+        res = self.create_op(opnum, args, rvar, descr)
+        if fail_args is not None:
+            res.setfailargs(fail_args)
         return res
 
     def parse_op_no_result(self, line):
         opnum, args, descr, fail_args = self.parse_op(line)
-        res = ResOperation(opnum, args, None, descr)
-        res.fail_args = fail_args
+        res = self.create_op(opnum, args, None, descr)
+        if fail_args is not None:
+            res.setfailargs(fail_args)
         return res
 
     def parse_next_op(self, line):
@@ -257,11 +283,14 @@
         lines = self.input.splitlines()
         ops = []
         newlines = []
+        first_comment = None
         for line in lines:
             # for simplicity comments are not allowed on
             # debug_merge_point lines
             if '#' in line and 'debug_merge_point(' not in line:
                 if line.lstrip()[0] == '#': # comment only
+                    if first_comment is None:
+                        first_comment = line
                     continue
                 comm = line.rfind('#')
                 rpar = line.find(')') # assume there's a op(...)
@@ -270,12 +299,12 @@
             if not line.strip():
                 continue  # a comment or empty line
             newlines.append(line)
-        base_indent, inpargs = self.parse_inpargs(newlines[0])
-        newlines = newlines[1:]
+        base_indent, inpargs, newlines = self.parse_inpargs(newlines)
         num, ops = self.parse_ops(base_indent, newlines, 0)
         if num < len(newlines):
             raise ParseError("unexpected dedent at line: %s" % newlines[num])
         loop = ExtendedTreeLoop("loop")
+        loop.comment = first_comment
         loop.token = self.looptoken
         loop.operations = ops
         loop.inputargs = inpargs
@@ -296,23 +325,27 @@
                 num += 1
         return num, ops
 
-    def parse_inpargs(self, line):
-        base_indent = line.find('[')
+    def parse_inpargs(self, lines):
+        line = lines[0]
+        base_indent = len(line) - len(line.lstrip(' '))
         line = line.strip()
+        if not line.startswith('[') and self.nonstrict:
+            return base_indent, [], lines
+        lines = lines[1:]
         if line == '[]':
-            return base_indent, []
-        if base_indent == -1 or not line.endswith(']'):
+            return base_indent, [], lines
+        if not line.startswith('[') or not line.endswith(']'):
             raise ParseError("Wrong header: %s" % line)
         inpargs = self.parse_header_line(line[1:-1])
-        return base_indent, inpargs
+        return base_indent, inpargs, lines
 
 def parse(input, cpu=None, namespace=None, type_system='lltype',
           boxkinds=None, invent_fail_descr=default_fail_descr,
-          no_namespace=False):
+          no_namespace=False, nonstrict=False):
     if namespace is None and not no_namespace:
         namespace = {}
     return OpParser(input, cpu, namespace, type_system, boxkinds,
-                    invent_fail_descr).parse()
+                    invent_fail_descr, nonstrict).parse()
 
 def pure_parse(*args, **kwds):
     kwds['invent_fail_descr'] = None

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_basic.py	Thu Sep 30 00:16:20 2010
@@ -296,7 +296,7 @@
             found = 0
             for op in get_stats().loops[0]._all_operations():
                 if op.getopname() == 'guard_true':
-                    liveboxes = op.fail_args
+                    liveboxes = op.getfailargs()
                     assert len(liveboxes) == 3
                     for box in liveboxes:
                         assert isinstance(box, history.BoxInt)

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_logger.py	Thu Sep 30 00:16:20 2010
@@ -100,8 +100,8 @@
         debug_merge_point("info")
         '''
         loop, oloop = self.reparse(inp, check_equal=False)
-        assert loop.operations[0].args[0]._get_str() == 'info'
-        assert oloop.operations[0].args[0]._get_str() == 'info'
+        assert loop.operations[0].getarg(0)._get_str() == 'info'
+        assert oloop.operations[0].getarg(0)._get_str() == 'info'
         
     def test_floats(self):
         inp = '''

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_loop.py	Thu Sep 30 00:16:20 2010
@@ -178,7 +178,7 @@
             found = 0
             for op in get_stats().loops[0]._all_operations():
                 if op.getopname() == 'guard_true':
-                    liveboxes = op.fail_args
+                    liveboxes = op.getfailargs()
                     assert len(liveboxes) == 2     # x, y (in some order)
                     assert isinstance(liveboxes[0], history.BoxInt)
                     assert isinstance(liveboxes[1], history.BoxInt)

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_oparser.py	Thu Sep 30 00:16:20 2010
@@ -16,10 +16,10 @@
     """
     loop = parse(x)
     assert len(loop.operations) == 3
-    assert [op.opnum for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB,
+    assert [op.getopnum() for op in loop.operations] == [rop.INT_ADD, rop.INT_SUB,
                                                     rop.FINISH]
     assert len(loop.inputargs) == 2
-    assert loop.operations[-1].descr
+    assert loop.operations[-1].getdescr()
 
 def test_const_ptr_subops():
     x = """
@@ -30,8 +30,8 @@
     vtable = lltype.nullptr(S)
     loop = parse(x, None, locals())
     assert len(loop.operations) == 1
-    assert loop.operations[0].descr
-    assert loop.operations[0].fail_args == []
+    assert loop.operations[0].getdescr()
+    assert loop.operations[0].getfailargs() == []
 
 def test_descr():
     class Xyz(AbstractDescr):
@@ -43,7 +43,7 @@
     """
     stuff = Xyz()
     loop = parse(x, None, locals())
-    assert loop.operations[0].descr is stuff
+    assert loop.operations[0].getdescr() is stuff
 
 def test_after_fail():
     x = """
@@ -64,7 +64,7 @@
     """
     stuff = Xyz()
     loop = parse(x, None, locals())
-    assert loop.operations[0].descr is stuff
+    assert loop.operations[0].getdescr() is stuff
 
 def test_boxname():
     x = """
@@ -111,7 +111,7 @@
     TP = lltype.GcArray(lltype.Signed)
     NULL = lltype.cast_opaque_ptr(llmemory.GCREF, lltype.nullptr(TP))
     loop = parse(x, None, {'func_ptr' : NULL})
-    assert loop.operations[0].args[0].value == NULL
+    assert loop.operations[0].getarg(0).value == NULL
 
 def test_jump_target():
     x = '''
@@ -119,7 +119,7 @@
     jump()
     '''
     loop = parse(x)
-    assert loop.operations[0].descr is loop.token
+    assert loop.operations[0].getdescr() is loop.token
 
 def test_jump_target_other():
     looptoken = LoopToken()
@@ -128,7 +128,7 @@
     jump(descr=looptoken)
     '''
     loop = parse(x, namespace=locals())
-    assert loop.operations[0].descr is looptoken
+    assert loop.operations[0].getdescr() is looptoken
 
 def test_floats():
     x = '''
@@ -136,7 +136,7 @@
     f1 = float_add(f0, 3.5)
     '''
     loop = parse(x)
-    assert isinstance(loop.operations[0].args[0], BoxFloat)
+    assert isinstance(loop.operations[0].getarg(0), BoxFloat)
     
 def test_debug_merge_point():
     x = '''
@@ -147,10 +147,10 @@
     debug_merge_point('(stuff) #1')
     '''
     loop = parse(x)
-    assert loop.operations[0].args[0]._get_str() == 'info'
-    assert loop.operations[1].args[0]._get_str() == 'info'
-    assert loop.operations[2].args[0]._get_str() == "<some ('other,')> info"
-    assert loop.operations[3].args[0]._get_str() == "(stuff) #1"
+    assert loop.operations[0].getarg(0)._get_str() == 'info'
+    assert loop.operations[1].getarg(0)._get_str() == 'info'
+    assert loop.operations[2].getarg(0)._get_str() == "<some ('other,')> info"
+    assert loop.operations[3].getarg(0)._get_str() == "(stuff) #1"
     
 
 def test_descr_with_obj_print():
@@ -174,3 +174,32 @@
 
 def test_parse_no_namespace():
     loop = parse(example_loop_log, no_namespace=True)
+
+def test_attach_comment_to_loop():
+    loop = parse(example_loop_log, no_namespace=True)
+    assert loop.comment == '# bridge out of Guard12, 6 ops'
+
+def test_parse_new_with_comma():
+    # this is generated by PYPYJITLOG, check that we can handle it
+    x = '''
+    []
+    p0 = new(, descr=<SizeDescr 12>)
+    '''
+    loop = parse(x)
+    assert loop.operations[0].getopname() == 'new'
+
+def test_no_fail_args():
+    x = '''
+    [i0]
+    guard_true(i0, descr=<Guard0>)
+    '''
+    loop = parse(x, nonstrict=True)
+    assert loop.operations[0].getfailargs() == []
+
+def test_no_inputargs():
+    x = '''
+    i2 = int_add(i0, i1)
+    '''
+    loop = parse(x, nonstrict=True)
+    assert loop.inputargs == []
+    assert loop.operations[0].getopname() == 'int_add'

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizefindnode.py	Thu Sep 30 00:16:20 2010
@@ -1,6 +1,6 @@
 import py, random
 
-from pypy.rpython.lltypesystem import lltype, llmemory, rclass
+from pypy.rpython.lltypesystem import lltype, llmemory, rclass, rstr
 from pypy.rpython.ootypesystem import ootype
 from pypy.rpython.lltypesystem.rclass import OBJECT, OBJECT_VTABLE
 
@@ -115,6 +115,36 @@
     mayforcevirtdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
                  EffectInfo([nextdescr], [], [],
                             EffectInfo.EF_FORCES_VIRTUAL_OR_VIRTUALIZABLE))
+    arraycopydescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [], oopspecindex=EffectInfo.OS_ARRAYCOPY))
+    strconcatdescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_CONCAT))
+    slicedescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_SLICE))
+    strequaldescr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [], oopspecindex=EffectInfo.OS_STR_EQUAL))
+    streq_slice_checknull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [],
+                     oopspecindex=EffectInfo.OS_STREQ_SLICE_CHECKNULL))
+    streq_slice_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [],
+                     oopspecindex=EffectInfo.OS_STREQ_SLICE_NONNULL))
+    streq_slice_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [],
+                     oopspecindex=EffectInfo.OS_STREQ_SLICE_CHAR))
+    streq_nonnull_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [],
+                     oopspecindex=EffectInfo.OS_STREQ_NONNULL))
+    streq_nonnull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [],
+                     oopspecindex=EffectInfo.OS_STREQ_NONNULL_CHAR))
+    streq_checknull_char_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [],
+                     oopspecindex=EffectInfo.OS_STREQ_CHECKNULL_CHAR))
+    streq_lengthok_descr = cpu.calldescrof(FUNC, FUNC.ARGS, FUNC.RESULT,
+                 EffectInfo([], [], [],
+                     oopspecindex=EffectInfo.OS_STREQ_LENGTHOK))
+
     class LoopToken(AbstractDescr):
         pass
     asmdescr = LoopToken() # it can be whatever, it's not a descr though

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_optimizeopt.py	Thu Sep 30 00:16:20 2010
@@ -42,7 +42,7 @@
     opt = optimizeopt.Optimizer(FakeMetaInterpStaticData(LLtypeMixin.cpu),
                                 None)
     fdescr = ResumeGuardDescr(None, None)
-    op = ResOperation(rop.GUARD_TRUE, [], None, descr=fdescr)
+    op = ResOperation(rop.GUARD_TRUE, ['dummy'], None, descr=fdescr)
     # setup rd data
     fi0 = resume.FrameInfo(None, "code0", 11)
     fdescr.rd_frame_info_list = resume.FrameInfo(fi0, "code1", 33)
@@ -50,11 +50,11 @@
     fdescr.rd_snapshot = resume.Snapshot(snapshot0, [b1])
     #
     opt.store_final_boxes_in_guard(op)
-    if op.fail_args == [b0, b1]:
+    if op.getfailargs() == [b0, b1]:
         assert fdescr.rd_numb.nums      == [tag(1, TAGBOX)]
         assert fdescr.rd_numb.prev.nums == [tag(0, TAGBOX)]
     else:
-        assert op.fail_args == [b1, b0]
+        assert op.getfailargs() == [b1, b0]
         assert fdescr.rd_numb.nums      == [tag(0, TAGBOX)]
         assert fdescr.rd_numb.prev.nums == [tag(1, TAGBOX)]
     assert fdescr.rd_virtuals is None
@@ -140,24 +140,26 @@
             print '%-39s| %s' % (txt1[:39], txt2[:39])
             txt1 = txt1[39:]
             txt2 = txt2[39:]
-        assert op1.opnum == op2.opnum
-        assert len(op1.args) == len(op2.args)
-        for x, y in zip(op1.args, op2.args):
+        assert op1.getopnum() == op2.getopnum()
+        assert op1.numargs() == op2.numargs()
+        for i in range(op1.numargs()):
+            x = op1.getarg(i)
+            y = op2.getarg(i)
             assert x == remap.get(y, y)
         if op2.result in remap:
             assert op1.result == remap[op2.result]
         else:
             remap[op2.result] = op1.result
-        if op1.opnum != rop.JUMP:      # xxx obscure
-            assert op1.descr == op2.descr
-        if op1.fail_args or op2.fail_args:
-            assert len(op1.fail_args) == len(op2.fail_args)
+        if op1.getopnum() != rop.JUMP:      # xxx obscure
+            assert op1.getdescr() == op2.getdescr()
+        if op1.getfailargs() or op2.getfailargs():
+            assert len(op1.getfailargs()) == len(op2.getfailargs())
             if strict_fail_args:
-                for x, y in zip(op1.fail_args, op2.fail_args):
+                for x, y in zip(op1.getfailargs(), op2.getfailargs()):
                     assert x == remap.get(y, y)
             else:
-                fail_args1 = set(op1.fail_args)
-                fail_args2 = set([remap.get(y, y) for y in op2.fail_args])
+                fail_args1 = set(op1.getfailargs())
+                fail_args2 = set([remap.get(y, y) for y in op2.getfailargs()])
                 assert fail_args1 == fail_args2
     assert len(oplist1) == len(oplist2)
     print '-'*57
@@ -209,7 +211,7 @@
         self.metainterp_sd = metainterp_sd
         self.original_greenkey = original_greenkey
     def store_final_boxes(self, op, boxes):
-        op.fail_args = boxes
+        op.setfailargs(boxes)
     def __eq__(self, other):
         return type(self) is type(other)      # xxx obscure
 
@@ -2361,8 +2363,8 @@
         from pypy.jit.metainterp.test.test_resume import ResumeDataFakeReader
         from pypy.jit.metainterp.test.test_resume import MyMetaInterp
         guard_op, = [op for op in self.loop.operations if op.is_guard()]
-        fail_args = guard_op.fail_args
-        fdescr = guard_op.descr
+        fail_args = guard_op.getfailargs()
+        fdescr = guard_op.getdescr()
         assert fdescr.guard_opnum == guard_opnum
         reader = ResumeDataFakeReader(fdescr, fail_args,
                                       MyMetaInterp(self.cpu))
@@ -3080,7 +3082,7 @@
         setarrayitem_gc(p1, 1, 1, descr=arraydescr)
         p2 = new_array(3, descr=arraydescr)
         setarrayitem_gc(p2, 1, 3, descr=arraydescr)
-        arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr)
+        call(0, p1, p2, 1, 1, 2, descr=arraycopydescr)
         i2 = getarrayitem_gc(p2, 1, descr=arraydescr)
         jump(i2)
         '''
@@ -3097,7 +3099,7 @@
         p2 = new_array(3, descr=arraydescr)
         setarrayitem_gc(p1, 0, i0, descr=arraydescr)
         setarrayitem_gc(p2, 0, 3, descr=arraydescr)
-        arraycopy(0, 0, p1, p2, 1, 1, 2, descr=arraydescr)
+        call(0, p1, p2, 1, 1, 2, descr=arraycopydescr)
         i2 = getarrayitem_gc(p2, 0, descr=arraydescr)
         jump(i2)
         '''
@@ -3114,7 +3116,7 @@
         p2 = new_array(3, descr=arraydescr)
         setarrayitem_gc(p1, 2, 10, descr=arraydescr)
         setarrayitem_gc(p2, 2, 13, descr=arraydescr)
-        arraycopy(0, 0, p1, p2, 0, 0, 3, descr=arraydescr)
+        call(0, p1, p2, 0, 0, 3, descr=arraycopydescr)
         jump(p2)
         '''
         expected = '''
@@ -3131,7 +3133,7 @@
         ops = '''
         [p1]
         p0 = new_array(0, descr=arraydescr)
-        arraycopy(0, 0, p0, p1, 0, 0, 0, descr=arraydescr)
+        call(0, p0, p1, 0, 0, 0, descr=arraycopydescr)
         jump(p1)
         '''
         expected = '''
@@ -3891,7 +3893,606 @@
         """
         self.optimize_loop(ops, 'Not, Not', expected)
 
+    def test_newstr_1(self):
+        ops = """
+        [i0]
+        p1 = newstr(1)
+        strsetitem(p1, 0, i0)
+        i1 = strgetitem(p1, 0)
+        jump(i1)
+        """
+        expected = """
+        [i0]
+        jump(i0)
+        """
+        self.optimize_loop(ops, 'Not', expected)
+
+    def test_newstr_2(self):
+        ops = """
+        [i0, i1]
+        p1 = newstr(2)
+        strsetitem(p1, 0, i0)
+        strsetitem(p1, 1, i1)
+        i2 = strgetitem(p1, 1)
+        i3 = strgetitem(p1, 0)
+        jump(i2, i3)
+        """
+        expected = """
+        [i0, i1]
+        jump(i1, i0)
+        """
+        self.optimize_loop(ops, 'Not, Not', expected)
+
+    def test_str_concat_1(self):
+        ops = """
+        [p1, p2]
+        p3 = call(0, p1, p2, descr=strconcatdescr)
+        jump(p2, p3)
+        """
+        expected = """
+        [p1, p2]
+        i1 = strlen(p1)
+        i2 = strlen(p2)
+        i3 = int_add(i1, i2)
+        p3 = newstr(i3)
+        i4 = strlen(p1)
+        copystrcontent(p1, p3, 0, 0, i4)
+        i5 = strlen(p2)
+        i6 = int_add(i4, i5)      # will be killed by the backend
+        copystrcontent(p2, p3, 0, i4, i5)
+        jump(p2, p3)
+        """
+        self.optimize_loop(ops, 'Not, Not', expected)
+
+    def test_str_concat_vstr2_str(self):
+        ops = """
+        [i0, i1, p2]
+        p1 = newstr(2)
+        strsetitem(p1, 0, i0)
+        strsetitem(p1, 1, i1)
+        p3 = call(0, p1, p2, descr=strconcatdescr)
+        jump(i1, i0, p3)
+        """
+        expected = """
+        [i0, i1, p2]
+        i2 = strlen(p2)
+        i3 = int_add(2, i2)
+        p3 = newstr(i3)
+        strsetitem(p3, 0, i0)
+        strsetitem(p3, 1, i1)
+        i4 = strlen(p2)
+        i5 = int_add(2, i4)      # will be killed by the backend
+        copystrcontent(p2, p3, 0, 2, i4)
+        jump(i1, i0, p3)
+        """
+        self.optimize_loop(ops, 'Not, Not, Not', expected)
+
+    def test_str_concat_str_vstr2(self):
+        ops = """
+        [i0, i1, p2]
+        p1 = newstr(2)
+        strsetitem(p1, 0, i0)
+        strsetitem(p1, 1, i1)
+        p3 = call(0, p2, p1, descr=strconcatdescr)
+        jump(i1, i0, p3)
+        """
+        expected = """
+        [i0, i1, p2]
+        i2 = strlen(p2)
+        i3 = int_add(i2, 2)
+        p3 = newstr(i3)
+        i4 = strlen(p2)
+        copystrcontent(p2, p3, 0, 0, i4)
+        strsetitem(p3, i4, i0)
+        i5 = int_add(i4, 1)
+        strsetitem(p3, i5, i1)
+        i6 = int_add(i5, 1)      # will be killed by the backend
+        jump(i1, i0, p3)
+        """
+        self.optimize_loop(ops, 'Not, Not, Not', expected)
+
+    def test_str_concat_str_str_str(self):
+        ops = """
+        [p1, p2, p3]
+        p4 = call(0, p1, p2, descr=strconcatdescr)
+        p5 = call(0, p4, p3, descr=strconcatdescr)
+        jump(p2, p3, p5)
+        """
+        expected = """
+        [p1, p2, p3]
+        i1 = strlen(p1)
+        i2 = strlen(p2)
+        i12 = int_add(i1, i2)
+        i3 = strlen(p3)
+        i123 = int_add(i12, i3)
+        p5 = newstr(i123)
+        i1b = strlen(p1)
+        copystrcontent(p1, p5, 0, 0, i1b)
+        i2b = strlen(p2)
+        i12b = int_add(i1b, i2b)
+        copystrcontent(p2, p5, 0, i1b, i2b)
+        i3b = strlen(p3)
+        i123b = int_add(i12b, i3b)      # will be killed by the backend
+        copystrcontent(p3, p5, 0, i12b, i3b)
+        jump(p2, p3, p5)
+        """
+        self.optimize_loop(ops, 'Not, Not, Not', expected)
+
+    def test_str_concat_str_cstr1(self):
+        ops = """
+        [p2]
+        p3 = call(0, p2, "x", descr=strconcatdescr)
+        jump(p3)
+        """
+        expected = """
+        [p2]
+        i2 = strlen(p2)
+        i3 = int_add(i2, 1)
+        p3 = newstr(i3)
+        i4 = strlen(p2)
+        copystrcontent(p2, p3, 0, 0, i4)
+        strsetitem(p3, i4, 120)     # == ord('x')
+        i5 = int_add(i4, 1)      # will be killed by the backend
+        jump(p3)
+        """
+        self.optimize_loop(ops, 'Not', expected)
+
+    def test_str_concat_consts(self):
+        ops = """
+        []
+        p1 = same_as("ab")
+        p2 = same_as("cde")
+        p3 = call(0, p1, p2, descr=strconcatdescr)
+        escape(p3)
+        jump()
+        """
+        expected = """
+        []
+        escape("abcde")
+        jump()
+        """
+        self.optimize_loop(ops, '', expected)
+
+    def test_str_slice_1(self):
+        ops = """
+        [p1, i1, i2]
+        p2 = call(0, p1, i1, i2, descr=slicedescr)
+        jump(p2, i1, i2)
+        """
+        expected = """
+        [p1, i1, i2]
+        i3 = int_sub(i2, i1)
+        p2 = newstr(i3)
+        copystrcontent(p1, p2, i1, 0, i3)
+        jump(p2, i1, i2)
+        """
+        self.optimize_loop(ops, 'Not, Not, Not', expected)
+
+    def test_str_slice_2(self):
+        ops = """
+        [p1, i2]
+        p2 = call(0, p1, 0, i2, descr=slicedescr)
+        jump(p2, i2)
+        """
+        expected = """
+        [p1, i2]
+        p2 = newstr(i2)
+        copystrcontent(p1, p2, 0, 0, i2)
+        jump(p2, i2)
+        """
+        self.optimize_loop(ops, 'Not, Not', expected)
+
+    def test_str_slice_3(self):
+        ops = """
+        [p1, i1, i2, i3, i4]
+        p2 = call(0, p1, i1, i2, descr=slicedescr)
+        p3 = call(0, p2, i3, i4, descr=slicedescr)
+        jump(p3, i1, i2, i3, i4)
+        """
+        expected = """
+        [p1, i1, i2, i3, i4]
+        i0 = int_sub(i2, i1)     # killed by the backend
+        i5 = int_sub(i4, i3)
+        i6 = int_add(i1, i3)
+        p3 = newstr(i5)
+        copystrcontent(p1, p3, i6, 0, i5)
+        jump(p3, i1, i2, i3, i4)
+        """
+        self.optimize_loop(ops, 'Not, Not, Not, Not, Not', expected)
+
+    def test_str_slice_getitem1(self):
+        ops = """
+        [p1, i1, i2, i3]
+        p2 = call(0, p1, i1, i2, descr=slicedescr)
+        i4 = strgetitem(p2, i3)
+        escape(i4)
+        jump(p1, i1, i2, i3)
+        """
+        expected = """
+        [p1, i1, i2, i3]
+        i6 = int_sub(i2, i1)      # killed by the backend
+        i5 = int_add(i1, i3)
+        i4 = strgetitem(p1, i5)
+        escape(i4)
+        jump(p1, i1, i2, i3)
+        """
+        self.optimize_loop(ops, 'Not, Not, Not, Not', expected)
+
+    def test_str_slice_plain(self):
+        ops = """
+        [i3, i4]
+        p1 = newstr(2)
+        strsetitem(p1, 0, i3)
+        strsetitem(p1, 1, i4)
+        p2 = call(0, p1, 1, 2, descr=slicedescr)
+        i5 = strgetitem(p2, 0)
+        escape(i5)
+        jump(i3, i4)
+        """
+        expected = """
+        [i3, i4]
+        escape(i4)
+        jump(i3, i4)
+        """
+        self.optimize_loop(ops, 'Not, Not', expected)
+
+    def test_str_slice_concat(self):
+        ops = """
+        [p1, i1, i2, p2]
+        p3 = call(0, p1, i1, i2, descr=slicedescr)
+        p4 = call(0, p3, p2, descr=strconcatdescr)
+        jump(p4, i1, i2, p2)
+        """
+        expected = """
+        [p1, i1, i2, p2]
+        i3 = int_sub(i2, i1)     # length of p3
+        i4 = strlen(p2)
+        i5 = int_add(i3, i4)
+        p4 = newstr(i5)
+        copystrcontent(p1, p4, i1, 0, i3)
+        i4b = strlen(p2)
+        i6 = int_add(i3, i4b)    # killed by the backend
+        copystrcontent(p2, p4, 0, i3, i4b)
+        jump(p4, i1, i2, p2)
+        """
+        self.optimize_loop(ops, 'Not, Not, Not, Not', expected)
+
+    # ----------
+    def optimize_loop_extradescrs(self, ops, spectext, optops):
+        from pypy.jit.metainterp.optimizeopt import string
+        def my_callinfo_for_oopspec(oopspecindex):
+            calldescrtype = type(LLtypeMixin.strequaldescr)
+            for value in LLtypeMixin.__dict__.values():
+                if isinstance(value, calldescrtype):
+                    if (value.get_extra_info() and
+                        value.get_extra_info().oopspecindex == oopspecindex):
+                        # returns 0 for 'func' in this test
+                        return value, 0
+            raise AssertionError("not found: oopspecindex=%d" % oopspecindex)
+        #
+        saved = string.callinfo_for_oopspec
+        try:
+            string.callinfo_for_oopspec = my_callinfo_for_oopspec
+            self.optimize_loop(ops, spectext, optops)
+        finally:
+            string.callinfo_for_oopspec = saved
+
+    def test_str_equal_noop1(self):
+        ops = """
+        [p1, p2]
+        i0 = call(0, p1, p2, descr=strequaldescr)
+        escape(i0)
+        jump(p1, p2)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not', ops)
+
+    def test_str_equal_noop2(self):
+        ops = """
+        [p1, p2, p3]
+        p4 = call(0, p1, p2, descr=strconcatdescr)
+        i0 = call(0, p3, p4, descr=strequaldescr)
+        escape(i0)
+        jump(p1, p2, p3)
+        """
+        expected = """
+        [p1, p2, p3]
+        i1 = strlen(p1)
+        i2 = strlen(p2)
+        i3 = int_add(i1, i2)
+        p4 = newstr(i3)
+        i4 = strlen(p1)
+        copystrcontent(p1, p4, 0, 0, i4)
+        i5 = strlen(p2)
+        i6 = int_add(i4, i5)      # will be killed by the backend
+        copystrcontent(p2, p4, 0, i4, i5)
+        i0 = call(0, p3, p4, descr=strequaldescr)
+        escape(i0)
+        jump(p1, p2, p3)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected)
+
+    def test_str_equal_slice1(self):
+        ops = """
+        [p1, i1, i2, p3]
+        p4 = call(0, p1, i1, i2, descr=slicedescr)
+        i0 = call(0, p4, p3, descr=strequaldescr)
+        escape(i0)
+        jump(p1, i1, i2, p3)
+        """
+        expected = """
+        [p1, i1, i2, p3]
+        i3 = int_sub(i2, i1)
+        i0 = call(0, p1, i1, i3, p3, descr=streq_slice_checknull_descr)
+        escape(i0)
+        jump(p1, i1, i2, p3)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected)
+
+    def test_str_equal_slice2(self):
+        ops = """
+        [p1, i1, i2, p3]
+        p4 = call(0, p1, i1, i2, descr=slicedescr)
+        i0 = call(0, p3, p4, descr=strequaldescr)
+        escape(i0)
+        jump(p1, i1, i2, p3)
+        """
+        expected = """
+        [p1, i1, i2, p3]
+        i4 = int_sub(i2, i1)
+        i0 = call(0, p1, i1, i4, p3, descr=streq_slice_checknull_descr)
+        escape(i0)
+        jump(p1, i1, i2, p3)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected)
+
+    def test_str_equal_slice3(self):
+        ops = """
+        [p1, i1, i2, p3]
+        guard_nonnull(p3) []
+        p4 = call(0, p1, i1, i2, descr=slicedescr)
+        i0 = call(0, p3, p4, descr=strequaldescr)
+        escape(i0)
+        jump(p1, i1, i2, p3)
+        """
+        expected = """
+        [p1, i1, i2, p3]
+        guard_nonnull(p3) []
+        i4 = int_sub(i2, i1)
+        i0 = call(0, p1, i1, i4, p3, descr=streq_slice_nonnull_descr)
+        escape(i0)
+        jump(p1, i1, i2, p3)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected)
+
+    def test_str_equal_slice4(self):
+        ops = """
+        [p1, i1, i2]
+        p3 = call(0, p1, i1, i2, descr=slicedescr)
+        i0 = call(0, p3, "x", descr=strequaldescr)
+        escape(i0)
+        jump(p1, i1, i2)
+        """
+        expected = """
+        [p1, i1, i2]
+        i3 = int_sub(i2, i1)
+        i0 = call(0, p1, i1, i3, 120, descr=streq_slice_char_descr)
+        escape(i0)
+        jump(p1, i1, i2)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not, Not', expected)
+
+    def test_str_equal_slice5(self):
+        ops = """
+        [p1, i1, i2, i3]
+        p4 = call(0, p1, i1, i2, descr=slicedescr)
+        p5 = newstr(1)
+        strsetitem(p5, 0, i3)
+        i0 = call(0, p5, p4, descr=strequaldescr)
+        escape(i0)
+        jump(p1, i1, i2, i3)
+        """
+        expected = """
+        [p1, i1, i2, i3]
+        i4 = int_sub(i2, i1)
+        i0 = call(0, p1, i1, i4, i3, descr=streq_slice_char_descr)
+        escape(i0)
+        jump(p1, i1, i2, i3)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not, Not, Not', expected)
+
+    def test_str_equal_none1(self):
+        ops = """
+        [p1]
+        i0 = call(0, p1, NULL, descr=strequaldescr)
+        escape(i0)
+        jump(p1)
+        """
+        expected = """
+        [p1]
+        i0 = ptr_eq(p1, NULL)
+        escape(i0)
+        jump(p1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_none2(self):
+        ops = """
+        [p1]
+        i0 = call(0, NULL, p1, descr=strequaldescr)
+        escape(i0)
+        jump(p1)
+        """
+        expected = """
+        [p1]
+        i0 = ptr_eq(p1, NULL)
+        escape(i0)
+        jump(p1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_nonnull1(self):
+        ops = """
+        [p1]
+        guard_nonnull(p1) []
+        i0 = call(0, p1, "hello world", descr=strequaldescr)
+        escape(i0)
+        jump(p1)
+        """
+        expected = """
+        [p1]
+        guard_nonnull(p1) []
+        i0 = call(0, p1, "hello world", descr=streq_nonnull_descr)
+        escape(i0)
+        jump(p1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_nonnull2(self):
+        ops = """
+        [p1]
+        guard_nonnull(p1) []
+        i0 = call(0, p1, "", descr=strequaldescr)
+        escape(i0)
+        jump(p1)
+        """
+        expected = """
+        [p1]
+        guard_nonnull(p1) []
+        i1 = strlen(p1)
+        i0 = int_eq(i1, 0)
+        escape(i0)
+        jump(p1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_nonnull3(self):
+        ops = """
+        [p1]
+        guard_nonnull(p1) []
+        i0 = call(0, p1, "x", descr=strequaldescr)
+        escape(i0)
+        jump(p1)
+        """
+        expected = """
+        [p1]
+        guard_nonnull(p1) []
+        i0 = call(0, p1, 120, descr=streq_nonnull_char_descr)
+        escape(i0)
+        jump(p1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_nonnull4(self):
+        ops = """
+        [p1, p2]
+        p4 = call(0, p1, p2, descr=strconcatdescr)
+        i0 = call(0, "hello world", p4, descr=strequaldescr)
+        escape(i0)
+        jump(p1, p2)
+        """
+        expected = """
+        [p1, p2]
+        i1 = strlen(p1)
+        i2 = strlen(p2)
+        i3 = int_add(i1, i2)
+        p4 = newstr(i3)
+        i4 = strlen(p1)
+        copystrcontent(p1, p4, 0, 0, i4)
+        i5 = strlen(p2)
+        i6 = int_add(i4, i5)      # will be killed by the backend
+        copystrcontent(p2, p4, 0, i4, i5)
+        i0 = call(0, "hello world", p4, descr=streq_nonnull_descr)
+        escape(i0)
+        jump(p1, p2)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not', expected)
+
+    def test_str_equal_chars0(self):
+        ops = """
+        [i1]
+        p1 = newstr(0)
+        i0 = call(0, p1, "", descr=strequaldescr)
+        escape(i0)
+        jump(i1)
+        """
+        expected = """
+        [i1]
+        escape(1)
+        jump(i1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_chars1(self):
+        ops = """
+        [i1]
+        p1 = newstr(1)
+        strsetitem(p1, 0, i1)
+        i0 = call(0, p1, "x", descr=strequaldescr)
+        escape(i0)
+        jump(i1)
+        """
+        expected = """
+        [i1]
+        i0 = int_eq(i1, 120)     # ord('x')
+        escape(i0)
+        jump(i1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_chars2(self):
+        ops = """
+        [i1, i2]
+        p1 = newstr(2)
+        strsetitem(p1, 0, i1)
+        strsetitem(p1, 1, i2)
+        i0 = call(0, p1, "xy", descr=strequaldescr)
+        escape(i0)
+        jump(i1, i2)
+        """
+        expected = """
+        [i1, i2]
+        p1 = newstr(2)
+        strsetitem(p1, 0, i1)
+        strsetitem(p1, 1, i2)
+        i0 = call(0, p1, "xy", descr=streq_lengthok_descr)
+        escape(i0)
+        jump(i1, i2)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not, Not', expected)
+
+    def test_str_equal_chars3(self):
+        ops = """
+        [p1]
+        i0 = call(0, "x", p1, descr=strequaldescr)
+        escape(i0)
+        jump(p1)
+        """
+        expected = """
+        [p1]
+        i0 = call(0, p1, 120, descr=streq_checknull_char_descr)
+        escape(i0)
+        jump(p1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
+
+    def test_str_equal_lengthmismatch1(self):
+        ops = """
+        [i1]
+        p1 = newstr(1)
+        strsetitem(p1, 0, i1)
+        i0 = call(0, "xy", p1, descr=strequaldescr)
+        escape(i0)
+        jump(i1)
+        """
+        expected = """
+        [i1]
+        escape(0)
+        jump(i1)
+        """
+        self.optimize_loop_extradescrs(ops, 'Not', expected)
 
+    # XXX unicode operations
+    # XXX str2unicode
 
 
 ##class TestOOtype(BaseTestOptimizeOpt, OOtypeMixin):

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_recursive.py	Thu Sep 30 00:16:20 2010
@@ -319,8 +319,8 @@
         for loop in get_stats().loops:
             assert len(loop.operations) <= length + 5 # because we only check once per metainterp bytecode
             for op in loop.operations:
-                if op.is_guard() and hasattr(op.descr, '_debug_suboperations'):
-                    assert len(op.descr._debug_suboperations) <= length + 5
+                if op.is_guard() and hasattr(op.getdescr(), '_debug_suboperations'):
+                    assert len(op.getdescr()._debug_suboperations) <= length + 5
 
     def test_inline_trace_limit(self):
         myjitdriver = JitDriver(greens=[], reds=['n'])

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_resume.py	Thu Sep 30 00:16:20 2010
@@ -199,10 +199,10 @@
 
 def test_prepare_virtuals():
     class FakeVinfo(object):
-        def allocate(self, decoder):
-            return "allocated"
-        def setfields(self, decoder, virtual):
-            assert virtual == "allocated"
+        def allocate(self, decoder, index):
+            s = "allocated"
+            decoder.virtuals_cache[index] = s
+            return s
     class FakeStorage(object):
         rd_virtuals = [FakeVinfo(), None]
         rd_numb = []
@@ -212,7 +212,97 @@
         _already_allocated_resume_virtuals = None
         cpu = None
     reader = ResumeDataDirectReader(None, FakeStorage())
-    assert reader.virtuals == ["allocated", reader.virtual_default]
+    assert reader.force_all_virtuals() == ["allocated", reader.virtual_default]
+
+# ____________________________________________________________
+
+class FakeResumeDataReader(AbstractResumeDataReader):
+    def allocate_with_vtable(self, known_class):
+        return FakeBuiltObject(vtable=known_class)
+    def allocate_struct(self, typedescr):
+        return FakeBuiltObject(typedescr=typedescr)
+    def allocate_array(self, arraydescr, length):
+        return FakeBuiltObject(arraydescr=arraydescr, items=[None]*length)
+    def setfield(self, descr, struct, fieldnum):
+        setattr(struct, descr, fieldnum)
+    def setarrayitem_int(self, arraydescr, array, i, fieldnum):
+        assert 0 <= i < len(array.items)
+        assert arraydescr is array.arraydescr
+        array.items[i] = fieldnum
+    def allocate_string(self, length):
+        return FakeBuiltObject(string=[None]*length)
+    def string_setitem(self, string, i, fieldnum):
+        value, tag = untag(fieldnum)
+        assert tag == TAGINT
+        assert 0 <= i < len(string.string)
+        string.string[i] = value
+    def concat_strings(self, left, right):
+        return FakeBuiltObject(strconcat=[left, right])
+    def slice_string(self, str, start, length):
+        return FakeBuiltObject(strslice=[str, start, length])
+
+class FakeBuiltObject(object):
+    def __init__(self, **kwds):
+        self.__dict__ = kwds
+    def __eq__(self, other):
+        return (self.__class__ == other.__class__ and
+                self.__dict__ == other.__dict__)
+    def __repr__(self):
+        return 'FakeBuiltObject(%s)' % (
+            ', '.join(['%s=%r' % item for item in self.__dict__.items()]))
+
+class FakeArrayDescr(object):
+    def is_array_of_pointers(self): return False
+    def is_array_of_floats(self): return False
+
+def test_virtualinfo():
+    info = VirtualInfo(123, ["fielddescr1"])
+    info.fieldnums = [tag(456, TAGINT)]
+    reader = FakeResumeDataReader()
+    reader._prepare_virtuals([info])
+    assert reader.force_all_virtuals() == [
+        FakeBuiltObject(vtable=123, fielddescr1=tag(456, TAGINT))]
+
+def test_vstructinfo():
+    info = VStructInfo(124, ["fielddescr1"])
+    info.fieldnums = [tag(456, TAGINT)]
+    reader = FakeResumeDataReader()
+    reader._prepare_virtuals([info])
+    assert reader.force_all_virtuals() == [
+        FakeBuiltObject(typedescr=124, fielddescr1=tag(456, TAGINT))]
+
+def test_varrayinfo():
+    arraydescr = FakeArrayDescr()
+    info = VArrayInfo(arraydescr)
+    info.fieldnums = [tag(456, TAGINT)]
+    reader = FakeResumeDataReader()
+    reader._prepare_virtuals([info])
+    assert reader.force_all_virtuals() == [
+        FakeBuiltObject(arraydescr=arraydescr, items=[tag(456, TAGINT)])]
+
+def test_vstrplaininfo():
+    info = VStrPlainInfo()
+    info.fieldnums = [tag(60, TAGINT)]
+    reader = FakeResumeDataReader()
+    reader._prepare_virtuals([info])
+    assert reader.force_all_virtuals() == [
+        FakeBuiltObject(string=[60])]
+
+def test_vstrconcatinfo():
+    info = VStrConcatInfo()
+    info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX)]
+    reader = FakeResumeDataReader()
+    reader._prepare_virtuals([info])
+    assert reader.force_all_virtuals() == [
+        FakeBuiltObject(strconcat=info.fieldnums)]
+
+def test_vstrsliceinfo():
+    info = VStrSliceInfo()
+    info.fieldnums = [tag(10, TAGBOX), tag(20, TAGBOX), tag(30, TAGBOX)]
+    reader = FakeResumeDataReader()
+    reader._prepare_virtuals([info])
+    assert reader.force_all_virtuals() == [
+        FakeBuiltObject(strslice=info.fieldnums)]
 
 # ____________________________________________________________
 
@@ -957,7 +1047,7 @@
 
     metainterp = MyMetaInterp()
     reader = ResumeDataFakeReader(storage, newboxes, metainterp)
-    assert len(reader.virtuals) == 2
+    assert len(reader.virtuals_cache) == 2
     b2t = reader.decode_ref(modifier._gettagged(b2s))
     b4t = reader.decode_ref(modifier._gettagged(b4s))
     trace = metainterp.trace
@@ -972,13 +1062,14 @@
     b4set = [(rop.SETFIELD_GC, [b4t, b2t],     None, LLtypeMixin.nextdescr),
              (rop.SETFIELD_GC, [b4t, b3t],     None, LLtypeMixin.valuedescr),
              (rop.SETFIELD_GC, [b4t, b5t],     None, LLtypeMixin.otherdescr)]
-    if untag(modifier._gettagged(b2s))[0] == -2:
-        expected = [b2new, b4new] + b2set + b4set
-    else:
-        expected = [b4new, b2new] + b4set + b2set
-        
-    for x, y in zip(expected, trace):
-        assert x == y
+    expected = [b2new, b4new] + b4set + b2set
+
+    # check that we get the operations in 'expected', in a possibly different
+    # order.
+    assert len(trace) == len(expected)
+    for x in trace:
+        assert x in expected
+        expected.remove(x)
     ptr = b2t.value._obj.container._as_ptr()
     assert lltype.typeOf(ptr) == lltype.Ptr(LLtypeMixin.NODE)
     assert ptr.value == 111
@@ -1020,7 +1111,7 @@
     # resume
     metainterp = MyMetaInterp()
     reader = ResumeDataFakeReader(storage, newboxes, metainterp)
-    assert len(reader.virtuals) == 1
+    assert len(reader.virtuals_cache) == 1
     b2t = reader.decode_ref(tag(0, TAGVIRTUAL))
     trace = metainterp.trace
     expected = [
@@ -1065,7 +1156,7 @@
     NULL = ConstPtr.value
     metainterp = MyMetaInterp()
     reader = ResumeDataFakeReader(storage, newboxes, metainterp)
-    assert len(reader.virtuals) == 1
+    assert len(reader.virtuals_cache) == 1
     b2t = reader.decode_ref(tag(0, TAGVIRTUAL))
 
     trace = metainterp.trace
@@ -1112,7 +1203,7 @@
 
     metainterp = MyMetaInterp()
     reader = ResumeDataFakeReader(storage, newboxes, metainterp)
-    assert reader.virtuals is None
+    assert reader.virtuals_cache is None
     trace = metainterp.trace
     b2set = (rop.SETFIELD_GC, [b2t, b4t], None, LLtypeMixin.nextdescr)
     expected = [b2set]

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_string.py	Thu Sep 30 00:16:20 2010
@@ -1,5 +1,5 @@
 import py
-from pypy.rlib.jit import JitDriver
+from pypy.rlib.jit import JitDriver, dont_look_inside, we_are_jitted
 from pypy.jit.codewriter.policy import StopAtXPolicy
 from pypy.rpython.ootypesystem import ootype
 from pypy.jit.metainterp.test.test_basic import LLJitMixin, OOJitMixin
@@ -72,6 +72,234 @@
         res = self.meta_interp(f, [6, 10])
         assert res == 6
 
+    def test_char2string_pure(self):
+        for dochr in [chr, ]: #unichr]:
+            jitdriver = JitDriver(greens = [], reds = ['n'])
+            @dont_look_inside
+            def escape(x):
+                pass
+            def f(n):
+                while n > 0:
+                    jitdriver.can_enter_jit(n=n)
+                    jitdriver.jit_merge_point(n=n)
+                    s = dochr(n)
+                    if not we_are_jitted():
+                        s += s     # forces to be a string
+                    if n > 100:
+                        escape(s)
+                    n -= 1
+                return 42
+            self.meta_interp(f, [6])
+            self.check_loops(newstr=0, strsetitem=0, strlen=0,
+                             newunicode=0, unicodesetitem=0, unicodelen=0)
+
+    def test_char2string_escape(self):
+        for dochr in [chr, ]: #unichr]:
+            jitdriver = JitDriver(greens = [], reds = ['n', 'total'])
+            @dont_look_inside
+            def escape(x):
+                return ord(x[0])
+            def f(n):
+                total = 0
+                while n > 0:
+                    jitdriver.can_enter_jit(n=n, total=total)
+                    jitdriver.jit_merge_point(n=n, total=total)
+                    s = dochr(n)
+                    if not we_are_jitted():
+                        s += s    # forces to be a string
+                    total += escape(s)
+                    n -= 1
+                return total
+            res = self.meta_interp(f, [6])
+            assert res == 21
+
+    def test_char2string2char(self):
+        for dochr in [chr, ]: #unichr]:
+            jitdriver = JitDriver(greens = [], reds = ['m', 'total'])
+            def f(m):
+                total = 0
+                while m > 0:
+                    jitdriver.can_enter_jit(m=m, total=total)
+                    jitdriver.jit_merge_point(m=m, total=total)
+                    string = dochr(m)
+                    if m > 100:
+                        string += string    # forces to be a string
+                    # read back the character
+                    c = string[0]
+                    total += ord(c)
+                    m -= 1
+                return total
+            res = self.meta_interp(f, [6])
+            assert res == 21
+            self.check_loops(newstr=0, strgetitem=0, strsetitem=0, strlen=0,
+                             newunicode=0, unicodegetitem=0, unicodesetitem=0,
+                             unicodelen=0)
+
+    def test_strconcat_pure(self):
+        for somestr in ["abc", ]: #u"def"]:
+            jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+            @dont_look_inside
+            def escape(x):
+                pass
+            mylist = [somestr+str(i) for i in range(10)]
+            def f(n, m):
+                while m >= 0:
+                    jitdriver.can_enter_jit(m=m, n=n)
+                    jitdriver.jit_merge_point(m=m, n=n)
+                    s = mylist[n] + mylist[m]
+                    if m > 100:
+                        escape(s)
+                    m -= 1
+                return 42
+            self.meta_interp(f, [6, 7])
+            self.check_loops(newstr=0, strsetitem=0,
+                             newunicode=0, unicodesetitem=0,
+                             call=0, call_pure=0)
+
+    def test_strconcat_escape_str_str(self):
+        jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+        @dont_look_inside
+        def escape(x):
+            pass
+        mylist = ["somestr"+str(i) for i in range(10)]
+        def f(n, m):
+            while m >= 0:
+                jitdriver.can_enter_jit(m=m, n=n)
+                jitdriver.jit_merge_point(m=m, n=n)
+                s = mylist[n] + mylist[m]
+                escape(s)
+                m -= 1
+            return 42
+        self.meta_interp(f, [6, 7])
+        self.check_loops(newstr=1, strsetitem=0, copystrcontent=2,
+                         call=1, call_pure=0)   # escape
+
+    def test_strconcat_escape_str_char(self):
+        jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+        @dont_look_inside
+        def escape(x):
+            pass
+        mylist = ["somestr"+str(i) for i in range(10)]
+        def f(n, m):
+            while m >= 0:
+                jitdriver.can_enter_jit(m=m, n=n)
+                jitdriver.jit_merge_point(m=m, n=n)
+                s = mylist[n] + chr(m)
+                escape(s)
+                m -= 1
+            return 42
+        self.meta_interp(f, [6, 7])
+        self.check_loops(newstr=1, strsetitem=1, copystrcontent=1,
+                         call=1, call_pure=0)   # escape
+
+    def test_strconcat_escape_char_str(self):
+        jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+        @dont_look_inside
+        def escape(x):
+            pass
+        mylist = ["somestr"+str(i) for i in range(10)]
+        def f(n, m):
+            while m >= 0:
+                jitdriver.can_enter_jit(m=m, n=n)
+                jitdriver.jit_merge_point(m=m, n=n)
+                s = chr(n) + mylist[m]
+                escape(s)
+                m -= 1
+            return 42
+        self.meta_interp(f, [6, 7])
+        self.check_loops(newstr=1, strsetitem=1, copystrcontent=1,
+                         call=1, call_pure=0)   # escape
+
+    def test_strconcat_escape_char_char(self):
+        jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+        @dont_look_inside
+        def escape(x):
+            pass
+        def f(n, m):
+            while m >= 0:
+                jitdriver.can_enter_jit(m=m, n=n)
+                jitdriver.jit_merge_point(m=m, n=n)
+                s = chr(n) + chr(m)
+                escape(s)
+                m -= 1
+            return 42
+        self.meta_interp(f, [6, 7])
+        self.check_loops(newstr=1, strsetitem=2, copystrcontent=0,
+                         call=1, call_pure=0)   # escape
+
+    def test_strconcat_escape_str_char_str(self):
+        jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+        @dont_look_inside
+        def escape(x):
+            pass
+        mylist = ["somestr"+str(i) for i in range(10)]
+        def f(n, m):
+            while m >= 0:
+                jitdriver.can_enter_jit(m=m, n=n)
+                jitdriver.jit_merge_point(m=m, n=n)
+                s = mylist[n] + chr(n) + mylist[m]
+                escape(s)
+                m -= 1
+            return 42
+        self.meta_interp(f, [6, 7])
+        self.check_loops(newstr=1, strsetitem=1, copystrcontent=2,
+                         call=1, call_pure=0)   # escape
+
+    def test_strconcat_guard_fail(self):
+        for somestr in ["abc", ]: #u"def"]:
+            jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+            @dont_look_inside
+            def escape(x):
+                pass
+            mylist = [somestr+str(i) for i in range(12)]
+            def f(n, m):
+                while m >= 0:
+                    jitdriver.can_enter_jit(m=m, n=n)
+                    jitdriver.jit_merge_point(m=m, n=n)
+                    s = mylist[n] + mylist[m]
+                    if m & 1:
+                        escape(s)
+                    m -= 1
+                return 42
+            self.meta_interp(f, [6, 10])
+
+    def test_strslice(self):
+        for somestr in ["abc", ]: #u"def"]:
+            jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+            @dont_look_inside
+            def escape(x):
+                pass
+            def f(n, m):
+                assert n >= 0
+                while m >= 0:
+                    jitdriver.can_enter_jit(m=m, n=n)
+                    jitdriver.jit_merge_point(m=m, n=n)
+                    s = "foobarbazetc"[m:n]
+                    if m <= 5:
+                        escape(s)
+                    m -= 1
+                return 42
+            self.meta_interp(f, [10, 10])
+
+    def test_streq_char(self):
+        for somestr in ["?abcdefg", ]: #u"def"]:
+            jitdriver = JitDriver(greens = [], reds = ['m', 'n'])
+            @dont_look_inside
+            def escape(x):
+                pass
+            def f(n, m):
+                assert n >= 0
+                while m >= 0:
+                    jitdriver.can_enter_jit(m=m, n=n)
+                    jitdriver.jit_merge_point(m=m, n=n)
+                    s = somestr[:m]
+                    escape(s == "?")
+                    m -= 1
+                return 42
+            self.meta_interp(f, [6, 7])
+            self.check_loops(newstr=0, newunicode=0)
+
+
 class TestOOtype(StringTests, OOJitMixin):
     CALL = "oosend"
     CALL_PURE = "oosend_pure"

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_virtualref.py	Thu Sep 30 00:16:20 2010
@@ -71,11 +71,11 @@
         #
         ops = self.metainterp.staticdata.stats.loops[0].operations
         [guard_op] = [op for op in ops
-                         if op.opnum == rop.GUARD_NOT_FORCED]
-        bxs1 = [box for box in guard_op.fail_args
+                         if op.getopnum() == rop.GUARD_NOT_FORCED]
+        bxs1 = [box for box in guard_op.getfailargs()
                   if str(box._getrepr_()).endswith('.X')]
         assert len(bxs1) == 1
-        bxs2 = [box for box in guard_op.fail_args
+        bxs2 = [box for box in guard_op.getfailargs()
                   if str(box._getrepr_()).endswith('JitVirtualRef')]
         assert len(bxs2) == 1
         JIT_VIRTUAL_REF = self.vrefinfo.JIT_VIRTUAL_REF
@@ -84,11 +84,11 @@
         # try reloading from blackhole.py's point of view
         from pypy.jit.metainterp.resume import ResumeDataDirectReader
         cpu = self.metainterp.cpu
-        cpu.get_latest_value_count = lambda : len(guard_op.fail_args)
-        cpu.get_latest_value_int = lambda i:guard_op.fail_args[i].getint()
-        cpu.get_latest_value_ref = lambda i:guard_op.fail_args[i].getref_base()
+        cpu.get_latest_value_count = lambda : len(guard_op.getfailargs())
+        cpu.get_latest_value_int = lambda i:guard_op.getfailargs()[i].getint()
+        cpu.get_latest_value_ref = lambda i:guard_op.getfailargs()[i].getref_base()
         cpu.clear_latest_values = lambda count: None
-        resumereader = ResumeDataDirectReader(cpu, guard_op.descr)
+        resumereader = ResumeDataDirectReader(cpu, guard_op.getdescr())
         vrefinfo = self.metainterp.staticdata.virtualref_info
         lst = []
         vrefinfo.continue_tracing = lambda vref, virtual: \
@@ -100,7 +100,7 @@
                                lst[0][0])  # assert correct type
         #
         # try reloading from pyjitpl's point of view
-        self.metainterp.rebuild_state_after_failure(guard_op.descr)
+        self.metainterp.rebuild_state_after_failure(guard_op.getdescr())
         assert len(self.metainterp.framestack) == 1
         assert len(self.metainterp.virtualref_boxes) == 2
         assert self.metainterp.virtualref_boxes[0].value == bxs1[0].value

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/test/test_ztranslation.py	Thu Sep 30 00:16:20 2010
@@ -21,6 +21,7 @@
         # - full optimizer
         # - jitdriver hooks
         # - two JITs
+        # - string concatenation, slicing and comparison
 
         class Frame(object):
             _virtualizable2_ = ['i']
@@ -60,11 +61,15 @@
                 frame.i -= 1
             return total * 10
         #
-        myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x'])
+        myjitdriver2 = JitDriver(greens = ['g'], reds = ['m', 'x', 's'])
         def f2(g, m, x):
+            s = ""
             while m > 0:
-                myjitdriver2.can_enter_jit(g=g, m=m, x=x)
-                myjitdriver2.jit_merge_point(g=g, m=m, x=x)
+                myjitdriver2.can_enter_jit(g=g, m=m, x=x, s=s)
+                myjitdriver2.jit_merge_point(g=g, m=m, x=x, s=s)
+                s += 'xy'
+                if s[:2] == 'yz':
+                    return -666
                 m -= 1
                 x += 3
             return x

Modified: pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/metainterp/warmstate.py	Thu Sep 30 00:16:20 2010
@@ -7,7 +7,8 @@
 from pypy.rlib.rarithmetic import intmask
 from pypy.rlib.nonconst import NonConstant
 from pypy.rlib.unroll import unrolling_iterable
-from pypy.rlib.jit import PARAMETERS, OPTIMIZER_SIMPLE, OPTIMIZER_FULL
+from pypy.rlib.jit import (PARAMETERS, OPTIMIZER_SIMPLE, OPTIMIZER_FULL,
+                           OPTIMIZER_NO_PERFECTSPEC)
 from pypy.rlib.jit import DEBUG_PROFILE
 from pypy.rlib.jit import BaseJitCell
 from pypy.rlib.debug import debug_start, debug_stop, debug_print
@@ -83,6 +84,9 @@
             return history.ConstFloat(value)
         else:
             return history.BoxFloat(value)
+    elif isinstance(value, str) or isinstance(value, unicode):
+        assert len(value) == 1     # must be a character
+        value = ord(value)
     else:
         value = intmask(value)
     if in_const_box:
@@ -187,6 +191,10 @@
             from pypy.jit.metainterp import simple_optimize
             self.optimize_loop = simple_optimize.optimize_loop
             self.optimize_bridge = simple_optimize.optimize_bridge
+        elif optimizer == OPTIMIZER_NO_PERFECTSPEC:
+            from pypy.jit.metainterp import optimize_nopspec
+            self.optimize_loop = optimize_nopspec.optimize_loop
+            self.optimize_bridge = optimize_nopspec.optimize_bridge
         elif optimizer == OPTIMIZER_FULL:
             from pypy.jit.metainterp import optimize
             self.optimize_loop = optimize.optimize_loop

Modified: pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/tool/loopviewer.py	Thu Sep 30 00:16:20 2010
@@ -1,30 +1,93 @@
 #!/usr/bin/env python
-""" Usage: loopviewer.py [loopnum] loopfile
+"""
+Parse and display the traces produced by pypy-c-jit when PYPYLOG is set.
 """
 
 import autopath
 import py
 import sys
+import optparse
+from pprint import pprint
 from pypy.tool import logparser
 from pypy.jit.metainterp.test.oparser import parse
 from pypy.jit.metainterp.history import ConstInt
 from pypy.rpython.lltypesystem import llmemory, lltype
 
-def main(loopnum, loopfile):
+def main(loopfile, options):
+    print 'Loading file:'
     log = logparser.parse_log_file(loopfile)
-    loops = logparser.extract_category(log, "jit-log-opt-")
-    inp = loops[loopnum]
-    loop = parse(inp, no_namespace=True)
-    loop.show()
+    loops, summary = consider_category(log, options, "jit-log-opt-")
+    if not options.quiet:
+        for loop in loops:
+            loop.show()
+            
+    if options.summary:
+        print
+        print 'Summary:'
+        print_summary(summary)
 
-if __name__ == '__main__':
-    if len(sys.argv) == 2:
-        loopnum = -1
-        loopfile = sys.argv[1]
-    elif len(sys.argv) == 3:
-        loopnum = int(sys.argv[1])
-        loopfile = sys.argv[2]
+    if options.diff:
+        # non-optimized loops and summary
+        nloops, nsummary = consider_category(log, options, "jit-log-noopt-")
+        print
+        print 'Summary of optimized-away operations'
+        print
+        diff = {}
+        keys = set(summary.keys()).union(set(nsummary))
+        for key in keys:
+            before = nsummary.get(key, 0)
+            after = summary.get(key, 0)
+            diff[key] = (before-after, before, after)
+        print_diff(diff)
+
+def consider_category(log, options, category):
+    loops = logparser.extract_category(log, category)
+    if options.loopnum is None:
+        input_loops = loops
     else:
-        print __doc__
-        sys.exit(1)
-    main(loopnum, loopfile)
+        input_loops = [loops[options.loopnum]]
+    loops = [parse(inp, no_namespace=True, nonstrict=True)
+             for inp in input_loops]
+    summary = {}
+    for loop in loops:
+        summary = loop.summary(summary)
+    return loops, summary
+        
+
+def print_summary(summary):
+    ops = [(summary[key], key) for key in summary]
+    ops.sort(reverse=True)
+    for n, key in ops:
+        print '%5d' % n, key
+
+def print_diff(diff):
+    ops = [(d, before, after, key) for key, (d, before, after) in diff.iteritems()]
+    ops.sort(reverse=True)
+    tot_before = 0
+    tot_after = 0
+    for d, before, after, key in ops:
+        tot_before += before
+        tot_after += after
+        print '%5d - %5d = %5d     ' % (before, after, d), key
+    print '-' * 50
+    print '%5d - %5d = %5d     ' % (tot_before, tot_after, tot_before-tot_after), 'TOTAL'
+
+if __name__ == '__main__':
+    parser = optparse.OptionParser(usage="%prog loopfile [options]")
+    parser.add_option('-n', '--loopnum', dest='loopnum', default=-1, metavar='N', type=int,
+                      help='show the loop number N [default: last]')
+    parser.add_option('-a', '--all', dest='loopnum', action='store_const', const=None,
+                      help='show all loops in the file')
+    parser.add_option('-s', '--summary', dest='summary', action='store_true', default=False,
+                      help='print a summary of the operations in the loop(s)')
+    parser.add_option('-d', '--diff', dest='diff', action='store_true', default=False,
+                      help='print the difference between non-optimized and optimized operations in the loop(s)')
+    parser.add_option('-q', '--quiet', dest='quiet', action='store_true', default=False,
+                      help='do not show the graphical representation of the loop')
+    
+    options, args = parser.parse_args()
+    if len(args) != 1:
+        parser.print_help()
+        sys.exit(2)
+
+    main(args[0], options)

Modified: pypy/branch/fast-forward/pypy/jit/tool/showstats.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/tool/showstats.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/tool/showstats.py	Thu Sep 30 00:16:20 2010
@@ -17,7 +17,7 @@
         num_dmp = 0
         num_guards = 0
         for op in loop.operations:
-            if op.opnum == rop.DEBUG_MERGE_POINT:
+            if op.getopnum() == rop.DEBUG_MERGE_POINT:
                 num_dmp += 1
             else:
                 num_ops += 1

Modified: pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py
==============================================================================
--- pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py	(original)
+++ pypy/branch/fast-forward/pypy/jit/tool/traceviewer.py	Thu Sep 30 00:16:20 2010
@@ -253,9 +253,10 @@
 def main(loopfile, use_threshold, view=True):
     countname = py.path.local(loopfile + '.count')
     if countname.check():
-        counts = [re.split(r' +', line, 1) for line in countname.readlines()]
-        counts = Counts([(k.strip("\n"), int(v.strip('\n')))
-                         for v, k in counts])
+        counts = [re.split('(<code)|(<loop)', line, maxsplit=1)
+                  for line in countname.readlines()]
+        counts = Counts([('<code' + k.strip("\n"), int(v.strip('\n').strip()))
+                         for v, _, _, k in counts])
         l = list(sorted(counts.values()))
         if len(l) > 20 and use_threshold:
             counts.threshold = l[-20]

Modified: pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c
==============================================================================
--- pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c	(original)
+++ pypy/branch/fast-forward/pypy/module/array/benchmark/sumtst.c	Thu Sep 30 00:16:20 2010
@@ -1,3 +1,4 @@
+#include <stdlib.h>
 
 double sum(double *img);
 
@@ -5,4 +6,4 @@
   double *img=malloc(640*480*4*sizeof(double));
   int sa=0;
   for (int l=0; l<500; l++) sum(img);
-}    
+}

Modified: pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py
==============================================================================
--- pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py	(original)
+++ pypy/branch/fast-forward/pypy/module/pypyjit/test/test_pypy_c.py	Thu Sep 30 00:16:20 2010
@@ -140,7 +140,7 @@
             for op in loop.operations:
                 if op.getopname() == "debug_merge_point":
                     sliced_loop = BytecodeTrace()
-                    sliced_loop.bytecode = op.args[0]._get_str().rsplit(" ", 1)[1]
+                    sliced_loop.bytecode = op.getarg(0)._get_str().rsplit(" ", 1)[1]
                     self.sliced_loops.append(sliced_loop)
                 else:
                     sliced_loop.append(op)
@@ -798,7 +798,6 @@
                     if i > 750: a = b
                 return sa
             ''', 215, ([], 12481752))
-        assert False
 
     def test_array_sum(self):
         for tc, maxops in zip('bhilBHILfd', (38,) * 6 + (40, 40, 41, 38)):
@@ -869,6 +868,24 @@
                 return intimg[i - 1]
             ''', maxops, ([tc], res))
 
+    def test_unpackiterable(self):
+        self.run_source('''
+        from array import array
+
+        def main():
+            i = 0
+            t = array('l', (1, 2))
+            while i < 2000:
+                a, b = t
+                i += 1
+            return 3
+
+        ''', 100, ([], 3))
+        bytecode, = self.get_by_bytecode("UNPACK_SEQUENCE")
+        # we allocate virtual ref and frame, we don't want block
+        assert len(bytecode.get_opnames('call_may_force')) == 0
+        
+
     def test_intbound_simple(self):
         ops = ('<', '>', '<=', '>=', '==', '!=')
         nbr = (3, 7)

Modified: pypy/branch/fast-forward/pypy/objspace/std/objspace.py
==============================================================================
--- pypy/branch/fast-forward/pypy/objspace/std/objspace.py	(original)
+++ pypy/branch/fast-forward/pypy/objspace/std/objspace.py	Thu Sep 30 00:16:20 2010
@@ -7,7 +7,7 @@
 from pypy.objspace.std import (builtinshortcut, stdtypedef, frame, model,
                                transparent, callmethod, proxyobject)
 from pypy.objspace.descroperation import DescrOperation, raiseattrerror
-from pypy.rlib.objectmodel import instantiate, r_dict
+from pypy.rlib.objectmodel import instantiate, r_dict, specialize
 from pypy.rlib.debug import make_sure_not_resized
 from pypy.rlib.rarithmetic import base_int
 from pypy.rlib.objectmodel import we_are_translated
@@ -350,7 +350,8 @@
             raise self._wrap_expected_length(expected_length, len(t))
         return t
 
-    def fixedview(self, w_obj, expected_length=-1):
+    @specialize.arg(3)
+    def fixedview(self, w_obj, expected_length=-1, unroll=False):
         """ Fast paths
         """
         if isinstance(w_obj, W_TupleObject):
@@ -358,18 +359,26 @@
         elif isinstance(w_obj, W_ListObject):
             t = w_obj.wrappeditems[:]
         else:
-            return ObjSpace.fixedview(self, w_obj, expected_length)
+            if unroll:
+                return make_sure_not_resized(ObjSpace.unpackiterable_unroll(
+                    self, w_obj, expected_length)[:])
+            else:
+                return make_sure_not_resized(ObjSpace.unpackiterable(
+                    self, w_obj, expected_length)[:])
         if expected_length != -1 and len(t) != expected_length:
             raise self._wrap_expected_length(expected_length, len(t))
         return t
 
+    def fixedview_unroll(self, w_obj, expected_length=-1):
+        return self.fixedview(w_obj, expected_length, unroll=True)
+
     def listview(self, w_obj, expected_length=-1):
         if isinstance(w_obj, W_ListObject):
             t = w_obj.wrappeditems
         elif isinstance(w_obj, W_TupleObject):
             t = w_obj.wrappeditems[:]
         else:
-            return ObjSpace.listview(self, w_obj, expected_length)
+            return ObjSpace.unpackiterable(self, w_obj, expected_length)
         if expected_length != -1 and len(t) != expected_length:
             raise self._wrap_expected_length(expected_length, len(t))
         return t

Modified: pypy/branch/fast-forward/pypy/objspace/std/stringtype.py
==============================================================================
--- pypy/branch/fast-forward/pypy/objspace/std/stringtype.py	(original)
+++ pypy/branch/fast-forward/pypy/objspace/std/stringtype.py	Thu Sep 30 00:16:20 2010
@@ -4,6 +4,7 @@
 
 from sys import maxint
 from pypy.rlib.objectmodel import specialize
+from pypy.rlib.jit import we_are_jitted
 
 def wrapstr(space, s):
     from pypy.objspace.std.stringobject import W_StringObject
@@ -32,7 +33,7 @@
 def wrapchar(space, c):
     from pypy.objspace.std.stringobject import W_StringObject
     from pypy.objspace.std.ropeobject import rope, W_RopeObject
-    if space.config.objspace.std.withprebuiltchar:
+    if space.config.objspace.std.withprebuiltchar and not we_are_jitted():
         if space.config.objspace.std.withrope:
             return W_RopeObject.PREBUILT[ord(c)]
         return W_StringObject.PREBUILT[ord(c)]

Modified: pypy/branch/fast-forward/pypy/rlib/jit.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rlib/jit.py	(original)
+++ pypy/branch/fast-forward/pypy/rlib/jit.py	Thu Sep 30 00:16:20 2010
@@ -224,7 +224,8 @@
     """Inconsistency in the JIT hints."""
 
 OPTIMIZER_SIMPLE = 0
-OPTIMIZER_FULL = 1
+OPTIMIZER_NO_PERFECTSPEC = 1
+OPTIMIZER_FULL = 2
 
 DEBUG_OFF = 0
 DEBUG_PROFILE = 1

Modified: pypy/branch/fast-forward/pypy/rlib/rmmap.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rlib/rmmap.py	(original)
+++ pypy/branch/fast-forward/pypy/rlib/rmmap.py	Thu Sep 30 00:16:20 2010
@@ -292,7 +292,8 @@
                 c_munmap(self.getptr(0), self.size)
                 self.setdata(NODATA, 0)
 
-    __del__ = close
+    def __del__(self):
+        self.close()
 
     def unmapview(self):
         UnmapViewOfFile(self.getptr(0))

Modified: pypy/branch/fast-forward/pypy/rlib/rstring.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rlib/rstring.py	(original)
+++ pypy/branch/fast-forward/pypy/rlib/rstring.py	Thu Sep 30 00:16:20 2010
@@ -1,9 +1,10 @@
 """ String builder interface and string functions
 """
 
-from pypy.rpython.extregistry import ExtRegistryEntry
 from pypy.annotation.model import SomeObject, SomeString, s_None,\
      SomeChar, SomeInteger, SomeUnicodeCodePoint, SomeUnicodeString
+from pypy.rlib.rarithmetic import ovfcheck
+from pypy.rpython.extregistry import ExtRegistryEntry
 
 
 # -------------- public API for string functions -----------------------
@@ -46,9 +47,7 @@
 
 # -------------- public API ---------------------------------
 
-# the following number is the maximum size of an RPython unicode
-# string that goes into the nursery of the minimark GC.
-INIT_SIZE = 56
+INIT_SIZE = 100 # XXX tweak
 
 class AbstractStringBuilder(object):
     def __init__(self, init_size=INIT_SIZE):
@@ -84,6 +83,11 @@
     result = None
     factor = 1
     assert mul > 0
+    try:
+        ovfcheck(len(s) * mul)
+    except OverflowError:
+        raise MemoryError
+    
     limit = mul >> 1
     while True:
         if mul & factor:

Modified: pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py	(original)
+++ pypy/branch/fast-forward/pypy/rlib/test/test_rstring.py	Thu Sep 30 00:16:20 2010
@@ -1,5 +1,8 @@
+import sys
+
+from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit, \
+    string_repeat
 
-from pypy.rlib.rstring import StringBuilder, UnicodeBuilder, split, rsplit
 
 def test_split():
     assert split("", 'x') == ['']
@@ -39,3 +42,6 @@
     s.append_multiple_char('d', 4)
     assert s.build() == 'aabcbdddd'
     assert isinstance(s.build(), unicode)
+
+def test_string_repeat():
+    raises(MemoryError, string_repeat, "abc", sys.maxint)

Modified: pypy/branch/fast-forward/pypy/rpython/annlowlevel.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/annlowlevel.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/annlowlevel.py	Thu Sep 30 00:16:20 2010
@@ -397,6 +397,8 @@
     assert strtype in (str, unicode)
 
     def hlstr(ll_s):
+        if not ll_s:
+            return None
         if hasattr(ll_s, 'chars'):
             if strtype is str:
                 return ''.join(ll_s.chars)
@@ -423,9 +425,14 @@
 
     def llstr(s):
         from pypy.rpython.lltypesystem.rstr import mallocstr, mallocunicode
+        from pypy.rpython.lltypesystem.rstr import STR, UNICODE
         if strtype is str:
+            if s is None:
+                return lltype.nullptr(STR)
             ll_s = mallocstr(len(s))
         else:
+            if s is None:
+                return lltype.nullptr(UNICODE)
             ll_s = mallocunicode(len(s))
         for i, c in enumerate(s):
             ll_s.chars[i] = c

Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/ll2ctypes.py	Thu Sep 30 00:16:20 2010
@@ -29,6 +29,60 @@
 from pypy.translator.platform import platform
 from array import array
 
+# ____________________________________________________________
+
+far_regions = None
+
+def allocate_ctypes(ctype):
+    if far_regions:
+        import random
+        pieces = far_regions._ll2ctypes_pieces
+        num = random.randrange(len(pieces))
+        i1, stop = pieces[num]
+        i2 = i1 + ((ctypes.sizeof(ctype) or 1) + 7) & ~7
+        if i2 > stop:
+            raise MemoryError("out of memory in far_regions")
+        pieces[num] = i2, stop
+        p = lltype2ctypes(far_regions.getptr(i1))
+        return ctypes.cast(p, ctypes.POINTER(ctype)).contents
+    else:
+        return ctype()
+
+def do_allocation_in_far_regions():
+    """On 32 bits: this reserves 1.25GB of address space, or 2.5GB on Linux,
+       which helps test this module for address values that are signed or
+       unsigned.
+
+       On 64-bits: reserves 10 times 2GB of address space.  This should help
+       to find 32-vs-64-bit issues in the JIT.  It is likely that objects
+       are further apart than 32 bits can represent; it is also possible
+       to hit the corner case of being precisely e.g. 2GB - 8 bytes apart.
+
+       Avoid this function if your OS reserves actual RAM from mmap() eagerly.
+    """
+    global far_regions
+    if not far_regions:
+        from pypy.rlib import rmmap
+        if sys.maxint > 0x7FFFFFFF:
+            PIECESIZE = 0x80000000
+        else:
+            if sys.platform == 'linux':
+                PIECESIZE = 0x10000000
+            else:
+                PIECESIZE = 0x08000000
+        PIECES = 10
+        m = rmmap.mmap(-1, PIECES * PIECESIZE,
+                       rmmap.MAP_PRIVATE|rmmap.MAP_ANONYMOUS,
+                       rmmap.PROT_READ|rmmap.PROT_WRITE)
+        m.close = lambda : None    # leak instead of giving a spurious
+                                   # error at CPython's shutdown
+        m._ll2ctypes_pieces = []
+        for i in range(PIECES):
+            m._ll2ctypes_pieces.append((i * PIECESIZE, (i+1) * PIECESIZE))
+        far_regions = m
+
+# ____________________________________________________________
+
 _ctypes_cache = {}
 _eci_cache = {}
 
@@ -91,13 +145,13 @@
             if S._arrayfld is None:
                 if n is not None:
                     raise TypeError("%r is not variable-sized" % (S,))
-                storage = cls()
+                storage = allocate_ctypes(cls)
                 return storage
             else:
                 if n is None:
                     raise TypeError("%r is variable-sized" % (S,))
                 biggercls = build_ctypes_struct(S, None, n)
-                bigstruct = biggercls()
+                bigstruct = allocate_ctypes(biggercls)
                 array = getattr(bigstruct, S._arrayfld)
                 if hasattr(array, 'length'):
                     array.length = n
@@ -139,7 +193,7 @@
             if not isinstance(n, int):
                 raise TypeError, "array length must be an int"
             biggercls = get_ctypes_array_of_size(A, n)
-            bigarray = biggercls()
+            bigarray = allocate_ctypes(biggercls)
             if hasattr(bigarray, 'length'):
                 bigarray.length = n
             return bigarray
@@ -379,7 +433,7 @@
         "Returns the storage address as an int"
         if self._storage is None or self._storage is True:
             raise ValueError("Not a ctypes allocated structure")
-        return ctypes.cast(self._storage, ctypes.c_void_p).value
+        return intmask(ctypes.cast(self._storage, ctypes.c_void_p).value)
 
     def _free(self):
         self._check()   # no double-frees

Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/llarena.py	Thu Sep 30 00:16:20 2010
@@ -472,22 +472,25 @@
     clear_large_memory_chunk = llmemory.raw_memclear
 
 
+llimpl_malloc = rffi.llexternal('malloc', [lltype.Signed], llmemory.Address,
+                                sandboxsafe=True, _nowrapper=True)
+llimpl_free = rffi.llexternal('free', [llmemory.Address], lltype.Void,
+                              sandboxsafe=True, _nowrapper=True)
+
 def llimpl_arena_malloc(nbytes, zero):
-    addr = llmemory.raw_malloc(nbytes)
-    if zero and bool(addr):
-        clear_large_memory_chunk(addr, nbytes)
+    addr = llimpl_malloc(nbytes)
+    if bool(addr):
+        llimpl_arena_reset(addr, nbytes, zero)
     return addr
-register_external(arena_malloc, [int, bool], llmemory.Address,
+llimpl_arena_malloc._always_inline_ = True
+register_external(arena_malloc, [int, int], llmemory.Address,
                   'll_arena.arena_malloc',
                   llimpl=llimpl_arena_malloc,
                   llfakeimpl=arena_malloc,
                   sandboxsafe=True)
 
-def llimpl_arena_free(arena_addr):
-    # NB. minimark.py assumes that arena_free() is actually just a raw_free().
-    llmemory.raw_free(arena_addr)
 register_external(arena_free, [llmemory.Address], None, 'll_arena.arena_free',
-                  llimpl=llimpl_arena_free,
+                  llimpl=llimpl_free,
                   llfakeimpl=arena_free,
                   sandboxsafe=True)
 
@@ -497,6 +500,7 @@
             clear_large_memory_chunk(arena_addr, size)
         else:
             llmemory.raw_memclear(arena_addr, size)
+llimpl_arena_reset._always_inline_ = True
 register_external(arena_reset, [llmemory.Address, int, int], None,
                   'll_arena.arena_reset',
                   llimpl=llimpl_arena_reset,

Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rlist.py	Thu Sep 30 00:16:20 2010
@@ -159,7 +159,6 @@
         if 'item_repr' not in self.__dict__:
             self.external_item_repr, self.item_repr = externalvsinternal(self.rtyper, self._item_repr_computer())
         if isinstance(self.LIST, GcForwardReference):
-            ITEM = self.item_repr.lowleveltype
             ITEMARRAY = self.get_itemarray_lowleveltype()
             self.LIST.become(ITEMARRAY)
 

Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/rstr.py	Thu Sep 30 00:16:20 2010
@@ -4,7 +4,7 @@
 from pypy.rlib.objectmodel import malloc_zero_filled, we_are_translated
 from pypy.rlib.objectmodel import _hash_string, enforceargs
 from pypy.rlib.debug import ll_assert
-from pypy.rlib.jit import purefunction
+from pypy.rlib.jit import purefunction, we_are_jitted
 from pypy.rpython.robject import PyObjRepr, pyobj_repr
 from pypy.rpython.rmodel import inputconst, IntegerRepr
 from pypy.rpython.rstr import AbstractStringRepr,AbstractCharRepr,\
@@ -65,8 +65,8 @@
         dst = llmemory.cast_ptr_to_adr(dst) + _str_ofs(dststart)
         llmemory.raw_memcopy(src, dst, llmemory.sizeof(CHAR_TP) * length)
     copy_string_contents._always_inline_ = True
-    copy_string_contents.oopspec = (
-        '%s.copy_contents(src, dst, srcstart, dststart, length)' % name)
+    #copy_string_contents.oopspec = (
+    #    '%s.copy_contents(src, dst, srcstart, dststart, length)' % name)
     return func_with_new_name(copy_string_contents, 'copy_%s_contents' % name)
 
 copy_string_contents = _new_copy_contents_fun(STR, Char, 'string')
@@ -326,6 +326,7 @@
         s1.copy_contents(s1, newstr, 0, 0, len1)
         s1.copy_contents(s2, newstr, 0, len1, len2)
         return newstr
+    ll_strconcat.oopspec = 'stroruni.concat(s1, s2)'
 
     @purefunction
     def ll_strip(s, ch, left, right):
@@ -443,8 +444,8 @@
             if chars1[j] != chars2[j]:
                 return False
             j += 1
-
         return True
+    ll_streq.oopspec = 'stroruni.equal(s1, s2)'
 
     @purefunction
     def ll_startswith(s1, s2):
@@ -696,35 +697,33 @@
         return result
 
     @purefunction
-    def ll_stringslice_startonly(s1, start):
-        len1 = len(s1.chars)
-        newstr = s1.malloc(len1 - start)
-        lgt = len1 - start
-        assert lgt >= 0
+    def _ll_stringslice(s1, start, stop):
+        lgt = stop - start
         assert start >= 0
+        assert lgt >= 0
+        newstr = s1.malloc(lgt)
         s1.copy_contents(s1, newstr, start, 0, lgt)
         return newstr
+    _ll_stringslice.oopspec = 'stroruni.slice(s1, start, stop)'
+    _ll_stringslice._annenforceargs_ = [None, int, int]
+
+    def ll_stringslice_startonly(s1, start):
+        return LLHelpers._ll_stringslice(s1, start, len(s1.chars))
 
-    @purefunction
     def ll_stringslice_startstop(s1, start, stop):
-        if stop >= len(s1.chars):
-            if start == 0:
-                return s1
-            stop = len(s1.chars)
-        newstr = s1.malloc(stop - start)
-        assert start >= 0
-        lgt = stop - start
-        assert lgt >= 0
-        s1.copy_contents(s1, newstr, start, 0, lgt)
-        return newstr
+        if we_are_jitted():
+            if stop > len(s1.chars):
+                stop = len(s1.chars)
+        else:
+            if stop >= len(s1.chars):
+                if start == 0:
+                    return s1
+                stop = len(s1.chars)
+        return LLHelpers._ll_stringslice(s1, start, stop)
 
-    @purefunction
     def ll_stringslice_minusone(s1):
         newlen = len(s1.chars) - 1
-        newstr = s1.malloc(newlen)
-        assert newlen >= 0
-        s1.copy_contents(s1, newstr, 0, 0, newlen)
-        return newstr
+        return LLHelpers._ll_stringslice(s1, 0, newlen)
 
     def ll_split_chr(LIST, s, c):
         chars = s.chars

Modified: pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/lltypesystem/test/test_ll2ctypes.py	Thu Sep 30 00:16:20 2010
@@ -16,6 +16,12 @@
 from pypy.annotation.annrpython import RPythonAnnotator
 from pypy.rpython.rtyper import RPythonTyper
 
+
+if False:    # for now, please keep it False by default
+    from pypy.rpython.lltypesystem import ll2ctypes
+    ll2ctypes.do_allocation_in_far_regions()
+
+
 class TestLL2Ctypes(object):
 
     def setup_method(self, meth):

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/base.py	Thu Sep 30 00:16:20 2010
@@ -39,6 +39,9 @@
     def can_malloc_nonmovable(self):
         return not self.moving_gc
 
+    def can_optimize_clean_setarrayitems(self):
+        return True     # False in case of card marking
+
     # The following flag enables costly consistency checks after each
     # collection.  It is automatically set to True by test_gc.py.  The
     # checking logic is translatable, so the flag can be set to True
@@ -76,7 +79,7 @@
     def set_root_walker(self, root_walker):
         self.root_walker = root_walker
 
-    def write_barrier(self, newvalue, addr_struct):
+    def write_barrier(self, addr_struct):
         pass
 
     def statistics(self, index):

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/generation.py	Thu Sep 30 00:16:20 2010
@@ -147,6 +147,11 @@
     def get_young_var_basesize(nursery_size):
         return nursery_size // 4 - 1
 
+    @classmethod
+    def JIT_max_size_of_young_obj(cls):
+        min_nurs_size = cls.TRANSLATION_PARAMS['min_nursery_size']
+        return cls.get_young_fixedsize(min_nurs_size)
+
     def is_in_nursery(self, addr):
         ll_assert(llmemory.cast_adr_to_int(addr) & 1 == 0,
                   "odd-valued (i.e. tagged) pointer unexpected here")
@@ -321,7 +326,7 @@
         addr = pointer.address[0]
         newaddr = self.copy(addr)
         pointer.address[0] = newaddr
-        self.write_into_last_generation_obj(obj, newaddr)
+        self.write_into_last_generation_obj(obj)
 
     # ____________________________________________________________
     # Implementation of nursery-only collections
@@ -452,11 +457,12 @@
     #  "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()")
     JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS
 
-    def write_barrier(self, newvalue, addr_struct):
+    def write_barrier(self, addr_struct):
         if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
-            self.remember_young_pointer(addr_struct, newvalue)
+            self.remember_young_pointer(addr_struct)
 
     def _setup_wb(self):
+        DEBUG = self.DEBUG
         # The purpose of attaching remember_young_pointer to the instance
         # instead of keeping it as a regular method is to help the JIT call it.
         # Additionally, it makes the code in write_barrier() marginally smaller
@@ -464,33 +470,24 @@
         # For x86, there is also an extra requirement: when the JIT calls
         # remember_young_pointer(), it assumes that it will not touch the SSE
         # registers, so it does not save and restore them (that's a *hack*!).
-        def remember_young_pointer(addr_struct, addr):
+        def remember_young_pointer(addr_struct):
             #llop.debug_print(lltype.Void, "\tremember_young_pointer",
             #                 addr_struct, "<-", addr)
-            ll_assert(not self.is_in_nursery(addr_struct),
-                         "nursery object with GCFLAG_NO_YOUNG_PTRS")
-            # if we have tagged pointers around, we first need to check whether
-            # we have valid pointer here, otherwise we can do it after the
-            # is_in_nursery check
-            if (self.config.taggedpointers and
-                not self.is_valid_gc_object(addr)):
-                return
-            if self.is_in_nursery(addr):
-                self.old_objects_pointing_to_young.append(addr_struct)
-                self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS
-            elif (not self.config.taggedpointers and
-                  not self.is_valid_gc_object(addr)):
-                return
-            self.write_into_last_generation_obj(addr_struct, addr)
+            if DEBUG:
+                ll_assert(not self.is_in_nursery(addr_struct),
+                          "nursery object with GCFLAG_NO_YOUNG_PTRS")
+            self.old_objects_pointing_to_young.append(addr_struct)
+            self.header(addr_struct).tid &= ~GCFLAG_NO_YOUNG_PTRS
+            self.write_into_last_generation_obj(addr_struct)
         remember_young_pointer._dont_inline_ = True
         self.remember_young_pointer = remember_young_pointer
 
-    def write_into_last_generation_obj(self, addr_struct, addr):
+    def write_into_last_generation_obj(self, addr_struct):
         objhdr = self.header(addr_struct)
         if objhdr.tid & GCFLAG_NO_HEAP_PTRS:
-            if not self.is_last_generation(addr):
-                objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
-                self.last_generation_root_objects.append(addr_struct)
+            objhdr.tid &= ~GCFLAG_NO_HEAP_PTRS
+            self.last_generation_root_objects.append(addr_struct)
+    write_into_last_generation_obj._always_inline_ = True
 
     def assume_young_pointers(self, addr_struct):
         objhdr = self.header(addr_struct)

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/minimark.py	Thu Sep 30 00:16:20 2010
@@ -1,6 +1,7 @@
 import sys
 from pypy.rpython.lltypesystem import lltype, llmemory, llarena, llgroup
 from pypy.rpython.lltypesystem.lloperation import llop
+from pypy.rpython.lltypesystem.llmemory import raw_malloc_usage
 from pypy.rpython.memory.gc.base import GCBase, MovingGCBase
 from pypy.rpython.memory.gc import minimarkpage, base, generation
 from pypy.rpython.memory.support import DEFAULT_CHUNK_SIZE
@@ -92,26 +93,28 @@
         # PYPY_GC_NURSERY and fall back to half the size of
         # the L2 cache.  For 'major_collection_threshold' it will look
         # it up in the env var PYPY_GC_MAJOR_COLLECT.  It also sets
-        # 'max_heap_size' to PYPY_GC_MAX.
+        # 'max_heap_size' to PYPY_GC_MAX.  Finally, PYPY_GC_MIN sets
+        # the minimal value of 'next_major_collection_threshold'.
         "read_from_env": True,
 
         # The size of the nursery.  Note that this is only used as a
         # fall-back number.
         "nursery_size": 896*1024,
 
-        # The system page size.  Like obmalloc.c, we assume that it is 4K,
-        # which is OK for most systems.
-        "page_size": 4096,
+        # The system page size.  Like obmalloc.c, we assume that it is 4K
+        # for 32-bit systems; unlike obmalloc.c, we assume that it is 8K
+        # for 64-bit systems, for consistent results.
+        "page_size": 1024*WORD,
 
         # The size of an arena.  Arenas are groups of pages allocated
         # together.
         "arena_size": 65536*WORD,
 
         # The maximum size of an object allocated compactly.  All objects
-        # that are larger are just allocated with raw_malloc().  The value
-        # chosen here is enough for a unicode string of length 56 (on 64-bits)
-        # or 60 (on 32-bits).  See rlib.rstring.INIT_SIZE.
-        "small_request_threshold": 256-WORD,
+        # that are larger are just allocated with raw_malloc().  Note that
+        # the size limit for being first allocated in the nursery is much
+        # larger; see below.
+        "small_request_threshold": 35*WORD,
 
         # Full collection threshold: after a major collection, we record
         # the total size consumed; and after every minor collection, if the
@@ -125,7 +128,16 @@
         # in regular arrays of pointers; more in arrays whose items are
         # larger.  A value of 0 disables card marking.
         "card_page_indices": 128,
-        "card_page_indices_min": 800,    # minimum number of indices for cards
+
+        # Objects whose total size is at least 'large_object' bytes are
+        # allocated out of the nursery immediately.  If the object
+        # has GC pointers in its varsized part, we use instead the
+        # higher limit 'large_object_gcptrs'.  The idea is that
+        # separately allocated objects are allocated immediately "old"
+        # and it's not good to have too many pointers from old to young
+        # objects.
+        "large_object": 1600*WORD,
+        "large_object_gcptrs": 8250*WORD,
         }
 
     def __init__(self, config, chunk_size=DEFAULT_CHUNK_SIZE,
@@ -136,7 +148,8 @@
                  small_request_threshold=5*WORD,
                  major_collection_threshold=2.5,
                  card_page_indices=0,
-                 card_page_indices_min=None,
+                 large_object=8*WORD,
+                 large_object_gcptrs=10*WORD,
                  ArenaCollectionClass=None):
         MovingGCBase.__init__(self, config, chunk_size)
         assert small_request_threshold % WORD == 0
@@ -145,16 +158,23 @@
         self.small_request_threshold = small_request_threshold
         self.major_collection_threshold = major_collection_threshold
         self.num_major_collects = 0
+        self.min_heap_size = 0.0
         self.max_heap_size = 0.0
         self.max_heap_size_already_raised = False
         #
         self.card_page_indices = card_page_indices
         if self.card_page_indices > 0:
-            self.card_page_indices_min = card_page_indices_min
             self.card_page_shift = 0
             while (1 << self.card_page_shift) < self.card_page_indices:
                 self.card_page_shift += 1
         #
+        # 'large_object' and 'large_object_gcptrs' limit how big objects
+        # can be in the nursery, so they give a lower bound on the allowed
+        # size of the nursery.
+        self.nonlarge_max = large_object - 1
+        self.nonlarge_gcptrs_max = large_object_gcptrs - 1
+        assert self.nonlarge_max <= self.nonlarge_gcptrs_max
+        #
         self.nursery      = NULL
         self.nursery_free = NULL
         self.nursery_top  = NULL
@@ -218,7 +238,7 @@
         else:
             #
             defaultsize = self.nursery_size
-            minsize = 18 * self.small_request_threshold
+            minsize = 2 * (self.nonlarge_gcptrs_max + 1)
             self.nursery_size = minsize
             self.allocate_nursery()
             #
@@ -229,66 +249,92 @@
                 newsize = generation.estimate_best_nursery_size()
                 if newsize <= 0:
                     newsize = defaultsize
+            newsize = max(newsize, minsize)
             #
             major_coll = base.read_float_from_env('PYPY_GC_MAJOR_COLLECT')
             if major_coll >= 1.0:
                 self.major_collection_threshold = major_coll
             #
+            min_heap_size = base.read_uint_from_env('PYPY_GC_MIN')
+            if min_heap_size > 0:
+                self.min_heap_size = float(min_heap_size)
+            else:
+                # defaults to 8 times the nursery
+                self.min_heap_size = newsize * 8
+            #
             max_heap_size = base.read_uint_from_env('PYPY_GC_MAX')
             if max_heap_size > 0:
                 self.max_heap_size = float(max_heap_size)
             #
             self.minor_collection()    # to empty the nursery
             llarena.arena_free(self.nursery)
-            self.nursery_size = max(newsize, minsize)
+            self.nursery_size = newsize
             self.allocate_nursery()
 
 
     def allocate_nursery(self):
         debug_start("gc-set-nursery-size")
         debug_print("nursery size:", self.nursery_size)
-        # the start of the nursery: we actually allocate a tiny bit more for
+        # the start of the nursery: we actually allocate a bit more for
         # the nursery than really needed, to simplify pointer arithmetic
-        # in malloc_fixedsize_clear().
-        extra = self.small_request_threshold
-        self.nursery = llarena.arena_malloc(self.nursery_size + extra, True)
+        # in malloc_fixedsize_clear().  The few extra pages are never used
+        # anyway so it doesn't even count.
+        extra = self.nonlarge_gcptrs_max + 1
+        self.nursery = llarena.arena_malloc(self.nursery_size + extra, 2)
         if not self.nursery:
             raise MemoryError("cannot allocate nursery")
         # the current position in the nursery:
         self.nursery_free = self.nursery
         # the end of the nursery:
         self.nursery_top = self.nursery + self.nursery_size
-        # initialize the threshold, a bit arbitrarily
-        self.next_major_collection_threshold = (
-            self.nursery_size * self.major_collection_threshold)
+        # initialize the threshold
+        self.min_heap_size = max(self.min_heap_size, self.nursery_size *
+                                              self.major_collection_threshold)
+        self.set_major_threshold_from(0.0)
         debug_stop("gc-set-nursery-size")
 
+    def set_major_threshold_from(self, threshold):
+        # Set the next_major_collection_threshold.
+        if threshold < self.min_heap_size:
+            threshold = self.min_heap_size
+        #
+        if self.max_heap_size > 0.0 and threshold > self.max_heap_size:
+            threshold = self.max_heap_size
+            bounded = True
+        else:
+            bounded = False
+        #
+        self.next_major_collection_threshold = threshold
+        return bounded
+
 
     def malloc_fixedsize_clear(self, typeid, size, can_collect=True,
                                needs_finalizer=False, contains_weakptr=False):
         ll_assert(can_collect, "!can_collect")
         size_gc_header = self.gcheaderbuilder.size_gc_header
         totalsize = size_gc_header + size
-        rawtotalsize = llmemory.raw_malloc_usage(totalsize)
+        rawtotalsize = raw_malloc_usage(totalsize)
         #
         # If the object needs a finalizer, ask for a rawmalloc.
         # The following check should be constant-folded.
         if needs_finalizer:
             ll_assert(not contains_weakptr,
                      "'needs_finalizer' and 'contains_weakptr' both specified")
-            result = self.malloc_with_finalizer(typeid, totalsize)
+            obj = self.external_malloc(typeid, 0)
+            self.objects_with_finalizers.append(obj)
         #
-        # If totalsize is greater than small_request_threshold, ask for
-        # a rawmalloc.  The following check should be constant-folded.
-        elif rawtotalsize > self.small_request_threshold:
+        # If totalsize is greater than nonlarge_max (which should never be
+        # the case in practice), ask for a rawmalloc.  The following check
+        # should be constant-folded.
+        elif rawtotalsize > self.nonlarge_max:
             ll_assert(not contains_weakptr,
                       "'contains_weakptr' specified for a large object")
-            result = self._external_malloc(typeid, totalsize)
+            obj = self.external_malloc(typeid, 0)
             #
         else:
             # If totalsize is smaller than minimal_size_in_nursery, round it
             # up.  The following check should also be constant-folded.
-            min_size = llmemory.raw_malloc_usage(self.minimal_size_in_nursery)
+            min_size = raw_malloc_usage(self.minimal_size_in_nursery)
             if rawtotalsize < min_size:
                 totalsize = rawtotalsize = min_size
             #
@@ -306,8 +352,10 @@
             # If it is a weakref, record it (check constant-folded).
             if contains_weakptr:
                 self.young_objects_with_weakrefs.append(result+size_gc_header)
+            #
+            obj = result + size_gc_header
         #
-        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
+        return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
 
 
     def malloc_varsize_clear(self, typeid, length, size, itemsize,
@@ -315,32 +363,41 @@
         ll_assert(can_collect, "!can_collect")
         size_gc_header = self.gcheaderbuilder.size_gc_header
         nonvarsize = size_gc_header + size
-        try:
-            varsize = ovfcheck(itemsize * length)
-            totalsize = ovfcheck(nonvarsize + varsize)
-        except OverflowError:
-            raise MemoryError
         #
-        # If totalsize is greater than small_request_threshold, ask for
-        # a rawmalloc.
-        if llmemory.raw_malloc_usage(totalsize) > self.small_request_threshold:
-            result = self._external_malloc_cardmark(typeid, totalsize, length)
+        # Compute the maximal length that makes the object still
+        # below 'nonlarge_max'.  All the following logic is usually
+        # constant-folded because self.nonlarge_max, size and itemsize
+        # are all constants (the arguments are constant due to
+        # inlining) and self.has_gcptr_in_varsize() is constant-folded.
+        if self.has_gcptr_in_varsize(typeid):
+            nonlarge_max = self.nonlarge_gcptrs_max
+        else:
+            nonlarge_max = self.nonlarge_max
+
+        if not raw_malloc_usage(itemsize):
+            too_many_items = raw_malloc_usage(nonvarsize) > nonlarge_max
+        else:
+            maxlength = nonlarge_max - raw_malloc_usage(nonvarsize)
+            maxlength = maxlength // raw_malloc_usage(itemsize)
+            too_many_items = length > maxlength
+
+        if too_many_items:
+            #
+            # If the total size of the object would be larger than
+            # 'nonlarge_max', then allocate it externally.
+            obj = self.external_malloc(typeid, length)
             #
         else:
-            # Round the size up to the next multiple of WORD.  Note that
-            # this is done only if totalsize <= self.small_request_threshold,
-            # i.e. it cannot overflow, and it keeps the property that
-            # totalsize <= self.small_request_threshold.
+            # With the above checks we know now that totalsize cannot be more
+            # than 'nonlarge_max'; in particular, the + and * cannot overflow.
+            totalsize = nonvarsize + itemsize * length
             totalsize = llarena.round_up_for_allocation(totalsize)
-            ll_assert(llmemory.raw_malloc_usage(totalsize) <=
-                      self.small_request_threshold,
-                      "round_up_for_allocation() rounded up too much?")
             #
             # 'totalsize' should contain at least the GC header and
             # the length word, so it should never be smaller than
             # 'minimal_size_in_nursery'
-            ll_assert(llmemory.raw_malloc_usage(totalsize) >=
-                      llmemory.raw_malloc_usage(self.minimal_size_in_nursery),
+            ll_assert(raw_malloc_usage(totalsize) >=
+                      raw_malloc_usage(self.minimal_size_in_nursery),
                       "malloc_varsize_clear(): totalsize < minimalsize")
             #
             # Get the memory from the nursery.  If there is not enough space
@@ -353,10 +410,12 @@
             # Build the object.
             llarena.arena_reserve(result, totalsize)
             self.init_gc_object(result, typeid, flags=0)
+            #
+            # Set the length and return the object.
+            obj = result + size_gc_header
+            (obj + offset_to_length).signed[0] = length
         #
-        # Set the length and return the object.
-        (result + size_gc_header + offset_to_length).signed[0] = length
-        return llmemory.cast_adr_to_ptr(result+size_gc_header, llmemory.GCREF)
+        return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
 
 
     def collect(self, gen=1):
@@ -389,105 +448,108 @@
     collect_and_reserve._dont_inline_ = True
 
 
-    def _full_collect_if_needed(self, reserving_size):
-        reserving_size = llmemory.raw_malloc_usage(reserving_size)
-        if (float(self.get_total_memory_used()) + reserving_size >
-                self.next_major_collection_threshold):
-            self.minor_collection()
-            self.major_collection(reserving_size)
-
-    def _external_malloc(self, typeid, totalsize):
-        """Allocate a large object using raw_malloc()."""
-        return self._external_malloc_cardmark(typeid, totalsize, 0)
-
-
-    def _external_malloc_cardmark(self, typeid, totalsize, length):
-        """Allocate a large object using raw_malloc(), possibly as an
-        object with card marking enabled, if its length is large enough.
-        'length' can be specified as 0 if the object is not varsized."""
+    def external_malloc(self, typeid, length):
+        """Allocate a large object using the ArenaCollection or
+        raw_malloc(), possibly as an object with card marking enabled,
+        if it has gc pointers in its var-sized part.  'length' should be
+        specified as 0 if the object is not varsized.  The returned
+        object is fully initialized and zero-filled."""
+        #
+        # Compute the total size, carefully checking for overflows.
+        size_gc_header = self.gcheaderbuilder.size_gc_header
+        nonvarsize = size_gc_header + self.fixed_size(typeid)
+        if length == 0:
+            # this includes the case of fixed-size objects, for which we
+            # should not even ask for the varsize_item_sizes().
+            totalsize = nonvarsize
+        else:
+            itemsize = self.varsize_item_sizes(typeid)
+            try:
+                varsize = ovfcheck(itemsize * length)
+                totalsize = ovfcheck(nonvarsize + varsize)
+            except OverflowError:
+                raise MemoryError
         #
         # If somebody calls this function a lot, we must eventually
         # force a full collection.
-        self._full_collect_if_needed(totalsize)
+        if (float(self.get_total_memory_used()) + raw_malloc_usage(totalsize) >
+                self.next_major_collection_threshold):
+            self.minor_collection()
+            self.major_collection(raw_malloc_usage(totalsize))
         #
-        # Check if we need to introduce the card marker bits area.
-        if (self.card_page_indices <= 0     # <- this check is constant-folded
-            or length < self.card_page_indices_min   # <- must be large enough
-            or not self.has_gcptr_in_varsize(typeid)):  # <- must contain ptrs
+        # Check if the object would fit in the ArenaCollection.
+        if raw_malloc_usage(totalsize) <= self.small_request_threshold:
+            #
+            # Yes.  Round up 'totalsize' (it cannot overflow and it
+            # must remain <= self.small_request_threshold).
+            totalsize = llarena.round_up_for_allocation(totalsize)
+            ll_assert(raw_malloc_usage(totalsize) <=
+                      self.small_request_threshold,
+                      "rounding up made totalsize > small_request_threshold")
             #
-            # In these cases, we don't want a card marker bits area.
-            cardheadersize = 0
+            # Allocate from the ArenaCollection and clear the memory returned.
+            result = self.ac.malloc(totalsize)
+            llmemory.raw_memclear(result, totalsize)
             extra_flags = 0
             #
         else:
-            # Reserve N extra words containing card bits before the object.
-            extra_words = self.card_marking_words_for_length(length)
-            cardheadersize = WORD * extra_words
-            extra_flags = GCFLAG_HAS_CARDS
-        #
-        allocsize = cardheadersize + llmemory.raw_malloc_usage(totalsize)
-        #
-        # Allocate the object using arena_malloc(), which we assume here
-        # is just the same as raw_malloc(), but allows the extra flexibility
-        # of saying that we have extra words in the header.
-        arena = llarena.arena_malloc(allocsize, False)
-        if not arena:
-            raise MemoryError("cannot allocate large object")
-        #
-        # Clear it using method 2 of llarena.arena_reset(), which is the
-        # same as just a raw_memclear().
-        llarena.arena_reset(arena, allocsize, 2)
-        #
-        # Reserve the card mark as a list of single bytes
-        # (the loop is empty in C).
-        i = 0
-        while i < cardheadersize:
-            llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char))
-            i += 1
-        #
-        # Initialize the object.
-        result = arena + cardheadersize
-        llarena.arena_reserve(result, totalsize)
-        self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags)
-        #
-        # Record the newly allocated object and its size.
-        size_gc_header = self.gcheaderbuilder.size_gc_header
-        self.rawmalloced_total_size += llmemory.raw_malloc_usage(totalsize)
-        self.rawmalloced_objects.append(result + size_gc_header)
-        return result
-    _external_malloc_cardmark._dont_inline_ = True
-
-
-    def _malloc_nonmovable(self, typeid, totalsize):
-        """Allocate an object non-movable."""
-        #
-        rawtotalsize = llmemory.raw_malloc_usage(totalsize)
-        if rawtotalsize > self.small_request_threshold:
+            # No, so proceed to allocate it externally with raw_malloc().
+            # Check if we need to introduce the card marker bits area.
+            if (self.card_page_indices <= 0  # <- this check is constant-folded
+                or not self.has_gcptr_in_varsize(typeid) or
+                raw_malloc_usage(totalsize) <= self.nonlarge_gcptrs_max):
+                #
+                # In these cases, we don't want a card marker bits area.
+                # This case also includes all fixed-size objects.
+                cardheadersize = 0
+                extra_flags = 0
+                #
+            else:
+                # Reserve N extra words containing card bits before the object.
+                extra_words = self.card_marking_words_for_length(length)
+                cardheadersize = WORD * extra_words
+                extra_flags = GCFLAG_HAS_CARDS
+            #
+            # Detect very rare cases of overflow
+            if raw_malloc_usage(totalsize) > (sys.maxint - (WORD-1)
+                                              - cardheadersize):
+                raise MemoryError("rare case of overflow")
+            #
+            # Now we know that the following computations cannot overflow.
+            # Note that round_up_for_allocation() is also needed to get the
+            # correct number added to 'rawmalloced_total_size'.
+            allocsize = (cardheadersize + raw_malloc_usage(
+                            llarena.round_up_for_allocation(totalsize)))
+            #
+            # Allocate the object using arena_malloc(), which we assume here
+            # is just the same as raw_malloc(), but allows the extra
+            # flexibility of saying that we have extra words in the header.
+            # The memory returned is cleared by a raw_memclear().
+            arena = llarena.arena_malloc(allocsize, 2)
+            if not arena:
+                raise MemoryError("cannot allocate large object")
+            #
+            # Reserve the card mark bits as a list of single bytes
+            # (the loop is empty in C).
+            i = 0
+            while i < cardheadersize:
+                llarena.arena_reserve(arena + i, llmemory.sizeof(lltype.Char))
+                i += 1
             #
-            # The size asked for is too large for the ArenaCollection.
-            return self._external_malloc(typeid, totalsize)
-        #
-        totalsize = llarena.round_up_for_allocation(totalsize)
-        #
-        # If somebody calls _malloc_nonmovable() a lot, we must eventually
-        # force a full collection.
-        self._full_collect_if_needed(totalsize)
-        #
-        # Ask the ArenaCollection to do the malloc.
-        result = self.ac.malloc(totalsize)
-        llmemory.raw_memclear(result, totalsize)
-        self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS)
-        return result
-
-
-    def malloc_with_finalizer(self, typeid, totalsize):
-        """Allocate an object with a finalizer."""
+            # Reserve the actual object.  (This is also a no-op in C).
+            result = arena + cardheadersize
+            llarena.arena_reserve(result, totalsize)
+            #
+            # Record the newly allocated object and its full malloced size.
+            self.rawmalloced_total_size += allocsize
+            self.rawmalloced_objects.append(result + size_gc_header)
         #
-        result = self._malloc_nonmovable(typeid, totalsize)
-        size_gc_header = self.gcheaderbuilder.size_gc_header
-        self.objects_with_finalizers.append(result + size_gc_header)
-        return result
-    malloc_with_finalizer._dont_inline_ = True
+        # Common code to fill the header and length of the object.
+        self.init_gc_object(result, typeid, GCFLAG_NO_YOUNG_PTRS | extra_flags)
+        if self.is_varsize(typeid):
+            offset_to_length = self.varsize_offset_to_length(typeid)
+            (result + size_gc_header + offset_to_length).signed[0] = length
+        return result + size_gc_header
 
 
     # ----------
@@ -502,6 +564,11 @@
     def can_malloc_nonmovable(self):
         return True
 
+    def can_optimize_clean_setarrayitems(self):
+        if self.card_page_indices > 0:
+            return False
+        return MovingGCBase.can_optimize_clean_setarrayitems(self)
+
     def can_move(self, obj):
         """Overrides the parent can_move()."""
         return self.is_in_nursery(obj)
@@ -529,37 +596,16 @@
 
 
     def malloc_fixedsize_nonmovable(self, typeid):
-        """NOT_RPYTHON: not tested translated"""
-        size_gc_header = self.gcheaderbuilder.size_gc_header
-        totalsize = size_gc_header + self.fixed_size(typeid)
-        #
-        result = self._malloc_nonmovable(typeid, totalsize)
-        obj = result + size_gc_header
+        obj = self.external_malloc(typeid, 0)
         return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
 
     def malloc_varsize_nonmovable(self, typeid, length):
-        size_gc_header = self.gcheaderbuilder.size_gc_header
-        nonvarsize = size_gc_header + self.fixed_size(typeid)
-        itemsize = self.varsize_item_sizes(typeid)
-        offset_to_length = self.varsize_offset_to_length(typeid)
-        try:
-            varsize = ovfcheck(itemsize * length)
-            totalsize = ovfcheck(nonvarsize + varsize)
-        except OverflowError:
-            raise MemoryError
-        #
-        result = self._malloc_nonmovable(typeid, totalsize)
-        obj = result + size_gc_header
-        (obj + offset_to_length).signed[0] = length
+        obj = self.external_malloc(typeid, length)
         return llmemory.cast_adr_to_ptr(obj, llmemory.GCREF)
 
     def malloc_nonmovable(self, typeid, length, zero):
         # helper for testing, same as GCBase.malloc
-        if self.is_varsize(typeid):
-            gcref = self.malloc_varsize_nonmovable(typeid, length)
-        else:
-            gcref = self.malloc_fixedsize_nonmovable(typeid)
-        return llmemory.cast_ptr_to_adr(gcref)
+        return self.external_malloc(typeid, length or 0)    # None -> 0
 
 
     # ----------
@@ -647,8 +693,9 @@
                   "unexpected GCFLAG_CARDS_SET")
         # if the GCFLAG_HAS_CARDS is set, check that all bits are zero now
         if self.header(obj).tid & GCFLAG_HAS_CARDS:
-            ll_assert(self.card_page_indices > 0,
-                      "GCFLAG_HAS_CARDS but not using card marking")
+            if self.card_page_indices <= 0:
+                ll_assert(False, "GCFLAG_HAS_CARDS but not using card marking")
+                return
             typeid = self.get_type_id(obj)
             ll_assert(self.has_gcptr_in_varsize(typeid),
                       "GCFLAG_HAS_CARDS but not has_gcptr_in_varsize")
@@ -675,19 +722,23 @@
     #  "if addr_struct.int0 & JIT_WB_IF_FLAG: remember_young_pointer()")
     JIT_WB_IF_FLAG = GCFLAG_NO_YOUNG_PTRS
 
-    def write_barrier(self, newvalue, addr_struct):
+    @classmethod
+    def JIT_max_size_of_young_obj(cls):
+        return cls.TRANSLATION_PARAMS['large_object']
+
+    def write_barrier(self, addr_struct):
         if self.header(addr_struct).tid & GCFLAG_NO_YOUNG_PTRS:
-            self.remember_young_pointer(addr_struct, newvalue)
+            self.remember_young_pointer(addr_struct)
 
-    def write_barrier_from_array(self, newvalue, addr_array, index):
+    def write_barrier_from_array(self, addr_array, index):
         if self.header(addr_array).tid & GCFLAG_NO_YOUNG_PTRS:
             if self.card_page_indices > 0:     # <- constant-folded
-                self.remember_young_pointer_from_array(addr_array, index,
-                                                       newvalue)
+                self.remember_young_pointer_from_array(addr_array, index)
             else:
-                self.remember_young_pointer(addr_array, newvalue)
+                self.remember_young_pointer(addr_array)
 
     def _init_writebarrier_logic(self):
+        DEBUG = self.DEBUG
         # The purpose of attaching remember_young_pointer to the instance
         # instead of keeping it as a regular method is to help the JIT call it.
         # Additionally, it makes the code in write_barrier() marginally smaller
@@ -695,30 +746,22 @@
         # For x86, there is also an extra requirement: when the JIT calls
         # remember_young_pointer(), it assumes that it will not touch the SSE
         # registers, so it does not save and restore them (that's a *hack*!).
-        def remember_young_pointer(addr_struct, addr):
-            # 'addr_struct' is the address of the object in which we write;
-            # 'addr' is the address that we write in 'addr_struct'.
-            ll_assert(not self.is_in_nursery(addr_struct),
-                      "nursery object with GCFLAG_NO_YOUNG_PTRS")
-            # if we have tagged pointers around, we first need to check whether
-            # we have valid pointer here, otherwise we can do it after the
-            # is_in_nursery check
-            if (self.config.taggedpointers and
-                not self.is_valid_gc_object(addr)):
-                return
-            #
-            # Core logic: if the 'addr' is in the nursery, then we need
+        def remember_young_pointer(addr_struct):
+            # 'addr_struct' is the address of the object in which we write.
+            if DEBUG:
+                ll_assert(not self.is_in_nursery(addr_struct),
+                          "nursery object with GCFLAG_NO_YOUNG_PTRS")
+            #
+            # We assume that what we are writing is a pointer to the nursery
+            # (and don't care for the fact that this new pointer may not
+            # actually point to the nursery, which seems ok).  What we need is
             # to remove the flag GCFLAG_NO_YOUNG_PTRS and add the old object
             # to the list 'old_objects_pointing_to_young'.  We know that
             # 'addr_struct' cannot be in the nursery, because nursery objects
             # never have the flag GCFLAG_NO_YOUNG_PTRS to start with.
+            self.old_objects_pointing_to_young.append(addr_struct)
             objhdr = self.header(addr_struct)
-            if self.is_in_nursery(addr):
-                self.old_objects_pointing_to_young.append(addr_struct)
-                objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
-            elif (not self.config.taggedpointers and
-                  not self.is_valid_gc_object(addr)):
-                return
+            objhdr.tid &= ~GCFLAG_NO_YOUNG_PTRS
             #
             # Second part: if 'addr_struct' is actually a prebuilt GC
             # object and it's the first time we see a write to it, we
@@ -737,17 +780,16 @@
 
 
     def _init_writebarrier_with_card_marker(self):
-        def remember_young_pointer_from_array(addr_array, index, addr):
+        def remember_young_pointer_from_array(addr_array, index):
             # 'addr_array' is the address of the object in which we write,
             # which must have an array part;  'index' is the index of the
-            # item that is (or contains) the pointer that we write;
-            # 'addr' is the address that we write in the array.
+            # item that is (or contains) the pointer that we write.
             objhdr = self.header(addr_array)
             if objhdr.tid & GCFLAG_HAS_CARDS == 0:
                 #
                 # no cards, use default logic.  The 'nocard_logic()' is just
                 # 'remember_young_pointer()', but forced to be inlined here.
-                nocard_logic(addr_array, addr)
+                nocard_logic(addr_array)
                 return
             #
             # 'addr_array' is a raw_malloc'ed array with card markers
@@ -764,22 +806,13 @@
             if byte & bitmask:
                 return
             #
-            # As in remember_young_pointer, check if 'addr' is a valid
-            # pointer, in case it can be a tagged integer
-            if (self.config.taggedpointers and
-                not self.is_valid_gc_object(addr)):
-                return
-            #
-            # If the 'addr' is in the nursery, then we need to set the flag.
-            # Note that the following check is done after the bit check
-            # above, because it is expected that the "bit already set"
-            # situation is the most common.
-            if self.is_in_nursery(addr):
-                addr_byte.char[0] = chr(byte | bitmask)
-                #
-                if objhdr.tid & GCFLAG_CARDS_SET == 0:
-                    self.old_objects_with_cards_set.append(addr_array)
-                    objhdr.tid |= GCFLAG_CARDS_SET
+            # We set the flag (even if the newly written address does not
+            # actually point to the nursery -- like remember_young_pointer()).
+            addr_byte.char[0] = chr(byte | bitmask)
+            #
+            if objhdr.tid & GCFLAG_CARDS_SET == 0:
+                self.old_objects_with_cards_set.append(addr_array)
+                objhdr.tid |= GCFLAG_CARDS_SET
 
         nocard_logic = func_with_new_name(self.remember_young_pointer,
                                           'remember_young_pointer_nocard')
@@ -934,6 +967,8 @@
                         if cardbyte & 1:
                             if interval_stop > length:
                                 interval_stop = length
+                                ll_assert(cardbyte <= 1 and bytes == 0,
+                                          "premature end of object")
                             self.trace_and_drag_out_of_nursery_partial(
                                 obj, interval_start, interval_stop)
                         #
@@ -997,7 +1032,7 @@
         if self.header(obj).tid & GCFLAG_HAS_SHADOW == 0:
             #
             # Common case: allocate a new nonmovable location for it.
-            newhdr = self.ac.malloc(totalsize)
+            newhdr = self._malloc_out_of_nursery(totalsize)
             #
         else:
             # The object has already a shadow.
@@ -1035,6 +1070,33 @@
         self.old_objects_pointing_to_young.append(newobj)
 
 
+    def _malloc_out_of_nursery(self, totalsize):
+        """Allocate non-movable memory for an object of the given
+        'totalsize' that lives so far in the nursery."""
+        if raw_malloc_usage(totalsize) <= self.small_request_threshold:
+            # most common path
+            return self.ac.malloc(totalsize)
+        else:
+            # for nursery objects that are not small
+            return self._malloc_out_of_nursery_nonsmall(totalsize)
+    _malloc_out_of_nursery._always_inline_ = True
+
+    def _malloc_out_of_nursery_nonsmall(self, totalsize):
+        # 'totalsize' should be aligned.
+        ll_assert(raw_malloc_usage(totalsize) & (WORD-1) == 0,
+                  "misaligned totalsize in _malloc_out_of_nursery_nonsmall")
+        #
+        arena = llarena.arena_malloc(raw_malloc_usage(totalsize), False)
+        if not arena:
+            raise MemoryError("cannot allocate object")
+        llarena.arena_reserve(arena, totalsize)
+        #
+        size_gc_header = self.gcheaderbuilder.size_gc_header
+        self.rawmalloced_total_size += raw_malloc_usage(totalsize)
+        self.rawmalloced_objects.append(arena + size_gc_header)
+        return arena
+
+
     # ----------
     # Full collection
 
@@ -1104,30 +1166,26 @@
         # Set the threshold for the next major collection to be when we
         # have allocated 'major_collection_threshold' times more than
         # we currently have.
-        self.next_major_collection_threshold = (
+        bounded = self.set_major_threshold_from(
             (self.get_total_memory_used() * self.major_collection_threshold)
             + reserving_size)
         #
         # Max heap size: gives an upper bound on the threshold.  If we
         # already have at least this much allocated, raise MemoryError.
-        if (self.max_heap_size > 0.0 and
-                self.next_major_collection_threshold > self.max_heap_size):
+        if bounded and (float(self.get_total_memory_used()) + reserving_size >=
+                        self.next_major_collection_threshold):
             #
-            self.next_major_collection_threshold = self.max_heap_size
-            if (float(self.get_total_memory_used()) + reserving_size >=
-                    self.next_major_collection_threshold):
-                #
-                # First raise MemoryError, giving the program a chance to
-                # quit cleanly.  It might still allocate in the nursery,
-                # which might eventually be emptied, triggering another
-                # major collect and (possibly) reaching here again with an
-                # even higher memory consumption.  To prevent it, if it's
-                # the second time we are here, then abort the program.
-                if self.max_heap_size_already_raised:
-                    llop.debug_fatalerror(lltype.Void,
-                                          "Using too much memory, aborting")
-                self.max_heap_size_already_raised = True
-                raise MemoryError
+            # First raise MemoryError, giving the program a chance to
+            # quit cleanly.  It might still allocate in the nursery,
+            # which might eventually be emptied, triggering another
+            # major collect and (possibly) reaching here again with an
+            # even higher memory consumption.  To prevent it, if it's
+            # the second time we are here, then abort the program.
+            if self.max_heap_size_already_raised:
+                llop.debug_fatalerror(lltype.Void,
+                                      "Using too much memory, aborting")
+            self.max_heap_size_already_raised = True
+            raise MemoryError
         #
         # At the end, we can execute the finalizers of the objects
         # listed in 'run_finalizers'.  Note that this will typically do
@@ -1159,8 +1217,7 @@
                 self.rawmalloced_objects.append(obj)
             else:
                 totalsize = size_gc_header + self.get_size(obj)
-                rawtotalsize = llmemory.raw_malloc_usage(totalsize)
-                self.rawmalloced_total_size -= rawtotalsize
+                allocsize = raw_malloc_usage(totalsize)
                 arena = llarena.getfakearenaaddress(obj - size_gc_header)
                 #
                 # Must also include the card marker area, if any
@@ -1175,8 +1232,10 @@
                     length = (obj + offset_to_length).signed[0]
                     extra_words = self.card_marking_words_for_length(length)
                     arena -= extra_words * WORD
+                    allocsize += extra_words * WORD
                 #
                 llarena.arena_free(arena)
+                self.rawmalloced_total_size -= allocsize
         #
         list.delete()
 
@@ -1260,7 +1319,8 @@
                 else:
                     size_gc_header = self.gcheaderbuilder.size_gc_header
                     size = self.get_size(obj)
-                    shadowhdr = self.ac.malloc(size_gc_header + size)
+                    shadowhdr = self._malloc_out_of_nursery(size_gc_header +
+                                                            size)
                     # initialize to an invalid tid *without* GCFLAG_VISITED,
                     # so that if the object dies before the next minor
                     # collection, the shadow will stay around but be collected
@@ -1454,7 +1514,7 @@
         self.total_memory_used = 0
 
     def malloc(self, size):
-        nsize = llmemory.raw_malloc_usage(size)
+        nsize = raw_malloc_usage(size)
         ll_assert(nsize > 0, "malloc: size is null or negative")
         ll_assert(nsize <= self.small_request_threshold,"malloc: size too big")
         ll_assert((nsize & (WORD-1)) == 0, "malloc: size is not aligned")

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/minimarkpage.py	Thu Sep 30 00:16:20 2010
@@ -4,6 +4,7 @@
 from pypy.rlib.debug import ll_assert
 
 WORD = LONG_BIT // 8
+WORD_POWER_2 = {32: 2, 64: 3}[LONG_BIT]
 NULL = llmemory.NULL
 
 
@@ -39,6 +40,9 @@
     # -- The chained list of free blocks.  If there are none, points to the
     #    first uninitialized block.
     ('freeblock', llmemory.Address),
+    # -- The structure above is 4 words, which is a good value:
+    #    '(1024-4) % N' is zero or very small for various small N's,
+    #    i.e. there is not much wasted space.
     )
 PAGE_PTR.TO.become(PAGE_HEADER)
 PAGE_NULL = lltype.nullptr(PAGE_HEADER)
@@ -87,7 +91,7 @@
         self.total_memory_used += nsize
         #
         # Get the page to use from the size
-        size_class = nsize / WORD
+        size_class = nsize >> WORD_POWER_2
         page = self.page_for_size[size_class]
         if page == PAGE_NULL:
             page = self.allocate_new_page(size_class)
@@ -190,7 +194,7 @@
         self.total_memory_used = r_uint(0)
         #
         # For each size class:
-        size_class = self.small_request_threshold / WORD
+        size_class = self.small_request_threshold >> WORD_POWER_2
         while size_class >= 1:
             #
             # Walk the pages in 'page_for_size[size_class]' and
@@ -336,7 +340,7 @@
 
 def _start_of_page_untranslated(addr, page_size):
     assert isinstance(addr, llarena.fakearenaaddress)
-    shift = 4     # for testing, we assume that the whole arena is not
+    shift = WORD  # for testing, we assume that the whole arena is not
                   # on a page boundary
     ofs = ((addr.offset - shift) // page_size) * page_size + shift
     return llarena.fakearenaaddress(addr.arena, ofs)

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_direct.py	Thu Sep 30 00:16:20 2010
@@ -86,19 +86,17 @@
 
     def write(self, p, fieldname, newvalue):
         if self.gc.needs_write_barrier:
-            newaddr = llmemory.cast_ptr_to_adr(newvalue)
             addr_struct = llmemory.cast_ptr_to_adr(p)
-            self.gc.write_barrier(newaddr, addr_struct)
+            self.gc.write_barrier(addr_struct)
         setattr(p, fieldname, newvalue)
 
     def writearray(self, p, index, newvalue):
         if self.gc.needs_write_barrier:
-            newaddr = llmemory.cast_ptr_to_adr(newvalue)
             addr_struct = llmemory.cast_ptr_to_adr(p)
             if hasattr(self.gc, 'write_barrier_from_array'):
-                self.gc.write_barrier_from_array(newaddr, addr_struct, index)
+                self.gc.write_barrier_from_array(addr_struct, index)
             else:
-                self.gc.write_barrier(newaddr, addr_struct)
+                self.gc.write_barrier(addr_struct)
         p[index] = newvalue
 
     def malloc(self, TYPE, n=None):
@@ -507,8 +505,7 @@
                 for index, expected_x in nums.items():
                     assert a[index].x == expected_x
             self.stackroots.pop()
-    test_card_marker.GC_PARAMS = {"card_page_indices": 4,
-                                  "card_page_indices_min": 7}
+    test_card_marker.GC_PARAMS = {"card_page_indices": 4}
 
 class TestMiniMarkGCFull(DirectGCTest):
     from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimark.py	Thu Sep 30 00:16:20 2010
@@ -5,26 +5,6 @@
 # Note that most tests are in test_direct.py.
 
 
-def test_stringbuilder_default_initsize_is_small():
-    # Check that pypy.rlib.rstring.INIT_SIZE is short enough to let
-    # the allocated object be considered as a "small" object.
-    # Otherwise it would not be allocated in the nursery at all,
-    # which is kind of bad (and also prevents shrink_array() from
-    # being useful).
-    from pypy.rlib.rstring import INIT_SIZE
-    from pypy.rpython.lltypesystem.rstr import STR, UNICODE
-    #
-    size_gc_header = llmemory.raw_malloc_usage(
-        llmemory.sizeof(llmemory.Address))
-    #
-    size1 = llmemory.raw_malloc_usage(llmemory.sizeof(STR, INIT_SIZE))
-    size1 = size_gc_header + size1
-    assert size1 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"]
-    #
-    size2 = llmemory.raw_malloc_usage(llmemory.sizeof(UNICODE, INIT_SIZE))
-    size2 = size_gc_header + size2
-    assert size2 <= MiniMarkGC.TRANSLATION_PARAMS["small_request_threshold"]
-
 def test_card_marking_words_for_length():
     gc = MiniMarkGC(None, card_page_indices=128)
     assert gc.card_page_shift == 7

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gc/test/test_minimarkpage.py	Thu Sep 30 00:16:20 2010
@@ -7,22 +7,22 @@
 from pypy.rpython.lltypesystem.llmemory import cast_ptr_to_adr
 
 NULL = llmemory.NULL
-SHIFT = 4
+SHIFT = WORD
 hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
 
 
 def test_allocate_arena():
-    ac = ArenaCollection(SHIFT + 8*20, 8, 1)
+    ac = ArenaCollection(SHIFT + 16*20, 16, 1)
     ac.allocate_new_arena()
     assert ac.num_uninitialized_pages == 20
-    ac.uninitialized_pages + 8*20   # does not raise
-    py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 1")
+    ac.uninitialized_pages + 16*20   # does not raise
+    py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 1")
     #
-    ac = ArenaCollection(SHIFT + 8*20 + 7, 8, 1)
+    ac = ArenaCollection(SHIFT + 16*20 + 7, 16, 1)
     ac.allocate_new_arena()
     assert ac.num_uninitialized_pages == 20
-    ac.uninitialized_pages + 8*20 + 7   # does not raise
-    py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 8*20 + 8")
+    ac.uninitialized_pages + 16*20 + 7   # does not raise
+    py.test.raises(llarena.ArenaError, "ac.uninitialized_pages + 16*20 + 16")
 
 
 def test_allocate_new_page():

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gctransform/framework.py	Thu Sep 30 00:16:20 2010
@@ -139,7 +139,7 @@
     def __init__(self, translator):
         from pypy.rpython.memory.gc.base import choose_gc_from_config
         from pypy.rpython.memory.gc.base import ARRAY_TYPEID_MAP
-        from pypy.rpython.memory.gc import inspect
+        from pypy.rpython.memory.gc import inspector
 
         super(FrameworkGCTransformer, self).__init__(translator, inline=True)
         if hasattr(self, 'GC_PARAMS'):
@@ -391,27 +391,27 @@
         else:
             self.id_ptr = None
 
-        self.get_rpy_roots_ptr = getfn(inspect.get_rpy_roots,
+        self.get_rpy_roots_ptr = getfn(inspector.get_rpy_roots,
                                        [s_gc],
                                        rgc.s_list_of_gcrefs(),
                                        minimal_transform=False)
-        self.get_rpy_referents_ptr = getfn(inspect.get_rpy_referents,
+        self.get_rpy_referents_ptr = getfn(inspector.get_rpy_referents,
                                            [s_gc, s_gcref],
                                            rgc.s_list_of_gcrefs(),
                                            minimal_transform=False)
-        self.get_rpy_memory_usage_ptr = getfn(inspect.get_rpy_memory_usage,
+        self.get_rpy_memory_usage_ptr = getfn(inspector.get_rpy_memory_usage,
                                               [s_gc, s_gcref],
                                               annmodel.SomeInteger(),
                                               minimal_transform=False)
-        self.get_rpy_type_index_ptr = getfn(inspect.get_rpy_type_index,
+        self.get_rpy_type_index_ptr = getfn(inspector.get_rpy_type_index,
                                             [s_gc, s_gcref],
                                             annmodel.SomeInteger(),
                                             minimal_transform=False)
-        self.is_rpy_instance_ptr = getfn(inspect.is_rpy_instance,
+        self.is_rpy_instance_ptr = getfn(inspector.is_rpy_instance,
                                          [s_gc, s_gcref],
                                          annmodel.SomeBool(),
                                          minimal_transform=False)
-        self.dump_rpy_heap_ptr = getfn(inspect.dump_rpy_heap,
+        self.dump_rpy_heap_ptr = getfn(inspector.dump_rpy_heap,
                                        [s_gc, annmodel.SomeInteger()],
                                        annmodel.s_Bool,
                                        minimal_transform=False)
@@ -426,7 +426,6 @@
         if GCClass.needs_write_barrier:
             self.write_barrier_ptr = getfn(GCClass.write_barrier.im_func,
                                            [s_gc,
-                                            annmodel.SomeAddress(),
                                             annmodel.SomeAddress()],
                                            annmodel.s_None,
                                            inline=True)
@@ -435,15 +434,13 @@
                 # func should not be a bound method, but a real function
                 assert isinstance(func, types.FunctionType)
                 self.write_barrier_failing_case_ptr = getfn(func,
-                                               [annmodel.SomeAddress(),
-                                                annmodel.SomeAddress()],
+                                               [annmodel.SomeAddress()],
                                                annmodel.s_None)
             func = getattr(GCClass, 'write_barrier_from_array', None)
             if func is not None:
                 self.write_barrier_from_array_ptr = getfn(func.im_func,
                                            [s_gc,
                                             annmodel.SomeAddress(),
-                                            annmodel.SomeAddress(),
                                             annmodel.SomeInteger()],
                                            annmodel.s_None,
                                            inline=True)
@@ -455,8 +452,7 @@
                     self.write_barrier_from_array_failing_case_ptr = \
                                              getfn(func,
                                                    [annmodel.SomeAddress(),
-                                                    annmodel.SomeInteger(),
-                                                    annmodel.SomeAddress()],
+                                                    annmodel.SomeInteger()],
                                                    annmodel.s_None)
         self.statistics_ptr = getfn(GCClass.statistics.im_func,
                                     [s_gc, annmodel.SomeInteger()],
@@ -610,8 +606,10 @@
             
         if self.write_barrier_ptr:
             self.clean_sets = (
-                find_clean_setarrayitems(self.collect_analyzer, graph).union(
-                find_initializing_stores(self.collect_analyzer, graph)))
+                find_initializing_stores(self.collect_analyzer, graph))
+            if self.gcdata.gc.can_optimize_clean_setarrayitems():
+                self.clean_sets = self.clean_sets.union(
+                    find_clean_setarrayitems(self.collect_analyzer, graph))
         super(FrameworkGCTransformer, self).transform_graph(graph)
         if self.write_barrier_ptr:
             self.clean_sets = None
@@ -1023,8 +1021,6 @@
             and not isinstance(v_newvalue, Constant)
             and v_struct.concretetype.TO._gckind == "gc"
             and hop.spaceop not in self.clean_sets):
-            v_newvalue = hop.genop("cast_ptr_to_adr", [v_newvalue],
-                                   resulttype = llmemory.Address)
             v_structaddr = hop.genop("cast_ptr_to_adr", [v_struct],
                                      resulttype = llmemory.Address)
             if (self.write_barrier_from_array_ptr is not None and
@@ -1034,14 +1030,12 @@
                 assert v_index.concretetype == lltype.Signed
                 hop.genop("direct_call", [self.write_barrier_from_array_ptr,
                                           self.c_const_gc,
-                                          v_newvalue,
                                           v_structaddr,
                                           v_index])
             else:
                 self.write_barrier_calls += 1
                 hop.genop("direct_call", [self.write_barrier_ptr,
                                           self.c_const_gc,
-                                          v_newvalue,
                                           v_structaddr])
         hop.rename('bare_' + opname)
 

Modified: pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/gcwrapper.py	Thu Sep 30 00:16:20 2010
@@ -94,7 +94,6 @@
                         assert (type(index) is int    # <- fast path
                                 or lltype.typeOf(index) == lltype.Signed)
                         self.gc.write_barrier_from_array(
-                            llmemory.cast_ptr_to_adr(newvalue),
                             llmemory.cast_ptr_to_adr(toplevelcontainer),
                             index)
                         wb = False
@@ -102,7 +101,6 @@
             #
             if wb:
                 self.gc.write_barrier(
-                    llmemory.cast_ptr_to_adr(newvalue),
                     llmemory.cast_ptr_to_adr(toplevelcontainer))
         llheap.setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue)
 

Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_gc.py	Thu Sep 30 00:16:20 2010
@@ -29,6 +29,7 @@
     GC_CAN_MALLOC_NONMOVABLE = True
     GC_CAN_SHRINK_ARRAY = False
     GC_CAN_SHRINK_BIG_ARRAY = False
+    BUT_HOW_BIG_IS_A_BIG_STRING = 3*WORD
 
     def setup_class(cls):
         cls._saved_logstate = py.log._getstate()
@@ -495,7 +496,8 @@
         # with larger numbers, it gets allocated outside the semispace
         # with some GCs.
         flag = self.GC_CAN_SHRINK_BIG_ARRAY
-        assert self.interpret(f, [12, 0, flag]) == 0x62024241
+        bigsize = self.BUT_HOW_BIG_IS_A_BIG_STRING
+        assert self.interpret(f, [bigsize, 0, flag]) == 0x62024241
 
     def test_tagged_simple(self):
         from pypy.rlib.objectmodel import UnboxedValue
@@ -770,7 +772,7 @@
     from pypy.rpython.memory.gc.minimark import MiniMarkGC as GCClass
     GC_CAN_SHRINK_BIG_ARRAY = False
     GC_CAN_MALLOC_NONMOVABLE = True
+    BUT_HOW_BIG_IS_A_BIG_STRING = 11*WORD
 
 class TestMiniMarkGCCardMarking(TestMiniMarkGC):
-    GC_PARAMS = {'card_page_indices': 4,
-                 'card_page_indices_min': 10}
+    GC_PARAMS = {'card_page_indices': 4}

Modified: pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py	(original)
+++ pypy/branch/fast-forward/pypy/rpython/memory/test/test_transformed_gc.py	Thu Sep 30 00:16:20 2010
@@ -1474,11 +1474,37 @@
                          'page_size': 16*WORD,
                          'arena_size': 64*WORD,
                          'small_request_threshold': 5*WORD,
+                         'large_object': 8*WORD,
+                         'large_object_gcptrs': 10*WORD,
                          'card_page_indices': 4,
-                         'card_page_indices_min': 10,
                          }
             root_stack_depth = 200
 
+    def define_no_clean_setarrayitems(cls):
+        # The optimization find_clean_setarrayitems() in
+        # gctransformer/framework.py does not work with card marking.
+        # Check that it is turned off.
+        S = lltype.GcStruct('S', ('x', lltype.Signed))
+        A = lltype.GcArray(lltype.Ptr(S))
+        def sub(lst):
+            lst[15] = lltype.malloc(S)   # 'lst' is set the single mark "12-15"
+            lst[15].x = 123
+            lst[0] = lst[15]   # that would be a "clean_setarrayitem"
+        def f():
+            lst = lltype.malloc(A, 16)   # 16 > 10
+            rgc.collect()
+            sub(lst)
+            null = lltype.nullptr(S)
+            lst[15] = null     # clear, so that A() is only visible via lst[0]
+            rgc.collect()      # -> crash
+            return lst[0].x
+        return f
+
+    def test_no_clean_setarrayitems(self):
+        run = self.runner("no_clean_setarrayitems")
+        res = run([])
+        assert res == 123
+
 # ________________________________________________________________
 # tagged pointers
 

Modified: pypy/branch/fast-forward/pypy/tool/progressbar.py
==============================================================================
--- pypy/branch/fast-forward/pypy/tool/progressbar.py	(original)
+++ pypy/branch/fast-forward/pypy/tool/progressbar.py	Thu Sep 30 00:16:20 2010
@@ -17,11 +17,11 @@
     )
     PADDING = 7
  
-    def __init__(self, color=None, width=None, block='█', empty=' '):
+    def __init__(self, color=None, width=None, block='.', empty=' '):
         """
         color -- color name (BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK)
        width -- bar width (optional)
-        block -- progress display character (default '█')
+        block -- progress display character (default '.')
         empty -- bar display character (default ' ')
         """
         if color:

Modified: pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py
==============================================================================
--- pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py	(original)
+++ pypy/branch/fast-forward/pypy/translator/c/gcc/trackgcroot.py	Thu Sep 30 00:16:20 2010
@@ -913,6 +913,7 @@
     visit_leaq = FunctionGcRootTracker._visit_lea
 
     visit_xorq = FunctionGcRootTracker.binary_insn
+    visit_xchgq = FunctionGcRootTracker._visit_xchg
 
     # FIXME: similar to visit_popl for 32-bit
     def visit_popq(self, line):

Modified: pypy/branch/fast-forward/pypy/translator/c/genc.py
==============================================================================
--- pypy/branch/fast-forward/pypy/translator/c/genc.py	(original)
+++ pypy/branch/fast-forward/pypy/translator/c/genc.py	Thu Sep 30 00:16:20 2010
@@ -592,7 +592,7 @@
             if sys.platform == 'win32':
                 python = sys.executable.replace('\\', '/') + ' '
             else:
-                python = ''
+                python = sys.executable + ' '
 
             if self.translator.platform.name == 'msvc':
                 lblofiles = []

Modified: pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py
==============================================================================
--- pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py	(original)
+++ pypy/branch/fast-forward/pypy/translator/c/test/test_lltyped.py	Thu Sep 30 00:16:20 2010
@@ -783,6 +783,17 @@
         res = fn()
         assert res == 42
 
+    def test_llarena(self):
+        from pypy.rpython.lltypesystem import llmemory, llarena
+        #
+        def f():
+            a = llarena.arena_malloc(800, False)
+            llarena.arena_reset(a, 800, 2)
+            llarena.arena_free(a)
+        #
+        fn = self.getcompiled(f, [])
+        fn()
+
     def test_padding_in_prebuilt_struct(self):
         from pypy.rpython.lltypesystem import rffi
         from pypy.rpython.tool import rffi_platform



More information about the Pypy-commit mailing list